Mirror of https://github.com/willmiao/ComfyUI-Lora-Manager.git (synced 2026-05-07 00:46:44 -03:00)

Compare commits 2dae4c1291...v1.0.3 (107 commits):
c4fa1631ee
506d763dc2
a2cd09b619
cdd77029b6
439679e15f
2640258902
b910388d54
083de395b1
4514ca94b7
62247bdd87
6d0d9600a7
70cd3f4e1b
a95c518b30
ba1800095e
39c083db79
55e9e4bb6f
0253d001e6
9998da3241
6666a72775
5f1bd894b9
1817142a7b
25fa175aa2
39643eb2bc
4ac78f8aa8
0bcca0ba68
72f8e0d1be
85b6c91192
908016cbd6
a5ac9cf81b
32875042bd
51fe7aa07e
db4726a961
e13d70248a
1c4919a3e8
18ddadc9ec
b6dd6938b0
b711ac468a
727d0ef043
9344d86332
d36b16c213
33a7f07558
4f599aeced
30db8c3d1d
05636712f0
d8e5fe1247
3e9210394a
4dd2c0526f
9bdb337962
f93baf5fc0
14cb7fec47
f3b3e0adad
ba3f15dbc6
8dc2a2f76b
316f17dd46
3dc10b1404
331889d872
06f1a82d4c
267082c712
a4cb51e96c
ca44c367b3
301ab14781
2626dbab8e
12bbb0572d
00f5c1e887
89b1675ec7
dcc7bd33b5
e5152108ba
1ed5eef985
a82f89d14a
16e30ea689
ad3bdddb72
9121306b06
ca0baf9462
20e50156a2
0b66bf5479
1e8aca4787
76ee59cdb9
a5191414cc
5b065b47d4
ceeab0c998
3b001a6cd8
95e5bc26d1
de3d0571f8
6f2a01dc86
c5c1b8fd2a
e97648c70b
8b85e083e2
9112cd3b62
7df4e8d037
4000b7f7e7
76c15105e6
b11c90e19b
9f5d2d0c18
a0dc5229f4
61c31ecbd0
1ae1b0d607
8dd849892d
03e1fa75c5
fefcaa4a45
701a6a6c44
0ef414d17e
75dccaef87
7e87ec9521
46522edb1b
a32325402e
05ebd7493d
90986bd795
2 .gitignore vendored
@@ -14,6 +14,8 @@ model_cache/

# agent
.opencode/
.claude/
.codex

# Vue widgets development cache (but keep build output)
vue-widgets/node_modules/

@@ -138,6 +138,13 @@ npm run test:coverage # Generate coverage report
- Run `python scripts/sync_translation_keys.py` after adding UI strings to `locales/en.json`
- Symlinks require normalized paths

## Git / Commit Messages

- Follow the style of recent repository commits when writing commit messages
- Prefer the repo's existing `feat(...)`, `fix(...)`, `chore:` style where applicable
- If the user has provided a GitHub issue link or issue ID for the task, mention that issue in the commit message, for example `(#871)`
- When unrelated local changes exist, stage and commit only the files relevant to the requested task

## Frontend UI Architecture

### 1. Standalone Web UI

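The guideline above about `scripts/sync_translation_keys.py` lines up with the `[TODO: Translate]` markers visible in the locales/de.json diff further down. A minimal sketch of what such a sync step could do, assuming it mirrors keys that exist in `locales/en.json` but are missing from another locale file (the real script's behavior may differ):

```python
# Hypothetical sketch of a translation-key sync; the real scripts/sync_translation_keys.py
# may work differently. Keys present in locales/en.json but missing from a target locale
# are copied over with a "[TODO: Translate]" prefix, matching the markers seen in de.json.
import json
from pathlib import Path


def sync_locale(en_path: Path, target_path: Path) -> None:
    en = json.loads(en_path.read_text(encoding="utf-8"))
    target = json.loads(target_path.read_text(encoding="utf-8"))

    def merge(src: dict, dst: dict) -> None:
        # Recurse through nested translation namespaces, adding only missing keys.
        for key, value in src.items():
            if isinstance(value, dict):
                merge(value, dst.setdefault(key, {}))
            elif key not in dst:
                dst[key] = f"[TODO: Translate] {value}"

    merge(en, target)
    target_path.write_text(
        json.dumps(target, ensure_ascii=False, indent=2) + "\n", encoding="utf-8"
    )


sync_locale(Path("locales/en.json"), Path("locales/de.json"))
```

Per the guideline above, such a sync is run after new UI strings are added to `locales/en.json`.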
@@ -7,6 +7,7 @@ try: # pragma: no cover - import fallback for pytest collection
    from .py.nodes.prompt import PromptLM
    from .py.nodes.text import TextLM
    from .py.nodes.lora_stacker import LoraStackerLM
    from .py.nodes.lora_stack_combiner import LoraStackCombinerLM
    from .py.nodes.save_image import SaveImageLM
    from .py.nodes.debug_metadata import DebugMetadataLM
    from .py.nodes.wanvideo_lora_select import WanVideoLoraSelectLM
@@ -39,6 +40,9 @@ except (
        "py.nodes.trigger_word_toggle"
    ).TriggerWordToggleLM
    LoraStackerLM = importlib.import_module("py.nodes.lora_stacker").LoraStackerLM
    LoraStackCombinerLM = importlib.import_module(
        "py.nodes.lora_stack_combiner"
    ).LoraStackCombinerLM
    SaveImageLM = importlib.import_module("py.nodes.save_image").SaveImageLM
    DebugMetadataLM = importlib.import_module("py.nodes.debug_metadata").DebugMetadataLM
    WanVideoLoraSelectLM = importlib.import_module(
@@ -63,6 +67,7 @@ NODE_CLASS_MAPPINGS = {
    UNETLoaderLM.NAME: UNETLoaderLM,
    TriggerWordToggleLM.NAME: TriggerWordToggleLM,
    LoraStackerLM.NAME: LoraStackerLM,
    LoraStackCombinerLM.NAME: LoraStackCombinerLM,
    SaveImageLM.NAME: SaveImageLM,
    DebugMetadataLM.NAME: DebugMetadataLM,
    WanVideoLoraSelectLM.NAME: WanVideoLoraSelectLM,

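The hunks above extend the import pattern used for node registration: the nodes are first imported relatively inside a `try` block, and on failure (for example when pytest collects the repository without installing it as a package) each class is re-resolved through `importlib` using a plain module path, then exported via `NODE_CLASS_MAPPINGS`. A condensed sketch of that pattern, not the exact file contents; the real `__init__.py` imports many more nodes and catches a broader tuple of exceptions:

```python
# Condensed sketch of the fallback-import pattern in __init__.py (assumes it runs inside
# the ComfyUI-Lora-Manager package, so the py.nodes modules are importable).
import importlib

try:  # pragma: no cover - import fallback for pytest collection
    # Normal case: the repo is imported as a package, so relative imports work.
    from .py.nodes.lora_stacker import LoraStackerLM
    from .py.nodes.save_image import SaveImageLM
except ImportError:
    # Fallback: resolve the same classes by plain module path, e.g. during pytest collection.
    LoraStackerLM = importlib.import_module("py.nodes.lora_stacker").LoraStackerLM
    SaveImageLM = importlib.import_module("py.nodes.save_image").SaveImageLM

# ComfyUI discovers custom nodes through this mapping exported from the package root.
NODE_CLASS_MAPPINGS = {
    LoraStackerLM.NAME: LoraStackerLM,
    SaveImageLM.NAME: SaveImageLM,
}
```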
@@ -9,17 +9,17 @@
|
||||
"Insomnia Art Designs",
|
||||
"megakirbs",
|
||||
"Brennok",
|
||||
"wackop",
|
||||
"2018cfh",
|
||||
"W+K+White",
|
||||
"wackop",
|
||||
"Takkan",
|
||||
"stone9k",
|
||||
"Carl G.",
|
||||
"$MetaSamsara",
|
||||
"itismyelement",
|
||||
"onesecondinosaur",
|
||||
"Carl G.",
|
||||
"stone9k",
|
||||
"Rosenthal",
|
||||
"Francisco Tatis",
|
||||
"Tobi_Swagg",
|
||||
"Andrew Wilson",
|
||||
"Greybush",
|
||||
"Gooohokrbe",
|
||||
@@ -29,18 +29,16 @@
|
||||
"VantAI",
|
||||
"runte3221",
|
||||
"FreelancerZ",
|
||||
"Julian V",
|
||||
"Edgar Tejeda",
|
||||
"Birdy",
|
||||
"Liam MacDougal",
|
||||
"Fraser Cross",
|
||||
"Polymorphic Indeterminate",
|
||||
"Birdy",
|
||||
"Marc Whiffen",
|
||||
"Kiba",
|
||||
"Jorge Hussni",
|
||||
"Reno Lam",
|
||||
"Kiba",
|
||||
"Skalabananen",
|
||||
"esthe",
|
||||
"Reno Lam",
|
||||
"sig",
|
||||
"Christian Byrne",
|
||||
"DM",
|
||||
@@ -49,24 +47,22 @@
|
||||
"J\\B/ 8r0wns0n",
|
||||
"Snaggwort",
|
||||
"Arlecchino Shion",
|
||||
"Charles Blakemore",
|
||||
"Rob Williams",
|
||||
"ClockDaemon",
|
||||
"KD",
|
||||
"Omnidex",
|
||||
"Tyler Trebuchon",
|
||||
"Release Cabrakan",
|
||||
"confiscated Zyra",
|
||||
"Tobi_Swagg",
|
||||
"SG",
|
||||
"carozzz",
|
||||
"James Dooley",
|
||||
"zenbound",
|
||||
"Buzzard",
|
||||
"jmack",
|
||||
"Adam Shaw",
|
||||
"Tee Gee",
|
||||
"Mark Corneglio",
|
||||
"SarcasticHashtag",
|
||||
"Anthony Rizzo",
|
||||
"tarek helmi",
|
||||
"Cosmosis",
|
||||
"iamresist",
|
||||
"RedrockVP",
|
||||
@@ -75,45 +71,34 @@
|
||||
"James Todd",
|
||||
"Steven Pfeiffer",
|
||||
"Tim",
|
||||
"Timmy",
|
||||
"Johnny",
|
||||
"Lisster",
|
||||
"Michael Wong",
|
||||
"Illrigger",
|
||||
"whudunit",
|
||||
"Tom Corrigan",
|
||||
"JackieWang",
|
||||
"fnkylove",
|
||||
"Julian V",
|
||||
"Steven Owens",
|
||||
"Yushio",
|
||||
"Vik71it",
|
||||
"lh qwe",
|
||||
"Echo",
|
||||
"Lilleman",
|
||||
"Robert Stacey",
|
||||
"PM",
|
||||
"Todd Keck",
|
||||
"Briton Heilbrun",
|
||||
"Mozzel",
|
||||
"Gingko Biloba",
|
||||
"Felipe dos Santos",
|
||||
"Penfore",
|
||||
"BadassArabianMofo",
|
||||
"Sterilized",
|
||||
"BadassArabianMofo",
|
||||
"Pascal Dahle",
|
||||
"Markus",
|
||||
"quarz",
|
||||
"Greg",
|
||||
"Douglas Gaspar",
|
||||
"Penfore",
|
||||
"JSST",
|
||||
"AlexDuKaNa",
|
||||
"George",
|
||||
"esthe",
|
||||
"lmsupporter",
|
||||
"Phil",
|
||||
"Charles Blakemore",
|
||||
"IamAyam",
|
||||
"wfpearl",
|
||||
"Rob Williams",
|
||||
"Baekdoosixt",
|
||||
"Jonathan Ross",
|
||||
"Jack B Nimble",
|
||||
@@ -125,127 +110,118 @@
|
||||
"contrite831",
|
||||
"Alex",
|
||||
"bh",
|
||||
"confiscated Zyra",
|
||||
"Marlon Daniels",
|
||||
"Starkselle",
|
||||
"Aaron Bleuer",
|
||||
"LacesOut!",
|
||||
"Graham Colehour",
|
||||
"greebles",
|
||||
"Adam Shaw",
|
||||
"Tee Gee",
|
||||
"Anthony Rizzo",
|
||||
"tarek helmi",
|
||||
"M Postkasse",
|
||||
"Tomohiro Baba",
|
||||
"David Ortega",
|
||||
"ASLPro3D",
|
||||
"Jacob Hoehler",
|
||||
"FinalyFree",
|
||||
"Weasyl",
|
||||
"Lex Song",
|
||||
"Timmy",
|
||||
"Johnny",
|
||||
"Cory Paza",
|
||||
"Tak",
|
||||
"Gonzalo Andre Allendes Lopez",
|
||||
"Zach Gonser",
|
||||
"Big Red",
|
||||
"Jimmy Ledbetter",
|
||||
"whudunit",
|
||||
"Luc Job",
|
||||
"dl0901dm",
|
||||
"Philip Hempel",
|
||||
"corde",
|
||||
"Nick Walker",
|
||||
"lh qwe",
|
||||
"Bishoujoker",
|
||||
"conner",
|
||||
"aai",
|
||||
"Yaboi",
|
||||
"Briton Heilbrun",
|
||||
"Tori",
|
||||
"wildnut",
|
||||
"Princess Bright Eyes",
|
||||
"Damon Cunliffe",
|
||||
"CryptoTraderJK",
|
||||
"Davaitamin",
|
||||
"AbstractAss",
|
||||
"Felipe dos Santos",
|
||||
"ViperC",
|
||||
"jean jahren",
|
||||
"Aleksander Wujczyk",
|
||||
"AM Kuro",
|
||||
"jean jahren",
|
||||
"Ran C",
|
||||
"tedcor",
|
||||
"Markus",
|
||||
"S Sang",
|
||||
"MagnaInsomnia",
|
||||
"Akira_HentAI",
|
||||
"Karl P.",
|
||||
"Akira_HentAI",
|
||||
"MagnaInsomnia",
|
||||
"Gordon Cole",
|
||||
"yuxz69",
|
||||
"MadSpin",
|
||||
"Douglas Gaspar",
|
||||
"AlexDuKaNa",
|
||||
"George",
|
||||
"andrew.tappan",
|
||||
"dw",
|
||||
"N/A",
|
||||
"The Spawn",
|
||||
"Phil",
|
||||
"graysock",
|
||||
"Greenmoustache",
|
||||
"zounic",
|
||||
"Gamalonia",
|
||||
"fancypants",
|
||||
"Vir",
|
||||
"Joboshy",
|
||||
"Digital",
|
||||
"JaxMax",
|
||||
"takyamtom",
|
||||
"Bohemian Corporal",
|
||||
"奚明 刘",
|
||||
"Dan",
|
||||
"Seth Christensen",
|
||||
"Jwk0205",
|
||||
"Bro Xie",
|
||||
"Draven T",
|
||||
"yer fey",
|
||||
"준희 김",
|
||||
"batblue",
|
||||
"carey6409",
|
||||
"Olive",
|
||||
"太郎 ゲーム",
|
||||
"Some Guy Named Barry",
|
||||
"jinxedx",
|
||||
"Aquatic Coffee",
|
||||
"Max Marklund",
|
||||
"Tomohiro Baba",
|
||||
"David Ortega",
|
||||
"AELOX",
|
||||
"Dankin",
|
||||
"Nicfit23",
|
||||
"Noora",
|
||||
"ethanfel",
|
||||
"wamekukyouzin",
|
||||
"drum matthieu",
|
||||
"Dogmaster",
|
||||
"Matt Wenzel",
|
||||
"Mattssn",
|
||||
"Frank Nitty",
|
||||
"Lex Song",
|
||||
"John Saveas",
|
||||
"Focuschannel",
|
||||
"Christopher Michel",
|
||||
"Serge Bekenkamp",
|
||||
"Jimmy Ledbetter",
|
||||
"LeoZero",
|
||||
"Antonio Pontes",
|
||||
"ApathyJones",
|
||||
"nahinahi9",
|
||||
"Anthony Faxlandez",
|
||||
"Dustin Chen",
|
||||
"dan",
|
||||
"Blackfish95",
|
||||
"Yaboi",
|
||||
"Mouthlessman",
|
||||
"Steam Steam",
|
||||
"Paul Kroll",
|
||||
"Damon Cunliffe",
|
||||
"CryptoTraderJK",
|
||||
"Davaitamin",
|
||||
"otaku fra",
|
||||
"semicolon drainpipe",
|
||||
"Thesharingbrother",
|
||||
"Ran C",
|
||||
"tedcor",
|
||||
"Fotek Design",
|
||||
"Bas Imagineer",
|
||||
"Pat Hen",
|
||||
"ResidentDeviant",
|
||||
"Adam Taylor",
|
||||
"JC",
|
||||
"Weird_With_A_Beard",
|
||||
"Prompt Pirate",
|
||||
"MadSpin",
|
||||
"Pozadine1",
|
||||
"uwutismxd",
|
||||
"Qarob",
|
||||
"AIGooner",
|
||||
"inbijiburu",
|
||||
"decoy",
|
||||
"Luc",
|
||||
"ProtonPrince",
|
||||
"DiffDuck",
|
||||
@@ -258,53 +234,54 @@
|
||||
"thesoftwaredruid",
|
||||
"wundershark",
|
||||
"mr_dinosaur",
|
||||
"Tyrswood",
|
||||
"linnfrey",
|
||||
"zenobeus",
|
||||
"Jackthemind",
|
||||
"Stryker",
|
||||
"Gamalonia",
|
||||
"Vir",
|
||||
"Pkrsky",
|
||||
"raf8osz",
|
||||
"blikkies",
|
||||
"Joboshy",
|
||||
"Bohemian Corporal",
|
||||
"Dan",
|
||||
"Josef Lanzl",
|
||||
"Seth Christensen",
|
||||
"Griffin Dahlberg",
|
||||
"준희 김",
|
||||
"Draven T",
|
||||
"yer fey",
|
||||
"Error_Rule34_Not_found",
|
||||
"Gerald Welly",
|
||||
"Shock Shockor",
|
||||
"Roslynd",
|
||||
"Geolog",
|
||||
"Goldwaters",
|
||||
"jinxedx",
|
||||
"Neco28",
|
||||
"Zude",
|
||||
"Aquatic Coffee",
|
||||
"Dankin",
|
||||
"ethanfel",
|
||||
"Cristian Vazquez",
|
||||
"Kyler",
|
||||
"Frank Nitty",
|
||||
"Magic Noob",
|
||||
"aRtFuL_DodGeR",
|
||||
"X",
|
||||
"Focuschannel",
|
||||
"DougPeterson",
|
||||
"Jeff",
|
||||
"Bruce",
|
||||
"CrimsonDX",
|
||||
"Kevin John Duck",
|
||||
"Anthony Faxlandez",
|
||||
"Kevin Christopher",
|
||||
"Ouro Boros",
|
||||
"DarkSunset",
|
||||
"Blackfish95",
|
||||
"dd",
|
||||
"Billy Gladky",
|
||||
"Probis",
|
||||
"shrshpp",
|
||||
"Dušan Ryban",
|
||||
"ItsGeneralButtNaked",
|
||||
"sjon kreutz",
|
||||
"Nimess",
|
||||
"Paul Kroll",
|
||||
"MiraiKuriyamaSy",
|
||||
"semicolon drainpipe",
|
||||
"Thesharingbrother",
|
||||
"Bas Imagineer",
|
||||
"Pat Hen",
|
||||
"John Statham",
|
||||
"Youguang",
|
||||
"ResidentDeviant",
|
||||
"Nihongasuki",
|
||||
"Metryman55",
|
||||
"andrewzpong",
|
||||
"FrxzenSnxw",
|
||||
"BossGame",
|
||||
"JC",
|
||||
"Prompt Pirate",
|
||||
"uwutismxd",
|
||||
"decoy",
|
||||
"Tyrswood",
|
||||
"Ray Wing",
|
||||
"Ranzitho",
|
||||
"Gus",
|
||||
@@ -316,7 +293,6 @@
|
||||
"WRL_SPR",
|
||||
"capn",
|
||||
"Joseph",
|
||||
"lrdchs",
|
||||
"Mirko Katzula",
|
||||
"dan",
|
||||
"Piccio08",
|
||||
@@ -326,51 +302,135 @@
|
||||
"Moon Knight",
|
||||
"몽타주",
|
||||
"Kland",
|
||||
"Hailshem",
|
||||
"zenobeus",
|
||||
"Jackthemind",
|
||||
"ryoma",
|
||||
"John Martin",
|
||||
"Stryker",
|
||||
"raf8osz",
|
||||
"ElitaSSJ4",
|
||||
"blikkies",
|
||||
"Chris",
|
||||
"Brian M",
|
||||
"Nerezza",
|
||||
"sanborondon",
|
||||
"moranqianlong",
|
||||
"Taylor Funk",
|
||||
"aezin",
|
||||
"Thought2Form",
|
||||
"jcay015",
|
||||
"Kevin Picco",
|
||||
"Erik Lopez",
|
||||
"Shock Shockor",
|
||||
"Mateo Curić",
|
||||
"Haru Yotu",
|
||||
"Goldwaters",
|
||||
"Zude",
|
||||
"Eris3D",
|
||||
"m",
|
||||
"Pierce McBride",
|
||||
"Joshua Gray",
|
||||
"Kyler",
|
||||
"Mikko Hemilä",
|
||||
"Matura Arbeit",
|
||||
"aRtFuL_DodGeR",
|
||||
"Jamie Ogletree",
|
||||
"TBitz33",
|
||||
"Emil Bernhoff",
|
||||
"a _",
|
||||
"SendingRavens",
|
||||
"James Coleman",
|
||||
"CrimsonDX",
|
||||
"Martial",
|
||||
"battu",
|
||||
"Emil Andersson",
|
||||
"Chad Idk",
|
||||
"Michael Docherty",
|
||||
"DarkSunset",
|
||||
"Billy Gladky",
|
||||
"Yuji Kaneko",
|
||||
"Probis",
|
||||
"Dušan Ryban",
|
||||
"ItsGeneralButtNaked",
|
||||
"Jordan Shaw",
|
||||
"Rops Alot",
|
||||
"Sam",
|
||||
"sjon kreutz",
|
||||
"Nimess",
|
||||
"SRDB",
|
||||
"Ace Ventura",
|
||||
"g unit",
|
||||
"Youguang",
|
||||
"Metryman55",
|
||||
"andrewzpong",
|
||||
"FrxzenSnxw",
|
||||
"BossGame",
|
||||
"lrdchs",
|
||||
"momokai",
|
||||
"Hailshem",
|
||||
"kudari",
|
||||
"Naomi Hale Danchi",
|
||||
"dc7431",
|
||||
"ken",
|
||||
"Inversity",
|
||||
"AIVORY3D",
|
||||
"epicgamer0020690",
|
||||
"Joshua Porrata",
|
||||
"keemun",
|
||||
"SuBu",
|
||||
"RedPIXel",
|
||||
"Kevinj",
|
||||
"Wind",
|
||||
"Nexus",
|
||||
"Ramneek“Guy”Ashok",
|
||||
"squid_actually",
|
||||
"Nat_20",
|
||||
"Edward Weeks",
|
||||
"kyoumei",
|
||||
"RadStorm04",
|
||||
"JohnDoe42054",
|
||||
"BillyHill",
|
||||
"emyth",
|
||||
"chriphost",
|
||||
"KitKatM",
|
||||
"socrasteeze",
|
||||
"ResidentDeviant",
|
||||
"gzmzmvp",
|
||||
"Welkor",
|
||||
"John Martin",
|
||||
"Richard",
|
||||
"Andrew",
|
||||
"Robert Wegemund",
|
||||
"Littlehuggy",
|
||||
"moranqianlong",
|
||||
"Gregory Kozhemiak",
|
||||
"mrjuan",
|
||||
"Brian Buie",
|
||||
"Sadlip",
|
||||
"Haru Yotu",
|
||||
"Eric Whitney",
|
||||
"Joey Callahan",
|
||||
"Ivan Tadic",
|
||||
"Mike Simone",
|
||||
"Morgandel",
|
||||
"Kyron Mahan",
|
||||
"Matura Arbeit",
|
||||
"Noah",
|
||||
"Jacob McDaniel",
|
||||
"X",
|
||||
"Sloan Steddy",
|
||||
"TBitz33",
|
||||
"Anonym dkjglfleeoeldldldlkf",
|
||||
"Temikus",
|
||||
"Artokun",
|
||||
"Michael Taylor",
|
||||
"SendingRavens",
|
||||
"Derek Baker",
|
||||
"Michael Anthony Scott",
|
||||
"Atilla Berke Pekduyar",
|
||||
"Michael Docherty",
|
||||
"Nathan",
|
||||
"Decx _",
|
||||
"Paul Hartsuyker",
|
||||
"elitassj",
|
||||
"Jacob Winter",
|
||||
"Jordan Shaw",
|
||||
"Sam",
|
||||
"Rops Alot",
|
||||
"SRDB",
|
||||
"g unit",
|
||||
"Ace Ventura",
|
||||
"Distortik",
|
||||
"David",
|
||||
"Meilo",
|
||||
"Pen Bouryoung",
|
||||
"四糸凜音",
|
||||
"shinonomeiro",
|
||||
"Snille",
|
||||
"MaartenAlbers",
|
||||
@@ -378,101 +438,104 @@
|
||||
"xybrightsummer",
|
||||
"jreedatchison",
|
||||
"PhilW",
|
||||
"momokai",
|
||||
"Tree Tagger",
|
||||
"Janik",
|
||||
"kudari",
|
||||
"Naomi Hale Danchi",
|
||||
"dc7431",
|
||||
"ken",
|
||||
"Inversity",
|
||||
"Crocket",
|
||||
"AIVORY3D",
|
||||
"epicgamer0020690",
|
||||
"Joshua Porrata",
|
||||
"Cruel",
|
||||
"keemun",
|
||||
"SuBu",
|
||||
"RedPIXel",
|
||||
"MRBlack",
|
||||
"Kevinj",
|
||||
"Wind",
|
||||
"Nexus",
|
||||
"Mitchell Robson",
|
||||
"Ramneek“Guy”Ashok",
|
||||
"squid_actually",
|
||||
"Nat_20",
|
||||
"Kiyoe",
|
||||
"Edward Weeks",
|
||||
"kyoumei",
|
||||
"RadStorm04",
|
||||
"JohnDoe42054",
|
||||
"BillyHill",
|
||||
"humptynutz",
|
||||
"emyth",
|
||||
"michael.isaza",
|
||||
"Kalnei",
|
||||
"chriphost",
|
||||
"KitKatM",
|
||||
"socrasteeze",
|
||||
"ResidentDeviant",
|
||||
"Whitepinetrader",
|
||||
"OrganicArtifact",
|
||||
"Scott",
|
||||
"gzmzmvp",
|
||||
"Welkor",
|
||||
"MudkipMedkitz",
|
||||
"deanbrian",
|
||||
"POPPIN",
|
||||
"Alex Wortman",
|
||||
"Cody",
|
||||
"Raku",
|
||||
"smart.edge5178",
|
||||
"emadsultan",
|
||||
"InformedViewz",
|
||||
"CHKeeho80",
|
||||
"Bubbafett",
|
||||
"leaf",
|
||||
"Menard",
|
||||
"Skyfire83",
|
||||
"Adam Rinehart",
|
||||
"D",
|
||||
"Pitpe11",
|
||||
"TheD1rtyD03",
|
||||
"moonpetal",
|
||||
"SomeDude",
|
||||
"g9p0o",
|
||||
"nanana",
|
||||
"TheHolySheep",
|
||||
"Monte Won",
|
||||
"SpringBootisTrash",
|
||||
"carsten",
|
||||
"ikok",
|
||||
"Buecyb99",
|
||||
"4IXplr0r3r",
|
||||
"dfklsjfkljslfjd",
|
||||
"hayden",
|
||||
"Richard",
|
||||
"ahoystan",
|
||||
"Leland Saunders",
|
||||
"Andrew",
|
||||
"Wolfe7D1",
|
||||
"Ink Temptation",
|
||||
"Bob Barker",
|
||||
"Robert Wegemund",
|
||||
"Littlehuggy",
|
||||
"Gregory Kozhemiak",
|
||||
"mrjuan",
|
||||
"edk",
|
||||
"Kalli Core",
|
||||
"Aeternyx",
|
||||
"Brian Buie",
|
||||
"elleshar666",
|
||||
"YOU SINWOO",
|
||||
"Sadlip",
|
||||
"ja s",
|
||||
"Eric Whitney",
|
||||
"Doug Mason",
|
||||
"Joey Callahan",
|
||||
"Ivan Tadic",
|
||||
"y2Rxy7FdXzWo",
|
||||
"Kauffy",
|
||||
"Jeremy Townsend",
|
||||
"Mike Simone",
|
||||
"EpicElric",
|
||||
"Sean voets",
|
||||
"Owen Gwosdz",
|
||||
"Morgandel",
|
||||
"John J Linehan",
|
||||
"Elliot E",
|
||||
"Thomas Wanner",
|
||||
"Kyron Mahan",
|
||||
"Theerat Jiramate",
|
||||
"Noah",
|
||||
"Jacob McDaniel",
|
||||
"Edward Kennedy",
|
||||
"Justin Blaylock",
|
||||
"Devil Lude",
|
||||
"Nick Kage",
|
||||
"kevin stoddard",
|
||||
"Sloan Steddy",
|
||||
"Jack Dole",
|
||||
"Vane Holzer",
|
||||
"psytrax",
|
||||
"Ezokewn",
|
||||
"Temikus",
|
||||
"Artokun",
|
||||
"Michael Taylor",
|
||||
"Derek Baker",
|
||||
"Michael Anthony Scott",
|
||||
"Atilla Berke Pekduyar",
|
||||
"hexxish",
|
||||
"CptNeo",
|
||||
"notedfakes",
|
||||
"Maso",
|
||||
"Nathan",
|
||||
"Decx _",
|
||||
"Eric Ketchum",
|
||||
"NICHOLAS BAXLEY",
|
||||
"Michael Scott",
|
||||
"Kevin Wallace",
|
||||
"Matheus Couto",
|
||||
"Paul Hartsuyker",
|
||||
"Saya",
|
||||
"ChicRic",
|
||||
"mercur",
|
||||
"J C",
|
||||
"Distortik",
|
||||
"Ed Wang",
|
||||
"Ryan Presley Ng",
|
||||
"Wes Sims",
|
||||
"Donor4115",
|
||||
"Yves Poezevara",
|
||||
"Teriak47",
|
||||
"Just me",
|
||||
"Raf Stahelin",
|
||||
"Вячеслав Маринин",
|
||||
"Lyavph",
|
||||
"Filippo Ferrari",
|
||||
"Cola Matthew",
|
||||
"OniNoKen",
|
||||
"Iain Wisely",
|
||||
@@ -505,117 +568,100 @@
|
||||
"RevyHiep",
|
||||
"Captain_Swag",
|
||||
"obkircher",
|
||||
"Tree Tagger",
|
||||
"gwyar",
|
||||
"D",
|
||||
"edgecase",
|
||||
"Neoxena",
|
||||
"mrmhalo",
|
||||
"dg",
|
||||
"Whitepinetrader",
|
||||
"Maarten Harms",
|
||||
"OrganicArtifact",
|
||||
"四糸凜音",
|
||||
"MudkipMedkitz",
|
||||
"Israel",
|
||||
"deanbrian",
|
||||
"POPPIN",
|
||||
"Muratoraccio",
|
||||
"SelfishMedic",
|
||||
"Ginnie",
|
||||
"Alex Wortman",
|
||||
"Cody",
|
||||
"adderleighn",
|
||||
"Raku",
|
||||
"smart.edge5178",
|
||||
"emadsultan",
|
||||
"InformedViewz",
|
||||
"CHKeeho80",
|
||||
"Bubbafett",
|
||||
"leaf",
|
||||
"Menard",
|
||||
"Skyfire83",
|
||||
"Adam Rinehart",
|
||||
"D",
|
||||
"Pitpe11",
|
||||
"TheD1rtyD03",
|
||||
"EnragedAntelope",
|
||||
"moonpetal",
|
||||
"SomeDude",
|
||||
"g9p0o",
|
||||
"nanana",
|
||||
"TheHolySheep",
|
||||
"Monte Won",
|
||||
"SpringBootisTrash",
|
||||
"carsten",
|
||||
"ikok",
|
||||
"Buecyb99",
|
||||
"4IXplr0r3r",
|
||||
"Alan+Cano",
|
||||
"FeralOpticsAI",
|
||||
"Pavlaki",
|
||||
"generic404",
|
||||
"Mateusz+Kosela",
|
||||
"Doug+Rintoul",
|
||||
"Noor",
|
||||
"Yorunai",
|
||||
"Bula",
|
||||
"quantenmecha",
|
||||
"abattoirblues",
|
||||
"Jason+Nash",
|
||||
"BillyBoy84",
|
||||
"DarkRoast",
|
||||
"zounik",
|
||||
"letzte",
|
||||
"Nasty+Hobbit",
|
||||
"SgtFluffles",
|
||||
"lrdchs2",
|
||||
"Duk3+Rand0m",
|
||||
"KUJYAKU",
|
||||
"NathenChoi",
|
||||
"Thomas+Reck",
|
||||
"Larses",
|
||||
"cocona",
|
||||
"Coeur+de+cochon",
|
||||
"David Schenck",
|
||||
"han b",
|
||||
"Nico",
|
||||
"Wolfe7D1",
|
||||
"Banana Joe",
|
||||
"_ G3n",
|
||||
"Donovan Jenkins",
|
||||
"Ink Temptation",
|
||||
"edk",
|
||||
"JBsuede",
|
||||
"Michael Eid",
|
||||
"beersandbacon",
|
||||
"Maximilian Pyko",
|
||||
"Invis",
|
||||
"Kalli Core",
|
||||
"Justin Houston",
|
||||
"Time Valentine",
|
||||
"james",
|
||||
"elleshar666",
|
||||
"OrochiNights",
|
||||
"Michael Zhu",
|
||||
"ACTUALLY_the_Real_Willem_Dafoe",
|
||||
"gonzalo",
|
||||
"Seraphy",
|
||||
"Михал Михалыч",
|
||||
"雨の心 落",
|
||||
"Matt",
|
||||
"AllTimeNoobie",
|
||||
"jumpd",
|
||||
"John C",
|
||||
"Kauffy",
|
||||
"Rim",
|
||||
"Dismem",
|
||||
"EpicElric",
|
||||
"John J Linehan",
|
||||
"Frogmilk",
|
||||
"SPJ",
|
||||
"Xan Dionysus",
|
||||
"Nathan lee",
|
||||
"Mewtora",
|
||||
"Elliot E",
|
||||
"Middo",
|
||||
"Forbidden Atelier",
|
||||
"Edward Kennedy",
|
||||
"Justin Blaylock",
|
||||
"Bryan Rutkowski",
|
||||
"Adictedtohumping",
|
||||
"Devil Lude",
|
||||
"Nick Kage",
|
||||
"Towelie",
|
||||
"Vane Holzer",
|
||||
"psytrax",
|
||||
"Cyrus Fett",
|
||||
"Jean-françois SEMA",
|
||||
"Kurt",
|
||||
"hexxish",
|
||||
"giani kidd",
|
||||
"CptNeo",
|
||||
"notedfakes",
|
||||
"max blo",
|
||||
"Xenon Xue",
|
||||
"JackJohnnyJim",
|
||||
"Edward Ten Eyck",
|
||||
"Chase Kwon",
|
||||
"Inyoshu",
|
||||
"Goober719",
|
||||
"Eric Ketchum",
|
||||
"Chad Barnes",
|
||||
"NICHOLAS BAXLEY",
|
||||
"Michael Scott",
|
||||
"James Ming",
|
||||
"vanditking",
|
||||
"kripitonga",
|
||||
"Rizzi",
|
||||
"nimin",
|
||||
"OMAR LUCIANO",
|
||||
"hannibal",
|
||||
"Jo+Example",
|
||||
"BrentBertram",
|
||||
"eumelzocker",
|
||||
@@ -623,5 +669,5 @@
|
||||
"L C",
|
||||
"Dude"
|
||||
],
|
||||
"totalCount": 620
|
||||
"totalCount": 666
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
280 locales/de.json
@@ -263,7 +263,9 @@
|
||||
"videoSettings": "Video-Einstellungen",
|
||||
"layoutSettings": "Layout-Einstellungen",
|
||||
"misc": "Verschiedenes",
|
||||
"backup": "Backups",
|
||||
"folderSettings": "Standard-Roots",
|
||||
"recipeSettings": "Rezepte",
|
||||
"extraFolderPaths": "Zusätzliche Ordnerpfade",
|
||||
"downloadPathTemplates": "Download-Pfad-Vorlagen",
|
||||
"priorityTags": "Prioritäts-Tags",
|
||||
@@ -291,7 +293,15 @@
|
||||
"blurNsfwContent": "NSFW-Inhalte unscharf stellen",
|
||||
"blurNsfwContentHelp": "Nicht jugendfreie (NSFW) Vorschaubilder unscharf stellen",
|
||||
"showOnlySfw": "Nur SFW-Ergebnisse anzeigen",
|
||||
"showOnlySfwHelp": "Alle NSFW-Inhalte beim Durchsuchen und Suchen herausfiltern"
|
||||
"showOnlySfwHelp": "Alle NSFW-Inhalte beim Durchsuchen und Suchen herausfiltern",
|
||||
"matureBlurThreshold": "Schwelle für Unschärfe bei jugendgefährdenden Inhalten",
|
||||
"matureBlurThresholdHelp": "Legen Sie fest, ab welcher Altersfreigabe die Unschärfe beginnt, wenn NSFW-Unschärfe aktiviert ist.",
|
||||
"matureBlurThresholdOptions": {
|
||||
"pg13": "PG13 und höher",
|
||||
"r": "R und höher (Standard)",
|
||||
"x": "X und höher",
|
||||
"xxx": "Nur XXX"
|
||||
}
|
||||
},
|
||||
"videoSettings": {
|
||||
"autoplayOnHover": "Videos bei Hover automatisch abspielen",
|
||||
@@ -315,6 +325,54 @@
|
||||
"saveFailed": "Übersprungene Pfade konnten nicht gespeichert werden: {message}"
|
||||
}
|
||||
},
|
||||
"backup": {
|
||||
"autoEnabled": "Automatische Backups",
|
||||
"autoEnabledHelp": "Erstellt einmal täglich einen lokalen Schnappschuss und behält die neuesten Schnappschüsse gemäß der Aufbewahrungsrichtlinie.",
|
||||
"retention": "Aufbewahrungsanzahl",
|
||||
"retentionHelp": "Wie viele automatische Schnappschüsse behalten werden, bevor ältere entfernt werden.",
|
||||
"management": "Backup-Verwaltung",
|
||||
"managementHelp": "Exportiere deinen aktuellen Benutzerstatus oder stelle ihn aus einem Backup-Archiv wieder her.",
|
||||
"scopeHelp": "Sichert deine Einstellungen, den Downloadverlauf und den Status der Modellaktualisierung. Modelldateien und neu erzeugbare Caches sind nicht enthalten.",
|
||||
"locationSummary": "Aktueller Backup-Speicherort",
|
||||
"openFolderButton": "Backup-Ordner öffnen",
|
||||
"openFolderSuccess": "Backup-Ordner geöffnet",
|
||||
"openFolderFailed": "Backup-Ordner konnte nicht geöffnet werden",
|
||||
"locationCopied": "Backup-Pfad in die Zwischenablage kopiert: {{path}}",
|
||||
"locationClipboardFallback": "Backup-Pfad: {{path}}",
|
||||
"exportButton": "Backup exportieren",
|
||||
"exportSuccess": "Backup erfolgreich exportiert.",
|
||||
"exportFailed": "Backup konnte nicht exportiert werden: {message}",
|
||||
"importButton": "Backup importieren",
|
||||
"importConfirm": "Dieses Backup importieren und den lokalen Benutzerstatus überschreiben?",
|
||||
"importSuccess": "Backup erfolgreich importiert.",
|
||||
"importFailed": "Backup konnte nicht importiert werden: {message}",
|
||||
"latestSnapshot": "Neuester Schnappschuss",
|
||||
"latestAutoSnapshot": "Neuester automatischer Schnappschuss",
|
||||
"snapshotCount": "Gespeicherte Schnappschüsse",
|
||||
"noneAvailable": "Noch keine Schnappschüsse vorhanden"
|
||||
},
|
||||
"downloadSkipBaseModels": {
|
||||
"label": "Downloads für Basismodelle überspringen",
|
||||
"help": "Gilt für alle Download-Abläufe. Hier können nur unterstützte Basismodelle ausgewählt werden.",
|
||||
"searchPlaceholder": "Basismodelle filtern...",
|
||||
"empty": "Keine Basismodelle entsprechen der aktuellen Suche.",
|
||||
"summary": {
|
||||
"none": "Nichts ausgewählt",
|
||||
"count": "{count} ausgewählt"
|
||||
},
|
||||
"actions": {
|
||||
"edit": "Bearbeiten",
|
||||
"collapse": "Einklappen",
|
||||
"clear": "Löschen"
|
||||
},
|
||||
"validation": {
|
||||
"saveFailed": "Ausgeschlossene Basismodelle konnten nicht gespeichert werden: {message}"
|
||||
}
|
||||
},
|
||||
"skipPreviouslyDownloadedModelVersions": {
|
||||
"label": "Bereits heruntergeladene Modellversionen überspringen",
|
||||
"help": "Wenn aktiviert, überspringt LoRA Manager den Download einer Modellversion, wenn der Download-Verlaufsdienst diese spezifische Version als bereits heruntergeladen erfasst hat. Gilt für alle Download-Abläufe."
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "Anzeige-Dichte",
|
||||
"displayDensityOptions": {
|
||||
@@ -363,12 +421,16 @@
|
||||
"defaultUnetRootHelp": "Legen Sie den Standard-Diffusion-Modell-(UNET)-Stammordner für Downloads, Importe und Verschiebungen fest",
|
||||
"defaultEmbeddingRoot": "Embedding-Stammordner",
|
||||
"defaultEmbeddingRootHelp": "Legen Sie den Standard-Embedding-Stammordner für Downloads, Importe und Verschiebungen fest",
|
||||
"recipesPath": "Rezepte-Speicherpfad",
|
||||
"recipesPathHelp": "Optionales benutzerdefiniertes Verzeichnis für gespeicherte Rezepte. Leer lassen, um den recipes-Ordner im ersten LoRA-Stammverzeichnis zu verwenden.",
|
||||
"recipesPathPlaceholder": "/path/to/recipes",
|
||||
"recipesPathMigrating": "Rezepte-Speicher wird verschoben...",
|
||||
"noDefault": "Kein Standard"
|
||||
},
|
||||
"extraFolderPaths": {
|
||||
"title": "Zusätzliche Ordnerpfade",
|
||||
"help": "Fügen Sie zusätzliche Modellordner außerhalb der Standardpfade von ComfyUI hinzu. Diese Pfade werden separat gespeichert und zusammen mit den Standardordnern gescannt.",
|
||||
"description": "Konfigurieren Sie zusätzliche Ordner zum Scannen von Modellen. Diese Pfade sind spezifisch für LoRA Manager und werden mit den Standardpfaden von ComfyUI zusammengeführt.",
|
||||
"description": "Zusätzliche Modellstammverzeichnisse, die ausschließlich für LoRA Manager gelten. Laden Sie Modelle von Speicherorten außerhalb der Standardordner von ComfyUI – ideal für große Bibliotheken, die ComfyUI sonst verlangsamen würden.",
|
||||
"restartRequired": "Requires restart to take effect",
|
||||
"modelTypes": {
|
||||
"lora": "LoRA-Pfade",
|
||||
"checkpoint": "Checkpoint-Pfade",
|
||||
@@ -376,7 +438,7 @@
|
||||
"embedding": "Embedding-Pfade"
|
||||
},
|
||||
"pathPlaceholder": "/pfad/zu/extra/modellen",
|
||||
"saveSuccess": "Zusätzliche Ordnerpfade aktualisiert.",
|
||||
"saveSuccess": "Zusätzliche Ordnerpfade aktualisiert. Neustart erforderlich, um Änderungen anzuwenden.",
|
||||
"saveError": "Fehler beim Aktualisieren der zusätzlichen Ordnerpfade: {message}",
|
||||
"validation": {
|
||||
"duplicatePath": "Dieser Pfad ist bereits konfiguriert"
|
||||
@@ -575,6 +637,7 @@
|
||||
"skipMetadataRefresh": "Metadaten-Aktualisierung für ausgewählte Modelle überspringen",
|
||||
"resumeMetadataRefresh": "Metadaten-Aktualisierung für ausgewählte Modelle fortsetzen",
|
||||
"deleteAll": "Alle Modelle löschen",
|
||||
"downloadMissingLoras": "Fehlende LoRAs herunterladen",
|
||||
"clear": "Auswahl löschen",
|
||||
"skipMetadataRefreshCount": "Überspringen({count} Modelle)",
|
||||
"resumeMetadataRefreshCount": "Fortsetzen({count} Modelle)",
|
||||
@@ -645,6 +708,8 @@
|
||||
"root": "Stammverzeichnis",
|
||||
"browseFolders": "Ordner durchsuchen:",
|
||||
"downloadAndSaveRecipe": "Herunterladen & Rezept speichern",
|
||||
"importRecipeOnly": "Nur Rezept importieren",
|
||||
"importAndDownload": "Importieren & Herunterladen",
|
||||
"downloadMissingLoras": "Fehlende LoRAs herunterladen",
|
||||
"saveRecipe": "Rezept speichern",
|
||||
"loraCountInfo": "({existing}/{total} in Bibliothek)",
|
||||
@@ -732,61 +797,61 @@
|
||||
}
|
||||
},
|
||||
"batchImport": {
|
||||
"title": "[TODO: Translate] Batch Import Recipes",
|
||||
"action": "[TODO: Translate] Batch Import",
|
||||
"urlList": "[TODO: Translate] URL List",
|
||||
"directory": "[TODO: Translate] Directory",
|
||||
"urlDescription": "[TODO: Translate] Enter image URLs or local file paths (one per line). Each will be imported as a recipe.",
|
||||
"directoryDescription": "[TODO: Translate] Enter a directory path to import all images from that folder.",
|
||||
"urlsLabel": "[TODO: Translate] Image URLs or Local Paths",
|
||||
"urlsPlaceholder": "[TODO: Translate] https://civitai.com/images/...\nhttps://civitai.com/images/...\nC:/path/to/image.png\n...",
|
||||
"urlsHint": "[TODO: Translate] Enter one URL or path per line",
|
||||
"directoryPath": "[TODO: Translate] Directory Path",
|
||||
"directoryPlaceholder": "[TODO: Translate] /path/to/images/folder",
|
||||
"browse": "[TODO: Translate] Browse",
|
||||
"recursive": "[TODO: Translate] Include subdirectories",
|
||||
"tagsOptional": "[TODO: Translate] Tags (optional, applied to all recipes)",
|
||||
"tagsPlaceholder": "[TODO: Translate] Enter tags separated by commas",
|
||||
"tagsHint": "[TODO: Translate] Tags will be added to all imported recipes",
|
||||
"skipNoMetadata": "[TODO: Translate] Skip images without metadata",
|
||||
"skipNoMetadataHelp": "[TODO: Translate] Images without LoRA metadata will be skipped automatically.",
|
||||
"start": "[TODO: Translate] Start Import",
|
||||
"startImport": "[TODO: Translate] Start Import",
|
||||
"importing": "[TODO: Translate] Importing...",
|
||||
"progress": "[TODO: Translate] Progress",
|
||||
"total": "[TODO: Translate] Total",
|
||||
"success": "[TODO: Translate] Success",
|
||||
"failed": "[TODO: Translate] Failed",
|
||||
"skipped": "[TODO: Translate] Skipped",
|
||||
"current": "[TODO: Translate] Current",
|
||||
"currentItem": "[TODO: Translate] Current",
|
||||
"preparing": "[TODO: Translate] Preparing...",
|
||||
"cancel": "[TODO: Translate] Cancel",
|
||||
"cancelImport": "[TODO: Translate] Cancel",
|
||||
"cancelled": "[TODO: Translate] Import cancelled",
|
||||
"completed": "[TODO: Translate] Import completed",
|
||||
"completedWithErrors": "[TODO: Translate] Completed with errors",
|
||||
"completedSuccess": "[TODO: Translate] Successfully imported {count} recipe(s)",
|
||||
"successCount": "[TODO: Translate] Successful",
|
||||
"failedCount": "[TODO: Translate] Failed",
|
||||
"skippedCount": "[TODO: Translate] Skipped",
|
||||
"totalProcessed": "[TODO: Translate] Total processed",
|
||||
"viewDetails": "[TODO: Translate] View Details",
|
||||
"newImport": "[TODO: Translate] New Import",
|
||||
"manualPathEntry": "[TODO: Translate] Please enter the directory path manually. File browser is not available in this browser.",
|
||||
"batchImportDirectorySelected": "[TODO: Translate] Directory selected: {name}. You may need to enter the full path manually.",
|
||||
"batchImportManualEntryRequired": "[TODO: Translate] File browser not available. Please enter the directory path manually.",
|
||||
"backToParent": "[TODO: Translate] Back to parent directory",
|
||||
"folders": "[TODO: Translate] Folders",
|
||||
"folderCount": "[TODO: Translate] {count} folders",
|
||||
"imageFiles": "[TODO: Translate] Image Files",
|
||||
"images": "[TODO: Translate] images",
|
||||
"imageCount": "[TODO: Translate] {count} images",
|
||||
"selectFolder": "[TODO: Translate] Select This Folder",
|
||||
"title": "Batch Import Recipes",
|
||||
"action": "Batch Import",
|
||||
"urlList": "URL List",
|
||||
"directory": "Directory",
|
||||
"urlDescription": "Enter image URLs or local file paths (one per line). Each will be imported as a recipe.",
|
||||
"directoryDescription": "Enter a directory path to import all images from that folder.",
|
||||
"urlsLabel": "Image URLs or Local Paths",
|
||||
"urlsPlaceholder": "https://civitai.com/images/...\nhttps://civitai.com/images/...\nC:/path/to/image.png\n...",
|
||||
"urlsHint": "Enter one URL or path per line",
|
||||
"directoryPath": "Directory Path",
|
||||
"directoryPlaceholder": "/path/to/images/folder",
|
||||
"browse": "Browse",
|
||||
"recursive": "Include subdirectories",
|
||||
"tagsOptional": "Tags (optional, applied to all recipes)",
|
||||
"tagsPlaceholder": "Enter tags separated by commas",
|
||||
"tagsHint": "Tags will be added to all imported recipes",
|
||||
"skipNoMetadata": "Skip images without metadata",
|
||||
"skipNoMetadataHelp": "Images without LoRA metadata will be skipped automatically.",
|
||||
"start": "Start Import",
|
||||
"startImport": "Start Import",
|
||||
"importing": "Importing...",
|
||||
"progress": "Progress",
|
||||
"total": "Total",
|
||||
"success": "Success",
|
||||
"failed": "Failed",
|
||||
"skipped": "Skipped",
|
||||
"current": "Current",
|
||||
"currentItem": "Current",
|
||||
"preparing": "Preparing...",
|
||||
"cancel": "Cancel",
|
||||
"cancelImport": "Cancel",
|
||||
"cancelled": "Import cancelled",
|
||||
"completed": "Import completed",
|
||||
"completedWithErrors": "Completed with errors",
|
||||
"completedSuccess": "Successfully imported {count} recipe(s)",
|
||||
"successCount": "Successful",
|
||||
"failedCount": "Failed",
|
||||
"skippedCount": "Skipped",
|
||||
"totalProcessed": "Total processed",
|
||||
"viewDetails": "View Details",
|
||||
"newImport": "New Import",
|
||||
"manualPathEntry": "Please enter the directory path manually. File browser is not available in this browser.",
|
||||
"batchImportDirectorySelected": "Directory selected: {path}",
|
||||
"batchImportManualEntryRequired": "File browser not available. Please enter the directory path manually.",
|
||||
"backToParent": "Back to parent directory",
|
||||
"folders": "Folders",
|
||||
"folderCount": "{count} folders",
|
||||
"imageFiles": "Image Files",
|
||||
"images": "images",
|
||||
"imageCount": "{count} images",
|
||||
"selectFolder": "Select This Folder",
|
||||
"errors": {
|
||||
"enterUrls": "[TODO: Translate] Please enter at least one URL or path",
|
||||
"enterDirectory": "[TODO: Translate] Please enter a directory path",
|
||||
"startFailed": "[TODO: Translate] Failed to start import: {message}"
|
||||
"enterUrls": "Please enter at least one URL or path",
|
||||
"enterDirectory": "Please enter a directory path",
|
||||
"startFailed": "Failed to start import: {message}"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -797,7 +862,8 @@
|
||||
"diffusion_model": "Diffusion Model"
|
||||
},
|
||||
"contextMenu": {
|
||||
"moveToOtherTypeFolder": "In {otherType}-Ordner verschieben"
|
||||
"moveToOtherTypeFolder": "In {otherType}-Ordner verschieben",
|
||||
"sendToWorkflow": "An Workflow senden"
|
||||
}
|
||||
},
|
||||
"embeddings": {
|
||||
@@ -810,8 +876,8 @@
|
||||
"unpinSidebar": "Sidebar lösen",
|
||||
"switchToListView": "Zur Listenansicht wechseln",
|
||||
"switchToTreeView": "Zur Baumansicht wechseln",
|
||||
"recursiveOn": "Unterordner durchsuchen",
|
||||
"recursiveOff": "Nur aktuellen Ordner durchsuchen",
|
||||
"recursiveOn": "Unterordner einbeziehen",
|
||||
"recursiveOff": "Nur aktueller Ordner",
|
||||
"recursiveUnavailable": "Rekursive Suche ist nur in der Baumansicht verfügbar",
|
||||
"collapseAllDisabled": "Im Listenmodus nicht verfügbar",
|
||||
"dragDrop": {
|
||||
@@ -891,6 +957,8 @@
|
||||
"earlyAccess": "Early Access",
|
||||
"earlyAccessTooltip": "Early Access erforderlich",
|
||||
"inLibrary": "In Bibliothek",
|
||||
"downloaded": "Heruntergeladen",
|
||||
"downloadedTooltip": "Zuvor heruntergeladen, aber derzeit nicht in Ihrer Bibliothek.",
|
||||
"alreadyInLibrary": "Bereits in Bibliothek",
|
||||
"autoOrganizedPath": "[Automatisch organisiert durch Pfadvorlage]",
|
||||
"errors": {
|
||||
@@ -981,6 +1049,14 @@
|
||||
"save": "Basis-Modell aktualisieren",
|
||||
"cancel": "Abbrechen"
|
||||
},
|
||||
"bulkDownloadMissingLoras": {
|
||||
"title": "Fehlende LoRAs herunterladen",
|
||||
"message": "{uniqueCount} einzigartige fehlende LoRAs gefunden (von insgesamt {totalCount} in ausgewählten Rezepten).",
|
||||
"previewTitle": "Zu herunterladende LoRAs:",
|
||||
"moreItems": "...und {count} weitere",
|
||||
"note": "Dateien werden mit Standard-Pfad-Vorlagen heruntergeladen. Dies kann je nach Anzahl der LoRAs eine Weile dauern.",
|
||||
"downloadButton": "{count} LoRA(s) herunterladen"
|
||||
},
|
||||
"exampleAccess": {
|
||||
"title": "Lokale Beispielbilder",
|
||||
"message": "Keine lokalen Beispielbilder für dieses Modell gefunden. Ansichtsoptionen:",
|
||||
@@ -1032,7 +1108,9 @@
|
||||
"viewOnCivitai": "Auf Civitai anzeigen",
|
||||
"viewOnCivitaiText": "Auf Civitai anzeigen",
|
||||
"viewCreatorProfile": "Ersteller-Profil anzeigen",
|
||||
"openFileLocation": "Dateispeicherort öffnen"
|
||||
"openFileLocation": "Dateispeicherort öffnen",
|
||||
"sendToWorkflow": "An ComfyUI senden",
|
||||
"sendToWorkflowText": "An ComfyUI senden"
|
||||
},
|
||||
"openFileLocation": {
|
||||
"success": "Dateispeicherort erfolgreich geöffnet",
|
||||
@@ -1040,6 +1118,9 @@
|
||||
"copied": "Pfad in die Zwischenablage kopiert: {{path}}",
|
||||
"clipboardFallback": "Pfad: {{path}}"
|
||||
},
|
||||
"sendToWorkflow": {
|
||||
"noFilePath": "Kann nicht an ComfyUI senden: Kein Dateipfad verfügbar"
|
||||
},
|
||||
"metadata": {
|
||||
"version": "Version",
|
||||
"fileName": "Dateiname",
|
||||
@@ -1147,17 +1228,30 @@
|
||||
"days": "in {count}d"
|
||||
},
|
||||
"badges": {
|
||||
"current": "Aktuelle Version",
|
||||
"current": "Geöffnete Version",
|
||||
"currentTooltip": "Das ist die Version, mit der dieses Modal geöffnet wurde",
|
||||
"inLibrary": "In der Bibliothek",
|
||||
"inLibraryTooltip": "Diese Version befindet sich in Ihrer lokalen Bibliothek",
|
||||
"downloaded": "Heruntergeladen",
|
||||
"downloadedTooltip": "Diese Version wurde bereits heruntergeladen, befindet sich aber derzeit nicht in Ihrer Bibliothek",
|
||||
"newer": "Neuere Version",
|
||||
"newerTooltip": "Diese Version ist neuer als Ihre neueste lokale Version",
|
||||
"earlyAccess": "Früher Zugriff",
|
||||
"ignored": "Ignoriert"
|
||||
"earlyAccessTooltip": "Für diese Version ist derzeit Civitai Early Access erforderlich",
|
||||
"ignored": "Ignoriert",
|
||||
"ignoredTooltip": "Für diese Version sind Update-Benachrichtigungen deaktiviert"
|
||||
},
|
||||
"actions": {
|
||||
"download": "Herunterladen",
|
||||
"downloadTooltip": "Diese Version herunterladen",
|
||||
"downloadEarlyAccessTooltip": "Diese Early-Access-Version von Civitai herunterladen",
|
||||
"delete": "Löschen",
|
||||
"deleteTooltip": "Diese lokale Version löschen",
|
||||
"ignore": "Ignorieren",
|
||||
"unignore": "Ignorierung aufheben",
|
||||
"ignoreTooltip": "Update-Benachrichtigungen für diese Version ignorieren",
|
||||
"unignoreTooltip": "Update-Benachrichtigungen für diese Version fortsetzen",
|
||||
"viewVersionOnCivitai": "Version auf Civitai anzeigen",
|
||||
"earlyAccessTooltip": "Erfordert Early-Access-Kauf",
|
||||
"resumeModelUpdates": "Aktualisierungen für dieses Modell fortsetzen",
|
||||
"ignoreModelUpdates": "Aktualisierungen für dieses Modell ignorieren",
|
||||
@@ -1297,7 +1391,9 @@
|
||||
"recipeReplaced": "Rezept im Workflow ersetzt",
|
||||
"recipeFailedToSend": "Fehler beim Senden des Rezepts an den Workflow",
|
||||
"noMatchingNodes": "Keine kompatiblen Knoten im aktuellen Workflow verfügbar",
|
||||
"noTargetNodeSelected": "Kein Zielknoten ausgewählt"
|
||||
"noTargetNodeSelected": "Kein Zielknoten ausgewählt",
|
||||
"modelUpdated": "Modell im Workflow aktualisiert",
|
||||
"modelFailed": "Fehler beim Aktualisieren des Modellknotens"
|
||||
},
|
||||
"nodeSelector": {
|
||||
"recipe": "Rezept",
|
||||
@@ -1448,6 +1544,7 @@
|
||||
"pleaseSelectVersion": "Bitte wählen Sie eine Version aus",
|
||||
"versionExists": "Diese Version existiert bereits in Ihrer Bibliothek",
|
||||
"downloadCompleted": "Download erfolgreich abgeschlossen",
|
||||
"downloadSkippedByBaseModel": "Download übersprungen, weil das Basismodell {baseModel} ausgeschlossen ist",
|
||||
"autoOrganizeSuccess": "Automatische Organisation für {count} {type} erfolgreich abgeschlossen",
|
||||
"autoOrganizePartialSuccess": "Automatische Organisation abgeschlossen: {success} verschoben, {failures} fehlgeschlagen von insgesamt {total} Modellen",
|
||||
"autoOrganizeFailed": "Automatische Organisation fehlgeschlagen: {error}",
|
||||
@@ -1467,7 +1564,11 @@
|
||||
"nameUpdated": "Rezeptname erfolgreich aktualisiert",
|
||||
"tagsUpdated": "Rezept-Tags erfolgreich aktualisiert",
|
||||
"sourceUrlUpdated": "Quell-URL erfolgreich aktualisiert",
|
||||
"promptUpdated": "Prompt erfolgreich aktualisiert",
|
||||
"negativePromptUpdated": "Negativer Prompt erfolgreich aktualisiert",
|
||||
"promptEditorHint": "Drücken Sie Enter zum Speichern, Shift+Enter für neue Zeile",
|
||||
"noRecipeId": "Keine Rezept-ID verfügbar",
|
||||
"sendToWorkflowFailed": "Fehler beim Senden des Rezepts an den Workflow: {message}",
|
||||
"copyFailed": "Fehler beim Kopieren der Rezept-Syntax: {message}",
|
||||
"noMissingLoras": "Keine fehlenden LoRAs zum Herunterladen",
|
||||
"missingLorasInfoFailed": "Fehler beim Abrufen der Informationen für fehlende LoRAs",
|
||||
@@ -1495,16 +1596,20 @@
|
||||
"processingError": "Verarbeitungsfehler: {message}",
|
||||
"folderBrowserError": "Fehler beim Laden des Ordner-Browsers: {message}",
|
||||
"recipeSaveFailed": "Fehler beim Speichern des Rezepts: {error}",
|
||||
"recipeSaved": "Recipe saved successfully",
|
||||
"importFailed": "Import fehlgeschlagen: {message}",
|
||||
"folderTreeFailed": "Fehler beim Laden des Ordnerbaums",
|
||||
"folderTreeError": "Fehler beim Laden des Ordnerbaums",
|
||||
"batchImportFailed": "[TODO: Translate] Failed to start batch import: {message}",
|
||||
"batchImportCancelling": "[TODO: Translate] Cancelling batch import...",
|
||||
"batchImportCancelFailed": "[TODO: Translate] Failed to cancel batch import: {message}",
|
||||
"batchImportNoUrls": "[TODO: Translate] Please enter at least one URL or file path",
|
||||
"batchImportNoDirectory": "[TODO: Translate] Please enter a directory path",
|
||||
"batchImportBrowseFailed": "[TODO: Translate] Failed to browse directory: {message}",
|
||||
"batchImportDirectorySelected": "[TODO: Translate] Directory selected: {path}"
|
||||
"batchImportFailed": "Failed to start batch import: {message}",
|
||||
"batchImportCancelling": "Cancelling batch import...",
|
||||
"batchImportCancelFailed": "Failed to cancel batch import: {message}",
|
||||
"batchImportNoUrls": "Please enter at least one URL or file path",
|
||||
"batchImportNoDirectory": "Please enter a directory path",
|
||||
"batchImportBrowseFailed": "Failed to browse directory: {message}",
|
||||
"batchImportDirectorySelected": "Directory selected: {path}",
|
||||
"noRecipesSelected": "Keine Rezepte ausgewählt",
|
||||
"noMissingLorasInSelection": "Keine fehlenden LoRAs in ausgewählten Rezepten gefunden",
|
||||
"noLoraRootConfigured": "Kein LoRA-Stammverzeichnis konfiguriert. Bitte legen Sie ein Standard-LoRA-Stammverzeichnis in den Einstellungen fest."
|
||||
},
|
||||
"models": {
|
||||
"noModelsSelected": "Keine Modelle ausgewählt",
|
||||
@@ -1571,6 +1676,8 @@
|
||||
"mappingSaveFailed": "Fehler beim Speichern der Basis-Modell-Zuordnungen: {message}",
|
||||
"downloadTemplatesUpdated": "Download-Pfad-Vorlagen aktualisiert",
|
||||
"downloadTemplatesFailed": "Fehler beim Speichern der Download-Pfad-Vorlagen: {message}",
|
||||
"recipesPathUpdated": "Rezepte-Speicherpfad aktualisiert",
|
||||
"recipesPathSaveFailed": "Fehler beim Aktualisieren des Rezepte-Speicherpfads: {message}",
|
||||
"settingsUpdated": "Einstellungen aktualisiert: {setting}",
|
||||
"compactModeToggled": "Kompakt-Modus {state}",
|
||||
"settingSaveFailed": "Fehler beim Speichern der Einstellung: {message}",
|
||||
@@ -1714,6 +1821,35 @@
|
||||
"moveFailed": "Failed to move item: {message}"
|
||||
}
|
||||
},
|
||||
"doctor": {
|
||||
"kicker": "Systemdiagnose",
|
||||
"title": "Doktor",
|
||||
"buttonTitle": "Diagnose und häufige Fehlerbehebungen ausführen",
|
||||
"loading": "Umgebung wird geprüft...",
|
||||
"footer": "Exportiere ein Diagnosepaket, falls das Problem nach der Reparatur weiterhin besteht.",
|
||||
"summary": {
|
||||
"idle": "Führe eine Überprüfung von Einstellungen, Cache-Integrität und UI-Konsistenz durch.",
|
||||
"ok": "Keine aktiven Probleme wurden in der aktuellen Umgebung gefunden.",
|
||||
"warning": "{count} Problem(e) wurden gefunden. Die meisten lassen sich direkt über dieses Panel beheben.",
|
||||
"error": "Bevor die App vollständig fehlerfrei ist, müssen {count} Problem(e) behoben werden."
|
||||
},
|
||||
"status": {
|
||||
"ok": "Gesund",
|
||||
"warning": "Handlungsbedarf",
|
||||
"error": "Aktion erforderlich"
|
||||
},
|
||||
"actions": {
|
||||
"runAgain": "Erneut ausführen",
|
||||
"exportBundle": "Paket exportieren"
|
||||
},
|
||||
"toast": {
|
||||
"loadFailed": "Diagnose konnte nicht geladen werden: {message}",
|
||||
"repairSuccess": "Cache-Neuaufbau abgeschlossen.",
|
||||
"repairFailed": "Cache-Neuaufbau fehlgeschlagen: {message}",
|
||||
"exportSuccess": "Diagnosepaket exportiert.",
|
||||
"exportFailed": "Export des Diagnosepakets fehlgeschlagen: {message}"
|
||||
}
|
||||
},
|
||||
"banners": {
|
||||
"versionMismatch": {
|
||||
"title": "Anwendungs-Update erkannt",
|
||||
|
||||
160 locales/en.json
@@ -263,7 +263,9 @@
|
||||
"videoSettings": "Video Settings",
|
||||
"layoutSettings": "Layout Settings",
|
||||
"misc": "Miscellaneous",
|
||||
"backup": "Backups",
|
||||
"folderSettings": "Default Roots",
|
||||
"recipeSettings": "Recipes",
|
||||
"extraFolderPaths": "Extra Folder Paths",
|
||||
"downloadPathTemplates": "Download Path Templates",
|
||||
"priorityTags": "Priority Tags",
|
||||
@@ -291,7 +293,15 @@
|
||||
"blurNsfwContent": "Blur NSFW Content",
|
||||
"blurNsfwContentHelp": "Blur mature (NSFW) content preview images",
|
||||
"showOnlySfw": "Show Only SFW Results",
|
||||
"showOnlySfwHelp": "Filter out all NSFW content when browsing and searching"
|
||||
"showOnlySfwHelp": "Filter out all NSFW content when browsing and searching",
|
||||
"matureBlurThreshold": "Mature Blur Threshold",
|
||||
"matureBlurThresholdHelp": "Set which rating level starts blur filtering when NSFW blur is enabled.",
|
||||
"matureBlurThresholdOptions": {
|
||||
"pg13": "PG13 and above",
|
||||
"r": "R and above (default)",
|
||||
"x": "X and above",
|
||||
"xxx": "XXX only"
|
||||
}
|
||||
},
|
||||
"videoSettings": {
|
||||
"autoplayOnHover": "Autoplay Videos on Hover",
|
||||
@@ -315,6 +325,54 @@
|
||||
"saveFailed": "Unable to save skip paths: {message}"
|
||||
}
|
||||
},
|
||||
"backup": {
|
||||
"autoEnabled": "Automatic backups",
|
||||
"autoEnabledHelp": "Create a local snapshot once per day and keep the latest snapshots according to the retention policy.",
|
||||
"retention": "Retention count",
|
||||
"retentionHelp": "How many automatic snapshots to keep before older ones are pruned.",
|
||||
"management": "Backup management",
|
||||
"managementHelp": "Export your current user state or restore it from a backup archive.",
|
||||
"scopeHelp": "Backs up your settings, download history, and model update state. It does not include model files or rebuildable caches.",
|
||||
"locationSummary": "Current backup location",
|
||||
"openFolderButton": "Open backup folder",
|
||||
"openFolderSuccess": "Opened backup folder",
|
||||
"openFolderFailed": "Failed to open backup folder",
|
||||
"locationCopied": "Backup path copied to clipboard: {{path}}",
|
||||
"locationClipboardFallback": "Backup path: {{path}}",
|
||||
"exportButton": "Export backup",
|
||||
"exportSuccess": "Backup exported successfully.",
|
||||
"exportFailed": "Failed to export backup: {message}",
|
||||
"importButton": "Import backup",
|
||||
"importConfirm": "Import this backup and overwrite local user state?",
|
||||
"importSuccess": "Backup imported successfully.",
|
||||
"importFailed": "Failed to import backup: {message}",
|
||||
"latestSnapshot": "Latest snapshot",
|
||||
"latestAutoSnapshot": "Latest automatic snapshot",
|
||||
"snapshotCount": "Saved snapshots",
|
||||
"noneAvailable": "No snapshots yet"
|
||||
},
|
||||
"downloadSkipBaseModels": {
|
||||
"label": "Skip downloads for base models",
|
||||
"help": "When enabled, versions using the selected base models will be skipped.",
|
||||
"searchPlaceholder": "Filter base models...",
|
||||
"empty": "No base models match the current search.",
|
||||
"summary": {
|
||||
"none": "None selected",
|
||||
"count": "{count} selected"
|
||||
},
|
||||
"actions": {
|
||||
"edit": "Edit",
|
||||
"collapse": "Collapse",
|
||||
"clear": "Clear"
|
||||
},
|
||||
"validation": {
|
||||
"saveFailed": "Unable to save excluded base models: {message}"
|
||||
}
|
||||
},
|
||||
"skipPreviouslyDownloadedModelVersions": {
|
||||
"label": "Skip previously downloaded model versions",
|
||||
"help": "When enabled, versions downloaded before will be skipped."
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "Display Density",
|
||||
"displayDensityOptions": {
|
||||
@@ -363,12 +421,16 @@
|
||||
"defaultUnetRootHelp": "Set default diffusion model (UNET) root directory for downloads, imports and moves",
|
||||
"defaultEmbeddingRoot": "Embedding Root",
|
||||
"defaultEmbeddingRootHelp": "Set default embedding root directory for downloads, imports and moves",
|
||||
"recipesPath": "Recipes Storage Path",
|
||||
"recipesPathHelp": "Optional custom directory for stored recipes. Leave empty to use the first LoRA root's recipes folder.",
|
||||
"recipesPathPlaceholder": "/path/to/recipes",
|
||||
"recipesPathMigrating": "Migrating recipes storage...",
|
||||
"noDefault": "No Default"
|
||||
},
|
||||
"extraFolderPaths": {
|
||||
"title": "Extra Folder Paths",
|
||||
"help": "Add additional model folders outside of ComfyUI's standard paths. These paths are stored separately and scanned alongside the default folders.",
|
||||
"description": "Configure additional folders to scan for models. These paths are specific to LoRA Manager and will be merged with ComfyUI's default paths.",
|
||||
"description": "Additional model root paths exclusive to LoRA Manager. Load models from locations outside ComfyUI's standard folders—ideal for large libraries that would otherwise slow down ComfyUI.",
|
||||
"restartRequired": "Requires restart to take effect",
|
||||
"modelTypes": {
|
||||
"lora": "LoRA Paths",
|
||||
"checkpoint": "Checkpoint Paths",
|
||||
@@ -376,7 +438,7 @@
|
||||
"embedding": "Embedding Paths"
|
||||
},
|
||||
"pathPlaceholder": "/path/to/extra/models",
|
||||
"saveSuccess": "Extra folder paths updated.",
|
||||
"saveSuccess": "Extra folder paths updated. Restart required to apply changes.",
|
||||
"saveError": "Failed to update extra folder paths: {message}",
|
||||
"validation": {
|
||||
"duplicatePath": "This path is already configured"
|
||||
@@ -575,6 +637,7 @@
|
||||
"skipMetadataRefresh": "Skip Metadata Refresh for Selected",
|
||||
"resumeMetadataRefresh": "Resume Metadata Refresh for Selected",
|
||||
"deleteAll": "Delete Selected Models",
|
||||
"downloadMissingLoras": "Download Missing LoRAs",
|
||||
"clear": "Clear Selection",
|
||||
"skipMetadataRefreshCount": "Skip ({count} models)",
|
||||
"resumeMetadataRefreshCount": "Resume ({count} models)",
|
||||
@@ -645,6 +708,8 @@
|
||||
"root": "Root",
|
||||
"browseFolders": "Browse Folders:",
|
||||
"downloadAndSaveRecipe": "Download & Save Recipe",
|
||||
"importRecipeOnly": "Import Recipe Only",
|
||||
"importAndDownload": "Import & Download",
|
||||
"downloadMissingLoras": "Download Missing LoRAs",
|
||||
"saveRecipe": "Save Recipe",
|
||||
"loraCountInfo": "({existing}/{total} in library)",
|
||||
@@ -797,7 +862,8 @@
|
||||
"diffusion_model": "Diffusion Model"
|
||||
},
|
||||
"contextMenu": {
|
||||
"moveToOtherTypeFolder": "Move to {otherType} Folder"
|
||||
"moveToOtherTypeFolder": "Move to {otherType} Folder",
|
||||
"sendToWorkflow": "Send to Workflow"
|
||||
}
|
||||
},
|
||||
"embeddings": {
|
||||
@@ -810,8 +876,8 @@
|
||||
"unpinSidebar": "Unpin Sidebar",
|
||||
"switchToListView": "Switch to List View",
|
||||
"switchToTreeView": "Switch to Tree View",
|
||||
"recursiveOn": "Search subfolders",
|
||||
"recursiveOff": "Search current folder only",
|
||||
"recursiveOn": "Include subfolders",
|
||||
"recursiveOff": "Current folder only",
|
||||
"recursiveUnavailable": "Recursive search is available in tree view only",
|
||||
"collapseAllDisabled": "Not available in list view",
|
||||
"dragDrop": {
|
||||
@@ -891,6 +957,8 @@
|
||||
"earlyAccess": "Early Access",
|
||||
"earlyAccessTooltip": "Early access required",
|
||||
"inLibrary": "In Library",
|
||||
"downloaded": "Downloaded",
|
||||
"downloadedTooltip": "Previously downloaded, but it is not currently in your library.",
|
||||
"alreadyInLibrary": "Already in Library",
|
||||
"autoOrganizedPath": "[Auto-organized by path template]",
|
||||
"errors": {
|
||||
@@ -981,6 +1049,14 @@
|
||||
"save": "Update Base Model",
|
||||
"cancel": "Cancel"
|
||||
},
|
||||
"bulkDownloadMissingLoras": {
|
||||
"title": "Download Missing LoRAs",
|
||||
"message": "Found {uniqueCount} unique missing LoRAs (from {totalCount} total across selected recipes).",
|
||||
"previewTitle": "LoRAs to download:",
|
||||
"moreItems": "...and {count} more",
|
||||
"note": "Files will be downloaded using default path templates. This may take a while depending on the number of LoRAs.",
|
||||
"downloadButton": "Download {count} LoRA(s)"
|
||||
},
|
||||
"exampleAccess": {
|
||||
"title": "Local Example Images",
|
||||
"message": "No local example images found for this model. View options:",
|
||||
@@ -1032,7 +1108,9 @@
|
||||
"viewOnCivitai": "View on Civitai",
|
||||
"viewOnCivitaiText": "View on Civitai",
|
||||
"viewCreatorProfile": "View Creator Profile",
|
||||
"openFileLocation": "Open File Location"
|
||||
"openFileLocation": "Open File Location",
|
||||
"sendToWorkflow": "Send to ComfyUI",
|
||||
"sendToWorkflowText": "Send to ComfyUI"
|
||||
},
|
||||
"openFileLocation": {
|
||||
"success": "File location opened successfully",
|
||||
@@ -1040,6 +1118,9 @@
|
||||
"copied": "Path copied to clipboard: {{path}}",
|
||||
"clipboardFallback": "Path: {{path}}"
|
||||
},
|
||||
"sendToWorkflow": {
|
||||
"noFilePath": "Unable to send to ComfyUI: No file path available"
|
||||
},
|
||||
"metadata": {
|
||||
"version": "Version",
|
||||
"fileName": "File Name",
|
||||
@@ -1147,17 +1228,30 @@
|
||||
"days": "in {count}d"
|
||||
},
|
||||
"badges": {
|
||||
"current": "Current Version",
|
||||
"current": "Opened Version",
|
||||
"currentTooltip": "This is the version you opened this modal from",
|
||||
"inLibrary": "In Library",
|
||||
"inLibraryTooltip": "This version exists in your local library",
|
||||
"downloaded": "Downloaded",
|
||||
"downloadedTooltip": "This version was downloaded before, but is not currently in your library",
|
||||
"newer": "Newer Version",
|
||||
"newerTooltip": "This version is newer than your latest local version",
|
||||
"earlyAccess": "Early Access",
|
||||
"ignored": "Ignored"
|
||||
"earlyAccessTooltip": "This version currently requires Civitai early access",
|
||||
"ignored": "Ignored",
|
||||
"ignoredTooltip": "Update notifications are disabled for this version"
|
||||
},
|
||||
"actions": {
|
||||
"download": "Download",
|
||||
"downloadTooltip": "Download this version",
|
||||
"downloadEarlyAccessTooltip": "Download this early access version from Civitai",
|
||||
"delete": "Delete",
|
||||
"deleteTooltip": "Delete this local version",
|
||||
"ignore": "Ignore",
|
||||
"unignore": "Unignore",
|
||||
"ignoreTooltip": "Ignore update notifications for this version",
|
||||
"unignoreTooltip": "Resume update notifications for this version",
|
||||
"viewVersionOnCivitai": "View version on Civitai",
|
||||
"earlyAccessTooltip": "Requires early access purchase",
|
||||
"resumeModelUpdates": "Resume updates for this model",
|
||||
"ignoreModelUpdates": "Ignore updates for this model",
|
||||
@@ -1297,7 +1391,9 @@
|
||||
"recipeReplaced": "Recipe replaced in workflow",
|
||||
"recipeFailedToSend": "Failed to send recipe to workflow",
|
||||
"noMatchingNodes": "No compatible nodes available in the current workflow",
|
||||
"noTargetNodeSelected": "No target node selected"
|
||||
"noTargetNodeSelected": "No target node selected",
|
||||
"modelUpdated": "Model updated in workflow",
|
||||
"modelFailed": "Failed to update model node"
|
||||
},
|
||||
"nodeSelector": {
|
||||
"recipe": "Recipe",
|
||||
@@ -1448,6 +1544,7 @@
|
||||
"pleaseSelectVersion": "Please select a version",
|
||||
"versionExists": "This version already exists in your library",
|
||||
"downloadCompleted": "Download completed successfully",
|
||||
"downloadSkippedByBaseModel": "Skipped download because base model {baseModel} is excluded",
|
||||
"autoOrganizeSuccess": "Auto-organize completed successfully for {count} {type}",
|
||||
"autoOrganizePartialSuccess": "Auto-organize completed with {success} moved, {failures} failed out of {total} models",
|
||||
"autoOrganizeFailed": "Auto-organize failed: {error}",
|
||||
@@ -1467,7 +1564,11 @@
|
||||
"nameUpdated": "Recipe name updated successfully",
|
||||
"tagsUpdated": "Recipe tags updated successfully",
|
||||
"sourceUrlUpdated": "Source URL updated successfully",
|
||||
"promptUpdated": "Prompt updated successfully",
|
||||
"negativePromptUpdated": "Negative prompt updated successfully",
|
||||
"promptEditorHint": "Press Enter to save, Shift+Enter for new line",
|
||||
"noRecipeId": "No recipe ID available",
|
||||
"sendToWorkflowFailed": "Failed to send recipe to workflow: {message}",
|
||||
"copyFailed": "Error copying recipe syntax: {message}",
|
||||
"noMissingLoras": "No missing LoRAs to download",
|
||||
"missingLorasInfoFailed": "Failed to get information for missing LoRAs",
|
||||
@@ -1495,6 +1596,7 @@
|
||||
"processingError": "Processing error: {message}",
|
||||
"folderBrowserError": "Error loading folder browser: {message}",
|
||||
"recipeSaveFailed": "Failed to save recipe: {error}",
|
||||
"recipeSaved": "Recipe saved successfully",
|
||||
"importFailed": "Import failed: {message}",
|
||||
"folderTreeFailed": "Failed to load folder tree",
|
||||
"folderTreeError": "Error loading folder tree",
|
||||
@@ -1504,7 +1606,10 @@
|
||||
"batchImportNoUrls": "Please enter at least one URL or file path",
|
||||
"batchImportNoDirectory": "Please enter a directory path",
|
||||
"batchImportBrowseFailed": "Failed to browse directory: {message}",
|
||||
"batchImportDirectorySelected": "Directory selected: {path}"
|
||||
"batchImportDirectorySelected": "Directory selected: {path}",
|
||||
"noRecipesSelected": "No recipes selected",
|
||||
"noMissingLorasInSelection": "No missing LoRAs found in selected recipes",
|
||||
"noLoraRootConfigured": "No LoRA root directory configured. Please set a default LoRA root in settings."
|
||||
},
|
||||
"models": {
|
||||
"noModelsSelected": "No models selected",
|
||||
@@ -1571,6 +1676,8 @@
|
||||
"mappingSaveFailed": "Failed to save base model mappings: {message}",
|
||||
"downloadTemplatesUpdated": "Download path templates updated",
|
||||
"downloadTemplatesFailed": "Failed to save download path templates: {message}",
|
||||
"recipesPathUpdated": "Recipes storage path updated",
|
||||
"recipesPathSaveFailed": "Failed to update recipes storage path: {message}",
|
||||
"settingsUpdated": "Settings updated: {setting}",
|
||||
"compactModeToggled": "Compact Mode {state}",
|
||||
"settingSaveFailed": "Failed to save setting: {message}",
|
||||
@@ -1714,6 +1821,35 @@
|
||||
"moveFailed": "Failed to move item: {message}"
|
||||
}
|
||||
},
|
||||
"doctor": {
|
||||
"kicker": "System diagnostics",
|
||||
"title": "Doctor",
|
||||
"buttonTitle": "Run diagnostics and common fixes",
|
||||
"loading": "Checking environment...",
|
||||
"footer": "Export a diagnostics bundle if the issue still persists after repair.",
|
||||
"summary": {
|
||||
"idle": "Run a health check for settings, cache integrity, and UI consistency.",
|
||||
"ok": "No active issues were found in the current environment.",
|
||||
"warning": "{count} issue(s) were found. Most can be fixed directly from this panel.",
|
||||
"error": "{count} issue(s) need attention before the app is fully healthy."
|
||||
},
|
||||
"status": {
|
||||
"ok": "Healthy",
|
||||
"warning": "Needs Attention",
|
||||
"error": "Action Required"
|
||||
},
|
||||
"actions": {
|
||||
"runAgain": "Run Again",
|
||||
"exportBundle": "Export Bundle"
|
||||
},
|
||||
"toast": {
|
||||
"loadFailed": "Failed to load diagnostics: {message}",
|
||||
"repairSuccess": "Cache rebuild completed.",
|
||||
"repairFailed": "Cache rebuild failed: {message}",
|
||||
"exportSuccess": "Diagnostics bundle exported.",
|
||||
"exportFailed": "Failed to export diagnostics bundle: {message}"
|
||||
}
|
||||
},
|
||||
"banners": {
|
||||
"versionMismatch": {
|
||||
"title": "Application Update Detected",
|
||||
280
locales/es.json
@@ -263,7 +263,9 @@
|
||||
"videoSettings": "Configuración de video",
|
||||
"layoutSettings": "Configuración de diseño",
|
||||
"misc": "Varios",
|
||||
"backup": "Copias de seguridad",
|
||||
"folderSettings": "Raíces predeterminadas",
|
||||
"recipeSettings": "Recetas",
|
||||
"extraFolderPaths": "Rutas de carpetas adicionales",
|
||||
"downloadPathTemplates": "Plantillas de rutas de descarga",
|
||||
"priorityTags": "Etiquetas prioritarias",
|
||||
@@ -291,7 +293,15 @@
|
||||
"blurNsfwContent": "Difuminar contenido NSFW",
|
||||
"blurNsfwContentHelp": "Difuminar imágenes de vista previa de contenido para adultos (NSFW)",
|
||||
"showOnlySfw": "Mostrar solo resultados SFW",
|
||||
"showOnlySfwHelp": "Filtrar todo el contenido NSFW al navegar y buscar"
|
||||
"showOnlySfwHelp": "Filtrar todo el contenido NSFW al navegar y buscar",
|
||||
"matureBlurThreshold": "Umbral de difuminado para contenido adulto",
|
||||
"matureBlurThresholdHelp": "Establecer a partir de qué nivel de clasificación comienza el filtrado por difuminado cuando el difuminado NSFW está habilitado.",
|
||||
"matureBlurThresholdOptions": {
|
||||
"pg13": "PG13 y superior",
|
||||
"r": "R y superior (predeterminado)",
|
||||
"x": "X y superior",
|
||||
"xxx": "Solo XXX"
|
||||
}
|
||||
},
|
||||
"videoSettings": {
|
||||
"autoplayOnHover": "Reproducir videos automáticamente al pasar el ratón",
|
||||
@@ -315,6 +325,54 @@
|
||||
"saveFailed": "No se pudieron guardar las rutas a omitir: {message}"
|
||||
}
|
||||
},
|
||||
"backup": {
|
||||
"autoEnabled": "Copias de seguridad automáticas",
|
||||
"autoEnabledHelp": "Crea una instantánea local una vez al día y conserva las más recientes según la política de retención.",
|
||||
"retention": "Cantidad de retención",
|
||||
"retentionHelp": "Cuántas instantáneas automáticas conservar antes de eliminar las antiguas.",
|
||||
"management": "Gestión de copias",
|
||||
"managementHelp": "Exporta tu estado de usuario actual o restáuralo desde un archivo de copia de seguridad.",
|
||||
"scopeHelp": "Incluye tu configuración, el historial de descargas y el estado de actualización de los modelos. No incluye los archivos de modelo ni las cachés que se pueden regenerar.",
|
||||
"locationSummary": "Ubicación actual de la copia",
|
||||
"openFolderButton": "Abrir carpeta de copias",
|
||||
"openFolderSuccess": "Carpeta de copias abierta",
|
||||
"openFolderFailed": "No se pudo abrir la carpeta de copias",
|
||||
"locationCopied": "Ruta de la copia copiada al portapapeles: {{path}}",
|
||||
"locationClipboardFallback": "Ruta de la copia: {{path}}",
|
||||
"exportButton": "Exportar copia",
|
||||
"exportSuccess": "Copia exportada correctamente.",
|
||||
"exportFailed": "No se pudo exportar la copia: {message}",
|
||||
"importButton": "Importar copia",
|
||||
"importConfirm": "¿Importar esta copia y sobrescribir el estado local del usuario?",
|
||||
"importSuccess": "Copia importada correctamente.",
|
||||
"importFailed": "No se pudo importar la copia: {message}",
|
||||
"latestSnapshot": "Última instantánea",
|
||||
"latestAutoSnapshot": "Última instantánea automática",
|
||||
"snapshotCount": "Instantáneas guardadas",
|
||||
"noneAvailable": "Aún no hay instantáneas"
|
||||
},
|
||||
"downloadSkipBaseModels": {
|
||||
"label": "Omitir descargas para modelos base",
|
||||
"help": "Se aplica a todos los flujos de descarga. Aquí solo se pueden seleccionar modelos base compatibles.",
|
||||
"searchPlaceholder": "Filtrar modelos base...",
|
||||
"empty": "Ningún modelo base coincide con la búsqueda actual.",
|
||||
"summary": {
|
||||
"none": "Ninguno seleccionado",
|
||||
"count": "{count} seleccionados"
|
||||
},
|
||||
"actions": {
|
||||
"edit": "Editar",
|
||||
"collapse": "Contraer",
|
||||
"clear": "Limpiar"
|
||||
},
|
||||
"validation": {
|
||||
"saveFailed": "No se pudieron guardar los modelos base excluidos: {message}"
|
||||
}
|
||||
},
|
||||
"skipPreviouslyDownloadedModelVersions": {
|
||||
"label": "Omitir versiones de modelos previamente descargadas",
|
||||
"help": "Cuando está habilitado, LoRA Manager omitirá la descarga de una versión de modelo si el servicio de historial de descargas registra esa versión exacta como ya descargada. Aplica a todos los flujos de descarga."
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "Densidad de visualización",
|
||||
"displayDensityOptions": {
|
||||
@@ -363,12 +421,16 @@
|
||||
"defaultUnetRootHelp": "Establecer el directorio raíz predeterminado de Diffusion Model (UNET) para descargas, importaciones y movimientos",
|
||||
"defaultEmbeddingRoot": "Raíz de embedding",
|
||||
"defaultEmbeddingRootHelp": "Establecer el directorio raíz predeterminado de embedding para descargas, importaciones y movimientos",
|
||||
"recipesPath": "Ruta de almacenamiento de recetas",
|
||||
"recipesPathHelp": "Directorio personalizado opcional para las recetas guardadas. Déjalo vacío para usar la carpeta recipes del primer directorio raíz de LoRA.",
|
||||
"recipesPathPlaceholder": "/path/to/recipes",
|
||||
"recipesPathMigrating": "Migrando el almacenamiento de recetas...",
|
||||
"noDefault": "Sin predeterminado"
|
||||
},
|
||||
"extraFolderPaths": {
|
||||
"title": "Rutas de carpetas adicionales",
|
||||
"help": "Agregue carpetas de modelos adicionales fuera de las rutas estándar de ComfyUI. Estas rutas se almacenan por separado y se escanean junto con las carpetas predeterminadas.",
|
||||
"description": "Configure carpetas adicionales para escanear modelos. Estas rutas son específicas de LoRA Manager y se fusionarán con las rutas predeterminadas de ComfyUI.",
|
||||
"description": "Rutas raíz de modelos adicionales exclusivas para LoRA Manager. Cargue modelos desde ubicaciones fuera de las carpetas estándar de ComfyUI, ideal para bibliotecas grandes que de otro modo ralentizarían ComfyUI.",
|
||||
"restartRequired": "Requires restart to take effect",
|
||||
"modelTypes": {
|
||||
"lora": "Rutas de LoRA",
|
||||
"checkpoint": "Rutas de Checkpoint",
|
||||
@@ -376,7 +438,7 @@
|
||||
"embedding": "Rutas de Embedding"
|
||||
},
|
||||
"pathPlaceholder": "/ruta/a/modelos/extra",
|
||||
"saveSuccess": "Rutas de carpetas adicionales actualizadas.",
|
||||
"saveSuccess": "Rutas de carpetas adicionales actualizadas. Se requiere reinicio para aplicar los cambios.",
|
||||
"saveError": "Error al actualizar las rutas de carpetas adicionales: {message}",
|
||||
"validation": {
|
||||
"duplicatePath": "Esta ruta ya está configurada"
|
||||
@@ -575,6 +637,7 @@
|
||||
"skipMetadataRefresh": "Omitir actualización de metadatos para seleccionados",
|
||||
"resumeMetadataRefresh": "Reanudar actualización de metadatos para seleccionados",
|
||||
"deleteAll": "Eliminar todos los modelos",
|
||||
"downloadMissingLoras": "Descargar LoRAs faltantes",
|
||||
"clear": "Limpiar selección",
|
||||
"skipMetadataRefreshCount": "Omitir({count} modelos)",
|
||||
"resumeMetadataRefreshCount": "Reanudar({count} modelos)",
|
||||
@@ -645,6 +708,8 @@
|
||||
"root": "Raíz",
|
||||
"browseFolders": "Explorar carpetas:",
|
||||
"downloadAndSaveRecipe": "Descargar y guardar receta",
|
||||
"importRecipeOnly": "Importar solo la receta",
|
||||
"importAndDownload": "Importar y descargar",
|
||||
"downloadMissingLoras": "Descargar LoRAs faltantes",
|
||||
"saveRecipe": "Guardar receta",
|
||||
"loraCountInfo": "({existing}/{total} en la biblioteca)",
|
||||
@@ -732,61 +797,61 @@
|
||||
}
|
||||
},
|
||||
"batchImport": {
|
||||
"title": "[TODO: Translate] Batch Import Recipes",
|
||||
"action": "[TODO: Translate] Batch Import",
|
||||
"urlList": "[TODO: Translate] URL List",
|
||||
"directory": "[TODO: Translate] Directory",
|
||||
"urlDescription": "[TODO: Translate] Enter image URLs or local file paths (one per line). Each will be imported as a recipe.",
|
||||
"directoryDescription": "[TODO: Translate] Enter a directory path to import all images from that folder.",
|
||||
"urlsLabel": "[TODO: Translate] Image URLs or Local Paths",
|
||||
"urlsPlaceholder": "[TODO: Translate] https://civitai.com/images/...\nhttps://civitai.com/images/...\nC:/path/to/image.png\n...",
|
||||
"urlsHint": "[TODO: Translate] Enter one URL or path per line",
|
||||
"directoryPath": "[TODO: Translate] Directory Path",
|
||||
"directoryPlaceholder": "[TODO: Translate] /path/to/images/folder",
|
||||
"browse": "[TODO: Translate] Browse",
|
||||
"recursive": "[TODO: Translate] Include subdirectories",
|
||||
"tagsOptional": "[TODO: Translate] Tags (optional, applied to all recipes)",
|
||||
"tagsPlaceholder": "[TODO: Translate] Enter tags separated by commas",
|
||||
"tagsHint": "[TODO: Translate] Tags will be added to all imported recipes",
|
||||
"skipNoMetadata": "[TODO: Translate] Skip images without metadata",
|
||||
"skipNoMetadataHelp": "[TODO: Translate] Images without LoRA metadata will be skipped automatically.",
|
||||
"start": "[TODO: Translate] Start Import",
|
||||
"startImport": "[TODO: Translate] Start Import",
|
||||
"importing": "[TODO: Translate] Importing...",
|
||||
"progress": "[TODO: Translate] Progress",
|
||||
"total": "[TODO: Translate] Total",
|
||||
"success": "[TODO: Translate] Success",
|
||||
"failed": "[TODO: Translate] Failed",
|
||||
"skipped": "[TODO: Translate] Skipped",
|
||||
"current": "[TODO: Translate] Current",
|
||||
"currentItem": "[TODO: Translate] Current",
|
||||
"preparing": "[TODO: Translate] Preparing...",
|
||||
"cancel": "[TODO: Translate] Cancel",
|
||||
"cancelImport": "[TODO: Translate] Cancel",
|
||||
"cancelled": "[TODO: Translate] Import cancelled",
|
||||
"completed": "[TODO: Translate] Import completed",
|
||||
"completedWithErrors": "[TODO: Translate] Completed with errors",
|
||||
"completedSuccess": "[TODO: Translate] Successfully imported {count} recipe(s)",
|
||||
"successCount": "[TODO: Translate] Successful",
|
||||
"failedCount": "[TODO: Translate] Failed",
|
||||
"skippedCount": "[TODO: Translate] Skipped",
|
||||
"totalProcessed": "[TODO: Translate] Total processed",
|
||||
"viewDetails": "[TODO: Translate] View Details",
|
||||
"newImport": "[TODO: Translate] New Import",
|
||||
"manualPathEntry": "[TODO: Translate] Please enter the directory path manually. File browser is not available in this browser.",
|
||||
"batchImportDirectorySelected": "[TODO: Translate] Directory selected: {name}. You may need to enter the full path manually.",
|
||||
"batchImportManualEntryRequired": "[TODO: Translate] File browser not available. Please enter the directory path manually.",
|
||||
"backToParent": "[TODO: Translate] Back to parent directory",
|
||||
"folders": "[TODO: Translate] Folders",
|
||||
"folderCount": "[TODO: Translate] {count} folders",
|
||||
"imageFiles": "[TODO: Translate] Image Files",
|
||||
"images": "[TODO: Translate] images",
|
||||
"imageCount": "[TODO: Translate] {count} images",
|
||||
"selectFolder": "[TODO: Translate] Select This Folder",
|
||||
"title": "Batch Import Recipes",
|
||||
"action": "Batch Import",
|
||||
"urlList": "URL List",
|
||||
"directory": "Directory",
|
||||
"urlDescription": "Enter image URLs or local file paths (one per line). Each will be imported as a recipe.",
|
||||
"directoryDescription": "Enter a directory path to import all images from that folder.",
|
||||
"urlsLabel": "Image URLs or Local Paths",
|
||||
"urlsPlaceholder": "https://civitai.com/images/...\nhttps://civitai.com/images/...\nC:/path/to/image.png\n...",
|
||||
"urlsHint": "Enter one URL or path per line",
|
||||
"directoryPath": "Directory Path",
|
||||
"directoryPlaceholder": "/path/to/images/folder",
|
||||
"browse": "Browse",
|
||||
"recursive": "Include subdirectories",
|
||||
"tagsOptional": "Tags (optional, applied to all recipes)",
|
||||
"tagsPlaceholder": "Enter tags separated by commas",
|
||||
"tagsHint": "Tags will be added to all imported recipes",
|
||||
"skipNoMetadata": "Skip images without metadata",
|
||||
"skipNoMetadataHelp": "Images without LoRA metadata will be skipped automatically.",
|
||||
"start": "Start Import",
|
||||
"startImport": "Start Import",
|
||||
"importing": "Importing...",
|
||||
"progress": "Progress",
|
||||
"total": "Total",
|
||||
"success": "Success",
|
||||
"failed": "Failed",
|
||||
"skipped": "Skipped",
|
||||
"current": "Current",
|
||||
"currentItem": "Current",
|
||||
"preparing": "Preparing...",
|
||||
"cancel": "Cancel",
|
||||
"cancelImport": "Cancel",
|
||||
"cancelled": "Import cancelled",
|
||||
"completed": "Import completed",
|
||||
"completedWithErrors": "Completed with errors",
|
||||
"completedSuccess": "Successfully imported {count} recipe(s)",
|
||||
"successCount": "Successful",
|
||||
"failedCount": "Failed",
|
||||
"skippedCount": "Skipped",
|
||||
"totalProcessed": "Total processed",
|
||||
"viewDetails": "View Details",
|
||||
"newImport": "New Import",
|
||||
"manualPathEntry": "Please enter the directory path manually. File browser is not available in this browser.",
|
||||
"batchImportDirectorySelected": "Directory selected: {path}",
|
||||
"batchImportManualEntryRequired": "File browser not available. Please enter the directory path manually.",
|
||||
"backToParent": "Back to parent directory",
|
||||
"folders": "Folders",
|
||||
"folderCount": "{count} folders",
|
||||
"imageFiles": "Image Files",
|
||||
"images": "images",
|
||||
"imageCount": "{count} images",
|
||||
"selectFolder": "Select This Folder",
|
||||
"errors": {
|
||||
"enterUrls": "[TODO: Translate] Please enter at least one URL or path",
|
||||
"enterDirectory": "[TODO: Translate] Please enter a directory path",
|
||||
"startFailed": "[TODO: Translate] Failed to start import: {message}"
|
||||
"enterUrls": "Please enter at least one URL or path",
|
||||
"enterDirectory": "Please enter a directory path",
|
||||
"startFailed": "Failed to start import: {message}"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -797,7 +862,8 @@
|
||||
"diffusion_model": "Diffusion Model"
|
||||
},
|
||||
"contextMenu": {
|
||||
"moveToOtherTypeFolder": "Mover a la carpeta {otherType}"
|
||||
"moveToOtherTypeFolder": "Mover a la carpeta {otherType}",
|
||||
"sendToWorkflow": "Enviar al flujo de trabajo"
|
||||
}
|
||||
},
|
||||
"embeddings": {
|
||||
@@ -810,8 +876,8 @@
|
||||
"unpinSidebar": "Desfijar barra lateral",
|
||||
"switchToListView": "Cambiar a vista de lista",
|
||||
"switchToTreeView": "Cambiar a vista de árbol",
|
||||
"recursiveOn": "Buscar en subcarpetas",
|
||||
"recursiveOff": "Buscar solo en la carpeta actual",
|
||||
"recursiveOn": "Incluir subcarpetas",
|
||||
"recursiveOff": "Solo carpeta actual",
|
||||
"recursiveUnavailable": "La búsqueda recursiva solo está disponible en la vista en árbol",
|
||||
"collapseAllDisabled": "No disponible en vista de lista",
|
||||
"dragDrop": {
|
||||
@@ -891,6 +957,8 @@
|
||||
"earlyAccess": "Acceso temprano",
|
||||
"earlyAccessTooltip": "Acceso temprano requerido",
|
||||
"inLibrary": "En la biblioteca",
|
||||
"downloaded": "Descargado",
|
||||
"downloadedTooltip": "Descargado anteriormente, pero actualmente no está en tu biblioteca.",
|
||||
"alreadyInLibrary": "Ya en la biblioteca",
|
||||
"autoOrganizedPath": "[Auto-organizado por plantilla de ruta]",
|
||||
"errors": {
|
||||
@@ -981,6 +1049,14 @@
|
||||
"save": "Actualizar modelo base",
|
||||
"cancel": "Cancelar"
|
||||
},
|
||||
"bulkDownloadMissingLoras": {
|
||||
"title": "Descargar LoRAs faltantes",
|
||||
"message": "Se encontraron {uniqueCount} LoRAs faltantes únicos (de {totalCount} en total entre las recetas seleccionadas).",
|
||||
"previewTitle": "LoRAs para descargar:",
|
||||
"moreItems": "...y {count} más",
|
||||
"note": "Los archivos se descargarán usando las plantillas de ruta predeterminadas. Esto puede tomar un tiempo dependiendo del número de LoRAs.",
|
||||
"downloadButton": "Descargar {count} LoRA(s)"
|
||||
},
|
||||
"exampleAccess": {
|
||||
"title": "Imágenes de ejemplo locales",
|
||||
"message": "No se encontraron imágenes de ejemplo locales para este modelo. Opciones de visualización:",
|
||||
@@ -1032,7 +1108,9 @@
|
||||
"viewOnCivitai": "Ver en Civitai",
|
||||
"viewOnCivitaiText": "Ver en Civitai",
|
||||
"viewCreatorProfile": "Ver perfil del creador",
|
||||
"openFileLocation": "Abrir ubicación del archivo"
|
||||
"openFileLocation": "Abrir ubicación del archivo",
|
||||
"sendToWorkflow": "Enviar a ComfyUI",
|
||||
"sendToWorkflowText": "Enviar a ComfyUI"
|
||||
},
|
||||
"openFileLocation": {
|
||||
"success": "Ubicación del archivo abierta exitosamente",
|
||||
@@ -1040,6 +1118,9 @@
|
||||
"copied": "Ruta copiada al portapapeles: {{path}}",
|
||||
"clipboardFallback": "Ruta: {{path}}"
|
||||
},
|
||||
"sendToWorkflow": {
|
||||
"noFilePath": "No se puede enviar a ComfyUI: no hay ruta de archivo disponible"
|
||||
},
|
||||
"metadata": {
|
||||
"version": "Versión",
|
||||
"fileName": "Nombre de archivo",
|
||||
@@ -1147,17 +1228,30 @@
|
||||
"days": "en {count}d"
|
||||
},
|
||||
"badges": {
|
||||
"current": "Versión actual",
|
||||
"current": "Versión abierta",
|
||||
"currentTooltip": "Es la versión con la que abriste este modal",
|
||||
"inLibrary": "En la biblioteca",
|
||||
"inLibraryTooltip": "Esta versión existe en tu biblioteca local",
|
||||
"downloaded": "Descargado",
|
||||
"downloadedTooltip": "Esta versión se descargó antes, pero ahora no está en tu biblioteca",
|
||||
"newer": "Versión más reciente",
|
||||
"newerTooltip": "Esta versión es más reciente que tu última versión local",
|
||||
"earlyAccess": "Acceso temprano",
|
||||
"ignored": "Ignorada"
|
||||
"earlyAccessTooltip": "Esta versión requiere actualmente acceso temprano de Civitai",
|
||||
"ignored": "Ignorada",
|
||||
"ignoredTooltip": "Las notificaciones de actualización están desactivadas para esta versión"
|
||||
},
|
||||
"actions": {
|
||||
"download": "Descargar",
|
||||
"downloadTooltip": "Descargar esta versión",
|
||||
"downloadEarlyAccessTooltip": "Descargar esta versión de acceso temprano desde Civitai",
|
||||
"delete": "Eliminar",
|
||||
"deleteTooltip": "Eliminar esta versión local",
|
||||
"ignore": "Ignorar",
|
||||
"unignore": "Dejar de ignorar",
|
||||
"ignoreTooltip": "Ignorar las notificaciones de actualización de esta versión",
|
||||
"unignoreTooltip": "Reanudar las notificaciones de actualización de esta versión",
|
||||
"viewVersionOnCivitai": "Ver versión en Civitai",
|
||||
"earlyAccessTooltip": "Requiere compra de acceso temprano",
|
||||
"resumeModelUpdates": "Reanudar actualizaciones para este modelo",
|
||||
"ignoreModelUpdates": "Ignorar actualizaciones para este modelo",
|
||||
@@ -1297,7 +1391,9 @@
|
||||
"recipeReplaced": "Receta reemplazada en el flujo de trabajo",
|
||||
"recipeFailedToSend": "Error al enviar receta al flujo de trabajo",
|
||||
"noMatchingNodes": "No hay nodos compatibles disponibles en el flujo de trabajo actual",
|
||||
"noTargetNodeSelected": "No se ha seleccionado ningún nodo de destino"
|
||||
"noTargetNodeSelected": "No se ha seleccionado ningún nodo de destino",
|
||||
"modelUpdated": "Modelo actualizado en el flujo de trabajo",
|
||||
"modelFailed": "Error al actualizar nodo de modelo"
|
||||
},
|
||||
"nodeSelector": {
|
||||
"recipe": "Receta",
|
||||
@@ -1448,6 +1544,7 @@
|
||||
"pleaseSelectVersion": "Por favor selecciona una versión",
|
||||
"versionExists": "Esta versión ya existe en tu biblioteca",
|
||||
"downloadCompleted": "Descarga completada exitosamente",
|
||||
"downloadSkippedByBaseModel": "Descarga omitida porque el modelo base {baseModel} está excluido",
|
||||
"autoOrganizeSuccess": "Auto-organización completada exitosamente para {count} {type}",
|
||||
"autoOrganizePartialSuccess": "Auto-organización completada con {success} movidos, {failures} fallidos de un total de {total} modelos",
|
||||
"autoOrganizeFailed": "Auto-organización fallida: {error}",
|
||||
@@ -1467,7 +1564,11 @@
|
||||
"nameUpdated": "Nombre de receta actualizado exitosamente",
|
||||
"tagsUpdated": "Etiquetas de receta actualizadas exitosamente",
|
||||
"sourceUrlUpdated": "URL de origen actualizada exitosamente",
|
||||
"promptUpdated": "Prompt actualizado exitosamente",
|
||||
"negativePromptUpdated": "Prompt negativo actualizado exitosamente",
|
||||
"promptEditorHint": "Presiona Enter para guardar, Shift+Enter para nueva línea",
|
||||
"noRecipeId": "No hay ID de receta disponible",
|
||||
"sendToWorkflowFailed": "Error al enviar la receta al flujo de trabajo: {message}",
|
||||
"copyFailed": "Error copiando sintaxis de receta: {message}",
|
||||
"noMissingLoras": "No hay LoRAs faltantes para descargar",
|
||||
"missingLorasInfoFailed": "Error al obtener información de LoRAs faltantes",
|
||||
@@ -1495,16 +1596,20 @@
|
||||
"processingError": "Error de procesamiento: {message}",
|
||||
"folderBrowserError": "Error cargando explorador de carpetas: {message}",
|
||||
"recipeSaveFailed": "Error al guardar receta: {error}",
|
||||
"recipeSaved": "Recipe saved successfully",
|
||||
"importFailed": "Importación falló: {message}",
|
||||
"folderTreeFailed": "Error al cargar árbol de carpetas",
|
||||
"folderTreeError": "Error cargando árbol de carpetas",
|
||||
"batchImportFailed": "[TODO: Translate] Failed to start batch import: {message}",
|
||||
"batchImportCancelling": "[TODO: Translate] Cancelling batch import...",
|
||||
"batchImportCancelFailed": "[TODO: Translate] Failed to cancel batch import: {message}",
|
||||
"batchImportNoUrls": "[TODO: Translate] Please enter at least one URL or file path",
|
||||
"batchImportNoDirectory": "[TODO: Translate] Please enter a directory path",
|
||||
"batchImportBrowseFailed": "[TODO: Translate] Failed to browse directory: {message}",
|
||||
"batchImportDirectorySelected": "[TODO: Translate] Directory selected: {path}"
|
||||
"batchImportFailed": "Failed to start batch import: {message}",
|
||||
"batchImportCancelling": "Cancelling batch import...",
|
||||
"batchImportCancelFailed": "Failed to cancel batch import: {message}",
|
||||
"batchImportNoUrls": "Please enter at least one URL or file path",
|
||||
"batchImportNoDirectory": "Please enter a directory path",
|
||||
"batchImportBrowseFailed": "Failed to browse directory: {message}",
|
||||
"batchImportDirectorySelected": "Directory selected: {path}",
|
||||
"noRecipesSelected": "No se han seleccionado recetas",
|
||||
"noMissingLorasInSelection": "No se encontraron LoRAs faltantes en las recetas seleccionadas",
|
||||
"noLoraRootConfigured": "No se ha configurado el directorio raíz de LoRA. Por favor, establezca un directorio raíz de LoRA predeterminado en la configuración."
|
||||
},
|
||||
"models": {
|
||||
"noModelsSelected": "No hay modelos seleccionados",
|
||||
@@ -1571,6 +1676,8 @@
|
||||
"mappingSaveFailed": "Error al guardar mapeos de modelo base: {message}",
|
||||
"downloadTemplatesUpdated": "Plantillas de rutas de descarga actualizadas",
|
||||
"downloadTemplatesFailed": "Error al guardar plantillas de rutas de descarga: {message}",
|
||||
"recipesPathUpdated": "Ruta de almacenamiento de recetas actualizada",
|
||||
"recipesPathSaveFailed": "Error al actualizar la ruta de almacenamiento de recetas: {message}",
|
||||
"settingsUpdated": "Configuración actualizada: {setting}",
|
||||
"compactModeToggled": "Modo compacto {state}",
|
||||
"settingSaveFailed": "Error al guardar configuración: {message}",
|
||||
@@ -1714,6 +1821,35 @@
|
||||
"moveFailed": "Failed to move item: {message}"
|
||||
}
|
||||
},
|
||||
"doctor": {
|
||||
"kicker": "Diagnósticos del sistema",
|
||||
"title": "Doctor",
|
||||
"buttonTitle": "Ejecutar diagnósticos y correcciones comunes",
|
||||
"loading": "Comprobando el entorno...",
|
||||
"footer": "Exporta un paquete de diagnóstico si el problema persiste después de la reparación.",
|
||||
"summary": {
|
||||
"idle": "Ejecuta una comprobación del estado de la configuración, la integridad de la caché y la coherencia de la interfaz.",
|
||||
"ok": "No se encontraron problemas activos en el entorno actual.",
|
||||
"warning": "Se encontraron {count} problema(s). La mayoría se puede solucionar directamente desde este panel.",
|
||||
"error": "Se encontraron {count} problema(s). Deben atenderse antes de que la aplicación esté completamente saludable."
|
||||
},
|
||||
"status": {
|
||||
"ok": "Saludable",
|
||||
"warning": "Requiere atención",
|
||||
"error": "Se requiere acción"
|
||||
},
|
||||
"actions": {
|
||||
"runAgain": "Ejecutar de nuevo",
|
||||
"exportBundle": "Exportar paquete"
|
||||
},
|
||||
"toast": {
|
||||
"loadFailed": "Error al cargar los diagnósticos: {message}",
|
||||
"repairSuccess": "Reconstrucción de caché completada.",
|
||||
"repairFailed": "Error al reconstruir la caché: {message}",
|
||||
"exportSuccess": "Paquete de diagnósticos exportado.",
|
||||
"exportFailed": "Error al exportar el paquete de diagnósticos: {message}"
|
||||
}
|
||||
},
|
||||
"banners": {
|
||||
"versionMismatch": {
|
||||
"title": "Actualización de la aplicación detectada",
|
||||
280
locales/fr.json
@@ -263,7 +263,9 @@
|
||||
"videoSettings": "Paramètres vidéo",
|
||||
"layoutSettings": "Paramètres d'affichage",
|
||||
"misc": "Divers",
|
||||
"backup": "Sauvegardes",
|
||||
"folderSettings": "Racines par défaut",
|
||||
"recipeSettings": "Recipes",
|
||||
"extraFolderPaths": "Chemins de dossiers supplémentaires",
|
||||
"downloadPathTemplates": "Modèles de chemin de téléchargement",
|
||||
"priorityTags": "Étiquettes prioritaires",
|
||||
@@ -291,7 +293,15 @@
|
||||
"blurNsfwContent": "Flouter le contenu NSFW",
|
||||
"blurNsfwContentHelp": "Flouter les images d'aperçu de contenu pour adultes (NSFW)",
|
||||
"showOnlySfw": "Afficher uniquement les résultats SFW",
|
||||
"showOnlySfwHelp": "Filtrer tout le contenu NSFW lors de la navigation et de la recherche"
|
||||
"showOnlySfwHelp": "Filtrer tout le contenu NSFW lors de la navigation et de la recherche",
|
||||
"matureBlurThreshold": "Seuil de floutage pour contenu adulte",
|
||||
"matureBlurThresholdHelp": "Définir à partir de quel niveau de classification le floutage s'applique lorsque le floutage NSFW est activé.",
|
||||
"matureBlurThresholdOptions": {
|
||||
"pg13": "PG13 et plus",
|
||||
"r": "R et plus (par défaut)",
|
||||
"x": "X et plus",
|
||||
"xxx": "XXX uniquement"
|
||||
}
|
||||
},
|
||||
"videoSettings": {
|
||||
"autoplayOnHover": "Lecture automatique vidéo au survol",
|
||||
@@ -315,6 +325,54 @@
|
||||
"saveFailed": "Impossible d'enregistrer les chemins à ignorer : {message}"
|
||||
}
|
||||
},
|
||||
"backup": {
|
||||
"autoEnabled": "Sauvegardes automatiques",
|
||||
"autoEnabledHelp": "Crée un instantané local une fois par jour et conserve les plus récents selon la politique de rétention.",
|
||||
"retention": "Nombre de rétention",
|
||||
"retentionHelp": "Combien d'instantanés automatiques conserver avant de supprimer les plus anciens.",
|
||||
"management": "Gestion des sauvegardes",
|
||||
"managementHelp": "Exporte l'état actuel de l'utilisateur ou restaure-le depuis une archive de sauvegarde.",
|
||||
"scopeHelp": "Inclut vos paramètres, l'historique des téléchargements et l'état des mises à jour des modèles. Les fichiers de modèle et les caches régénérables ne sont pas inclus.",
|
||||
"locationSummary": "Emplacement actuel des sauvegardes",
|
||||
"openFolderButton": "Ouvrir le dossier de sauvegarde",
|
||||
"openFolderSuccess": "Dossier de sauvegarde ouvert",
|
||||
"openFolderFailed": "Impossible d'ouvrir le dossier de sauvegarde",
|
||||
"locationCopied": "Chemin de sauvegarde copié dans le presse-papiers : {{path}}",
|
||||
"locationClipboardFallback": "Chemin de sauvegarde : {{path}}",
|
||||
"exportButton": "Exporter la sauvegarde",
|
||||
"exportSuccess": "Sauvegarde exportée avec succès.",
|
||||
"exportFailed": "Échec de l'export de la sauvegarde : {message}",
|
||||
"importButton": "Importer la sauvegarde",
|
||||
"importConfirm": "Importer cette sauvegarde et écraser l'état local de l'utilisateur ?",
|
||||
"importSuccess": "Sauvegarde importée avec succès.",
|
||||
"importFailed": "Échec de l'import de la sauvegarde : {message}",
|
||||
"latestSnapshot": "Dernier instantané",
|
||||
"latestAutoSnapshot": "Dernier instantané automatique",
|
||||
"snapshotCount": "Instantanés enregistrés",
|
||||
"noneAvailable": "Aucun instantané pour le moment"
|
||||
},
|
||||
"downloadSkipBaseModels": {
|
||||
"label": "Ignorer les téléchargements pour certains modèles de base",
|
||||
"help": "S’applique à tous les flux de téléchargement. Seuls les modèles de base pris en charge peuvent être sélectionnés ici.",
|
||||
"searchPlaceholder": "Filtrer les modèles de base...",
|
||||
"empty": "Aucun modèle de base ne correspond à la recherche actuelle.",
|
||||
"summary": {
|
||||
"none": "Aucune sélection",
|
||||
"count": "{count} sélectionnés"
|
||||
},
|
||||
"actions": {
|
||||
"edit": "Modifier",
|
||||
"collapse": "Réduire",
|
||||
"clear": "Effacer"
|
||||
},
|
||||
"validation": {
|
||||
"saveFailed": "Impossible d’enregistrer les modèles de base exclus : {message}"
|
||||
}
|
||||
},
|
||||
"skipPreviouslyDownloadedModelVersions": {
|
||||
"label": "Ignorer les versions de modèles précédemment téléchargées",
|
||||
"help": "Lorsque activé, LoRA Manager ignorera le téléchargement d'une version de modèle si le service d'historique des téléchargements enregistre cette version exacte comme déjà téléchargée. S'applique à tous les flux de téléchargement."
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "Densité d'affichage",
|
||||
"displayDensityOptions": {
|
||||
@@ -363,12 +421,16 @@
|
||||
"defaultUnetRootHelp": "Définir le répertoire racine Diffusion Model (UNET) par défaut pour les téléchargements, imports et déplacements",
|
||||
"defaultEmbeddingRoot": "Racine Embedding",
|
||||
"defaultEmbeddingRootHelp": "Définir le répertoire racine embedding par défaut pour les téléchargements, imports et déplacements",
|
||||
"recipesPath": "Recipes Storage Path",
|
||||
"recipesPathHelp": "Optional custom directory for stored recipes. Leave empty to use the first LoRA root's recipes folder.",
|
||||
"recipesPathPlaceholder": "/path/to/recipes",
|
||||
"recipesPathMigrating": "Migrating recipes storage...",
|
||||
"noDefault": "Aucun par défaut"
|
||||
},
|
||||
"extraFolderPaths": {
|
||||
"title": "Chemins de dossiers supplémentaires",
|
||||
"help": "Ajoutez des dossiers de modèles supplémentaires en dehors des chemins standard de ComfyUI. Ces chemins sont stockés séparément et analysés aux côtés des dossiers par défaut.",
|
||||
"description": "Configurez des dossiers supplémentaires pour l'analyse de modèles. Ces chemins sont spécifiques à LoRA Manager et seront fusionnés avec les chemins par défaut de ComfyUI.",
|
||||
"description": "Chemins racine de modèles supplémentaires exclusifs à LoRA Manager. Chargez des modèles depuis des emplacements en dehors des dossiers standard de ComfyUI, idéal pour les grandes bibliothèques qui ralentiraient autrement ComfyUI.",
|
||||
"restartRequired": "Requires restart to take effect",
|
||||
"modelTypes": {
|
||||
"lora": "Chemins LoRA",
|
||||
"checkpoint": "Chemins Checkpoint",
|
||||
@@ -376,7 +438,7 @@
|
||||
"embedding": "Chemins Embedding"
|
||||
},
|
||||
"pathPlaceholder": "/chemin/vers/modèles/supplémentaires",
|
||||
"saveSuccess": "Chemins de dossiers supplémentaires mis à jour.",
|
||||
"saveSuccess": "Chemins de dossiers supplémentaires mis à jour. Redémarrage requis pour appliquer les changements.",
|
||||
"saveError": "Échec de la mise à jour des chemins de dossiers supplémentaires: {message}",
|
||||
"validation": {
|
||||
"duplicatePath": "Ce chemin est déjà configuré"
|
||||
@@ -575,6 +637,7 @@
|
||||
"skipMetadataRefresh": "Ignorer l'actualisation des métadonnées pour la sélection",
|
||||
"resumeMetadataRefresh": "Reprendre l'actualisation des métadonnées pour la sélection",
|
||||
"deleteAll": "Supprimer tous les modèles",
|
||||
"downloadMissingLoras": "Télécharger les LoRAs manquants",
|
||||
"clear": "Effacer la sélection",
|
||||
"skipMetadataRefreshCount": "Ignorer({count} modèles)",
|
||||
"resumeMetadataRefreshCount": "Reprendre({count} modèles)",
|
||||
@@ -645,6 +708,8 @@
|
||||
"root": "Racine",
|
||||
"browseFolders": "Parcourir les dossiers :",
|
||||
"downloadAndSaveRecipe": "Télécharger et sauvegarder la recipe",
|
||||
"importRecipeOnly": "Importer uniquement la recette",
|
||||
"importAndDownload": "Importer et télécharger",
|
||||
"downloadMissingLoras": "Télécharger les LoRAs manquants",
|
||||
"saveRecipe": "Sauvegarder la recipe",
|
||||
"loraCountInfo": "({existing}/{total} dans la bibliothèque)",
|
||||
@@ -732,61 +797,61 @@
|
||||
}
|
||||
},
|
||||
"batchImport": {
|
||||
"title": "[TODO: Translate] Batch Import Recipes",
|
||||
"action": "[TODO: Translate] Batch Import",
|
||||
"urlList": "[TODO: Translate] URL List",
|
||||
"directory": "[TODO: Translate] Directory",
|
||||
"urlDescription": "[TODO: Translate] Enter image URLs or local file paths (one per line). Each will be imported as a recipe.",
|
||||
"directoryDescription": "[TODO: Translate] Enter a directory path to import all images from that folder.",
|
||||
"urlsLabel": "[TODO: Translate] Image URLs or Local Paths",
|
||||
"urlsPlaceholder": "[TODO: Translate] https://civitai.com/images/...\nhttps://civitai.com/images/...\nC:/path/to/image.png\n...",
|
||||
"urlsHint": "[TODO: Translate] Enter one URL or path per line",
|
||||
"directoryPath": "[TODO: Translate] Directory Path",
|
||||
"directoryPlaceholder": "[TODO: Translate] /path/to/images/folder",
|
||||
"browse": "[TODO: Translate] Browse",
|
||||
"recursive": "[TODO: Translate] Include subdirectories",
|
||||
"tagsOptional": "[TODO: Translate] Tags (optional, applied to all recipes)",
|
||||
"tagsPlaceholder": "[TODO: Translate] Enter tags separated by commas",
|
||||
"tagsHint": "[TODO: Translate] Tags will be added to all imported recipes",
|
||||
"skipNoMetadata": "[TODO: Translate] Skip images without metadata",
|
||||
"skipNoMetadataHelp": "[TODO: Translate] Images without LoRA metadata will be skipped automatically.",
|
||||
"start": "[TODO: Translate] Start Import",
|
||||
"startImport": "[TODO: Translate] Start Import",
|
||||
"importing": "[TODO: Translate] Importing...",
|
||||
"progress": "[TODO: Translate] Progress",
|
||||
"total": "[TODO: Translate] Total",
|
||||
"success": "[TODO: Translate] Success",
|
||||
"failed": "[TODO: Translate] Failed",
|
||||
"skipped": "[TODO: Translate] Skipped",
|
||||
"current": "[TODO: Translate] Current",
|
||||
"currentItem": "[TODO: Translate] Current",
|
||||
"preparing": "[TODO: Translate] Preparing...",
|
||||
"cancel": "[TODO: Translate] Cancel",
|
||||
"cancelImport": "[TODO: Translate] Cancel",
|
||||
"cancelled": "[TODO: Translate] Import cancelled",
|
||||
"completed": "[TODO: Translate] Import completed",
|
||||
"completedWithErrors": "[TODO: Translate] Completed with errors",
|
||||
"completedSuccess": "[TODO: Translate] Successfully imported {count} recipe(s)",
|
||||
"successCount": "[TODO: Translate] Successful",
|
||||
"failedCount": "[TODO: Translate] Failed",
|
||||
"skippedCount": "[TODO: Translate] Skipped",
|
||||
"totalProcessed": "[TODO: Translate] Total processed",
|
||||
"viewDetails": "[TODO: Translate] View Details",
|
||||
"newImport": "[TODO: Translate] New Import",
|
||||
"manualPathEntry": "[TODO: Translate] Please enter the directory path manually. File browser is not available in this browser.",
|
||||
"batchImportDirectorySelected": "[TODO: Translate] Directory selected: {name}. You may need to enter the full path manually.",
|
||||
"batchImportManualEntryRequired": "[TODO: Translate] File browser not available. Please enter the directory path manually.",
|
||||
"backToParent": "[TODO: Translate] Back to parent directory",
|
||||
"folders": "[TODO: Translate] Folders",
|
||||
"folderCount": "[TODO: Translate] {count} folders",
|
||||
"imageFiles": "[TODO: Translate] Image Files",
|
||||
"images": "[TODO: Translate] images",
|
||||
"imageCount": "[TODO: Translate] {count} images",
|
||||
"selectFolder": "[TODO: Translate] Select This Folder",
|
||||
"title": "Batch Import Recipes",
|
||||
"action": "Batch Import",
|
||||
"urlList": "URL List",
|
||||
"directory": "Directory",
|
||||
"urlDescription": "Enter image URLs or local file paths (one per line). Each will be imported as a recipe.",
|
||||
"directoryDescription": "Enter a directory path to import all images from that folder.",
|
||||
"urlsLabel": "Image URLs or Local Paths",
|
||||
"urlsPlaceholder": "https://civitai.com/images/...\nhttps://civitai.com/images/...\nC:/path/to/image.png\n...",
|
||||
"urlsHint": "Enter one URL or path per line",
|
||||
"directoryPath": "Directory Path",
|
||||
"directoryPlaceholder": "/path/to/images/folder",
|
||||
"browse": "Browse",
|
||||
"recursive": "Include subdirectories",
|
||||
"tagsOptional": "Tags (optional, applied to all recipes)",
|
||||
"tagsPlaceholder": "Enter tags separated by commas",
|
||||
"tagsHint": "Tags will be added to all imported recipes",
|
||||
"skipNoMetadata": "Skip images without metadata",
|
||||
"skipNoMetadataHelp": "Images without LoRA metadata will be skipped automatically.",
|
||||
"start": "Start Import",
|
||||
"startImport": "Start Import",
|
||||
"importing": "Importing...",
|
||||
"progress": "Progress",
|
||||
"total": "Total",
|
||||
"success": "Success",
|
||||
"failed": "Failed",
|
||||
"skipped": "Skipped",
|
||||
"current": "Current",
|
||||
"currentItem": "Current",
|
||||
"preparing": "Preparing...",
|
||||
"cancel": "Cancel",
|
||||
"cancelImport": "Cancel",
|
||||
"cancelled": "Import cancelled",
|
||||
"completed": "Import completed",
|
||||
"completedWithErrors": "Completed with errors",
|
||||
"completedSuccess": "Successfully imported {count} recipe(s)",
|
||||
"successCount": "Successful",
|
||||
"failedCount": "Failed",
|
||||
"skippedCount": "Skipped",
|
||||
"totalProcessed": "Total processed",
|
||||
"viewDetails": "View Details",
|
||||
"newImport": "New Import",
|
||||
"manualPathEntry": "Please enter the directory path manually. File browser is not available in this browser.",
|
||||
"batchImportDirectorySelected": "Directory selected: {path}",
|
||||
"batchImportManualEntryRequired": "File browser not available. Please enter the directory path manually.",
|
||||
"backToParent": "Back to parent directory",
|
||||
"folders": "Folders",
|
||||
"folderCount": "{count} folders",
|
||||
"imageFiles": "Image Files",
|
||||
"images": "images",
|
||||
"imageCount": "{count} images",
|
||||
"selectFolder": "Select This Folder",
|
||||
"errors": {
|
||||
"enterUrls": "[TODO: Translate] Please enter at least one URL or path",
|
||||
"enterDirectory": "[TODO: Translate] Please enter a directory path",
|
||||
"startFailed": "[TODO: Translate] Failed to start import: {message}"
|
||||
"enterUrls": "Please enter at least one URL or path",
|
||||
"enterDirectory": "Please enter a directory path",
|
||||
"startFailed": "Failed to start import: {message}"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -797,7 +862,8 @@
|
||||
"diffusion_model": "Diffusion Model"
|
||||
},
|
||||
"contextMenu": {
|
||||
"moveToOtherTypeFolder": "Déplacer vers le dossier {otherType}"
|
||||
"moveToOtherTypeFolder": "Déplacer vers le dossier {otherType}",
|
||||
"sendToWorkflow": "Envoyer vers le workflow"
|
||||
}
|
||||
},
|
||||
"embeddings": {
|
||||
@@ -810,8 +876,8 @@
|
||||
"unpinSidebar": "Désépingler la barre latérale",
|
||||
"switchToListView": "Passer en vue liste",
|
||||
"switchToTreeView": "Passer en vue arborescence",
|
||||
"recursiveOn": "Rechercher dans les sous-dossiers",
|
||||
"recursiveOff": "Rechercher uniquement dans le dossier actuel",
|
||||
"recursiveOn": "Inclure les sous-dossiers",
|
||||
"recursiveOff": "Dossier actuel uniquement",
|
||||
"recursiveUnavailable": "La recherche récursive n'est disponible qu'en vue arborescente",
|
||||
"collapseAllDisabled": "Non disponible en vue liste",
|
||||
"dragDrop": {
|
||||
@@ -891,6 +957,8 @@
|
||||
"earlyAccess": "Accès anticipé",
|
||||
"earlyAccessTooltip": "Accès anticipé requis",
|
||||
"inLibrary": "Dans la bibliothèque",
|
||||
"downloaded": "Téléchargé",
|
||||
"downloadedTooltip": "Déjà téléchargé, mais il n'est actuellement pas dans votre bibliothèque.",
|
||||
"alreadyInLibrary": "Déjà dans la bibliothèque",
|
||||
"autoOrganizedPath": "[Auto-organisé par modèle de chemin]",
|
||||
"errors": {
|
||||
@@ -981,6 +1049,14 @@
|
||||
"save": "Mettre à jour le modèle de base",
|
||||
"cancel": "Annuler"
|
||||
},
|
||||
"bulkDownloadMissingLoras": {
|
||||
"title": "Télécharger les LoRAs manquants",
|
||||
"message": "{uniqueCount} LoRAs manquants uniques trouvés (sur un total de {totalCount} dans les recettes sélectionnées).",
|
||||
"previewTitle": "LoRAs à télécharger :",
|
||||
"moreItems": "...et {count} de plus",
|
||||
"note": "Les fichiers seront téléchargés en utilisant les modèles de chemins par défaut. Cela peut prendre un certain temps selon le nombre de LoRAs.",
|
||||
"downloadButton": "Télécharger {count} LoRA(s)"
|
||||
},
|
||||
"exampleAccess": {
|
||||
"title": "Images d'exemple locales",
|
||||
"message": "Aucune image d'exemple locale trouvée pour ce modèle. Options d'affichage :",
|
||||
@@ -1032,7 +1108,9 @@
|
||||
"viewOnCivitai": "Voir sur Civitai",
|
||||
"viewOnCivitaiText": "Voir sur Civitai",
|
||||
"viewCreatorProfile": "Voir le profil du créateur",
|
||||
"openFileLocation": "Ouvrir l'emplacement du fichier"
|
||||
"openFileLocation": "Ouvrir l'emplacement du fichier",
|
||||
"sendToWorkflow": "Envoyer vers ComfyUI",
|
||||
"sendToWorkflowText": "Envoyer vers ComfyUI"
|
||||
},
|
||||
"openFileLocation": {
|
||||
"success": "Emplacement du fichier ouvert avec succès",
|
||||
@@ -1040,6 +1118,9 @@
|
||||
"copied": "Chemin copié dans le presse-papiers: {{path}}",
|
||||
"clipboardFallback": "Chemin: {{path}}"
|
||||
},
|
||||
"sendToWorkflow": {
|
||||
"noFilePath": "Impossible d'envoyer vers ComfyUI : aucun chemin de fichier disponible"
|
||||
},
|
||||
"metadata": {
|
||||
"version": "Version",
|
||||
"fileName": "Nom de fichier",
|
||||
@@ -1147,17 +1228,30 @@
|
||||
"days": "dans {count}j"
|
||||
},
|
||||
"badges": {
|
||||
"current": "Version actuelle",
|
||||
"current": "Version ouverte",
|
||||
"currentTooltip": "C'est la version à partir de laquelle cette fenêtre a été ouverte",
|
||||
"inLibrary": "Dans la bibliothèque",
|
||||
"inLibraryTooltip": "Cette version existe dans votre bibliothèque locale",
|
||||
"downloaded": "Téléchargé",
|
||||
"downloadedTooltip": "Cette version a déjà été téléchargée, mais n'est pas actuellement dans votre bibliothèque",
|
||||
"newer": "Version plus récente",
|
||||
"newerTooltip": "Cette version est plus récente que votre dernière version locale",
|
||||
"earlyAccess": "Accès anticipé",
|
||||
"ignored": "Ignorée"
|
||||
"earlyAccessTooltip": "Cette version nécessite actuellement l'accès anticipé Civitai",
|
||||
"ignored": "Ignorée",
|
||||
"ignoredTooltip": "Les notifications de mise à jour sont désactivées pour cette version"
|
||||
},
|
||||
"actions": {
|
||||
"download": "Télécharger",
|
||||
"downloadTooltip": "Télécharger cette version",
|
||||
"downloadEarlyAccessTooltip": "Télécharger cette version en accès anticipé depuis Civitai",
|
||||
"delete": "Supprimer",
|
||||
"deleteTooltip": "Supprimer cette version locale",
|
||||
"ignore": "Ignorer",
|
||||
"unignore": "Ne plus ignorer",
|
||||
"ignoreTooltip": "Ignorer les notifications de mise à jour pour cette version",
|
||||
"unignoreTooltip": "Reprendre les notifications de mise à jour pour cette version",
|
||||
"viewVersionOnCivitai": "Voir la version sur Civitai",
|
||||
"earlyAccessTooltip": "Nécessite l'achat de l'accès anticipé",
|
||||
"resumeModelUpdates": "Reprendre les mises à jour pour ce modèle",
|
||||
"ignoreModelUpdates": "Ignorer les mises à jour pour ce modèle",
|
||||
@@ -1297,7 +1391,9 @@
|
||||
"recipeReplaced": "Recipe remplacée dans le workflow",
|
||||
"recipeFailedToSend": "Échec de l'envoi de la recipe au workflow",
|
||||
"noMatchingNodes": "Aucun nœud compatible disponible dans le workflow actuel",
|
||||
"noTargetNodeSelected": "Aucun nœud cible sélectionné"
|
||||
"noTargetNodeSelected": "Aucun nœud cible sélectionné",
|
||||
"modelUpdated": "Modèle mis à jour dans le workflow",
|
||||
"modelFailed": "Échec de la mise à jour du nœud modèle"
|
||||
},
|
||||
"nodeSelector": {
|
||||
"recipe": "Recipe",
|
||||
@@ -1448,6 +1544,7 @@
|
||||
"pleaseSelectVersion": "Veuillez sélectionner une version",
|
||||
"versionExists": "Cette version existe déjà dans votre bibliothèque",
|
||||
"downloadCompleted": "Téléchargement terminé avec succès",
|
||||
"downloadSkippedByBaseModel": "Téléchargement ignoré, car le modèle de base {baseModel} est exclu",
|
||||
"autoOrganizeSuccess": "Auto-organisation terminée avec succès pour {count} {type}",
|
||||
"autoOrganizePartialSuccess": "Auto-organisation terminée avec {success} déplacés, {failures} échecs sur {total} modèles",
|
||||
"autoOrganizeFailed": "Échec de l'auto-organisation : {error}",
|
||||
@@ -1467,7 +1564,11 @@
|
||||
"nameUpdated": "Nom de la recipe mis à jour avec succès",
|
||||
"tagsUpdated": "Tags de la recipe mis à jour avec succès",
|
||||
"sourceUrlUpdated": "URL source mise à jour avec succès",
|
||||
"promptUpdated": "Prompt mis à jour avec succès",
|
||||
"negativePromptUpdated": "Prompt négatif mis à jour avec succès",
|
||||
"promptEditorHint": "Appuyez sur Entrée pour sauvegarder, Maj+Entrée pour nouvelle ligne",
|
||||
"noRecipeId": "Aucun ID de recipe disponible",
|
||||
"sendToWorkflowFailed": "Échec de l'envoi de la recette vers le workflow : {message}",
|
||||
"copyFailed": "Erreur lors de la copie de la syntaxe de la recipe : {message}",
|
||||
"noMissingLoras": "Aucun LoRA manquant à télécharger",
|
||||
"missingLorasInfoFailed": "Échec de l'obtention des informations pour les LoRAs manquants",
|
||||
@@ -1495,16 +1596,20 @@
|
||||
"processingError": "Erreur de traitement : {message}",
|
||||
"folderBrowserError": "Erreur lors du chargement du navigateur de dossiers : {message}",
|
||||
"recipeSaveFailed": "Échec de la sauvegarde de la recipe : {error}",
|
||||
"recipeSaved": "Recipe saved successfully",
|
||||
"importFailed": "Échec de l'importation : {message}",
|
||||
"folderTreeFailed": "Échec du chargement de l'arborescence des dossiers",
|
||||
"folderTreeError": "Erreur lors du chargement de l'arborescence des dossiers",
|
||||
"batchImportFailed": "[TODO: Translate] Failed to start batch import: {message}",
|
||||
"batchImportCancelling": "[TODO: Translate] Cancelling batch import...",
|
||||
"batchImportCancelFailed": "[TODO: Translate] Failed to cancel batch import: {message}",
|
||||
"batchImportNoUrls": "[TODO: Translate] Please enter at least one URL or file path",
|
||||
"batchImportNoDirectory": "[TODO: Translate] Please enter a directory path",
|
||||
"batchImportBrowseFailed": "[TODO: Translate] Failed to browse directory: {message}",
|
||||
"batchImportDirectorySelected": "[TODO: Translate] Directory selected: {path}"
|
||||
"batchImportFailed": "Failed to start batch import: {message}",
|
||||
"batchImportCancelling": "Cancelling batch import...",
|
||||
"batchImportCancelFailed": "Failed to cancel batch import: {message}",
|
||||
"batchImportNoUrls": "Please enter at least one URL or file path",
|
||||
"batchImportNoDirectory": "Please enter a directory path",
|
||||
"batchImportBrowseFailed": "Failed to browse directory: {message}",
|
||||
"batchImportDirectorySelected": "Directory selected: {path}",
|
||||
"noRecipesSelected": "Aucune recette sélectionnée",
|
||||
"noMissingLorasInSelection": "Aucun LoRA manquant trouvé dans les recettes sélectionnées",
|
||||
"noLoraRootConfigured": "Aucun répertoire racine LoRA configuré. Veuillez définir un répertoire racine LoRA par défaut dans les paramètres."
|
||||
},
|
||||
"models": {
|
||||
"noModelsSelected": "Aucun modèle sélectionné",
|
||||
@@ -1571,6 +1676,8 @@
|
||||
"mappingSaveFailed": "Échec de la sauvegarde des mappages de modèle de base : {message}",
|
||||
"downloadTemplatesUpdated": "Modèles de chemin de téléchargement mis à jour",
|
||||
"downloadTemplatesFailed": "Échec de la sauvegarde des modèles de chemin de téléchargement : {message}",
|
||||
"recipesPathUpdated": "Recipes storage path updated",
|
||||
"recipesPathSaveFailed": "Failed to update recipes storage path: {message}",
|
||||
"settingsUpdated": "Paramètres mis à jour : {setting}",
|
||||
"compactModeToggled": "Mode compact {state}",
|
||||
"settingSaveFailed": "Échec de la sauvegarde du paramètre : {message}",
|
||||
@@ -1714,6 +1821,35 @@
|
||||
"moveFailed": "Failed to move item: {message}"
|
||||
}
|
||||
},
|
||||
"doctor": {
|
||||
"kicker": "Diagnostics système",
|
||||
"title": "Docteur",
|
||||
"buttonTitle": "Lancer les diagnostics et les corrections courantes",
|
||||
"loading": "Vérification de l'environnement...",
|
||||
"footer": "Exportez un lot de diagnostic si le problème persiste après la réparation.",
|
||||
"summary": {
|
||||
"idle": "Lancez une vérification de l'état des paramètres, de l'intégrité du cache et de la cohérence de l'interface.",
|
||||
"ok": "Aucun problème actif n'a été trouvé dans l'environnement actuel.",
|
||||
"warning": "{count} problème(s) ont été trouvés. La plupart peuvent être corrigés directement depuis ce panneau.",
|
||||
"error": "{count} problème(s) nécessitent une attention avant que l'application soit entièrement saine."
|
||||
},
|
||||
"status": {
|
||||
"ok": "Sain",
|
||||
"warning": "Nécessite une attention",
|
||||
"error": "Action requise"
|
||||
},
|
||||
"actions": {
|
||||
"runAgain": "Relancer",
|
||||
"exportBundle": "Exporter le lot"
|
||||
},
|
||||
"toast": {
|
||||
"loadFailed": "Échec du chargement des diagnostics : {message}",
|
||||
"repairSuccess": "Reconstruction du cache terminée.",
|
||||
"repairFailed": "Échec de la reconstruction du cache : {message}",
|
||||
"exportSuccess": "Lot de diagnostics exporté.",
|
||||
"exportFailed": "Échec de l'export du lot de diagnostics : {message}"
|
||||
}
|
||||
},
|
||||
"banners": {
|
||||
"versionMismatch": {
|
||||
"title": "Mise à jour de l'application détectée",
|
||||
280
locales/he.json
@@ -263,7 +263,9 @@
"videoSettings": "הגדרות וידאו",
"layoutSettings": "הגדרות פריסה",
"misc": "שונות",
"backup": "גיבויים",
"folderSettings": "תיקיות ברירת מחדל",
"recipeSettings": "מתכונים",
"extraFolderPaths": "נתיבי תיקיות נוספים",
"downloadPathTemplates": "תבניות נתיב הורדה",
"priorityTags": "תגיות עדיפות",
@@ -291,7 +293,15 @@
"blurNsfwContent": "טשטש תוכן NSFW",
"blurNsfwContentHelp": "טשטש תמונות תצוגה מקדימה של תוכן למבוגרים (NSFW)",
"showOnlySfw": "הצג רק תוצאות SFW",
"showOnlySfwHelp": "סנן את כל התוכן ה-NSFW בעת גלישה וחיפוש"
"showOnlySfwHelp": "סנן את כל התוכן ה-NSFW בעת גלישה וחיפוש",
"matureBlurThreshold": "סף טשטוש תוכן מבוגרים",
"matureBlurThresholdHelp": "הגדר מאיזו רמת דירוג מתחיל סינון הטשטוש כאשר טשטוש NSFW מופעל.",
"matureBlurThresholdOptions": {
"pg13": "PG13 ומעלה",
"r": "R ומעלה (ברירת מחדל)",
"x": "X ומעלה",
"xxx": "XXX בלבד"
}
},
"videoSettings": {
"autoplayOnHover": "נגן וידאו אוטומטית בריחוף",
@@ -315,6 +325,54 @@
"saveFailed": "לא ניתן לשמור נתיבי דילוג: {message}"
}
},
"backup": {
"autoEnabled": "גיבויים אוטומטיים",
"autoEnabledHelp": "יוצר צילום מצב מקומי פעם ביום ושומר את הצילומים האחרונים לפי מדיניות השמירה.",
"retention": "כמות שמירה",
"retentionHelp": "כמה צילומי מצב אוטומטיים לשמור לפני שמסירים ישנים.",
"management": "ניהול גיבויים",
"managementHelp": "ייצא את מצב המשתמש הנוכחי או שחזר אותו מארכיון גיבוי.",
"scopeHelp": "כולל את ההגדרות שלך, היסטוריית ההורדות ומצב עדכוני המודלים. אינו כולל קובצי מודל או מטמונים שניתן לשחזר.",
"locationSummary": "מיקום הגיבוי הנוכחי",
"openFolderButton": "פתח את תיקיית הגיבויים",
"openFolderSuccess": "תיקיית הגיבויים נפתחה",
"openFolderFailed": "לא ניתן היה לפתוח את תיקיית הגיבויים",
"locationCopied": "נתיב הגיבוי הועתק ללוח: {{path}}",
"locationClipboardFallback": "נתיב הגיבוי: {{path}}",
"exportButton": "ייצא גיבוי",
"exportSuccess": "הגיבוי יוצא בהצלחה.",
"exportFailed": "נכשל ייצוא הגיבוי: {message}",
"importButton": "ייבא גיבוי",
"importConfirm": "לייבא את הגיבוי הזה ולדרוס את מצב המשתמש המקומי?",
"importSuccess": "הגיבוי יובא בהצלחה.",
"importFailed": "נכשל ייבוא הגיבוי: {message}",
"latestSnapshot": "צילום המצב האחרון",
"latestAutoSnapshot": "צילום המצב האוטומטי האחרון",
"snapshotCount": "צילומי מצב שמורים",
"noneAvailable": "עדיין אין צילומי מצב"
},
"downloadSkipBaseModels": {
"label": "דלג על הורדות עבור מודלי בסיס",
"help": "חל על כל תהליכי ההורדה. ניתן לבחור כאן רק מודלי בסיס נתמכים.",
"searchPlaceholder": "סנן מודלי בסיס...",
"empty": "אין מודלי בסיס התואמים לחיפוש הנוכחי.",
"summary": {
"none": "לא נבחר דבר",
"count": "{count} נבחרו"
},
"actions": {
"edit": "עריכה",
"collapse": "כווץ",
"clear": "נקה"
},
"validation": {
"saveFailed": "לא ניתן לשמור את מודלי הבסיס המוחרגים: {message}"
}
},
"skipPreviouslyDownloadedModelVersions": {
"label": "דלג על גרסאות מודלים שהורדו בעבר",
"help": "כאשר מופעל, LoRA Manager ידלג על הורדת גרסת מודל אם שירות היסטוריית ההורדות רושם את הגרסה המדויקת הזו ככבר שהורדה. חל על כל תהליכי ההורדה."
},
"layoutSettings": {
"displayDensity": "צפיפות תצוגה",
"displayDensityOptions": {
@@ -363,12 +421,16 @@
"defaultUnetRootHelp": "הגדר את ספריית השורש המוגדרת כברירת מחדל של Diffusion Model (UNET) להורדות, ייבוא והעברות",
"defaultEmbeddingRoot": "תיקיית שורש Embedding",
"defaultEmbeddingRootHelp": "הגדר את ספריית השורש המוגדרת כברירת מחדל של embedding להורדות, ייבוא והעברות",
"recipesPath": "נתיב אחסון מתכונים",
"recipesPathHelp": "ספרייה מותאמת אישית אופציונלית למתכונים שנשמרו. השאר ריק כדי להשתמש בתיקיית recipes של שורש LoRA הראשון.",
"recipesPathPlaceholder": "/path/to/recipes",
"recipesPathMigrating": "מעביר את אחסון המתכונים...",
"noDefault": "אין ברירת מחדל"
},
"extraFolderPaths": {
"title": "נתיבי תיקיות נוספים",
"help": "הוסף תיקיות מודלים נוספות מחוץ לנתיבים הסטנדרטיים של ComfyUI. נתיבים אלה נשמרים בנפרד ונסרקים לצד תיקיות ברירת המחדל.",
"description": "הגדר תיקיות נוספות לסריקת מודלים. נתיבים אלה ספציפיים ל-LoRA Manager וימוזגו עם נתיבי ברירת המחדל של ComfyUI.",
"description": "נתיבי שורש מודלים נוספים בלעדיים ל-LoRA Manager. טען מודלים ממיקומים מחוץ לתיקיות הסטנדרטיות של ComfyUI - אידיאלי לספריות גדולות שאחרת יאטו את ComfyUI.",
"restartRequired": "Requires restart to take effect",
"modelTypes": {
"lora": "נתיבי LoRA",
"checkpoint": "נתיבי Checkpoint",
@@ -376,7 +438,7 @@
"embedding": "נתיבי Embedding"
},
"pathPlaceholder": "/נתיב/למודלים/נוספים",
"saveSuccess": "נתיבי תיקיות נוספים עודכנו.",
"saveSuccess": "נתיבי תיקיות נוספים עודכנו. נדרשת הפעלה מחדש כדי להחיל את השינויים.",
"saveError": "נכשל בעדכון נתיבי תיקיות נוספים: {message}",
"validation": {
"duplicatePath": "נתיב זה כבר מוגדר"
@@ -575,6 +637,7 @@
"skipMetadataRefresh": "דילוג על רענון מטא-נתונים לנבחרים",
"resumeMetadataRefresh": "המשך רענון מטא-נתונים לנבחרים",
"deleteAll": "מחק את כל המודלים",
"downloadMissingLoras": "הורדת LoRAs חסרים",
"clear": "נקה בחירה",
"skipMetadataRefreshCount": "דילוג({count} מודלים)",
"resumeMetadataRefreshCount": "המשך({count} מודלים)",
@@ -645,6 +708,8 @@
"root": "שורש",
"browseFolders": "דפדף בתיקיות:",
"downloadAndSaveRecipe": "הורד ושמור מתכון",
"importRecipeOnly": "יבא רק מתכון",
"importAndDownload": "יבא והורד",
"downloadMissingLoras": "הורד LoRAs חסרים",
"saveRecipe": "שמור מתכון",
"loraCountInfo": "({existing}/{total} בספרייה)",
@@ -732,61 +797,61 @@
}
},
"batchImport": {
"title": "[TODO: Translate] Batch Import Recipes",
"action": "[TODO: Translate] Batch Import",
"urlList": "[TODO: Translate] URL List",
"directory": "[TODO: Translate] Directory",
"urlDescription": "[TODO: Translate] Enter image URLs or local file paths (one per line). Each will be imported as a recipe.",
"directoryDescription": "[TODO: Translate] Enter a directory path to import all images from that folder.",
"urlsLabel": "[TODO: Translate] Image URLs or Local Paths",
"urlsPlaceholder": "[TODO: Translate] https://civitai.com/images/...\nhttps://civitai.com/images/...\nC:/path/to/image.png\n...",
"urlsHint": "[TODO: Translate] Enter one URL or path per line",
"directoryPath": "[TODO: Translate] Directory Path",
"directoryPlaceholder": "[TODO: Translate] /path/to/images/folder",
"browse": "[TODO: Translate] Browse",
"recursive": "[TODO: Translate] Include subdirectories",
"tagsOptional": "[TODO: Translate] Tags (optional, applied to all recipes)",
"tagsPlaceholder": "[TODO: Translate] Enter tags separated by commas",
"tagsHint": "[TODO: Translate] Tags will be added to all imported recipes",
"skipNoMetadata": "[TODO: Translate] Skip images without metadata",
"skipNoMetadataHelp": "[TODO: Translate] Images without LoRA metadata will be skipped automatically.",
"start": "[TODO: Translate] Start Import",
"startImport": "[TODO: Translate] Start Import",
"importing": "[TODO: Translate] Importing...",
"progress": "[TODO: Translate] Progress",
"total": "[TODO: Translate] Total",
"success": "[TODO: Translate] Success",
"failed": "[TODO: Translate] Failed",
"skipped": "[TODO: Translate] Skipped",
"current": "[TODO: Translate] Current",
"currentItem": "[TODO: Translate] Current",
"preparing": "[TODO: Translate] Preparing...",
"cancel": "[TODO: Translate] Cancel",
"cancelImport": "[TODO: Translate] Cancel",
"cancelled": "[TODO: Translate] Import cancelled",
"completed": "[TODO: Translate] Import completed",
"completedWithErrors": "[TODO: Translate] Completed with errors",
"completedSuccess": "[TODO: Translate] Successfully imported {count} recipe(s)",
"successCount": "[TODO: Translate] Successful",
"failedCount": "[TODO: Translate] Failed",
"skippedCount": "[TODO: Translate] Skipped",
"totalProcessed": "[TODO: Translate] Total processed",
"viewDetails": "[TODO: Translate] View Details",
"newImport": "[TODO: Translate] New Import",
"manualPathEntry": "[TODO: Translate] Please enter the directory path manually. File browser is not available in this browser.",
"batchImportDirectorySelected": "[TODO: Translate] Directory selected: {name}. You may need to enter the full path manually.",
"batchImportManualEntryRequired": "[TODO: Translate] File browser not available. Please enter the directory path manually.",
"backToParent": "[TODO: Translate] Back to parent directory",
"folders": "[TODO: Translate] Folders",
"folderCount": "[TODO: Translate] {count} folders",
"imageFiles": "[TODO: Translate] Image Files",
"images": "[TODO: Translate] images",
"imageCount": "[TODO: Translate] {count} images",
"selectFolder": "[TODO: Translate] Select This Folder",
"title": "Batch Import Recipes",
"action": "Batch Import",
"urlList": "URL List",
"directory": "Directory",
"urlDescription": "Enter image URLs or local file paths (one per line). Each will be imported as a recipe.",
"directoryDescription": "Enter a directory path to import all images from that folder.",
"urlsLabel": "Image URLs or Local Paths",
"urlsPlaceholder": "https://civitai.com/images/...\nhttps://civitai.com/images/...\nC:/path/to/image.png\n...",
"urlsHint": "Enter one URL or path per line",
"directoryPath": "Directory Path",
"directoryPlaceholder": "/path/to/images/folder",
"browse": "Browse",
"recursive": "Include subdirectories",
"tagsOptional": "Tags (optional, applied to all recipes)",
"tagsPlaceholder": "Enter tags separated by commas",
"tagsHint": "Tags will be added to all imported recipes",
"skipNoMetadata": "Skip images without metadata",
"skipNoMetadataHelp": "Images without LoRA metadata will be skipped automatically.",
"start": "Start Import",
"startImport": "Start Import",
"importing": "Importing...",
"progress": "Progress",
"total": "Total",
"success": "Success",
"failed": "Failed",
"skipped": "Skipped",
"current": "Current",
"currentItem": "Current",
"preparing": "Preparing...",
"cancel": "Cancel",
"cancelImport": "Cancel",
"cancelled": "Import cancelled",
"completed": "Import completed",
"completedWithErrors": "Completed with errors",
"completedSuccess": "Successfully imported {count} recipe(s)",
"successCount": "Successful",
"failedCount": "Failed",
"skippedCount": "Skipped",
"totalProcessed": "Total processed",
"viewDetails": "View Details",
"newImport": "New Import",
"manualPathEntry": "Please enter the directory path manually. File browser is not available in this browser.",
"batchImportDirectorySelected": "Directory selected: {path}",
"batchImportManualEntryRequired": "File browser not available. Please enter the directory path manually.",
"backToParent": "Back to parent directory",
"folders": "Folders",
"folderCount": "{count} folders",
"imageFiles": "Image Files",
"images": "images",
"imageCount": "{count} images",
"selectFolder": "Select This Folder",
"errors": {
"enterUrls": "[TODO: Translate] Please enter at least one URL or path",
"enterDirectory": "[TODO: Translate] Please enter a directory path",
"startFailed": "[TODO: Translate] Failed to start import: {message}"
"enterUrls": "Please enter at least one URL or path",
"enterDirectory": "Please enter a directory path",
"startFailed": "Failed to start import: {message}"
}
}
},
@@ -797,7 +862,8 @@
"diffusion_model": "Diffusion Model"
},
"contextMenu": {
"moveToOtherTypeFolder": "העבר לתיקיית {otherType}"
"moveToOtherTypeFolder": "העבר לתיקיית {otherType}",
"sendToWorkflow": "שלח ל-workflow"
}
},
"embeddings": {
@@ -810,8 +876,8 @@
"unpinSidebar": "שחרר סרגל צד",
"switchToListView": "עבור לתצוגת רשימה",
"switchToTreeView": "תצוגת עץ",
"recursiveOn": "חיפוש בתיקיות משנה",
"recursiveOff": "חיפוש רק בתיקייה הנוכחית",
"recursiveOn": "כלול תיקיות משנה",
"recursiveOff": "רק התיקייה הנוכחית",
"recursiveUnavailable": "חיפוש רקורסיבי זמין רק בתצוגת עץ",
"collapseAllDisabled": "לא זמין בתצוגת רשימה",
"dragDrop": {
@@ -891,6 +957,8 @@
"earlyAccess": "גישה מוקדמת",
"earlyAccessTooltip": "נדרשת גישה מוקדמת",
"inLibrary": "בספרייה",
"downloaded": "הורד",
"downloadedTooltip": "הורד בעבר, אך הוא אינו נמצא כרגע בספרייה שלך.",
"alreadyInLibrary": "כבר בספרייה",
"autoOrganizedPath": "[מאורגן אוטומטית לפי תבנית נתיב]",
"errors": {
@@ -981,6 +1049,14 @@
"save": "עדכן מודל בסיס",
"cancel": "ביטול"
},
"bulkDownloadMissingLoras": {
"title": "הורדת LoRAs חסרים",
"message": "נמצאו {uniqueCount} LoRAs חסרים ייחודיים (מתוך {totalCount} בסך הכל במתכונים שנבחרו).",
"previewTitle": "LoRAs להורדה:",
"moreItems": "...ועוד {count}",
"note": "הקבצים יורדו באמצעות תבניות נתיב ברירת מחדל. זה עשוי לקחת זמן בהתאם למספר ה-LoRAs.",
"downloadButton": "הורד {count} LoRA(s)"
},
"exampleAccess": {
"title": "תמונות דוגמה מקומיות",
"message": "לא נמצאו תמונות דוגמה מקומיות למודל זה. אפשרויות צפייה:",
@@ -1032,7 +1108,9 @@
"viewOnCivitai": "הצג ב-Civitai",
"viewOnCivitaiText": "הצג ב-Civitai",
"viewCreatorProfile": "הצג פרופיל יוצר",
"openFileLocation": "פתח מיקום קובץ"
"openFileLocation": "פתח מיקום קובץ",
"sendToWorkflow": "שלח ל-ComfyUI",
"sendToWorkflowText": "שלח ל-ComfyUI"
},
"openFileLocation": {
"success": "מיקום הקובץ נפתח בהצלחה",
@@ -1040,6 +1118,9 @@
"copied": "הנתיב הועתק ללוח העריכה: {{path}}",
"clipboardFallback": "נתיב: {{path}}"
},
"sendToWorkflow": {
"noFilePath": "לא ניתן לשלוח ל-ComfyUI: אין נתיב קובץ זמין"
},
"metadata": {
"version": "גרסה",
"fileName": "שם קובץ",
@@ -1147,17 +1228,30 @@
"days": "בעוד {count} ימים"
},
"badges": {
"current": "גרסה נוכחית",
"current": "גרסה שנפתחה",
"currentTooltip": "זוהי הגרסה שממנה נפתח החלון הזה",
"inLibrary": "בספרייה",
"inLibraryTooltip": "גרסה זו קיימת בספרייה המקומית שלך",
"downloaded": "הורד",
"downloadedTooltip": "גרסה זו הורדה בעבר, אך אינה נמצאת כרגע בספרייה שלך",
"newer": "גרסה חדשה יותר",
"newerTooltip": "גרסה זו חדשה יותר מהגרסה המקומית האחרונה שלך",
"earlyAccess": "גישה מוקדמת",
"ignored": "התעלם"
"earlyAccessTooltip": "גרסה זו דורשת כרגע גישת Early Access של Civitai",
"ignored": "התעלם",
"ignoredTooltip": "התראות העדכון מושבתות עבור גרסה זו"
},
"actions": {
"download": "הורדה",
"downloadTooltip": "הורד את הגרסה הזו",
"downloadEarlyAccessTooltip": "הורד את גרסת ה-Early Access הזו מ-Civitai",
"delete": "מחיקה",
"deleteTooltip": "מחק את הגרסה המקומית הזו",
"ignore": "התעלם",
"unignore": "בטל התעלמות",
"ignoreTooltip": "התעלם מהתראות העדכון עבור גרסה זו",
"unignoreTooltip": "חזור לקבל התראות עדכון עבור גרסה זו",
"viewVersionOnCivitai": "הצג את הגרסה ב-Civitai",
"earlyAccessTooltip": "נדרש רכישת גישה מוקדמת",
"resumeModelUpdates": "המשך עדכונים עבור מודל זה",
"ignoreModelUpdates": "התעלם מעדכונים עבור מודל זה",
@@ -1297,7 +1391,9 @@
"recipeReplaced": "מתכון הוחלף ב-workflow",
"recipeFailedToSend": "שליחת מתכון ל-workflow נכשלה",
"noMatchingNodes": "אין צמתים תואמים זמינים ב-workflow הנוכחי",
"noTargetNodeSelected": "לא נבחר צומת יעד"
"noTargetNodeSelected": "לא נבחר צומת יעד",
"modelUpdated": "מודל עודכן ב-workflow",
"modelFailed": "עדכון צומת המודל נכשל"
},
"nodeSelector": {
"recipe": "מתכון",
@@ -1448,6 +1544,7 @@
"pleaseSelectVersion": "אנא בחר גרסה",
"versionExists": "גרסה זו כבר קיימת בספרייה שלך",
"downloadCompleted": "ההורדה הושלמה בהצלחה",
"downloadSkippedByBaseModel": "ההורדה דולגה כי מודל הבסיס {baseModel} מוחרג",
"autoOrganizeSuccess": "הארגון האוטומטי הושלם בהצלחה עבור {count} {type}",
"autoOrganizePartialSuccess": "הארגון האוטומטי הושלם עם {success} שהועברו, {failures} שנכשלו מתוך {total} מודלים",
"autoOrganizeFailed": "הארגון האוטומטי נכשל: {error}",
@@ -1467,7 +1564,11 @@
"nameUpdated": "שם המתכון עודכן בהצלחה",
"tagsUpdated": "תגיות המתכון עודכנו בהצלחה",
"sourceUrlUpdated": "כתובת ה-URL המקורית עודכנה בהצלחה",
"promptUpdated": "הפרומפט עודכן בהצלחה",
"negativePromptUpdated": "הפרומפט השלילי עודכן בהצלחה",
"promptEditorHint": "לחץ Enter לשמירה, Shift+Enter לשורה חדשה",
"noRecipeId": "אין מזהה מתכון זמין",
"sendToWorkflowFailed": "נכשל שליחת המתכון ל-workflow: {message}",
"copyFailed": "שגיאה בהעתקת תחביר המתכון: {message}",
"noMissingLoras": "אין LoRAs חסרים להורדה",
"missingLorasInfoFailed": "קבלת מידע עבור LoRAs חסרים נכשלה",
@@ -1495,16 +1596,20 @@
"processingError": "שגיאת עיבוד: {message}",
"folderBrowserError": "שגיאה בטעינת דפדפן התיקיות: {message}",
"recipeSaveFailed": "שמירת המתכון נכשלה: {error}",
"recipeSaved": "Recipe saved successfully",
"importFailed": "הייבוא נכשל: {message}",
"folderTreeFailed": "טעינת עץ התיקיות נכשלה",
"folderTreeError": "שגיאה בטעינת עץ התיקיות",
"batchImportFailed": "[TODO: Translate] Failed to start batch import: {message}",
"batchImportCancelling": "[TODO: Translate] Cancelling batch import...",
"batchImportCancelFailed": "[TODO: Translate] Failed to cancel batch import: {message}",
"batchImportNoUrls": "[TODO: Translate] Please enter at least one URL or file path",
"batchImportNoDirectory": "[TODO: Translate] Please enter a directory path",
"batchImportBrowseFailed": "[TODO: Translate] Failed to browse directory: {message}",
"batchImportDirectorySelected": "[TODO: Translate] Directory selected: {path}"
"batchImportFailed": "Failed to start batch import: {message}",
"batchImportCancelling": "Cancelling batch import...",
"batchImportCancelFailed": "Failed to cancel batch import: {message}",
"batchImportNoUrls": "Please enter at least one URL or file path",
"batchImportNoDirectory": "Please enter a directory path",
"batchImportBrowseFailed": "Failed to browse directory: {message}",
"batchImportDirectorySelected": "Directory selected: {path}",
"noRecipesSelected": "לא נבחרו מתכונים",
"noMissingLorasInSelection": "לא נמצאו LoRAs חסרים במתכונים שנבחרו",
"noLoraRootConfigured": "תיקיית השורש של LoRA לא מוגדרת. אנא הגדר תיקיית שורש LoRA ברירת מחדל בהגדרות."
},
"models": {
"noModelsSelected": "לא נבחרו מודלים",
@@ -1571,6 +1676,8 @@
"mappingSaveFailed": "שמירת מיפויי מודל בסיס נכשלה: {message}",
"downloadTemplatesUpdated": "תבניות נתיב הורדה עודכנו",
"downloadTemplatesFailed": "שמירת תבניות נתיב הורדה נכשלה: {message}",
"recipesPathUpdated": "נתיב אחסון המתכונים עודכן",
"recipesPathSaveFailed": "עדכון נתיב אחסון המתכונים נכשל: {message}",
"settingsUpdated": "הגדרות עודכנו: {setting}",
"compactModeToggled": "מצב קומפקטי {state}",
"settingSaveFailed": "שמירת ההגדרה נכשלה: {message}",
@@ -1714,6 +1821,35 @@
"moveFailed": "Failed to move item: {message}"
}
},
"doctor": {
"kicker": "אבחון מערכת",
"title": "דוקטור",
"buttonTitle": "הפעלת אבחון ותיקונים נפוצים",
"loading": "בודק את הסביבה...",
"footer": "ייצא חבילת אבחון אם הבעיה עדיין נמשכת לאחר התיקון.",
"summary": {
"idle": "הרץ בדיקת תקינות עבור הגדרות, שלמות המטמון ועקביות הממשק.",
"ok": "לא נמצאו בעיות פעילות בסביבה הנוכחית.",
"warning": "נמצאה/נמצאו {count} בעיה/בעיות. את רובן אפשר לתקן ישירות מלוח זה.",
"error": "יש לטפל ב-{count} בעיה/בעיות לפני שהאפליקציה תהיה תקינה לחלוטין."
},
"status": {
"ok": "תקין",
"warning": "דורש תשומת לב",
"error": "נדרשת פעולה"
},
"actions": {
"runAgain": "הפעל שוב",
"exportBundle": "ייצוא חבילה"
},
"toast": {
"loadFailed": "טעינת האבחון נכשלה: {message}",
"repairSuccess": "בניית המטמון מחדש הושלמה.",
"repairFailed": "בניית המטמון מחדש נכשלה: {message}",
"exportSuccess": "חבילת האבחון יוצאה.",
"exportFailed": "ייצוא חבילת האבחון נכשל: {message}"
}
},
"banners": {
"versionMismatch": {
"title": "זוהה עדכון יישום",
280 locales/ja.json
@@ -263,7 +263,9 @@
|
||||
"videoSettings": "動画設定",
|
||||
"layoutSettings": "レイアウト設定",
|
||||
"misc": "その他",
|
||||
"backup": "バックアップ",
|
||||
"folderSettings": "デフォルトルート",
|
||||
"recipeSettings": "レシピ",
|
||||
"extraFolderPaths": "追加フォルダーパス",
|
||||
"downloadPathTemplates": "ダウンロードパステンプレート",
|
||||
"priorityTags": "優先タグ",
|
||||
@@ -291,7 +293,15 @@
|
||||
"blurNsfwContent": "NSFWコンテンツをぼかす",
|
||||
"blurNsfwContentHelp": "成人向け(NSFW)コンテンツのプレビュー画像をぼかします",
|
||||
"showOnlySfw": "SFWコンテンツのみ表示",
|
||||
"showOnlySfwHelp": "閲覧と検索時にすべてのNSFWコンテンツを除外します"
|
||||
"showOnlySfwHelp": "閲覧と検索時にすべてのNSFWコンテンツを除外します",
|
||||
"matureBlurThreshold": "成人コンテンツぼかし閾値",
|
||||
"matureBlurThresholdHelp": "NSFWぼかしが有効な場合、どのレーティングレベルからぼかしフィルタリングを開始するかを設定します。",
|
||||
"matureBlurThresholdOptions": {
|
||||
"pg13": "PG13 以上",
|
||||
"r": "R 以上(デフォルト)",
|
||||
"x": "X 以上",
|
||||
"xxx": "XXX のみ"
|
||||
}
|
||||
},
|
||||
"videoSettings": {
|
||||
"autoplayOnHover": "ホバー時に動画を自動再生",
|
||||
@@ -315,6 +325,54 @@
|
||||
"saveFailed": "スキップパスの保存に失敗しました:{message}"
|
||||
}
|
||||
},
|
||||
"backup": {
|
||||
"autoEnabled": "自動バックアップ",
|
||||
"autoEnabledHelp": "1日1回ローカルのスナップショットを作成し、保持ポリシーに従って最新のものを残します。",
|
||||
"retention": "保持数",
|
||||
"retentionHelp": "古いものを削除する前に、何件の自動スナップショットを保持するかを指定します。",
|
||||
"management": "バックアップ管理",
|
||||
"managementHelp": "現在のユーザー状態をエクスポートするか、バックアップアーカイブから復元します。",
|
||||
"scopeHelp": "設定、ダウンロード履歴、モデル更新の状態をバックアップします。モデルファイルや再生成できるキャッシュは含まれません。",
|
||||
"locationSummary": "現在のバックアップ場所",
|
||||
"openFolderButton": "バックアップフォルダを開く",
|
||||
"openFolderSuccess": "バックアップフォルダを開きました",
|
||||
"openFolderFailed": "バックアップフォルダを開けませんでした",
|
||||
"locationCopied": "バックアップパスをクリップボードにコピーしました: {{path}}",
|
||||
"locationClipboardFallback": "バックアップパス: {{path}}",
|
||||
"exportButton": "バックアップをエクスポート",
|
||||
"exportSuccess": "バックアップを正常にエクスポートしました。",
|
||||
"exportFailed": "バックアップのエクスポートに失敗しました: {message}",
|
||||
"importButton": "バックアップをインポート",
|
||||
"importConfirm": "このバックアップをインポートして、ローカルのユーザー状態を上書きしますか?",
|
||||
"importSuccess": "バックアップを正常にインポートしました。",
|
||||
"importFailed": "バックアップのインポートに失敗しました: {message}",
|
||||
"latestSnapshot": "最新のスナップショット",
|
||||
"latestAutoSnapshot": "最新の自動スナップショット",
|
||||
"snapshotCount": "保存済みスナップショット",
|
||||
"noneAvailable": "まだスナップショットはありません"
|
||||
},
|
||||
"downloadSkipBaseModels": {
|
||||
"label": "ベースモデルのダウンロードをスキップ",
|
||||
"help": "すべてのダウンロードフローに適用されます。ここでは対応しているベースモデルのみ選択できます。",
|
||||
"searchPlaceholder": "ベースモデルを絞り込む...",
|
||||
"empty": "現在の検索に一致するベースモデルはありません。",
|
||||
"summary": {
|
||||
"none": "未選択",
|
||||
"count": "{count} 件を選択"
|
||||
},
|
||||
"actions": {
|
||||
"edit": "編集",
|
||||
"collapse": "折りたたむ",
|
||||
"clear": "クリア"
|
||||
},
|
||||
"validation": {
|
||||
"saveFailed": "除外するベースモデルを保存できませんでした: {message}"
|
||||
}
|
||||
},
|
||||
"skipPreviouslyDownloadedModelVersions": {
|
||||
"label": "以前にダウンロードしたモデルバージョンをスキップ",
|
||||
"help": "有効にすると、ダウンロード履歴サービスがそのバージョンが既にダウンロード済みと記録している場合、LoRA Managerはそのモデルバージョンのダウンロードをスキップします。すべてのダウンロードフローに適用されます。"
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "表示密度",
|
||||
"displayDensityOptions": {
|
||||
@@ -363,12 +421,16 @@
|
||||
"defaultUnetRootHelp": "ダウンロード、インポート、移動用のデフォルトDiffusion Model (UNET)ルートディレクトリを設定",
|
||||
"defaultEmbeddingRoot": "Embeddingルート",
|
||||
"defaultEmbeddingRootHelp": "ダウンロード、インポート、移動用のデフォルトembeddingルートディレクトリを設定",
|
||||
"recipesPath": "レシピ保存先",
|
||||
"recipesPathHelp": "保存済みレシピ用の任意のカスタムディレクトリです。空欄にすると最初のLoRAルートのrecipesフォルダーを使用します。",
|
||||
"recipesPathPlaceholder": "/path/to/recipes",
|
||||
"recipesPathMigrating": "レシピ保存先を移動中...",
|
||||
"noDefault": "デフォルトなし"
|
||||
},
|
||||
"extraFolderPaths": {
|
||||
"title": "追加フォルダーパス",
|
||||
"help": "ComfyUIの標準パスの外部に追加のモデルフォルダを追加します。これらのパスは別々に保存され、デフォルトのフォルダと一緒にスキャンされます。",
|
||||
"description": "モデルをスキャンするための追加フォルダを設定します。これらのパスはLoRA Manager固有であり、ComfyUIのデフォルトパスとマージされます。",
|
||||
"description": "LoRA Manager専用の追加モデルルートパス。ComfyUIの標準フォルダー外の場所からモデルを読み込みます。ComfyUIの動作を低下させる可能性のある大規模ライブラリに最適です。",
|
||||
"restartRequired": "Requires restart to take effect",
|
||||
"modelTypes": {
|
||||
"lora": "LoRAパス",
|
||||
"checkpoint": "Checkpointパス",
|
||||
@@ -376,7 +438,7 @@
|
||||
"embedding": "Embeddingパス"
|
||||
},
|
||||
"pathPlaceholder": "/追加モデルへのパス",
|
||||
"saveSuccess": "追加フォルダーパスを更新しました。",
|
||||
"saveSuccess": "追加フォルダーパスを更新しました。変更を適用するには再起動が必要です。",
|
||||
"saveError": "追加フォルダーパスの更新に失敗しました: {message}",
|
||||
"validation": {
|
||||
"duplicatePath": "このパスはすでに設定されています"
|
||||
@@ -575,6 +637,7 @@
|
||||
"skipMetadataRefresh": "選択したモデルのメタデータ更新をスキップ",
|
||||
"resumeMetadataRefresh": "選択したモデルのメタデータ更新を再開",
|
||||
"deleteAll": "すべてのモデルを削除",
|
||||
"downloadMissingLoras": "不足している LoRA をダウンロード",
|
||||
"clear": "選択をクリア",
|
||||
"skipMetadataRefreshCount": "スキップ({count}モデル)",
|
||||
"resumeMetadataRefreshCount": "再開({count}モデル)",
|
||||
@@ -645,6 +708,8 @@
|
||||
"root": "ルート",
|
||||
"browseFolders": "フォルダを参照:",
|
||||
"downloadAndSaveRecipe": "ダウンロード & レシピ保存",
|
||||
"importRecipeOnly": "レシピのみインポート",
|
||||
"importAndDownload": "インポートとダウンロード",
|
||||
"downloadMissingLoras": "不足しているLoRAをダウンロード",
|
||||
"saveRecipe": "レシピを保存",
|
||||
"loraCountInfo": "({existing}/{total} ライブラリ内)",
|
||||
@@ -732,61 +797,61 @@
|
||||
}
|
||||
},
|
||||
"batchImport": {
|
||||
"title": "[TODO: Translate] Batch Import Recipes",
|
||||
"action": "[TODO: Translate] Batch Import",
|
||||
"urlList": "[TODO: Translate] URL List",
|
||||
"directory": "[TODO: Translate] Directory",
|
||||
"urlDescription": "[TODO: Translate] Enter image URLs or local file paths (one per line). Each will be imported as a recipe.",
|
||||
"directoryDescription": "[TODO: Translate] Enter a directory path to import all images from that folder.",
|
||||
"urlsLabel": "[TODO: Translate] Image URLs or Local Paths",
|
||||
"urlsPlaceholder": "[TODO: Translate] https://civitai.com/images/...\nhttps://civitai.com/images/...\nC:/path/to/image.png\n...",
|
||||
"urlsHint": "[TODO: Translate] Enter one URL or path per line",
|
||||
"directoryPath": "[TODO: Translate] Directory Path",
|
||||
"directoryPlaceholder": "[TODO: Translate] /path/to/images/folder",
|
||||
"browse": "[TODO: Translate] Browse",
|
||||
"recursive": "[TODO: Translate] Include subdirectories",
|
||||
"tagsOptional": "[TODO: Translate] Tags (optional, applied to all recipes)",
|
||||
"tagsPlaceholder": "[TODO: Translate] Enter tags separated by commas",
|
||||
"tagsHint": "[TODO: Translate] Tags will be added to all imported recipes",
|
||||
"skipNoMetadata": "[TODO: Translate] Skip images without metadata",
|
||||
"skipNoMetadataHelp": "[TODO: Translate] Images without LoRA metadata will be skipped automatically.",
|
||||
"start": "[TODO: Translate] Start Import",
|
||||
"startImport": "[TODO: Translate] Start Import",
|
||||
"importing": "[TODO: Translate] Importing...",
|
||||
"progress": "[TODO: Translate] Progress",
|
||||
"total": "[TODO: Translate] Total",
|
||||
"success": "[TODO: Translate] Success",
|
||||
"failed": "[TODO: Translate] Failed",
|
||||
"skipped": "[TODO: Translate] Skipped",
|
||||
"current": "[TODO: Translate] Current",
|
||||
"currentItem": "[TODO: Translate] Current",
|
||||
"preparing": "[TODO: Translate] Preparing...",
|
||||
"cancel": "[TODO: Translate] Cancel",
|
||||
"cancelImport": "[TODO: Translate] Cancel",
|
||||
"cancelled": "[TODO: Translate] Import cancelled",
|
||||
"completed": "[TODO: Translate] Import completed",
|
||||
"completedWithErrors": "[TODO: Translate] Completed with errors",
|
||||
"completedSuccess": "[TODO: Translate] Successfully imported {count} recipe(s)",
|
||||
"successCount": "[TODO: Translate] Successful",
|
||||
"failedCount": "[TODO: Translate] Failed",
|
||||
"skippedCount": "[TODO: Translate] Skipped",
|
||||
"totalProcessed": "[TODO: Translate] Total processed",
|
||||
"viewDetails": "[TODO: Translate] View Details",
|
||||
"newImport": "[TODO: Translate] New Import",
|
||||
"manualPathEntry": "[TODO: Translate] Please enter the directory path manually. File browser is not available in this browser.",
|
||||
"batchImportDirectorySelected": "[TODO: Translate] Directory selected: {name}. You may need to enter the full path manually.",
|
||||
"batchImportManualEntryRequired": "[TODO: Translate] File browser not available. Please enter the directory path manually.",
|
||||
"backToParent": "[TODO: Translate] Back to parent directory",
|
||||
"folders": "[TODO: Translate] Folders",
|
||||
"folderCount": "[TODO: Translate] {count} folders",
|
||||
"imageFiles": "[TODO: Translate] Image Files",
|
||||
"images": "[TODO: Translate] images",
|
||||
"imageCount": "[TODO: Translate] {count} images",
|
||||
"selectFolder": "[TODO: Translate] Select This Folder",
|
||||
"title": "Batch Import Recipes",
|
||||
"action": "Batch Import",
|
||||
"urlList": "URL List",
|
||||
"directory": "Directory",
|
||||
"urlDescription": "Enter image URLs or local file paths (one per line). Each will be imported as a recipe.",
|
||||
"directoryDescription": "Enter a directory path to import all images from that folder.",
|
||||
"urlsLabel": "Image URLs or Local Paths",
|
||||
"urlsPlaceholder": "https://civitai.com/images/...\nhttps://civitai.com/images/...\nC:/path/to/image.png\n...",
|
||||
"urlsHint": "Enter one URL or path per line",
|
||||
"directoryPath": "Directory Path",
|
||||
"directoryPlaceholder": "/path/to/images/folder",
|
||||
"browse": "Browse",
|
||||
"recursive": "Include subdirectories",
|
||||
"tagsOptional": "Tags (optional, applied to all recipes)",
|
||||
"tagsPlaceholder": "Enter tags separated by commas",
|
||||
"tagsHint": "Tags will be added to all imported recipes",
|
||||
"skipNoMetadata": "Skip images without metadata",
|
||||
"skipNoMetadataHelp": "Images without LoRA metadata will be skipped automatically.",
|
||||
"start": "Start Import",
|
||||
"startImport": "Start Import",
|
||||
"importing": "Importing...",
|
||||
"progress": "Progress",
|
||||
"total": "Total",
|
||||
"success": "Success",
|
||||
"failed": "Failed",
|
||||
"skipped": "Skipped",
|
||||
"current": "Current",
|
||||
"currentItem": "Current",
|
||||
"preparing": "Preparing...",
|
||||
"cancel": "Cancel",
|
||||
"cancelImport": "Cancel",
|
||||
"cancelled": "Import cancelled",
|
||||
"completed": "Import completed",
|
||||
"completedWithErrors": "Completed with errors",
|
||||
"completedSuccess": "Successfully imported {count} recipe(s)",
|
||||
"successCount": "Successful",
|
||||
"failedCount": "Failed",
|
||||
"skippedCount": "Skipped",
|
||||
"totalProcessed": "Total processed",
|
||||
"viewDetails": "View Details",
|
||||
"newImport": "New Import",
|
||||
"manualPathEntry": "Please enter the directory path manually. File browser is not available in this browser.",
|
||||
"batchImportDirectorySelected": "Directory selected: {path}",
|
||||
"batchImportManualEntryRequired": "File browser not available. Please enter the directory path manually.",
|
||||
"backToParent": "Back to parent directory",
|
||||
"folders": "Folders",
|
||||
"folderCount": "{count} folders",
|
||||
"imageFiles": "Image Files",
|
||||
"images": "images",
|
||||
"imageCount": "{count} images",
|
||||
"selectFolder": "Select This Folder",
|
||||
"errors": {
|
||||
"enterUrls": "[TODO: Translate] Please enter at least one URL or path",
|
||||
"enterDirectory": "[TODO: Translate] Please enter a directory path",
|
||||
"startFailed": "[TODO: Translate] Failed to start import: {message}"
|
||||
"enterUrls": "Please enter at least one URL or path",
|
||||
"enterDirectory": "Please enter a directory path",
|
||||
"startFailed": "Failed to start import: {message}"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -797,7 +862,8 @@
|
||||
"diffusion_model": "Diffusion Model"
|
||||
},
|
||||
"contextMenu": {
|
||||
"moveToOtherTypeFolder": "{otherType} フォルダに移動"
|
||||
"moveToOtherTypeFolder": "{otherType} フォルダに移動",
|
||||
"sendToWorkflow": "ワークフローに送信"
|
||||
}
|
||||
},
|
||||
"embeddings": {
|
||||
@@ -810,8 +876,8 @@
|
||||
"unpinSidebar": "サイドバーの固定を解除",
|
||||
"switchToListView": "リストビューに切り替え",
|
||||
"switchToTreeView": "ツリー表示に切り替え",
|
||||
"recursiveOn": "サブフォルダーを検索",
|
||||
"recursiveOff": "現在のフォルダーのみを検索",
|
||||
"recursiveOn": "サブフォルダーを含める",
|
||||
"recursiveOff": "現在のフォルダーのみ",
|
||||
"recursiveUnavailable": "再帰検索はツリービューでのみ利用できます",
|
||||
"collapseAllDisabled": "リストビューでは利用できません",
|
||||
"dragDrop": {
|
||||
@@ -891,6 +957,8 @@
|
||||
"earlyAccess": "アーリーアクセス",
|
||||
"earlyAccessTooltip": "アーリーアクセスが必要",
|
||||
"inLibrary": "ライブラリ内",
|
||||
"downloaded": "ダウンロード済み",
|
||||
"downloadedTooltip": "以前にダウンロード済みですが、現在はライブラリにありません。",
|
||||
"alreadyInLibrary": "既にライブラリ内",
|
||||
"autoOrganizedPath": "[パステンプレートによる自動整理]",
|
||||
"errors": {
|
||||
@@ -981,6 +1049,14 @@
|
||||
"save": "ベースモデルを更新",
|
||||
"cancel": "キャンセル"
|
||||
},
|
||||
"bulkDownloadMissingLoras": {
|
||||
"title": "不足している LoRA をダウンロード",
|
||||
"message": "選択したレシピから合計 {totalCount} 個中 {uniqueCount} 個のユニークな不足している LoRA が見つかりました。",
|
||||
"previewTitle": "ダウンロードする LoRA:",
|
||||
"moreItems": "...あと {count} 個",
|
||||
"note": "ファイルはデフォルトのパステンプレートを使用してダウンロードされます。LoRA の数によっては時間がかかる場合があります。",
|
||||
"downloadButton": "{count} 個の LoRA をダウンロード"
|
||||
},
|
||||
"exampleAccess": {
|
||||
"title": "ローカル例画像",
|
||||
"message": "このモデルのローカル例画像が見つかりませんでした。表示オプション:",
|
||||
@@ -1032,7 +1108,9 @@
|
||||
"viewOnCivitai": "Civitaiで表示",
|
||||
"viewOnCivitaiText": "Civitaiで表示",
|
||||
"viewCreatorProfile": "作成者プロフィールを表示",
|
||||
"openFileLocation": "ファイルの場所を開く"
|
||||
"openFileLocation": "ファイルの場所を開く",
|
||||
"sendToWorkflow": "ComfyUI に送信",
|
||||
"sendToWorkflowText": "ComfyUI に送信"
|
||||
},
|
||||
"openFileLocation": {
|
||||
"success": "ファイルの場所を正常に開きました",
|
||||
@@ -1040,6 +1118,9 @@
|
||||
"copied": "パスをクリップボードにコピーしました: {{path}}",
|
||||
"clipboardFallback": "パス: {{path}}"
|
||||
},
|
||||
"sendToWorkflow": {
|
||||
"noFilePath": "ComfyUI に送信できません:ファイルパスがありません"
|
||||
},
|
||||
"metadata": {
|
||||
"version": "バージョン",
|
||||
"fileName": "ファイル名",
|
||||
@@ -1147,17 +1228,30 @@
|
||||
"days": "{count}日後"
|
||||
},
|
||||
"badges": {
|
||||
"current": "現在のバージョン",
|
||||
"current": "開いたバージョン",
|
||||
"currentTooltip": "このモーダルを開くために選択したバージョンです",
|
||||
"inLibrary": "ライブラリにあります",
|
||||
"inLibraryTooltip": "このバージョンはローカルライブラリに存在します",
|
||||
"downloaded": "ダウンロード済み",
|
||||
"downloadedTooltip": "このバージョンは以前ダウンロードされましたが、現在はライブラリにありません",
|
||||
"newer": "新しいバージョン",
|
||||
"newerTooltip": "このバージョンはローカルの最新バージョンより新しいです",
|
||||
"earlyAccess": "早期アクセス",
|
||||
"ignored": "無視中"
|
||||
"earlyAccessTooltip": "このバージョンは現在 Civitai の早期アクセスが必要です",
|
||||
"ignored": "無視中",
|
||||
"ignoredTooltip": "このバージョンの更新通知は無効です"
|
||||
},
|
||||
"actions": {
|
||||
"download": "ダウンロード",
|
||||
"downloadTooltip": "このバージョンをダウンロード",
|
||||
"downloadEarlyAccessTooltip": "Civitai からこの早期アクセス版をダウンロード",
|
||||
"delete": "削除",
|
||||
"deleteTooltip": "このローカルバージョンを削除",
|
||||
"ignore": "無視",
|
||||
"unignore": "無視を解除",
|
||||
"ignoreTooltip": "このバージョンの更新通知を無視",
|
||||
"unignoreTooltip": "このバージョンの更新通知を再開",
|
||||
"viewVersionOnCivitai": "Civitai でバージョンを表示",
|
||||
"earlyAccessTooltip": "早期アクセス購入が必要",
|
||||
"resumeModelUpdates": "このモデルの更新を再開",
|
||||
"ignoreModelUpdates": "このモデルの更新を無視",
|
||||
@@ -1297,7 +1391,9 @@
|
||||
"recipeReplaced": "レシピがワークフローで置換されました",
|
||||
"recipeFailedToSend": "レシピをワークフローに送信できませんでした",
|
||||
"noMatchingNodes": "現在のワークフローには互換性のあるノードがありません",
|
||||
"noTargetNodeSelected": "ターゲットノードが選択されていません"
|
||||
"noTargetNodeSelected": "ターゲットノードが選択されていません",
|
||||
"modelUpdated": "モデルがワークフローで更新されました",
|
||||
"modelFailed": "モデルノードの更新に失敗しました"
|
||||
},
|
||||
"nodeSelector": {
|
||||
"recipe": "レシピ",
|
||||
@@ -1448,6 +1544,7 @@
|
||||
"pleaseSelectVersion": "バージョンを選択してください",
|
||||
"versionExists": "このバージョンは既にライブラリに存在します",
|
||||
"downloadCompleted": "ダウンロードが正常に完了しました",
|
||||
"downloadSkippedByBaseModel": "ベースモデル {baseModel} が除外されているため、ダウンロードをスキップしました",
|
||||
"autoOrganizeSuccess": "{count} {type} の自動整理が正常に完了しました",
|
||||
"autoOrganizePartialSuccess": "自動整理が完了しました:{total} モデル中 {success} 移動、{failures} 失敗",
|
||||
"autoOrganizeFailed": "自動整理に失敗しました:{error}",
|
||||
@@ -1467,7 +1564,11 @@
|
||||
"nameUpdated": "レシピ名が正常に更新されました",
|
||||
"tagsUpdated": "レシピタグが正常に更新されました",
|
||||
"sourceUrlUpdated": "ソースURLが正常に更新されました",
|
||||
"promptUpdated": "プロンプトが正常に更新されました",
|
||||
"negativePromptUpdated": "ネガティブプロンプトが正常に更新されました",
|
||||
"promptEditorHint": "Enterキーで保存、Shift+Enterで改行",
|
||||
"noRecipeId": "レシピIDが利用できません",
|
||||
"sendToWorkflowFailed": "ワークフローへのレシピ送信に失敗しました:{message}",
|
||||
"copyFailed": "レシピ構文のコピーエラー:{message}",
|
||||
"noMissingLoras": "ダウンロードする不足LoRAがありません",
|
||||
"missingLorasInfoFailed": "不足LoRAの情報取得に失敗しました",
|
||||
@@ -1495,16 +1596,20 @@
|
||||
"processingError": "処理エラー:{message}",
|
||||
"folderBrowserError": "フォルダブラウザの読み込みエラー:{message}",
|
||||
"recipeSaveFailed": "レシピの保存に失敗しました:{error}",
|
||||
"recipeSaved": "Recipe saved successfully",
|
||||
"importFailed": "インポートに失敗しました:{message}",
|
||||
"folderTreeFailed": "フォルダツリーの読み込みに失敗しました",
|
||||
"folderTreeError": "フォルダツリー読み込みエラー",
|
||||
"batchImportFailed": "[TODO: Translate] Failed to start batch import: {message}",
|
||||
"batchImportCancelling": "[TODO: Translate] Cancelling batch import...",
|
||||
"batchImportCancelFailed": "[TODO: Translate] Failed to cancel batch import: {message}",
|
||||
"batchImportNoUrls": "[TODO: Translate] Please enter at least one URL or file path",
|
||||
"batchImportNoDirectory": "[TODO: Translate] Please enter a directory path",
|
||||
"batchImportBrowseFailed": "[TODO: Translate] Failed to browse directory: {message}",
|
||||
"batchImportDirectorySelected": "[TODO: Translate] Directory selected: {path}"
|
||||
"batchImportFailed": "Failed to start batch import: {message}",
|
||||
"batchImportCancelling": "Cancelling batch import...",
|
||||
"batchImportCancelFailed": "Failed to cancel batch import: {message}",
|
||||
"batchImportNoUrls": "Please enter at least one URL or file path",
|
||||
"batchImportNoDirectory": "Please enter a directory path",
|
||||
"batchImportBrowseFailed": "Failed to browse directory: {message}",
|
||||
"batchImportDirectorySelected": "Directory selected: {path}",
|
||||
"noRecipesSelected": "レシピが選択されていません",
|
||||
"noMissingLorasInSelection": "選択したレシピに不足している LoRA が見つかりませんでした",
|
||||
"noLoraRootConfigured": "LoRA ルートディレクトリが設定されていません。設定でデフォルトの LoRA ルートを設定してください。"
|
||||
},
|
||||
"models": {
|
||||
"noModelsSelected": "モデルが選択されていません",
|
||||
@@ -1571,6 +1676,8 @@
|
||||
"mappingSaveFailed": "ベースモデルマッピングの保存に失敗しました:{message}",
|
||||
"downloadTemplatesUpdated": "ダウンロードパステンプレートが更新されました",
|
||||
"downloadTemplatesFailed": "ダウンロードパステンプレートの保存に失敗しました:{message}",
|
||||
"recipesPathUpdated": "レシピ保存先を更新しました",
|
||||
"recipesPathSaveFailed": "レシピ保存先の更新に失敗しました: {message}",
|
||||
"settingsUpdated": "設定が更新されました:{setting}",
|
||||
"compactModeToggled": "コンパクトモード {state}",
|
||||
"settingSaveFailed": "設定の保存に失敗しました:{message}",
|
||||
@@ -1714,6 +1821,35 @@
|
||||
"moveFailed": "Failed to move item: {message}"
|
||||
}
|
||||
},
|
||||
"doctor": {
|
||||
"kicker": "システム診断",
|
||||
"title": "ドクター",
|
||||
"buttonTitle": "診断と一般的な修復を実行",
|
||||
"loading": "環境を確認中...",
|
||||
"footer": "修復後も問題が続く場合は、診断パッケージをエクスポートしてください。",
|
||||
"summary": {
|
||||
"idle": "設定、キャッシュ整合性、UI の一貫性をヘルスチェックします。",
|
||||
"ok": "現在の環境でアクティブな問題は見つかりませんでした。",
|
||||
"warning": "{count} 件の問題が見つかりました。ほとんどはこのパネルから直接修復できます。",
|
||||
"error": "アプリが完全に正常になる前に、{count} 件の問題に対処する必要があります。"
|
||||
},
|
||||
"status": {
|
||||
"ok": "正常",
|
||||
"warning": "要注意",
|
||||
"error": "対応が必要"
|
||||
},
|
||||
"actions": {
|
||||
"runAgain": "再実行",
|
||||
"exportBundle": "パッケージをエクスポート"
|
||||
},
|
||||
"toast": {
|
||||
"loadFailed": "診断の読み込みに失敗しました: {message}",
|
||||
"repairSuccess": "キャッシュの再構築が完了しました。",
|
||||
"repairFailed": "キャッシュの再構築に失敗しました: {message}",
|
||||
"exportSuccess": "診断パッケージをエクスポートしました。",
|
||||
"exportFailed": "診断パッケージのエクスポートに失敗しました: {message}"
|
||||
}
|
||||
},
|
||||
"banners": {
|
||||
"versionMismatch": {
|
||||
"title": "アプリケーション更新が検出されました",
|
||||
|
||||
280 locales/ko.json
@@ -263,7 +263,9 @@
|
||||
"videoSettings": "비디오 설정",
|
||||
"layoutSettings": "레이아웃 설정",
|
||||
"misc": "기타",
|
||||
"backup": "백업",
|
||||
"folderSettings": "기본 루트",
|
||||
"recipeSettings": "레시피",
|
||||
"extraFolderPaths": "추가 폴다 경로",
|
||||
"downloadPathTemplates": "다운로드 경로 템플릿",
|
||||
"priorityTags": "우선순위 태그",
|
||||
@@ -291,7 +293,15 @@
|
||||
"blurNsfwContent": "NSFW 콘텐츠 블러 처리",
|
||||
"blurNsfwContentHelp": "성인(NSFW) 콘텐츠 미리보기 이미지를 블러 처리합니다",
|
||||
"showOnlySfw": "SFW 결과만 표시",
|
||||
"showOnlySfwHelp": "탐색 및 검색 시 모든 NSFW 콘텐츠를 필터링합니다"
|
||||
"showOnlySfwHelp": "탐색 및 검색 시 모든 NSFW 콘텐츠를 필터링합니다",
|
||||
"matureBlurThreshold": "성인 콘텐츠 블러 임계값",
|
||||
"matureBlurThresholdHelp": "NSFW 블러가 활성화될 때 어떤 등급 레벨부터 블러 필터링을 시작할지 설정합니다.",
|
||||
"matureBlurThresholdOptions": {
|
||||
"pg13": "PG13 이상",
|
||||
"r": "R 이상(기본값)",
|
||||
"x": "X 이상",
|
||||
"xxx": "XXX만"
|
||||
}
|
||||
},
|
||||
"videoSettings": {
|
||||
"autoplayOnHover": "호버 시 비디오 자동 재생",
|
||||
@@ -315,6 +325,54 @@
|
||||
"saveFailed": "건너뛰기 경로를 저장할 수 없습니다: {message}"
|
||||
}
|
||||
},
|
||||
"backup": {
|
||||
"autoEnabled": "자동 백업",
|
||||
"autoEnabledHelp": "하루에 한 번 로컬 스냅샷을 만들고 보존 정책에 따라 최신 스냅샷을 유지합니다.",
|
||||
"retention": "보존 개수",
|
||||
"retentionHelp": "오래된 자동 스냅샷을 삭제하기 전에 몇 개를 유지할지 지정합니다.",
|
||||
"management": "백업 관리",
|
||||
"managementHelp": "현재 사용자 상태를 내보내거나 백업 아카이브에서 복원합니다.",
|
||||
"scopeHelp": "설정, 다운로드 기록, 모델 업데이트 상태를 백업합니다. 모델 파일과 다시 생성할 수 있는 캐시는 포함되지 않습니다.",
|
||||
"locationSummary": "현재 백업 위치",
|
||||
"openFolderButton": "백업 폴더 열기",
|
||||
"openFolderSuccess": "백업 폴더를 열었습니다",
|
||||
"openFolderFailed": "백업 폴더를 열지 못했습니다",
|
||||
"locationCopied": "백업 경로를 클립보드에 복사했습니다: {{path}}",
|
||||
"locationClipboardFallback": "백업 경로: {{path}}",
|
||||
"exportButton": "백업 내보내기",
|
||||
"exportSuccess": "백업을 성공적으로 내보냈습니다.",
|
||||
"exportFailed": "백업 내보내기에 실패했습니다: {message}",
|
||||
"importButton": "백업 가져오기",
|
||||
"importConfirm": "이 백업을 가져와서 로컬 사용자 상태를 덮어쓰시겠습니까?",
|
||||
"importSuccess": "백업을 성공적으로 가져왔습니다.",
|
||||
"importFailed": "백업 가져오기에 실패했습니다: {message}",
|
||||
"latestSnapshot": "최근 스냅샷",
|
||||
"latestAutoSnapshot": "최근 자동 스냅샷",
|
||||
"snapshotCount": "저장된 스냅샷",
|
||||
"noneAvailable": "아직 스냅샷이 없습니다"
|
||||
},
|
||||
"downloadSkipBaseModels": {
|
||||
"label": "기본 모델 다운로드 건너뛰기",
|
||||
"help": "모든 다운로드 흐름에 적용됩니다. 여기서는 지원되는 기본 모델만 선택할 수 있습니다.",
|
||||
"searchPlaceholder": "기본 모델 필터링...",
|
||||
"empty": "현재 검색과 일치하는 기본 모델이 없습니다.",
|
||||
"summary": {
|
||||
"none": "선택 없음",
|
||||
"count": "{count}개 선택됨"
|
||||
},
|
||||
"actions": {
|
||||
"edit": "편집",
|
||||
"collapse": "접기",
|
||||
"clear": "지우기"
|
||||
},
|
||||
"validation": {
|
||||
"saveFailed": "제외된 기본 모델을 저장할 수 없습니다: {message}"
|
||||
}
|
||||
},
|
||||
"skipPreviouslyDownloadedModelVersions": {
|
||||
"label": "이전에 다운로드한 모델 버전 건너뛰기",
|
||||
"help": "활성화하면 다운로드 기록 서비스가 해당 버전이 이미 다운로드되었음을 기록한 경우 LoRA Manager는 해당 모델 버전 다운로드를 건너뜁니다. 모든 다운로드 플로우에 적용됩니다."
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "표시 밀도",
|
||||
"displayDensityOptions": {
|
||||
@@ -363,12 +421,16 @@
|
||||
"defaultUnetRootHelp": "다운로드, 가져오기 및 이동을 위한 기본 Diffusion Model (UNET) 루트 디렉토리를 설정합니다",
|
||||
"defaultEmbeddingRoot": "Embedding 루트",
|
||||
"defaultEmbeddingRootHelp": "다운로드, 가져오기 및 이동을 위한 기본 Embedding 루트 디렉토리를 설정합니다",
|
||||
"recipesPath": "레시피 저장 경로",
|
||||
"recipesPathHelp": "저장된 레시피를 위한 선택적 사용자 지정 디렉터리입니다. 비워 두면 첫 번째 LoRA 루트의 recipes 폴더를 사용합니다.",
|
||||
"recipesPathPlaceholder": "/path/to/recipes",
|
||||
"recipesPathMigrating": "레시피 저장 경로를 이동 중...",
|
||||
"noDefault": "기본값 없음"
|
||||
},
|
||||
"extraFolderPaths": {
|
||||
"title": "추가 폴다 경로",
|
||||
"help": "ComfyUI의 표준 경로 외부에 추가 모델 폴드를 추가하세요. 이러한 경로는 별도로 저장되며 기본 폴와 함께 스캔됩니다.",
|
||||
"description": "모델을 스캔하기 위한 추가 폴를 설정하세요. 이러한 경로는 LoRA Manager 특유의 것이며 ComfyUI의 기본 경로와 병합됩니다.",
|
||||
"description": "LoRA Manager 전용 추가 모델 루트 경로입니다. ComfyUI의 표준 폴더 외부 위치에서 모델을 로드하여 대규모 라이브러리로 인한 성능 저하를 방지합니다.",
|
||||
"restartRequired": "Requires restart to take effect",
|
||||
"modelTypes": {
|
||||
"lora": "LoRA 경로",
|
||||
"checkpoint": "Checkpoint 경로",
|
||||
@@ -376,7 +438,7 @@
|
||||
"embedding": "Embedding 경로"
|
||||
},
|
||||
"pathPlaceholder": "/추가/모델/경로",
|
||||
"saveSuccess": "추가 폴다 경로가 업데이트되었습니다.",
|
||||
"saveSuccess": "추가 폴다 경로가 업데이트되었습니다. 변경 사항을 적용하려면 재시작이 필요합니다.",
|
||||
"saveError": "추가 폴다 경로 업데이트 실패: {message}",
|
||||
"validation": {
|
||||
"duplicatePath": "이 경로는 이미 구성되어 있습니다"
|
||||
@@ -575,6 +637,7 @@
|
||||
"skipMetadataRefresh": "선택한 모델의 메타데이터 새로고침 건너뛰기",
|
||||
"resumeMetadataRefresh": "선택한 모델의 메타데이터 새로고침 재개",
|
||||
"deleteAll": "모든 모델 삭제",
|
||||
"downloadMissingLoras": "누락된 LoRA 다운로드",
|
||||
"clear": "선택 지우기",
|
||||
"skipMetadataRefreshCount": "건너뛰기({count}개 모델)",
|
||||
"resumeMetadataRefreshCount": "재개({count}개 모델)",
|
||||
@@ -645,6 +708,8 @@
|
||||
"root": "루트",
|
||||
"browseFolders": "폴더 탐색:",
|
||||
"downloadAndSaveRecipe": "다운로드 및 레시피 저장",
|
||||
"importRecipeOnly": "레시피만 가져오기",
|
||||
"importAndDownload": "가져오기 및 다운로드",
|
||||
"downloadMissingLoras": "누락된 LoRA 다운로드",
|
||||
"saveRecipe": "레시피 저장",
|
||||
"loraCountInfo": "({existing}/{total} 라이브러리에 있음)",
|
||||
@@ -732,61 +797,61 @@
|
||||
}
|
||||
},
|
||||
"batchImport": {
|
||||
"title": "[TODO: Translate] Batch Import Recipes",
|
||||
"action": "[TODO: Translate] Batch Import",
|
||||
"urlList": "[TODO: Translate] URL List",
|
||||
"directory": "[TODO: Translate] Directory",
|
||||
"urlDescription": "[TODO: Translate] Enter image URLs or local file paths (one per line). Each will be imported as a recipe.",
|
||||
"directoryDescription": "[TODO: Translate] Enter a directory path to import all images from that folder.",
|
||||
"urlsLabel": "[TODO: Translate] Image URLs or Local Paths",
|
||||
"urlsPlaceholder": "[TODO: Translate] https://civitai.com/images/...\nhttps://civitai.com/images/...\nC:/path/to/image.png\n...",
|
||||
"urlsHint": "[TODO: Translate] Enter one URL or path per line",
|
||||
"directoryPath": "[TODO: Translate] Directory Path",
|
||||
"directoryPlaceholder": "[TODO: Translate] /path/to/images/folder",
|
||||
"browse": "[TODO: Translate] Browse",
|
||||
"recursive": "[TODO: Translate] Include subdirectories",
|
||||
"tagsOptional": "[TODO: Translate] Tags (optional, applied to all recipes)",
|
||||
"tagsPlaceholder": "[TODO: Translate] Enter tags separated by commas",
|
||||
"tagsHint": "[TODO: Translate] Tags will be added to all imported recipes",
|
||||
"skipNoMetadata": "[TODO: Translate] Skip images without metadata",
|
||||
"skipNoMetadataHelp": "[TODO: Translate] Images without LoRA metadata will be skipped automatically.",
|
||||
"start": "[TODO: Translate] Start Import",
|
||||
"startImport": "[TODO: Translate] Start Import",
|
||||
"importing": "[TODO: Translate] Importing...",
|
||||
"progress": "[TODO: Translate] Progress",
|
||||
"total": "[TODO: Translate] Total",
|
||||
"success": "[TODO: Translate] Success",
|
||||
"failed": "[TODO: Translate] Failed",
|
||||
"skipped": "[TODO: Translate] Skipped",
|
||||
"current": "[TODO: Translate] Current",
|
||||
"currentItem": "[TODO: Translate] Current",
|
||||
"preparing": "[TODO: Translate] Preparing...",
|
||||
"cancel": "[TODO: Translate] Cancel",
|
||||
"cancelImport": "[TODO: Translate] Cancel",
|
||||
"cancelled": "[TODO: Translate] Import cancelled",
|
||||
"completed": "[TODO: Translate] Import completed",
|
||||
"completedWithErrors": "[TODO: Translate] Completed with errors",
|
||||
"completedSuccess": "[TODO: Translate] Successfully imported {count} recipe(s)",
|
||||
"successCount": "[TODO: Translate] Successful",
|
||||
"failedCount": "[TODO: Translate] Failed",
|
||||
"skippedCount": "[TODO: Translate] Skipped",
|
||||
"totalProcessed": "[TODO: Translate] Total processed",
|
||||
"viewDetails": "[TODO: Translate] View Details",
|
||||
"newImport": "[TODO: Translate] New Import",
|
||||
"manualPathEntry": "[TODO: Translate] Please enter the directory path manually. File browser is not available in this browser.",
|
||||
"batchImportDirectorySelected": "[TODO: Translate] Directory selected: {name}. You may need to enter the full path manually.",
|
||||
"batchImportManualEntryRequired": "[TODO: Translate] File browser not available. Please enter the directory path manually.",
|
||||
"backToParent": "[TODO: Translate] Back to parent directory",
|
||||
"folders": "[TODO: Translate] Folders",
|
||||
"folderCount": "[TODO: Translate] {count} folders",
|
||||
"imageFiles": "[TODO: Translate] Image Files",
|
||||
"images": "[TODO: Translate] images",
|
||||
"imageCount": "[TODO: Translate] {count} images",
|
||||
"selectFolder": "[TODO: Translate] Select This Folder",
|
||||
"title": "Batch Import Recipes",
|
||||
"action": "Batch Import",
|
||||
"urlList": "URL List",
|
||||
"directory": "Directory",
|
||||
"urlDescription": "Enter image URLs or local file paths (one per line). Each will be imported as a recipe.",
|
||||
"directoryDescription": "Enter a directory path to import all images from that folder.",
|
||||
"urlsLabel": "Image URLs or Local Paths",
|
||||
"urlsPlaceholder": "https://civitai.com/images/...\nhttps://civitai.com/images/...\nC:/path/to/image.png\n...",
|
||||
"urlsHint": "Enter one URL or path per line",
|
||||
"directoryPath": "Directory Path",
|
||||
"directoryPlaceholder": "/path/to/images/folder",
|
||||
"browse": "Browse",
|
||||
"recursive": "Include subdirectories",
|
||||
"tagsOptional": "Tags (optional, applied to all recipes)",
|
||||
"tagsPlaceholder": "Enter tags separated by commas",
|
||||
"tagsHint": "Tags will be added to all imported recipes",
|
||||
"skipNoMetadata": "Skip images without metadata",
|
||||
"skipNoMetadataHelp": "Images without LoRA metadata will be skipped automatically.",
|
||||
"start": "Start Import",
|
||||
"startImport": "Start Import",
|
||||
"importing": "Importing...",
|
||||
"progress": "Progress",
|
||||
"total": "Total",
|
||||
"success": "Success",
|
||||
"failed": "Failed",
|
||||
"skipped": "Skipped",
|
||||
"current": "Current",
|
||||
"currentItem": "Current",
|
||||
"preparing": "Preparing...",
|
||||
"cancel": "Cancel",
|
||||
"cancelImport": "Cancel",
|
||||
"cancelled": "Import cancelled",
|
||||
"completed": "Import completed",
|
||||
"completedWithErrors": "Completed with errors",
|
||||
"completedSuccess": "Successfully imported {count} recipe(s)",
|
||||
"successCount": "Successful",
|
||||
"failedCount": "Failed",
|
||||
"skippedCount": "Skipped",
|
||||
"totalProcessed": "Total processed",
|
||||
"viewDetails": "View Details",
|
||||
"newImport": "New Import",
|
||||
"manualPathEntry": "Please enter the directory path manually. File browser is not available in this browser.",
|
||||
"batchImportDirectorySelected": "Directory selected: {path}",
|
||||
"batchImportManualEntryRequired": "File browser not available. Please enter the directory path manually.",
|
||||
"backToParent": "Back to parent directory",
|
||||
"folders": "Folders",
|
||||
"folderCount": "{count} folders",
|
||||
"imageFiles": "Image Files",
|
||||
"images": "images",
|
||||
"imageCount": "{count} images",
|
||||
"selectFolder": "Select This Folder",
|
||||
"errors": {
|
||||
"enterUrls": "[TODO: Translate] Please enter at least one URL or path",
|
||||
"enterDirectory": "[TODO: Translate] Please enter a directory path",
|
||||
"startFailed": "[TODO: Translate] Failed to start import: {message}"
|
||||
"enterUrls": "Please enter at least one URL or path",
|
||||
"enterDirectory": "Please enter a directory path",
|
||||
"startFailed": "Failed to start import: {message}"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -797,7 +862,8 @@
|
||||
"diffusion_model": "Diffusion Model"
|
||||
},
|
||||
"contextMenu": {
|
||||
"moveToOtherTypeFolder": "{otherType} 폴더로 이동"
|
||||
"moveToOtherTypeFolder": "{otherType} 폴더로 이동",
|
||||
"sendToWorkflow": "워크플로우로 전송"
|
||||
}
|
||||
},
|
||||
"embeddings": {
|
||||
@@ -810,8 +876,8 @@
|
||||
"unpinSidebar": "사이드바 고정 해제",
|
||||
"switchToListView": "목록 보기로 전환",
|
||||
"switchToTreeView": "트리 보기로 전환",
|
||||
"recursiveOn": "하위 폴더 검색",
|
||||
"recursiveOff": "현재 폴더만 검색",
|
||||
"recursiveOn": "하위 폴더 포함",
|
||||
"recursiveOff": "현재 폴더만",
|
||||
"recursiveUnavailable": "재귀 검색은 트리 보기에서만 사용할 수 있습니다",
|
||||
"collapseAllDisabled": "목록 보기에서는 사용할 수 없습니다",
|
||||
"dragDrop": {
|
||||
@@ -891,6 +957,8 @@
|
||||
"earlyAccess": "얼리 액세스",
|
||||
"earlyAccessTooltip": "얼리 액세스 필요",
|
||||
"inLibrary": "라이브러리에 있음",
|
||||
"downloaded": "다운로드됨",
|
||||
"downloadedTooltip": "이전에 다운로드했지만 현재 라이브러리에 없습니다.",
|
||||
"alreadyInLibrary": "이미 라이브러리에 있음",
|
||||
"autoOrganizedPath": "[경로 템플릿으로 자동 정리됨]",
|
||||
"errors": {
|
||||
@@ -981,6 +1049,14 @@
|
||||
"save": "베이스 모델 업데이트",
|
||||
"cancel": "취소"
|
||||
},
|
||||
"bulkDownloadMissingLoras": {
|
||||
"title": "누락된 LoRA 다운로드",
|
||||
"message": "선택한 레시피에서 총 {totalCount}개 중 {uniqueCount}개의 고유한 누락된 LoRA를 찾았습니다.",
|
||||
"previewTitle": "다운로드할 LoRA:",
|
||||
"moreItems": "...그리고 {count}개 더",
|
||||
"note": "파일은 기본 경로 템플릿을 사용하여 다운로드됩니다. LoRA의 수에 따라 다소 시간이 걸릴 수 있습니다.",
|
||||
"downloadButton": "{count}개 LoRA 다운로드"
|
||||
},
|
||||
"exampleAccess": {
|
||||
"title": "로컬 예시 이미지",
|
||||
"message": "이 모델의 로컬 예시 이미지를 찾을 수 없습니다. 보기 옵션:",
|
||||
@@ -1032,7 +1108,9 @@
|
||||
"viewOnCivitai": "Civitai에서 보기",
|
||||
"viewOnCivitaiText": "Civitai에서 보기",
|
||||
"viewCreatorProfile": "제작자 프로필 보기",
|
||||
"openFileLocation": "파일 위치 열기"
|
||||
"openFileLocation": "파일 위치 열기",
|
||||
"sendToWorkflow": "ComfyUI로 보내기",
|
||||
"sendToWorkflowText": "ComfyUI로 보내기"
|
||||
},
|
||||
"openFileLocation": {
|
||||
"success": "파일 위치가 성공적으로 열렸습니다",
|
||||
@@ -1040,6 +1118,9 @@
|
||||
"copied": "경로가 클립보드에 복사되었습니다: {{path}}",
|
||||
"clipboardFallback": "경로: {{path}}"
|
||||
},
|
||||
"sendToWorkflow": {
|
||||
"noFilePath": "ComfyUI로 보낼 수 없습니다: 파일 경로가 없습니다"
|
||||
},
|
||||
"metadata": {
|
||||
"version": "버전",
|
||||
"fileName": "파일명",
|
||||
@@ -1147,17 +1228,30 @@
|
||||
"days": "{count}일 후"
|
||||
},
|
||||
"badges": {
|
||||
"current": "현재 버전",
|
||||
"current": "열린 버전",
|
||||
"currentTooltip": "이 모달을 열 때 사용한 버전입니다",
|
||||
"inLibrary": "라이브러리에 있음",
|
||||
"inLibraryTooltip": "이 버전은 로컬 라이브러리에 있습니다",
|
||||
"downloaded": "다운로드됨",
|
||||
"downloadedTooltip": "이 버전은 이전에 다운로드되었지만 현재는 라이브러리에 없습니다",
|
||||
"newer": "최신 버전",
|
||||
"newerTooltip": "이 버전은 로컬의 최신 버전보다 더 새롭습니다",
|
||||
"earlyAccess": "얼리 액세스",
|
||||
"ignored": "무시됨"
|
||||
"earlyAccessTooltip": "이 버전은 현재 Civitai 얼리 액세스가 필요합니다",
|
||||
"ignored": "무시됨",
|
||||
"ignoredTooltip": "이 버전은 업데이트 알림이 비활성화되어 있습니다"
|
||||
},
|
||||
"actions": {
|
||||
"download": "다운로드",
|
||||
"downloadTooltip": "이 버전 다운로드",
|
||||
"downloadEarlyAccessTooltip": "Civitai에서 이 얼리 액세스 버전 다운로드",
|
||||
"delete": "삭제",
|
||||
"deleteTooltip": "이 로컬 버전 삭제",
|
||||
"ignore": "무시",
|
||||
"unignore": "무시 해제",
|
||||
"ignoreTooltip": "이 버전의 업데이트 알림 무시",
|
||||
"unignoreTooltip": "이 버전의 업데이트 알림 다시 받기",
|
||||
"viewVersionOnCivitai": "Civitai에서 버전 보기",
|
||||
"earlyAccessTooltip": "얼리 액세스 구매 필요",
|
||||
"resumeModelUpdates": "이 모델 업데이트 재개",
|
||||
"ignoreModelUpdates": "이 모델 업데이트 무시",
|
||||
@@ -1297,7 +1391,9 @@
"recipeReplaced": "레시피가 워크플로에서 교체되었습니다",
"recipeFailedToSend": "레시피를 워크플로로 전송하지 못했습니다",
"noMatchingNodes": "현재 워크플로에서 호환되는 노드가 없습니다",
"noTargetNodeSelected": "대상 노드가 선택되지 않았습니다"
"noTargetNodeSelected": "대상 노드가 선택되지 않았습니다",
"modelUpdated": "모델이 워크플로에서 업데이트되었습니다",
"modelFailed": "모델 노드 업데이트 실패"
},
"nodeSelector": {
"recipe": "레시피",
@@ -1448,6 +1544,7 @@
"pleaseSelectVersion": "버전을 선택해주세요",
"versionExists": "이 버전은 이미 라이브러리에 있습니다",
"downloadCompleted": "다운로드가 성공적으로 완료되었습니다",
"downloadSkippedByBaseModel": "기본 모델 {baseModel}이(가) 제외되어 다운로드를 건너뛰었습니다",
"autoOrganizeSuccess": "{count}개의 {type}에 대해 자동 정리가 성공적으로 완료되었습니다",
"autoOrganizePartialSuccess": "자동 정리 완료: 전체 {total}개 중 {success}개 이동, {failures}개 실패",
"autoOrganizeFailed": "자동 정리 실패: {error}",
@@ -1467,7 +1564,11 @@
|
||||
"nameUpdated": "레시피 이름이 성공적으로 업데이트되었습니다",
|
||||
"tagsUpdated": "레시피 태그가 성공적으로 업데이트되었습니다",
|
||||
"sourceUrlUpdated": "소스 URL이 성공적으로 업데이트되었습니다",
|
||||
"promptUpdated": "프롬프트가 성공적으로 업데이트되었습니다",
|
||||
"negativePromptUpdated": "네거티브 프롬프트가 성공적으로 업데이트되었습니다",
|
||||
"promptEditorHint": "Enter 키를 눌러 저장, Shift+Enter로 새 줄",
|
||||
"noRecipeId": "사용 가능한 레시피 ID가 없습니다",
|
||||
"sendToWorkflowFailed": "워크플로우에 레시피 보내기 실패: {message}",
|
||||
"copyFailed": "레시피 문법 복사 오류: {message}",
|
||||
"noMissingLoras": "다운로드할 누락된 LoRA가 없습니다",
|
||||
"missingLorasInfoFailed": "누락된 LoRA 정보를 가져오는데 실패했습니다",
|
||||
@@ -1495,16 +1596,20 @@
|
||||
"processingError": "처리 오류: {message}",
|
||||
"folderBrowserError": "폴더 브라우저 로딩 오류: {message}",
|
||||
"recipeSaveFailed": "레시피 저장 실패: {error}",
|
||||
"recipeSaved": "Recipe saved successfully",
|
||||
"importFailed": "가져오기 실패: {message}",
|
||||
"folderTreeFailed": "폴더 트리 로딩 실패",
|
||||
"folderTreeError": "폴더 트리 로딩 오류",
|
||||
"batchImportFailed": "[TODO: Translate] Failed to start batch import: {message}",
|
||||
"batchImportCancelling": "[TODO: Translate] Cancelling batch import...",
|
||||
"batchImportCancelFailed": "[TODO: Translate] Failed to cancel batch import: {message}",
|
||||
"batchImportNoUrls": "[TODO: Translate] Please enter at least one URL or file path",
|
||||
"batchImportNoDirectory": "[TODO: Translate] Please enter a directory path",
|
||||
"batchImportBrowseFailed": "[TODO: Translate] Failed to browse directory: {message}",
|
||||
"batchImportDirectorySelected": "[TODO: Translate] Directory selected: {path}"
|
||||
"batchImportFailed": "Failed to start batch import: {message}",
|
||||
"batchImportCancelling": "Cancelling batch import...",
|
||||
"batchImportCancelFailed": "Failed to cancel batch import: {message}",
|
||||
"batchImportNoUrls": "Please enter at least one URL or file path",
|
||||
"batchImportNoDirectory": "Please enter a directory path",
|
||||
"batchImportBrowseFailed": "Failed to browse directory: {message}",
|
||||
"batchImportDirectorySelected": "Directory selected: {path}",
|
||||
"noRecipesSelected": "선택한 레시피가 없습니다",
|
||||
"noMissingLorasInSelection": "선택한 레시피에서 누락된 LoRA를 찾을 수 없습니다",
|
||||
"noLoraRootConfigured": "LoRA 루트 디렉토리가 구성되지 않았습니다. 설정에서 기본 LoRA 루트를 설정하세요."
|
||||
},
|
||||
"models": {
|
||||
"noModelsSelected": "선택된 모델이 없습니다",
|
||||
@@ -1571,6 +1676,8 @@
|
||||
"mappingSaveFailed": "베이스 모델 매핑 저장 실패: {message}",
|
||||
"downloadTemplatesUpdated": "다운로드 경로 템플릿이 업데이트되었습니다",
|
||||
"downloadTemplatesFailed": "다운로드 경로 템플릿 저장 실패: {message}",
|
||||
"recipesPathUpdated": "레시피 저장 경로가 업데이트되었습니다",
|
||||
"recipesPathSaveFailed": "레시피 저장 경로 업데이트 실패: {message}",
|
||||
"settingsUpdated": "설정 업데이트됨: {setting}",
|
||||
"compactModeToggled": "컴팩트 모드 {state}",
|
||||
"settingSaveFailed": "설정 저장 실패: {message}",
|
||||
@@ -1714,6 +1821,35 @@
|
||||
"moveFailed": "Failed to move item: {message}"
|
||||
}
|
||||
},
|
||||
"doctor": {
|
||||
"kicker": "시스템 진단",
|
||||
"title": "닥터",
|
||||
"buttonTitle": "진단 및 일반적인 수정 실행",
|
||||
"loading": "환경을 확인하는 중...",
|
||||
"footer": "수리 후에도 문제가 계속되면 진단 번들을 내보내세요.",
|
||||
"summary": {
|
||||
"idle": "설정, 캐시 무결성, UI 일관성에 대한 상태 검사를 실행합니다.",
|
||||
"ok": "현재 환경에서 활성 문제를 찾지 못했습니다.",
|
||||
"warning": "{count}개의 문제가 발견되었습니다. 대부분은 이 패널에서 바로 해결할 수 있습니다.",
|
||||
"error": "앱이 완전히 정상 상태가 되기 전에 {count}개의 문제를 처리해야 합니다."
|
||||
},
|
||||
"status": {
|
||||
"ok": "정상",
|
||||
"warning": "주의 필요",
|
||||
"error": "조치 필요"
|
||||
},
|
||||
"actions": {
|
||||
"runAgain": "다시 실행",
|
||||
"exportBundle": "번들 내보내기"
|
||||
},
|
||||
"toast": {
|
||||
"loadFailed": "진단 로드 실패: {message}",
|
||||
"repairSuccess": "캐시 재구성이 완료되었습니다.",
|
||||
"repairFailed": "캐시 재구성 실패: {message}",
|
||||
"exportSuccess": "진단 번들이 내보내졌습니다.",
|
||||
"exportFailed": "진단 번들 내보내기 실패: {message}"
|
||||
}
|
||||
},
|
||||
"banners": {
|
||||
"versionMismatch": {
|
||||
"title": "애플리케이션 업데이트 감지",
|
||||
280
locales/ru.json
@@ -263,7 +263,9 @@
|
||||
"videoSettings": "Настройки видео",
|
||||
"layoutSettings": "Настройки макета",
|
||||
"misc": "Разное",
|
||||
"backup": "Резервные копии",
|
||||
"folderSettings": "Корневые папки",
|
||||
"recipeSettings": "Рецепты",
|
||||
"extraFolderPaths": "Дополнительные пути к папкам",
|
||||
"downloadPathTemplates": "Шаблоны путей загрузки",
|
||||
"priorityTags": "Приоритетные теги",
|
||||
@@ -291,7 +293,15 @@
|
||||
"blurNsfwContent": "Размывать NSFW контент",
|
||||
"blurNsfwContentHelp": "Размывать превью изображений контента для взрослых (NSFW)",
|
||||
"showOnlySfw": "Показывать только SFW результаты",
|
||||
"showOnlySfwHelp": "Фильтровать весь NSFW контент при просмотре и поиске"
|
||||
"showOnlySfwHelp": "Фильтровать весь NSFW контент при просмотре и поиске",
|
||||
"matureBlurThreshold": "Порог размытия взрослого контента",
|
||||
"matureBlurThresholdHelp": "Установить, с какого уровня рейтинга начинается размытие при включенном размытии NSFW.",
|
||||
"matureBlurThresholdOptions": {
|
||||
"pg13": "PG13 и выше",
|
||||
"r": "R и выше (по умолчанию)",
|
||||
"x": "X и выше",
|
||||
"xxx": "Только XXX"
|
||||
}
|
||||
},
|
||||
"videoSettings": {
|
||||
"autoplayOnHover": "Автовоспроизведение видео при наведении",
|
||||
@@ -315,6 +325,54 @@
|
||||
"saveFailed": "Не удалось сохранить пути для пропуска: {message}"
|
||||
}
|
||||
},
|
||||
"backup": {
|
||||
"autoEnabled": "Автоматические резервные копии",
|
||||
"autoEnabledHelp": "Создаёт локальный снимок раз в день и хранит последние снимки согласно политике хранения.",
|
||||
"retention": "Количество хранения",
|
||||
"retentionHelp": "Сколько автоматических снимков сохранять перед удалением старых.",
|
||||
"management": "Управление резервными копиями",
|
||||
"managementHelp": "Экспортируйте текущее состояние пользователя или восстановите его из архива резервной копии.",
|
||||
"scopeHelp": "Резервная копия включает ваши настройки, историю загрузок и состояние обновлений моделей. Файлы моделей и пересоздаваемые кэши не входят.",
|
||||
"locationSummary": "Текущее расположение резервных копий",
|
||||
"openFolderButton": "Открыть папку резервных копий",
|
||||
"openFolderSuccess": "Папка резервных копий открыта",
|
||||
"openFolderFailed": "Не удалось открыть папку резервных копий",
|
||||
"locationCopied": "Путь к резервной копии скопирован в буфер обмена: {{path}}",
|
||||
"locationClipboardFallback": "Путь к резервной копии: {{path}}",
|
||||
"exportButton": "Экспортировать резервную копию",
|
||||
"exportSuccess": "Резервная копия успешно экспортирована.",
|
||||
"exportFailed": "Не удалось экспортировать резервную копию: {message}",
|
||||
"importButton": "Импортировать резервную копию",
|
||||
"importConfirm": "Импортировать эту резервную копию и перезаписать локальное состояние пользователя?",
|
||||
"importSuccess": "Резервная копия успешно импортирована.",
|
||||
"importFailed": "Не удалось импортировать резервную копию: {message}",
|
||||
"latestSnapshot": "Последний снимок",
|
||||
"latestAutoSnapshot": "Последний автоматический снимок",
|
||||
"snapshotCount": "Сохранённые снимки",
|
||||
"noneAvailable": "Снимков пока нет"
|
||||
},
|
||||
"downloadSkipBaseModels": {
|
||||
"label": "Пропускать загрузки для базовых моделей",
|
||||
"help": "Применяется ко всем сценариям загрузки. Здесь можно выбрать только поддерживаемые базовые модели.",
|
||||
"searchPlaceholder": "Фильтровать базовые модели...",
|
||||
"empty": "Нет базовых моделей, соответствующих текущему поиску.",
|
||||
"summary": {
|
||||
"none": "Ничего не выбрано",
|
||||
"count": "Выбрано: {count}"
|
||||
},
|
||||
"actions": {
|
||||
"edit": "Изменить",
|
||||
"collapse": "Свернуть",
|
||||
"clear": "Очистить"
|
||||
},
|
||||
"validation": {
|
||||
"saveFailed": "Не удалось сохранить исключённые базовые модели: {message}"
|
||||
}
|
||||
},
|
||||
"skipPreviouslyDownloadedModelVersions": {
|
||||
"label": "Пропускать ранее загруженные версии моделей",
|
||||
"help": "Если включено, LoRA Manager будет пропускать загрузку версии модели, если сервис истории загрузок записал, что эта конкретная версия уже загружена. Применяется ко всем потокам загрузки."
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "Плотность отображения",
|
||||
"displayDensityOptions": {
|
||||
@@ -363,12 +421,16 @@
|
||||
"defaultUnetRootHelp": "Установить корневую папку Diffusion Model (UNET) по умолчанию для загрузок, импорта и перемещений",
|
||||
"defaultEmbeddingRoot": "Корневая папка Embedding",
|
||||
"defaultEmbeddingRootHelp": "Установить корневую папку embedding по умолчанию для загрузок, импорта и перемещений",
|
||||
"recipesPath": "Путь хранения рецептов",
|
||||
"recipesPathHelp": "Дополнительный пользовательский каталог для сохранённых рецептов. Оставьте пустым, чтобы использовать папку recipes в первом корне LoRA.",
|
||||
"recipesPathPlaceholder": "/path/to/recipes",
|
||||
"recipesPathMigrating": "Перенос хранилища рецептов...",
|
||||
"noDefault": "Не задано"
|
||||
},
|
||||
"extraFolderPaths": {
|
||||
"title": "Дополнительные пути к папкам",
|
||||
"help": "Добавьте дополнительные папки моделей за пределами стандартных путей ComfyUI. Эти пути хранятся отдельно и сканируются вместе с папками по умолчанию.",
|
||||
"description": "Настройте дополнительные папки для сканирования моделей. Эти пути специфичны для LoRA Manager и будут объединены с путями по умолчанию ComfyUI.",
|
||||
"description": "Дополнительные корневые пути моделей, эксклюзивные для LoRA Manager. Загружайте модели из расположений за пределами стандартных папок ComfyUI — идеально подходит для больших библиотек, которые иначе замедлили бы ComfyUI.",
|
||||
"restartRequired": "Requires restart to take effect",
|
||||
"modelTypes": {
|
||||
"lora": "Пути LoRA",
|
||||
"checkpoint": "Пути Checkpoint",
|
||||
@@ -376,7 +438,7 @@
|
||||
"embedding": "Пути Embedding"
|
||||
},
|
||||
"pathPlaceholder": "/путь/к/дополнительным/моделям",
|
||||
"saveSuccess": "Дополнительные пути к папкам обновлены.",
|
||||
"saveSuccess": "Дополнительные пути к папкам обновлены. Требуется перезапуск для применения изменений.",
|
||||
"saveError": "Не удалось обновить дополнительные пути к папкам: {message}",
|
||||
"validation": {
|
||||
"duplicatePath": "Этот путь уже настроен"
|
||||
@@ -575,6 +637,7 @@
|
||||
"skipMetadataRefresh": "Пропустить обновление метаданных для выбранных",
|
||||
"resumeMetadataRefresh": "Возобновить обновление метаданных для выбранных",
|
||||
"deleteAll": "Удалить все модели",
|
||||
"downloadMissingLoras": "Скачать отсутствующие LoRAs",
|
||||
"clear": "Очистить выбор",
|
||||
"skipMetadataRefreshCount": "Пропустить({count} моделей)",
|
||||
"resumeMetadataRefreshCount": "Возобновить({count} моделей)",
|
||||
@@ -645,6 +708,8 @@
|
||||
"root": "Корень",
|
||||
"browseFolders": "Обзор папок:",
|
||||
"downloadAndSaveRecipe": "Скачать и сохранить рецепт",
|
||||
"importRecipeOnly": "Импортировать только рецепт",
|
||||
"importAndDownload": "Импорт и скачивание",
|
||||
"downloadMissingLoras": "Скачать отсутствующие LoRAs",
|
||||
"saveRecipe": "Сохранить рецепт",
|
||||
"loraCountInfo": "({existing}/{total} в библиотеке)",
|
||||
@@ -732,61 +797,61 @@
|
||||
}
|
||||
},
|
||||
"batchImport": {
|
||||
"title": "[TODO: Translate] Batch Import Recipes",
|
||||
"action": "[TODO: Translate] Batch Import",
|
||||
"urlList": "[TODO: Translate] URL List",
|
||||
"directory": "[TODO: Translate] Directory",
|
||||
"urlDescription": "[TODO: Translate] Enter image URLs or local file paths (one per line). Each will be imported as a recipe.",
|
||||
"directoryDescription": "[TODO: Translate] Enter a directory path to import all images from that folder.",
|
||||
"urlsLabel": "[TODO: Translate] Image URLs or Local Paths",
|
||||
"urlsPlaceholder": "[TODO: Translate] https://civitai.com/images/...\nhttps://civitai.com/images/...\nC:/path/to/image.png\n...",
|
||||
"urlsHint": "[TODO: Translate] Enter one URL or path per line",
|
||||
"directoryPath": "[TODO: Translate] Directory Path",
|
||||
"directoryPlaceholder": "[TODO: Translate] /path/to/images/folder",
|
||||
"browse": "[TODO: Translate] Browse",
|
||||
"recursive": "[TODO: Translate] Include subdirectories",
|
||||
"tagsOptional": "[TODO: Translate] Tags (optional, applied to all recipes)",
|
||||
"tagsPlaceholder": "[TODO: Translate] Enter tags separated by commas",
|
||||
"tagsHint": "[TODO: Translate] Tags will be added to all imported recipes",
|
||||
"skipNoMetadata": "[TODO: Translate] Skip images without metadata",
|
||||
"skipNoMetadataHelp": "[TODO: Translate] Images without LoRA metadata will be skipped automatically.",
|
||||
"start": "[TODO: Translate] Start Import",
|
||||
"startImport": "[TODO: Translate] Start Import",
|
||||
"importing": "[TODO: Translate] Importing...",
|
||||
"progress": "[TODO: Translate] Progress",
|
||||
"total": "[TODO: Translate] Total",
|
||||
"success": "[TODO: Translate] Success",
|
||||
"failed": "[TODO: Translate] Failed",
|
||||
"skipped": "[TODO: Translate] Skipped",
|
||||
"current": "[TODO: Translate] Current",
|
||||
"currentItem": "[TODO: Translate] Current",
|
||||
"preparing": "[TODO: Translate] Preparing...",
|
||||
"cancel": "[TODO: Translate] Cancel",
|
||||
"cancelImport": "[TODO: Translate] Cancel",
|
||||
"cancelled": "[TODO: Translate] Import cancelled",
|
||||
"completed": "[TODO: Translate] Import completed",
|
||||
"completedWithErrors": "[TODO: Translate] Completed with errors",
|
||||
"completedSuccess": "[TODO: Translate] Successfully imported {count} recipe(s)",
|
||||
"successCount": "[TODO: Translate] Successful",
|
||||
"failedCount": "[TODO: Translate] Failed",
|
||||
"skippedCount": "[TODO: Translate] Skipped",
|
||||
"totalProcessed": "[TODO: Translate] Total processed",
|
||||
"viewDetails": "[TODO: Translate] View Details",
|
||||
"newImport": "[TODO: Translate] New Import",
|
||||
"manualPathEntry": "[TODO: Translate] Please enter the directory path manually. File browser is not available in this browser.",
|
||||
"batchImportDirectorySelected": "[TODO: Translate] Directory selected: {name}. You may need to enter the full path manually.",
|
||||
"batchImportManualEntryRequired": "[TODO: Translate] File browser not available. Please enter the directory path manually.",
|
||||
"backToParent": "[TODO: Translate] Back to parent directory",
|
||||
"folders": "[TODO: Translate] Folders",
|
||||
"folderCount": "[TODO: Translate] {count} folders",
|
||||
"imageFiles": "[TODO: Translate] Image Files",
|
||||
"images": "[TODO: Translate] images",
|
||||
"imageCount": "[TODO: Translate] {count} images",
|
||||
"selectFolder": "[TODO: Translate] Select This Folder",
|
||||
"title": "Batch Import Recipes",
|
||||
"action": "Batch Import",
|
||||
"urlList": "URL List",
|
||||
"directory": "Directory",
|
||||
"urlDescription": "Enter image URLs or local file paths (one per line). Each will be imported as a recipe.",
|
||||
"directoryDescription": "Enter a directory path to import all images from that folder.",
|
||||
"urlsLabel": "Image URLs or Local Paths",
|
||||
"urlsPlaceholder": "https://civitai.com/images/...\nhttps://civitai.com/images/...\nC:/path/to/image.png\n...",
|
||||
"urlsHint": "Enter one URL or path per line",
|
||||
"directoryPath": "Directory Path",
|
||||
"directoryPlaceholder": "/path/to/images/folder",
|
||||
"browse": "Browse",
|
||||
"recursive": "Include subdirectories",
|
||||
"tagsOptional": "Tags (optional, applied to all recipes)",
|
||||
"tagsPlaceholder": "Enter tags separated by commas",
|
||||
"tagsHint": "Tags will be added to all imported recipes",
|
||||
"skipNoMetadata": "Skip images without metadata",
|
||||
"skipNoMetadataHelp": "Images without LoRA metadata will be skipped automatically.",
|
||||
"start": "Start Import",
|
||||
"startImport": "Start Import",
|
||||
"importing": "Importing...",
|
||||
"progress": "Progress",
|
||||
"total": "Total",
|
||||
"success": "Success",
|
||||
"failed": "Failed",
|
||||
"skipped": "Skipped",
|
||||
"current": "Current",
|
||||
"currentItem": "Current",
|
||||
"preparing": "Preparing...",
|
||||
"cancel": "Cancel",
|
||||
"cancelImport": "Cancel",
|
||||
"cancelled": "Import cancelled",
|
||||
"completed": "Import completed",
|
||||
"completedWithErrors": "Completed with errors",
|
||||
"completedSuccess": "Successfully imported {count} recipe(s)",
|
||||
"successCount": "Successful",
|
||||
"failedCount": "Failed",
|
||||
"skippedCount": "Skipped",
|
||||
"totalProcessed": "Total processed",
|
||||
"viewDetails": "View Details",
|
||||
"newImport": "New Import",
|
||||
"manualPathEntry": "Please enter the directory path manually. File browser is not available in this browser.",
|
||||
"batchImportDirectorySelected": "Directory selected: {path}",
|
||||
"batchImportManualEntryRequired": "File browser not available. Please enter the directory path manually.",
|
||||
"backToParent": "Back to parent directory",
|
||||
"folders": "Folders",
|
||||
"folderCount": "{count} folders",
|
||||
"imageFiles": "Image Files",
|
||||
"images": "images",
|
||||
"imageCount": "{count} images",
|
||||
"selectFolder": "Select This Folder",
|
||||
"errors": {
|
||||
"enterUrls": "[TODO: Translate] Please enter at least one URL or path",
|
||||
"enterDirectory": "[TODO: Translate] Please enter a directory path",
|
||||
"startFailed": "[TODO: Translate] Failed to start import: {message}"
|
||||
"enterUrls": "Please enter at least one URL or path",
|
||||
"enterDirectory": "Please enter a directory path",
|
||||
"startFailed": "Failed to start import: {message}"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -797,7 +862,8 @@
"diffusion_model": "Diffusion Model"
},
"contextMenu": {
"moveToOtherTypeFolder": "Переместить в папку {otherType}"
"moveToOtherTypeFolder": "Переместить в папку {otherType}",
"sendToWorkflow": "Отправить в workflow"
}
},
"embeddings": {
@@ -810,8 +876,8 @@
"unpinSidebar": "Открепить боковую панель",
"switchToListView": "Переключить на вид списка",
"switchToTreeView": "Переключить на древовидный вид",
"recursiveOn": "Искать во вложенных папках",
"recursiveOff": "Искать только в текущей папке",
"recursiveOn": "Включать вложенные папки",
"recursiveOff": "Только текущая папка",
"recursiveUnavailable": "Рекурсивный поиск доступен только в режиме дерева",
"collapseAllDisabled": "Недоступно в виде списка",
"dragDrop": {
@@ -891,6 +957,8 @@
|
||||
"earlyAccess": "Ранний доступ",
|
||||
"earlyAccessTooltip": "Требуется ранний доступ",
|
||||
"inLibrary": "В библиотеке",
|
||||
"downloaded": "Загружено",
|
||||
"downloadedTooltip": "Ранее загружено, но сейчас этого нет в вашей библиотеке.",
|
||||
"alreadyInLibrary": "Уже в библиотеке",
|
||||
"autoOrganizedPath": "[Автоматически организовано по шаблону пути]",
|
||||
"errors": {
|
||||
@@ -981,6 +1049,14 @@
|
||||
"save": "Обновить базовую модель",
|
||||
"cancel": "Отмена"
|
||||
},
|
||||
"bulkDownloadMissingLoras": {
|
||||
"title": "Скачать отсутствующие LoRAs",
|
||||
"message": "Найдено {uniqueCount} уникальных отсутствующих LoRAs (из {totalCount} всего в выбранных рецептах).",
|
||||
"previewTitle": "LoRAs для скачивания:",
|
||||
"moreItems": "...и еще {count}",
|
||||
"note": "Файлы будут скачаны с использованием шаблонов путей по умолчанию. Это может занять некоторое время в зависимости от количества LoRAs.",
|
||||
"downloadButton": "Скачать {count} LoRA(s)"
|
||||
},
|
||||
"exampleAccess": {
|
||||
"title": "Локальные примеры изображений",
|
||||
"message": "Локальные примеры изображений для этой модели не найдены. Варианты просмотра:",
|
||||
@@ -1032,7 +1108,9 @@
|
||||
"viewOnCivitai": "Посмотреть на Civitai",
|
||||
"viewOnCivitaiText": "Посмотреть на Civitai",
|
||||
"viewCreatorProfile": "Посмотреть профиль создателя",
|
||||
"openFileLocation": "Открыть расположение файла"
|
||||
"openFileLocation": "Открыть расположение файла",
|
||||
"sendToWorkflow": "Отправить в ComfyUI",
|
||||
"sendToWorkflowText": "Отправить в ComfyUI"
|
||||
},
|
||||
"openFileLocation": {
|
||||
"success": "Расположение файла успешно открыто",
|
||||
@@ -1040,6 +1118,9 @@
"copied": "Путь скопирован в буфер обмена: {{path}}",
"clipboardFallback": "Путь: {{path}}"
},
"sendToWorkflow": {
"noFilePath": "Невозможно отправить в ComfyUI: путь к файлу недоступен"
},
"metadata": {
"version": "Версия",
"fileName": "Имя файла",
@@ -1147,17 +1228,30 @@
|
||||
"days": "через {count}д"
|
||||
},
|
||||
"badges": {
|
||||
"current": "Текущая версия",
|
||||
"current": "Открытая версия",
|
||||
"currentTooltip": "Это версия, с которой было открыто это окно",
|
||||
"inLibrary": "В библиотеке",
|
||||
"inLibraryTooltip": "Эта версия есть в вашей локальной библиотеке",
|
||||
"downloaded": "Загружено",
|
||||
"downloadedTooltip": "Эта версия уже загружалась, но сейчас отсутствует в вашей библиотеке",
|
||||
"newer": "Более новая версия",
|
||||
"newerTooltip": "Эта версия новее вашей последней локальной версии",
|
||||
"earlyAccess": "Ранний доступ",
|
||||
"ignored": "Игнорируется"
|
||||
"earlyAccessTooltip": "Для этой версии сейчас требуется ранний доступ Civitai",
|
||||
"ignored": "Игнорируется",
|
||||
"ignoredTooltip": "Уведомления об обновлениях для этой версии отключены"
|
||||
},
|
||||
"actions": {
|
||||
"download": "Скачать",
|
||||
"downloadTooltip": "Скачать эту версию",
|
||||
"downloadEarlyAccessTooltip": "Скачать эту версию раннего доступа с Civitai",
|
||||
"delete": "Удалить",
|
||||
"deleteTooltip": "Удалить эту локальную версию",
|
||||
"ignore": "Игнорировать",
|
||||
"unignore": "Перестать игнорировать",
|
||||
"ignoreTooltip": "Игнорировать уведомления об обновлениях для этой версии",
|
||||
"unignoreTooltip": "Возобновить уведомления об обновлениях для этой версии",
|
||||
"viewVersionOnCivitai": "Посмотреть версию на Civitai",
|
||||
"earlyAccessTooltip": "Требуется покупка раннего доступа",
|
||||
"resumeModelUpdates": "Возобновить обновления для этой модели",
|
||||
"ignoreModelUpdates": "Игнорировать обновления для этой модели",
|
||||
@@ -1297,7 +1391,9 @@
|
||||
"recipeReplaced": "Рецепт заменён в workflow",
|
||||
"recipeFailedToSend": "Не удалось отправить рецепт в workflow",
|
||||
"noMatchingNodes": "В текущем workflow нет совместимых узлов",
|
||||
"noTargetNodeSelected": "Целевой узел не выбран"
|
||||
"noTargetNodeSelected": "Целевой узел не выбран",
|
||||
"modelUpdated": "Модель обновлена в workflow",
|
||||
"modelFailed": "Не удалось обновить узел модели"
|
||||
},
|
||||
"nodeSelector": {
|
||||
"recipe": "Рецепт",
|
||||
@@ -1448,6 +1544,7 @@
|
||||
"pleaseSelectVersion": "Пожалуйста, выберите версию",
|
||||
"versionExists": "Эта версия уже существует в вашей библиотеке",
|
||||
"downloadCompleted": "Загрузка успешно завершена",
|
||||
"downloadSkippedByBaseModel": "Загрузка пропущена, потому что базовая модель {baseModel} исключена",
|
||||
"autoOrganizeSuccess": "Автоматическая организация успешно завершена для {count} {type}",
|
||||
"autoOrganizePartialSuccess": "Автоматическая организация завершена: перемещено {success}, не удалось {failures} из {total} моделей",
|
||||
"autoOrganizeFailed": "Ошибка автоматической организации: {error}",
|
||||
@@ -1467,7 +1564,11 @@
|
||||
"nameUpdated": "Название рецепта успешно обновлено",
|
||||
"tagsUpdated": "Теги рецепта успешно обновлены",
|
||||
"sourceUrlUpdated": "Исходный URL успешно обновлен",
|
||||
"promptUpdated": "Промпт успешно обновлён",
|
||||
"negativePromptUpdated": "Негативный промпт успешно обновлён",
|
||||
"promptEditorHint": "Нажмите Enter для сохранения, Shift+Enter для новой строки",
|
||||
"noRecipeId": "ID рецепта недоступен",
|
||||
"sendToWorkflowFailed": "Не удалось отправить рецепт в рабочий процесс: {message}",
|
||||
"copyFailed": "Ошибка копирования синтаксиса рецепта: {message}",
|
||||
"noMissingLoras": "Нет отсутствующих LoRAs для загрузки",
|
||||
"missingLorasInfoFailed": "Не удалось получить информацию для отсутствующих LoRAs",
|
||||
@@ -1495,16 +1596,20 @@
|
||||
"processingError": "Ошибка обработки: {message}",
|
||||
"folderBrowserError": "Ошибка загрузки браузера папок: {message}",
|
||||
"recipeSaveFailed": "Не удалось сохранить рецепт: {error}",
|
||||
"recipeSaved": "Recipe saved successfully",
|
||||
"importFailed": "Импорт не удался: {message}",
|
||||
"folderTreeFailed": "Не удалось загрузить дерево папок",
|
||||
"folderTreeError": "Ошибка загрузки дерева папок",
|
||||
"batchImportFailed": "[TODO: Translate] Failed to start batch import: {message}",
|
||||
"batchImportCancelling": "[TODO: Translate] Cancelling batch import...",
|
||||
"batchImportCancelFailed": "[TODO: Translate] Failed to cancel batch import: {message}",
|
||||
"batchImportNoUrls": "[TODO: Translate] Please enter at least one URL or file path",
|
||||
"batchImportNoDirectory": "[TODO: Translate] Please enter a directory path",
|
||||
"batchImportBrowseFailed": "[TODO: Translate] Failed to browse directory: {message}",
|
||||
"batchImportDirectorySelected": "[TODO: Translate] Directory selected: {path}"
|
||||
"batchImportFailed": "Failed to start batch import: {message}",
|
||||
"batchImportCancelling": "Cancelling batch import...",
|
||||
"batchImportCancelFailed": "Failed to cancel batch import: {message}",
|
||||
"batchImportNoUrls": "Please enter at least one URL or file path",
|
||||
"batchImportNoDirectory": "Please enter a directory path",
|
||||
"batchImportBrowseFailed": "Failed to browse directory: {message}",
|
||||
"batchImportDirectorySelected": "Directory selected: {path}",
|
||||
"noRecipesSelected": "Рецепты не выбраны",
|
||||
"noMissingLorasInSelection": "В выбранных рецептах не найдены отсутствующие LoRAs",
|
||||
"noLoraRootConfigured": "Корневой каталог LoRA не настроен. Пожалуйста, установите корневой каталог LoRA по умолчанию в настройках."
|
||||
},
|
||||
"models": {
|
||||
"noModelsSelected": "Модели не выбраны",
|
||||
@@ -1571,6 +1676,8 @@
|
||||
"mappingSaveFailed": "Не удалось сохранить сопоставления базовых моделей: {message}",
|
||||
"downloadTemplatesUpdated": "Шаблоны путей загрузки обновлены",
|
||||
"downloadTemplatesFailed": "Не удалось сохранить шаблоны путей загрузки: {message}",
|
||||
"recipesPathUpdated": "Путь хранения рецептов обновлён",
|
||||
"recipesPathSaveFailed": "Не удалось обновить путь хранения рецептов: {message}",
|
||||
"settingsUpdated": "Настройки обновлены: {setting}",
|
||||
"compactModeToggled": "Компактный режим {state}",
|
||||
"settingSaveFailed": "Не удалось сохранить настройку: {message}",
|
||||
@@ -1714,6 +1821,35 @@
|
||||
"moveFailed": "Failed to move item: {message}"
|
||||
}
|
||||
},
|
||||
"doctor": {
|
||||
"kicker": "Системная диагностика",
|
||||
"title": "Доктор",
|
||||
"buttonTitle": "Запустить диагностику и обычные исправления",
|
||||
"loading": "Проверка окружения...",
|
||||
"footer": "Экспортируйте диагностический пакет, если проблема сохраняется после исправления.",
|
||||
"summary": {
|
||||
"idle": "Выполнить проверку настроек, целостности кэша и согласованности интерфейса.",
|
||||
"ok": "В текущем окружении активных проблем не обнаружено.",
|
||||
"warning": "Обнаружено {count} проблем(ы). Большинство можно исправить прямо из этой панели.",
|
||||
"error": "Перед тем как приложение станет полностью исправным, нужно устранить {count} проблем(ы)."
|
||||
},
|
||||
"status": {
|
||||
"ok": "Исправно",
|
||||
"warning": "Требует внимания",
|
||||
"error": "Требуется действие"
|
||||
},
|
||||
"actions": {
|
||||
"runAgain": "Запустить снова",
|
||||
"exportBundle": "Экспортировать пакет"
|
||||
},
|
||||
"toast": {
|
||||
"loadFailed": "Не удалось загрузить диагностику: {message}",
|
||||
"repairSuccess": "Перестройка кэша завершена.",
|
||||
"repairFailed": "Не удалось перестроить кэш: {message}",
|
||||
"exportSuccess": "Диагностический пакет экспортирован.",
|
||||
"exportFailed": "Не удалось экспортировать диагностический пакет: {message}"
|
||||
}
|
||||
},
|
||||
"banners": {
|
||||
"versionMismatch": {
|
||||
"title": "Обнаружено обновление приложения",
|
||||
@@ -263,7 +263,9 @@
|
||||
"videoSettings": "视频设置",
|
||||
"layoutSettings": "布局设置",
|
||||
"misc": "其他",
|
||||
"backup": "备份",
|
||||
"folderSettings": "默认根目录",
|
||||
"recipeSettings": "配方",
|
||||
"extraFolderPaths": "额外文件夹路径",
|
||||
"downloadPathTemplates": "下载路径模板",
|
||||
"priorityTags": "优先标签",
|
||||
@@ -291,7 +293,15 @@
|
||||
"blurNsfwContent": "模糊 NSFW 内容",
|
||||
"blurNsfwContentHelp": "模糊成熟(NSFW)内容预览图片",
|
||||
"showOnlySfw": "仅显示 SFW 结果",
|
||||
"showOnlySfwHelp": "浏览和搜索时过滤所有 NSFW 内容"
|
||||
"showOnlySfwHelp": "浏览和搜索时过滤所有 NSFW 内容",
|
||||
"matureBlurThreshold": "成人内容模糊阈值",
|
||||
"matureBlurThresholdHelp": "设置当启用 NSFW 模糊时,从哪个评级级别开始模糊过滤。",
|
||||
"matureBlurThresholdOptions": {
|
||||
"pg13": "PG13 及以上",
|
||||
"r": "R 及以上(默认)",
|
||||
"x": "X 及以上",
|
||||
"xxx": "仅 XXX"
|
||||
}
|
||||
},
|
||||
"videoSettings": {
|
||||
"autoplayOnHover": "悬停时自动播放视频",
|
||||
@@ -315,6 +325,54 @@
|
||||
"saveFailed": "无法保存跳过路径:{message}"
|
||||
}
|
||||
},
|
||||
"backup": {
|
||||
"autoEnabled": "自动备份",
|
||||
"autoEnabledHelp": "每天创建一次本地快照,并按保留策略保留最新快照。",
|
||||
"retention": "保留数量",
|
||||
"retentionHelp": "在删除旧快照之前,要保留多少个自动快照。",
|
||||
"management": "备份管理",
|
||||
"managementHelp": "导出当前用户状态,或从备份归档中恢复。",
|
||||
"scopeHelp": "备份你的设置、下载历史和模型更新状态。不包含模型文件或可重建的缓存。",
|
||||
"locationSummary": "当前备份位置",
|
||||
"openFolderButton": "打开备份文件夹",
|
||||
"openFolderSuccess": "已打开备份文件夹",
|
||||
"openFolderFailed": "无法打开备份文件夹",
|
||||
"locationCopied": "备份路径已复制到剪贴板:{{path}}",
|
||||
"locationClipboardFallback": "备份路径:{{path}}",
|
||||
"exportButton": "导出备份",
|
||||
"exportSuccess": "备份导出成功。",
|
||||
"exportFailed": "备份导出失败:{message}",
|
||||
"importButton": "导入备份",
|
||||
"importConfirm": "导入此备份并覆盖本地用户状态吗?",
|
||||
"importSuccess": "备份导入成功。",
|
||||
"importFailed": "备份导入失败:{message}",
|
||||
"latestSnapshot": "最近快照",
|
||||
"latestAutoSnapshot": "最近自动快照",
|
||||
"snapshotCount": "已保存快照",
|
||||
"noneAvailable": "还没有快照"
|
||||
},
|
||||
"downloadSkipBaseModels": {
|
||||
"label": "跳过这些基础模型的下载",
|
||||
"help": "适用于所有下载流程。这里只能选择受支持的基础模型。",
|
||||
"searchPlaceholder": "筛选基础模型...",
|
||||
"empty": "没有与当前搜索匹配的基础模型。",
|
||||
"summary": {
|
||||
"none": "未选择",
|
||||
"count": "已选择 {count} 项"
|
||||
},
|
||||
"actions": {
|
||||
"edit": "编辑",
|
||||
"collapse": "收起",
|
||||
"clear": "清空"
|
||||
},
|
||||
"validation": {
|
||||
"saveFailed": "无法保存已排除的基础模型:{message}"
|
||||
}
|
||||
},
|
||||
"skipPreviouslyDownloadedModelVersions": {
|
||||
"label": "跳过已下载的模型版本",
|
||||
"help": "启用后,如果下载历史服务记录显示该版本已下载,LoRA Manager 将跳过下载该模型版本。适用于所有下载流程。"
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "显示密度",
|
||||
"displayDensityOptions": {
|
||||
@@ -363,12 +421,16 @@
|
||||
"defaultUnetRootHelp": "设置下载、导入和移动时的默认 Diffusion Model (UNET) 根目录",
|
||||
"defaultEmbeddingRoot": "Embedding 根目录",
|
||||
"defaultEmbeddingRootHelp": "设置下载、导入和移动时的默认 Embedding 根目录",
|
||||
"recipesPath": "配方存储路径",
|
||||
"recipesPathHelp": "已保存配方的可选自定义目录。留空则使用第一个 LoRA 根目录下的 recipes 文件夹。",
|
||||
"recipesPathPlaceholder": "/path/to/recipes",
|
||||
"recipesPathMigrating": "正在迁移配方存储...",
|
||||
"noDefault": "无默认"
|
||||
},
|
||||
"extraFolderPaths": {
|
||||
"title": "额外文件夹路径",
|
||||
"help": "在 ComfyUI 的标准路径之外添加额外的模型文件夹。这些路径单独存储,并与默认文件夹一起扫描。",
|
||||
"description": "配置额外的文件夹以扫描模型。这些路径是 LoRA Manager 特有的,将与 ComfyUI 的默认路径合并。",
|
||||
"description": "LoRA Manager 专属的额外模型根目录。从 ComfyUI 标准文件夹之外的位置加载模型,特别适合管理大型模型库,避免影响 ComfyUI 性能。",
|
||||
"restartRequired": "需要重启才能生效",
|
||||
"modelTypes": {
|
||||
"lora": "LoRA 路径",
|
||||
"checkpoint": "Checkpoint 路径",
|
||||
@@ -376,7 +438,7 @@
|
||||
"embedding": "Embedding 路径"
|
||||
},
|
||||
"pathPlaceholder": "/额外/模型/路径",
|
||||
"saveSuccess": "额外文件夹路径已更新。",
|
||||
"saveSuccess": "额外文件夹路径已更新,需要重启才能生效。",
|
||||
"saveError": "更新额外文件夹路径失败:{message}",
|
||||
"validation": {
|
||||
"duplicatePath": "此路径已配置"
|
||||
@@ -575,6 +637,7 @@
|
||||
"skipMetadataRefresh": "跳过所选模型的元数据刷新",
|
||||
"resumeMetadataRefresh": "恢复所选模型的元数据刷新",
|
||||
"deleteAll": "删除选中模型",
|
||||
"downloadMissingLoras": "下载缺失的 LoRAs",
|
||||
"clear": "清除选择",
|
||||
"skipMetadataRefreshCount": "跳过({count} 个模型)",
|
||||
"resumeMetadataRefreshCount": "恢复({count} 个模型)",
|
||||
@@ -645,6 +708,8 @@
|
||||
"root": "根目录",
|
||||
"browseFolders": "浏览文件夹:",
|
||||
"downloadAndSaveRecipe": "下载并保存配方",
|
||||
"importRecipeOnly": "仅导入配方",
|
||||
"importAndDownload": "导入并下载",
|
||||
"downloadMissingLoras": "下载缺失的 LoRA",
|
||||
"saveRecipe": "保存配方",
|
||||
"loraCountInfo": "({existing}/{total} in library)",
|
||||
@@ -734,55 +799,55 @@
|
||||
"batchImport": {
|
||||
"title": "批量导入配方",
|
||||
"action": "批量导入",
|
||||
"urlList": "[TODO: Translate] URL List",
|
||||
"directory": "[TODO: Translate] Directory",
|
||||
"urlDescription": "[TODO: Translate] Enter image URLs or local file paths (one per line). Each will be imported as a recipe.",
|
||||
"urlList": "URL 列表",
|
||||
"directory": "目录",
|
||||
"urlDescription": "输入图像 URL 或本地文件路径(每行一个)。每个都将作为配方导入。",
|
||||
"directoryDescription": "输入目录路径以导入该文件夹中的所有图片。",
|
||||
"urlsLabel": "图片 URL 或本地路径",
|
||||
"urlsPlaceholder": "https://civitai.com/images/...\nhttps://civitai.com/images/...\nC:/path/to/image.png\n...",
|
||||
"urlsHint": "[TODO: Translate] Enter one URL or path per line",
|
||||
"directoryPath": "[TODO: Translate] Directory Path",
|
||||
"urlsHint": "每行输入一个 URL 或路径",
|
||||
"directoryPath": "目录路径",
|
||||
"directoryPlaceholder": "/图片/文件夹/路径",
|
||||
"browse": "[TODO: Translate] Browse",
|
||||
"recursive": "[TODO: Translate] Include subdirectories",
|
||||
"browse": "浏览",
|
||||
"recursive": "包含子目录",
|
||||
"tagsOptional": "标签(可选,应用于所有配方)",
|
||||
"tagsPlaceholder": "[TODO: Translate] Enter tags separated by commas",
|
||||
"tagsHint": "[TODO: Translate] Tags will be added to all imported recipes",
|
||||
"tagsPlaceholder": "输入以逗号分隔的标签",
|
||||
"tagsHint": "标签将被添加到所有导入的配方中",
|
||||
"skipNoMetadata": "跳过无元数据的图片",
|
||||
"skipNoMetadataHelp": "没有 LoRA 元数据的图片将自动跳过。",
|
||||
"start": "[TODO: Translate] Start Import",
|
||||
"start": "开始导入",
|
||||
"startImport": "开始导入",
|
||||
"importing": "正在导入配方...",
|
||||
"progress": "进度",
|
||||
"total": "[TODO: Translate] Total",
|
||||
"success": "[TODO: Translate] Success",
|
||||
"failed": "[TODO: Translate] Failed",
|
||||
"skipped": "[TODO: Translate] Skipped",
|
||||
"current": "[TODO: Translate] Current",
|
||||
"total": "总计",
|
||||
"success": "成功",
|
||||
"failed": "失败",
|
||||
"skipped": "跳过",
|
||||
"current": "当前",
|
||||
"currentItem": "当前",
|
||||
"preparing": "准备中...",
|
||||
"cancel": "[TODO: Translate] Cancel",
|
||||
"cancel": "取消",
|
||||
"cancelImport": "取消",
|
||||
"cancelled": "批量导入已取消",
|
||||
"completed": "导入完成",
|
||||
"completedWithErrors": "[TODO: Translate] Completed with errors",
|
||||
"completedWithErrors": "导入完成但有错误",
|
||||
"completedSuccess": "成功导入 {count} 个配方",
|
||||
"successCount": "成功",
|
||||
"failedCount": "失败",
|
||||
"skippedCount": "跳过",
|
||||
"totalProcessed": "总计处理",
|
||||
"viewDetails": "[TODO: Translate] View Details",
|
||||
"newImport": "[TODO: Translate] New Import",
|
||||
"manualPathEntry": "[TODO: Translate] Please enter the directory path manually. File browser is not available in this browser.",
|
||||
"batchImportDirectorySelected": "[TODO: Translate] Directory selected: {name}. You may need to enter the full path manually.",
|
||||
"batchImportManualEntryRequired": "[TODO: Translate] File browser not available. Please enter the directory path manually.",
|
||||
"backToParent": "[TODO: Translate] Back to parent directory",
|
||||
"folders": "[TODO: Translate] Folders",
|
||||
"folderCount": "[TODO: Translate] {count} folders",
|
||||
"imageFiles": "[TODO: Translate] Image Files",
|
||||
"images": "[TODO: Translate] images",
|
||||
"imageCount": "[TODO: Translate] {count} images",
|
||||
"selectFolder": "[TODO: Translate] Select This Folder",
|
||||
"viewDetails": "查看详情",
|
||||
"newImport": "新建导入",
|
||||
"manualPathEntry": "请手动输入目录路径。此浏览器中文件浏览器不可用。",
|
||||
"batchImportDirectorySelected": "已选择目录:{path}",
|
||||
"batchImportManualEntryRequired": "文件浏览器不可用。请手动输入目录路径。",
|
||||
"backToParent": "返回上级目录",
|
||||
"folders": "文件夹",
|
||||
"folderCount": "{count} 个文件夹",
|
||||
"imageFiles": "图像文件",
|
||||
"images": "图像",
|
||||
"imageCount": "{count} 个图像",
|
||||
"selectFolder": "选择此文件夹",
|
||||
"errors": {
|
||||
"enterUrls": "请至少输入一个 URL 或路径",
|
||||
"enterDirectory": "请输入目录路径",
|
||||
@@ -797,7 +862,8 @@
"diffusion_model": "Diffusion Model"
},
"contextMenu": {
"moveToOtherTypeFolder": "移动到 {otherType} 文件夹"
"moveToOtherTypeFolder": "移动到 {otherType} 文件夹",
"sendToWorkflow": "发送到工作流"
}
},
"embeddings": {
@@ -810,8 +876,8 @@
"unpinSidebar": "取消固定侧边栏",
"switchToListView": "切换到列表视图",
"switchToTreeView": "切换到树状视图",
"recursiveOn": "搜索子文件夹",
"recursiveOff": "仅搜索当前文件夹",
"recursiveOn": "包含子文件夹",
"recursiveOff": "仅当前文件夹",
"recursiveUnavailable": "仅在树形视图中可使用递归搜索",
"collapseAllDisabled": "列表视图下不可用",
"dragDrop": {
@@ -891,6 +957,8 @@
|
||||
"earlyAccess": "早期访问",
|
||||
"earlyAccessTooltip": "需要早期访问权限",
|
||||
"inLibrary": "已在库中",
|
||||
"downloaded": "已下载",
|
||||
"downloadedTooltip": "之前已下载,但当前不在你的库中。",
|
||||
"alreadyInLibrary": "已存在于库中",
|
||||
"autoOrganizedPath": "【已按路径模板自动整理】",
|
||||
"errors": {
|
||||
@@ -981,6 +1049,14 @@
|
||||
"save": "更新基础模型",
|
||||
"cancel": "取消"
|
||||
},
|
||||
"bulkDownloadMissingLoras": {
|
||||
"title": "下载缺失的 LoRAs",
|
||||
"message": "发现 {uniqueCount} 个独特的缺失 LoRAs(从选定配方中的 {totalCount} 个总数)。",
|
||||
"previewTitle": "要下载的 LoRAs:",
|
||||
"moreItems": "...还有 {count} 个",
|
||||
"note": "文件将使用默认路径模板下载。根据 LoRAs 的数量,这可能需要一些时间。",
|
||||
"downloadButton": "下载 {count} 个 LoRA(s)"
|
||||
},
|
||||
"exampleAccess": {
|
||||
"title": "本地示例图片",
|
||||
"message": "未找到此模型的本地示例图片。可选操作:",
|
||||
@@ -1032,7 +1108,9 @@
|
||||
"viewOnCivitai": "在 Civitai 查看",
|
||||
"viewOnCivitaiText": "在 Civitai 查看",
|
||||
"viewCreatorProfile": "查看创作者主页",
|
||||
"openFileLocation": "打开文件位置"
|
||||
"openFileLocation": "打开文件位置",
|
||||
"sendToWorkflow": "发送到 ComfyUI",
|
||||
"sendToWorkflowText": "发送到 ComfyUI"
|
||||
},
|
||||
"openFileLocation": {
|
||||
"success": "文件位置已成功打开",
|
||||
@@ -1040,6 +1118,9 @@
"copied": "路径已复制到剪贴板:{{path}}",
"clipboardFallback": "路径:{{path}}"
},
"sendToWorkflow": {
"noFilePath": "无法发送到 ComfyUI:没有可用的文件路径"
},
"metadata": {
"version": "版本",
"fileName": "文件名",
@@ -1147,17 +1228,30 @@
|
||||
"days": "{count}天后"
|
||||
},
|
||||
"badges": {
|
||||
"current": "当前版本",
|
||||
"current": "已打开版本",
|
||||
"currentTooltip": "这是你用来打开此弹窗的版本",
|
||||
"inLibrary": "已在库中",
|
||||
"inLibraryTooltip": "此版本已存在于你的本地库中",
|
||||
"downloaded": "已下载",
|
||||
"downloadedTooltip": "此版本之前下载过,但当前不在你的本地库中",
|
||||
"newer": "较新的版本",
|
||||
"newerTooltip": "此版本比你本地的最新版本更新",
|
||||
"earlyAccess": "抢先体验",
|
||||
"ignored": "已忽略"
|
||||
"earlyAccessTooltip": "此版本当前需要 Civitai 抢先体验权限",
|
||||
"ignored": "已忽略",
|
||||
"ignoredTooltip": "此版本已关闭更新通知"
|
||||
},
|
||||
"actions": {
|
||||
"download": "下载",
|
||||
"downloadTooltip": "下载此版本",
|
||||
"downloadEarlyAccessTooltip": "从 Civitai 下载此抢先体验版本",
|
||||
"delete": "删除",
|
||||
"deleteTooltip": "删除此本地版本",
|
||||
"ignore": "忽略",
|
||||
"unignore": "取消忽略",
|
||||
"ignoreTooltip": "忽略此版本的更新通知",
|
||||
"unignoreTooltip": "恢复此版本的更新通知",
|
||||
"viewVersionOnCivitai": "在 Civitai 上查看版本",
|
||||
"earlyAccessTooltip": "需要购买抢先体验",
|
||||
"resumeModelUpdates": "继续跟踪该模型的更新",
|
||||
"ignoreModelUpdates": "忽略该模型的更新",
|
||||
@@ -1297,7 +1391,9 @@
|
||||
"recipeReplaced": "配方已替换到工作流",
|
||||
"recipeFailedToSend": "发送配方到工作流失败",
|
||||
"noMatchingNodes": "当前工作流中没有兼容的节点",
|
||||
"noTargetNodeSelected": "未选择目标节点"
|
||||
"noTargetNodeSelected": "未选择目标节点",
|
||||
"modelUpdated": "模型已更新到工作流",
|
||||
"modelFailed": "更新模型节点失败"
|
||||
},
|
||||
"nodeSelector": {
|
||||
"recipe": "配方",
|
||||
@@ -1448,6 +1544,7 @@
|
||||
"pleaseSelectVersion": "请选择版本",
|
||||
"versionExists": "该版本已存在于你的库中",
|
||||
"downloadCompleted": "下载成功完成",
|
||||
"downloadSkippedByBaseModel": "由于基础模型 {baseModel} 已被排除,已跳过下载",
|
||||
"autoOrganizeSuccess": "自动整理已成功完成,共 {count} 个 {type}",
|
||||
"autoOrganizePartialSuccess": "自动整理完成:已移动 {success} 个,{failures} 个失败,共 {total} 个模型",
|
||||
"autoOrganizeFailed": "自动整理失败:{error}",
|
||||
@@ -1467,7 +1564,11 @@
|
||||
"nameUpdated": "配方名称更新成功",
|
||||
"tagsUpdated": "配方标签更新成功",
|
||||
"sourceUrlUpdated": "来源 URL 更新成功",
|
||||
"promptUpdated": "提示词更新成功",
|
||||
"negativePromptUpdated": "负面提示词更新成功",
|
||||
"promptEditorHint": "按 Enter 保存,Shift+Enter 换行",
|
||||
"noRecipeId": "无配方 ID",
|
||||
"sendToWorkflowFailed": "发送配方到工作流失败:{message}",
|
||||
"copyFailed": "复制配方语法出错:{message}",
|
||||
"noMissingLoras": "没有缺失的 LoRA 可下载",
|
||||
"missingLorasInfoFailed": "获取缺失 LoRA 信息失败",
|
||||
@@ -1495,16 +1596,20 @@
|
||||
"processingError": "处理出错:{message}",
|
||||
"folderBrowserError": "加载文件夹浏览器出错:{message}",
|
||||
"recipeSaveFailed": "保存配方失败:{error}",
|
||||
"recipeSaved": "配方保存成功",
|
||||
"importFailed": "导入失败:{message}",
|
||||
"folderTreeFailed": "加载文件夹树失败",
|
||||
"folderTreeError": "加载文件夹树出错",
|
||||
"batchImportFailed": "[TODO: Translate] Failed to start batch import: {message}",
|
||||
"batchImportCancelling": "[TODO: Translate] Cancelling batch import...",
|
||||
"batchImportCancelFailed": "[TODO: Translate] Failed to cancel batch import: {message}",
|
||||
"batchImportNoUrls": "[TODO: Translate] Please enter at least one URL or file path",
|
||||
"batchImportNoDirectory": "[TODO: Translate] Please enter a directory path",
|
||||
"batchImportBrowseFailed": "[TODO: Translate] Failed to browse directory: {message}",
|
||||
"batchImportDirectorySelected": "[TODO: Translate] Directory selected: {path}"
|
||||
"batchImportFailed": "启动批量导入失败:{message}",
|
||||
"batchImportCancelling": "正在取消批量导入...",
|
||||
"batchImportCancelFailed": "取消批量导入失败:{message}",
|
||||
"batchImportNoUrls": "请输入至少一个 URL 或文件路径",
|
||||
"batchImportNoDirectory": "请输入目录路径",
|
||||
"batchImportBrowseFailed": "浏览目录失败:{message}",
|
||||
"batchImportDirectorySelected": "已选择目录:{path}",
|
||||
"noRecipesSelected": "未选择任何配方",
|
||||
"noMissingLorasInSelection": "在选定的配方中未找到缺失的 LoRAs",
|
||||
"noLoraRootConfigured": "未配置 LoRA 根目录。请在设置中设置默认的 LoRA 根目录。"
|
||||
},
|
||||
"models": {
|
||||
"noModelsSelected": "未选中模型",
|
||||
@@ -1571,6 +1676,8 @@
|
||||
"mappingSaveFailed": "保存基础模型映射失败:{message}",
|
||||
"downloadTemplatesUpdated": "下载路径模板已更新",
|
||||
"downloadTemplatesFailed": "保存下载路径模板失败:{message}",
|
||||
"recipesPathUpdated": "配方存储路径已更新",
|
||||
"recipesPathSaveFailed": "更新配方存储路径失败:{message}",
|
||||
"settingsUpdated": "设置已更新:{setting}",
|
||||
"compactModeToggled": "紧凑模式 {state}",
|
||||
"settingSaveFailed": "保存设置失败:{message}",
|
||||
@@ -1714,6 +1821,35 @@
|
||||
"moveFailed": "Failed to move item: {message}"
|
||||
}
|
||||
},
|
||||
"doctor": {
|
||||
"kicker": "系统诊断",
|
||||
"title": "医生",
|
||||
"buttonTitle": "运行诊断并尝试修复常见问题",
|
||||
"loading": "正在检查当前环境...",
|
||||
"footer": "如果修复后问题仍然存在,可以导出诊断包进一步排查。",
|
||||
"summary": {
|
||||
"idle": "检查设置、缓存健康状况和前后端 UI 版本是否一致。",
|
||||
"ok": "当前环境未发现活动问题。",
|
||||
"warning": "发现 {count} 个问题,大多数可以直接在这里处理。",
|
||||
"error": "发现 {count} 个需要尽快处理的问题。"
|
||||
},
|
||||
"status": {
|
||||
"ok": "健康",
|
||||
"warning": "需要关注",
|
||||
"error": "需要处理"
|
||||
},
|
||||
"actions": {
|
||||
"runAgain": "重新检查",
|
||||
"exportBundle": "导出诊断包"
|
||||
},
|
||||
"toast": {
|
||||
"loadFailed": "加载诊断结果失败:{message}",
|
||||
"repairSuccess": "缓存重建完成。",
|
||||
"repairFailed": "缓存重建失败:{message}",
|
||||
"exportSuccess": "诊断包已导出。",
|
||||
"exportFailed": "导出诊断包失败:{message}"
|
||||
}
|
||||
},
|
||||
"banners": {
|
||||
"versionMismatch": {
|
||||
"title": "检测到应用更新",
|
||||
@@ -263,7 +263,9 @@
|
||||
"videoSettings": "影片設定",
|
||||
"layoutSettings": "版面設定",
|
||||
"misc": "其他",
|
||||
"backup": "備份",
|
||||
"folderSettings": "預設根目錄",
|
||||
"recipeSettings": "配方",
|
||||
"extraFolderPaths": "額外資料夾路徑",
|
||||
"downloadPathTemplates": "下載路徑範本",
|
||||
"priorityTags": "優先標籤",
|
||||
@@ -291,7 +293,15 @@
|
||||
"blurNsfwContent": "模糊 NSFW 內容",
|
||||
"blurNsfwContentHelp": "模糊成熟(NSFW)內容預覽圖片",
|
||||
"showOnlySfw": "僅顯示 SFW 結果",
|
||||
"showOnlySfwHelp": "瀏覽和搜尋時過濾所有 NSFW 內容"
|
||||
"showOnlySfwHelp": "瀏覽和搜尋時過濾所有 NSFW 內容",
|
||||
"matureBlurThreshold": "成人內容模糊閾值",
|
||||
"matureBlurThresholdHelp": "設定當啟用 NSFW 模糊時,從哪個評級級別開始模糊過濾。",
|
||||
"matureBlurThresholdOptions": {
|
||||
"pg13": "PG13 及以上",
|
||||
"r": "R 及以上(預設)",
|
||||
"x": "X 及以上",
|
||||
"xxx": "僅 XXX"
|
||||
}
|
||||
},
|
||||
"videoSettings": {
|
||||
"autoplayOnHover": "滑鼠懸停自動播放影片",
|
||||
@@ -315,6 +325,54 @@
|
||||
"saveFailed": "無法儲存跳過路徑:{message}"
|
||||
}
|
||||
},
|
||||
"backup": {
|
||||
"autoEnabled": "自動備份",
|
||||
"autoEnabledHelp": "每天建立一次本地快照,並依保留政策保留最新快照。",
|
||||
"retention": "保留數量",
|
||||
"retentionHelp": "在刪除舊快照之前,要保留多少自動快照。",
|
||||
"management": "備份管理",
|
||||
"managementHelp": "匯出目前的使用者狀態,或從備份封存中還原。",
|
||||
"scopeHelp": "備份你的設定、下載歷史與模型更新狀態。不包含模型檔案或可重建的快取。",
|
||||
"locationSummary": "目前備份位置",
|
||||
"openFolderButton": "開啟備份資料夾",
|
||||
"openFolderSuccess": "已開啟備份資料夾",
|
||||
"openFolderFailed": "無法開啟備份資料夾",
|
||||
"locationCopied": "備份路徑已複製到剪貼簿:{{path}}",
|
||||
"locationClipboardFallback": "備份路徑:{{path}}",
|
||||
"exportButton": "匯出備份",
|
||||
"exportSuccess": "備份匯出成功。",
|
||||
"exportFailed": "備份匯出失敗:{message}",
|
||||
"importButton": "匯入備份",
|
||||
"importConfirm": "要匯入此備份並覆寫本機使用者狀態嗎?",
|
||||
"importSuccess": "備份匯入成功。",
|
||||
"importFailed": "備份匯入失敗:{message}",
|
||||
"latestSnapshot": "最近快照",
|
||||
"latestAutoSnapshot": "最近自動快照",
|
||||
"snapshotCount": "已儲存快照",
|
||||
"noneAvailable": "目前還沒有快照"
|
||||
},
|
||||
"downloadSkipBaseModels": {
|
||||
"label": "跳過這些基礎模型的下載",
|
||||
"help": "適用於所有下載流程。這裡只能選擇受支援的基礎模型。",
|
||||
"searchPlaceholder": "篩選基礎模型...",
|
||||
"empty": "沒有符合目前搜尋條件的基礎模型。",
|
||||
"summary": {
|
||||
"none": "未選擇",
|
||||
"count": "已選擇 {count} 項"
|
||||
},
|
||||
"actions": {
|
||||
"edit": "編輯",
|
||||
"collapse": "收起",
|
||||
"clear": "清空"
|
||||
},
|
||||
"validation": {
|
||||
"saveFailed": "無法儲存已排除的基礎模型:{message}"
|
||||
}
|
||||
},
|
||||
"skipPreviouslyDownloadedModelVersions": {
|
||||
"label": "跳過已下載的模型版本",
|
||||
"help": "啟用後,如果下載歷史服務記錄顯示該版本已下載,LoRA Manager 將跳過下載該模型版本。適用於所有下載流程。"
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "顯示密度",
|
||||
"displayDensityOptions": {
|
||||
@@ -363,12 +421,16 @@
|
||||
"defaultUnetRootHelp": "設定下載、匯入和移動時的預設 Diffusion Model (UNET) 根目錄",
|
||||
"defaultEmbeddingRoot": "Embedding 根目錄",
|
||||
"defaultEmbeddingRootHelp": "設定下載、匯入和移動時的預設 Embedding 根目錄",
|
||||
"recipesPath": "配方儲存路徑",
|
||||
"recipesPathHelp": "已儲存配方的可選自訂目錄。留空則使用第一個 LoRA 根目錄下的 recipes 資料夾。",
|
||||
"recipesPathPlaceholder": "/path/to/recipes",
|
||||
"recipesPathMigrating": "正在遷移配方儲存...",
|
||||
"noDefault": "未設定預設"
|
||||
},
|
||||
"extraFolderPaths": {
|
||||
"title": "額外資料夾路徑",
|
||||
"help": "在 ComfyUI 的標準路徑之外新增額外的模型資料夾。這些路徑單獨儲存,並與預設資料夾一起掃描。",
|
||||
"description": "設定額外的資料夾以掃描模型。這些路徑是 LoRA Manager 特有的,將與 ComfyUI 的預設路徑合併。",
|
||||
"description": "LoRA Manager 專屬的額外模型根目錄。從 ComfyUI 標準資料夾之外的位置載入模型,特別適合管理大型模型庫,避免影響 ComfyUI 效能。",
|
||||
"restartRequired": "Requires restart to take effect",
|
||||
"modelTypes": {
|
||||
"lora": "LoRA 路徑",
|
||||
"checkpoint": "Checkpoint 路徑",
|
||||
@@ -376,7 +438,7 @@
|
||||
"embedding": "Embedding 路徑"
|
||||
},
|
||||
"pathPlaceholder": "/額外/模型/路徑",
|
||||
"saveSuccess": "額外資料夾路徑已更新。",
|
||||
"saveSuccess": "額外資料夾路徑已更新,需要重啟才能生效。",
|
||||
"saveError": "更新額外資料夾路徑失敗:{message}",
|
||||
"validation": {
|
||||
"duplicatePath": "此路徑已設定"
|
||||
@@ -575,6 +637,7 @@
|
||||
"skipMetadataRefresh": "跳過所選模型的元數據更新",
|
||||
"resumeMetadataRefresh": "恢復所選模型的元數據更新",
|
||||
"deleteAll": "刪除全部模型",
|
||||
"downloadMissingLoras": "下載缺失的 LoRAs",
|
||||
"clear": "清除選取",
|
||||
"skipMetadataRefreshCount": "跳過({count} 個模型)",
|
||||
"resumeMetadataRefreshCount": "恢復({count} 個模型)",
|
||||
@@ -645,6 +708,8 @@
|
||||
"root": "根目錄",
|
||||
"browseFolders": "瀏覽資料夾:",
|
||||
"downloadAndSaveRecipe": "下載並儲存配方",
|
||||
"importRecipeOnly": "僅匯入配方",
|
||||
"importAndDownload": "匯入並下載",
|
||||
"downloadMissingLoras": "下載缺少的 LoRA",
|
||||
"saveRecipe": "儲存配方",
|
||||
"loraCountInfo": "(庫存 {existing}/{total})",
|
||||
@@ -732,61 +797,61 @@
|
||||
}
|
||||
},
|
||||
"batchImport": {
|
||||
"title": "[TODO: Translate] Batch Import Recipes",
|
||||
"action": "[TODO: Translate] Batch Import",
|
||||
"urlList": "[TODO: Translate] URL List",
|
||||
"directory": "[TODO: Translate] Directory",
|
||||
"urlDescription": "[TODO: Translate] Enter image URLs or local file paths (one per line). Each will be imported as a recipe.",
|
||||
"directoryDescription": "[TODO: Translate] Enter a directory path to import all images from that folder.",
|
||||
"urlsLabel": "[TODO: Translate] Image URLs or Local Paths",
|
||||
"urlsPlaceholder": "[TODO: Translate] https://civitai.com/images/...\nhttps://civitai.com/images/...\nC:/path/to/image.png\n...",
|
||||
"urlsHint": "[TODO: Translate] Enter one URL or path per line",
|
||||
"directoryPath": "[TODO: Translate] Directory Path",
|
||||
"directoryPlaceholder": "[TODO: Translate] /path/to/images/folder",
|
||||
"browse": "[TODO: Translate] Browse",
|
||||
"recursive": "[TODO: Translate] Include subdirectories",
|
||||
"tagsOptional": "[TODO: Translate] Tags (optional, applied to all recipes)",
|
||||
"tagsPlaceholder": "[TODO: Translate] Enter tags separated by commas",
|
||||
"tagsHint": "[TODO: Translate] Tags will be added to all imported recipes",
|
||||
"skipNoMetadata": "[TODO: Translate] Skip images without metadata",
|
||||
"skipNoMetadataHelp": "[TODO: Translate] Images without LoRA metadata will be skipped automatically.",
|
||||
"start": "[TODO: Translate] Start Import",
|
||||
"startImport": "[TODO: Translate] Start Import",
|
||||
"importing": "[TODO: Translate] Importing...",
|
||||
"progress": "[TODO: Translate] Progress",
|
||||
"total": "[TODO: Translate] Total",
|
||||
"success": "[TODO: Translate] Success",
|
||||
"failed": "[TODO: Translate] Failed",
|
||||
"skipped": "[TODO: Translate] Skipped",
|
||||
"current": "[TODO: Translate] Current",
|
||||
"currentItem": "[TODO: Translate] Current",
|
||||
"preparing": "[TODO: Translate] Preparing...",
|
||||
"cancel": "[TODO: Translate] Cancel",
|
||||
"cancelImport": "[TODO: Translate] Cancel",
|
||||
"cancelled": "[TODO: Translate] Import cancelled",
|
||||
"completed": "[TODO: Translate] Import completed",
|
||||
"completedWithErrors": "[TODO: Translate] Completed with errors",
|
||||
"completedSuccess": "[TODO: Translate] Successfully imported {count} recipe(s)",
|
||||
"successCount": "[TODO: Translate] Successful",
|
||||
"failedCount": "[TODO: Translate] Failed",
|
||||
"skippedCount": "[TODO: Translate] Skipped",
|
||||
"totalProcessed": "[TODO: Translate] Total processed",
|
||||
"viewDetails": "[TODO: Translate] View Details",
|
||||
"newImport": "[TODO: Translate] New Import",
|
||||
"manualPathEntry": "[TODO: Translate] Please enter the directory path manually. File browser is not available in this browser.",
|
||||
"batchImportDirectorySelected": "[TODO: Translate] Directory selected: {name}. You may need to enter the full path manually.",
|
||||
"batchImportManualEntryRequired": "[TODO: Translate] File browser not available. Please enter the directory path manually.",
|
||||
"backToParent": "[TODO: Translate] Back to parent directory",
|
||||
"folders": "[TODO: Translate] Folders",
|
||||
"folderCount": "[TODO: Translate] {count} folders",
|
||||
"imageFiles": "[TODO: Translate] Image Files",
|
||||
"images": "[TODO: Translate] images",
|
||||
"imageCount": "[TODO: Translate] {count} images",
|
||||
"selectFolder": "[TODO: Translate] Select This Folder",
|
||||
"title": "批量匯入配方",
|
||||
"action": "批量匯入",
|
||||
"urlList": "URL 列表",
|
||||
"directory": "目錄",
|
||||
"urlDescription": "輸入圖像 URL 或本地檔案路徑(每行一個)。每個都將作為配方匯入。",
|
||||
"directoryDescription": "輸入目錄路徑以匯入該資料夾中的所有圖像。",
|
||||
"urlsLabel": "圖像 URL 或本地路徑",
|
||||
"urlsPlaceholder": "https://civitai.com/images/...\nhttps://civitai.com/images/...\nC:/path/to/image.png\n...",
|
||||
"urlsHint": "每行輸入一個 URL 或路徑",
|
||||
"directoryPath": "目錄路徑",
|
||||
"directoryPlaceholder": "/path/to/images/folder",
|
||||
"browse": "瀏覽",
|
||||
"recursive": "包含子目錄",
|
||||
"tagsOptional": "標籤(可選,應用於所有配方)",
|
||||
"tagsPlaceholder": "輸入以逗號分隔的標籤",
|
||||
"tagsHint": "標籤將被添加到所有匯入的配方中",
|
||||
"skipNoMetadata": "跳過無元資料的圖像",
|
||||
"skipNoMetadataHelp": "沒有 LoRA 元資料的圖像將被自動跳過。",
|
||||
"start": "開始匯入",
|
||||
"startImport": "開始匯入",
|
||||
"importing": "匯入中...",
|
||||
"progress": "進度",
|
||||
"total": "總計",
|
||||
"success": "成功",
|
||||
"failed": "失敗",
|
||||
"skipped": "跳過",
|
||||
"current": "當前",
|
||||
"currentItem": "當前項目",
|
||||
"preparing": "準備中...",
|
||||
"cancel": "取消",
|
||||
"cancelImport": "取消匯入",
|
||||
"cancelled": "匯入已取消",
|
||||
"completed": "匯入完成",
|
||||
"completedWithErrors": "匯入完成但有錯誤",
|
||||
"completedSuccess": "成功匯入 {count} 個配方",
|
||||
"successCount": "成功",
|
||||
"failedCount": "失敗",
|
||||
"skippedCount": "跳過",
|
||||
"totalProcessed": "總計處理",
|
||||
"viewDetails": "查看詳情",
|
||||
"newImport": "新建匯入",
|
||||
"manualPathEntry": "請手動輸入目錄路徑。此瀏覽器中檔案瀏覽器不可用。",
|
||||
"batchImportDirectorySelected": "已選擇目錄:{path}",
|
||||
"batchImportManualEntryRequired": "檔案瀏覽器不可用。請手動輸入目錄路徑。",
|
||||
"backToParent": "返回上級目錄",
|
||||
"folders": "資料夾",
|
||||
"folderCount": "{count} 個資料夾",
|
||||
"imageFiles": "圖像檔案",
|
||||
"images": "圖像",
|
||||
"imageCount": "{count} 個圖像",
|
||||
"selectFolder": "選擇此資料夾",
|
||||
"errors": {
|
||||
"enterUrls": "[TODO: Translate] Please enter at least one URL or path",
|
||||
"enterDirectory": "[TODO: Translate] Please enter a directory path",
|
||||
"startFailed": "[TODO: Translate] Failed to start import: {message}"
|
||||
"enterUrls": "請輸入至少一個 URL 或路徑",
|
||||
"enterDirectory": "請輸入目錄路徑",
|
||||
"startFailed": "啟動匯入失敗:{message}"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -797,7 +862,8 @@
"diffusion_model": "Diffusion Model"
},
"contextMenu": {
"moveToOtherTypeFolder": "移動到 {otherType} 資料夾"
"moveToOtherTypeFolder": "移動到 {otherType} 資料夾",
"sendToWorkflow": "傳送到工作流"
}
},
"embeddings": {
@@ -810,8 +876,8 @@
"unpinSidebar": "取消固定側邊欄",
"switchToListView": "切換至列表檢視",
"switchToTreeView": "切換到樹狀檢視",
"recursiveOn": "搜尋子資料夾",
"recursiveOff": "僅搜尋目前資料夾",
"recursiveOn": "包含子資料夾",
"recursiveOff": "僅目前資料夾",
"recursiveUnavailable": "遞迴搜尋僅能在樹狀檢視中使用",
"collapseAllDisabled": "列表檢視下不可用",
"dragDrop": {
@@ -891,6 +957,8 @@
|
||||
"earlyAccess": "早期存取",
|
||||
"earlyAccessTooltip": "需要早期存取",
|
||||
"inLibrary": "已在庫存",
|
||||
"downloaded": "已下載",
|
||||
"downloadedTooltip": "先前已下載,但目前不在你的庫中。",
|
||||
"alreadyInLibrary": "已在庫存",
|
||||
"autoOrganizedPath": "[依路徑範本自動整理]",
|
||||
"errors": {
|
||||
@@ -981,6 +1049,14 @@
|
||||
"save": "更新基礎模型",
|
||||
"cancel": "取消"
|
||||
},
|
||||
"bulkDownloadMissingLoras": {
|
||||
"title": "下載缺失的 LoRAs",
|
||||
"message": "發現 {uniqueCount} 個獨特的缺失 LoRAs(從選取食譜中的 {totalCount} 個總數)。",
|
||||
"previewTitle": "要下載的 LoRAs:",
|
||||
"moreItems": "...還有 {count} 個",
|
||||
"note": "檔案將使用預設路徑模板下載。根據 LoRAs 的數量,這可能需要一些時間。",
|
||||
"downloadButton": "下載 {count} 個 LoRA(s)"
|
||||
},
|
||||
"exampleAccess": {
|
||||
"title": "本機範例圖片",
|
||||
"message": "此模型未找到本機範例圖片。可選擇:",
|
||||
@@ -1032,7 +1108,9 @@
|
||||
"viewOnCivitai": "在 Civitai 查看",
|
||||
"viewOnCivitaiText": "在 Civitai 查看",
|
||||
"viewCreatorProfile": "查看創作者個人檔案",
|
||||
"openFileLocation": "開啟檔案位置"
|
||||
"openFileLocation": "開啟檔案位置",
|
||||
"sendToWorkflow": "傳送到 ComfyUI",
|
||||
"sendToWorkflowText": "傳送到 ComfyUI"
|
||||
},
|
||||
"openFileLocation": {
|
||||
"success": "檔案位置已成功開啟",
|
||||
@@ -1040,6 +1118,9 @@
|
||||
"copied": "路徑已複製到剪貼簿:{{path}}",
|
||||
"clipboardFallback": "路徑:{{path}}"
|
||||
},
|
||||
"sendToWorkflow": {
|
||||
"noFilePath": "無法傳送到 ComfyUI:沒有可用的檔案路徑"
|
||||
},
|
||||
"metadata": {
|
||||
"version": "版本",
|
||||
"fileName": "檔案名稱",
|
||||
@@ -1147,17 +1228,30 @@
|
||||
"days": "{count}天後"
|
||||
},
|
||||
"badges": {
|
||||
"current": "目前版本",
|
||||
"current": "已開啟版本",
|
||||
"currentTooltip": "這是你用來開啟此彈窗的版本",
|
||||
"inLibrary": "已在庫中",
|
||||
"inLibraryTooltip": "此版本已存在於你的本地庫中",
|
||||
"downloaded": "已下載",
|
||||
"downloadedTooltip": "此版本之前下載過,但目前不在你的本地庫中",
|
||||
"newer": "較新版本",
|
||||
"newerTooltip": "此版本比你本地的最新版本更新",
|
||||
"earlyAccess": "搶先體驗",
|
||||
"ignored": "已忽略"
|
||||
"earlyAccessTooltip": "此版本目前需要 Civitai 搶先體驗權限",
|
||||
"ignored": "已忽略",
|
||||
"ignoredTooltip": "此版本已關閉更新通知"
|
||||
},
|
||||
"actions": {
|
||||
"download": "下載",
|
||||
"downloadTooltip": "下載此版本",
|
||||
"downloadEarlyAccessTooltip": "從 Civitai 下載此搶先體驗版本",
|
||||
"delete": "刪除",
|
||||
"deleteTooltip": "刪除此本地版本",
|
||||
"ignore": "忽略",
|
||||
"unignore": "取消忽略",
|
||||
"ignoreTooltip": "忽略此版本的更新通知",
|
||||
"unignoreTooltip": "恢復此版本的更新通知",
|
||||
"viewVersionOnCivitai": "在 Civitai 上查看版本",
|
||||
"earlyAccessTooltip": "需要購買搶先體驗",
|
||||
"resumeModelUpdates": "恢復追蹤此模型的更新",
|
||||
"ignoreModelUpdates": "忽略此模型的更新",
|
||||
@@ -1297,7 +1391,9 @@
|
||||
"recipeReplaced": "配方已取代於工作流",
|
||||
"recipeFailedToSend": "傳送配方到工作流失敗",
|
||||
"noMatchingNodes": "目前工作流程中沒有相容的節點",
|
||||
"noTargetNodeSelected": "未選擇目標節點"
|
||||
"noTargetNodeSelected": "未選擇目標節點",
|
||||
"modelUpdated": "模型已更新到工作流",
|
||||
"modelFailed": "更新模型節點失敗"
|
||||
},
|
||||
"nodeSelector": {
|
||||
"recipe": "配方",
|
||||
@@ -1448,6 +1544,7 @@
|
||||
"pleaseSelectVersion": "請選擇一個版本",
|
||||
"versionExists": "此版本已存在於您的庫中",
|
||||
"downloadCompleted": "下載成功完成",
|
||||
"downloadSkippedByBaseModel": "由於基礎模型 {baseModel} 已被排除,已跳過下載",
|
||||
"autoOrganizeSuccess": "自動整理已成功完成,共 {count} 個 {type} 已整理",
|
||||
"autoOrganizePartialSuccess": "自動整理完成:已移動 {success} 個,{failures} 個失敗,共 {total} 個模型",
|
||||
"autoOrganizeFailed": "自動整理失敗:{error}",
|
||||
@@ -1467,7 +1564,11 @@
|
||||
"nameUpdated": "配方名稱已更新",
|
||||
"tagsUpdated": "配方標籤已更新",
|
||||
"sourceUrlUpdated": "來源網址已更新",
|
||||
"promptUpdated": "提示詞更新成功",
|
||||
"negativePromptUpdated": "負面提示詞更新成功",
|
||||
"promptEditorHint": "按 Enter 儲存,Shift+Enter 換行",
|
||||
"noRecipeId": "無配方 ID",
|
||||
"sendToWorkflowFailed": "傳送配方到工作流失敗:{message}",
|
||||
"copyFailed": "複製配方語法錯誤:{message}",
|
||||
"noMissingLoras": "無缺少的 LoRA 可下載",
|
||||
"missingLorasInfoFailed": "取得缺少 LoRA 資訊失敗",
|
||||
@@ -1495,16 +1596,20 @@
|
||||
"processingError": "處理錯誤:{message}",
|
||||
"folderBrowserError": "載入資料夾瀏覽器錯誤:{message}",
|
||||
"recipeSaveFailed": "儲存配方失敗:{error}",
|
||||
"recipeSaved": "配方儲存成功",
|
||||
"importFailed": "匯入失敗:{message}",
|
||||
"folderTreeFailed": "載入資料夾樹狀結構失敗",
|
||||
"folderTreeError": "載入資料夾樹狀結構錯誤",
|
||||
"batchImportFailed": "[TODO: Translate] Failed to start batch import: {message}",
|
||||
"batchImportCancelling": "[TODO: Translate] Cancelling batch import...",
|
||||
"batchImportCancelFailed": "[TODO: Translate] Failed to cancel batch import: {message}",
|
||||
"batchImportNoUrls": "[TODO: Translate] Please enter at least one URL or file path",
|
||||
"batchImportNoDirectory": "[TODO: Translate] Please enter a directory path",
|
||||
"batchImportBrowseFailed": "[TODO: Translate] Failed to browse directory: {message}",
|
||||
"batchImportDirectorySelected": "[TODO: Translate] Directory selected: {path}"
|
||||
"batchImportFailed": "啟動批量匯入失敗:{message}",
|
||||
"batchImportCancelling": "正在取消批量匯入...",
|
||||
"batchImportCancelFailed": "取消批量匯入失敗:{message}",
|
||||
"batchImportNoUrls": "請輸入至少一個 URL 或檔案路徑",
|
||||
"batchImportNoDirectory": "請輸入目錄路徑",
|
||||
"batchImportBrowseFailed": "瀏覽目錄失敗:{message}",
|
||||
"batchImportDirectorySelected": "已選擇目錄:{path}",
|
||||
"noRecipesSelected": "未選取任何食譜",
|
||||
"noMissingLorasInSelection": "在選取的食譜中未找到缺失的 LoRAs",
|
||||
"noLoraRootConfigured": "未配置 LoRA 根目錄。請在設定中設定預設的 LoRA 根目錄。"
|
||||
},
|
||||
"models": {
|
||||
"noModelsSelected": "未選擇模型",
|
||||
@@ -1571,6 +1676,8 @@
|
||||
"mappingSaveFailed": "儲存基礎模型對應失敗:{message}",
|
||||
"downloadTemplatesUpdated": "下載路徑範本已更新",
|
||||
"downloadTemplatesFailed": "儲存下載路徑範本失敗:{message}",
|
||||
"recipesPathUpdated": "配方儲存路徑已更新",
|
||||
"recipesPathSaveFailed": "更新配方儲存路徑失敗:{message}",
|
||||
"settingsUpdated": "設定已更新:{setting}",
|
||||
"compactModeToggled": "緊湊模式已{state}",
|
||||
"settingSaveFailed": "儲存設定失敗:{message}",
|
||||
@@ -1714,6 +1821,35 @@
|
||||
"moveFailed": "Failed to move item: {message}"
|
||||
}
|
||||
},
|
||||
"doctor": {
|
||||
"kicker": "系統診斷",
|
||||
"title": "醫生",
|
||||
"buttonTitle": "執行診斷與常見修復",
|
||||
"loading": "正在檢查環境...",
|
||||
"footer": "如果修復後問題仍然存在,請匯出診斷套件。",
|
||||
"summary": {
|
||||
"idle": "針對設定、快取完整性與 UI 一致性執行健康檢查。",
|
||||
"ok": "目前環境中未發現任何活動中的問題。",
|
||||
"warning": "找到 {count} 個問題。大多可以直接在此面板修復。",
|
||||
"error": "應先處理 {count} 個問題,應用程式才能完全正常。"
|
||||
},
|
||||
"status": {
|
||||
"ok": "健康",
|
||||
"warning": "需要注意",
|
||||
"error": "需要處理"
|
||||
},
|
||||
"actions": {
|
||||
"runAgain": "重新執行",
|
||||
"exportBundle": "匯出套件"
|
||||
},
|
||||
"toast": {
|
||||
"loadFailed": "載入診斷失敗:{message}",
|
||||
"repairSuccess": "快取重建完成。",
|
||||
"repairFailed": "快取重建失敗:{message}",
|
||||
"exportSuccess": "診斷套件已匯出。",
|
||||
"exportFailed": "匯出診斷套件失敗:{message}"
|
||||
}
|
||||
},
|
||||
"banners": {
|
||||
"versionMismatch": {
|
||||
"title": "偵測到應用程式更新",
package-lock.json (generated, 3 changes)
@@ -114,7 +114,6 @@
|
||||
}
|
||||
],
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
@@ -138,7 +137,6 @@
|
||||
}
|
||||
],
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
}
|
||||
@@ -1613,7 +1611,6 @@
|
||||
"integrity": "sha512-MyL55p3Ut3cXbeBEG7Hcv0mVM8pp8PBNWxRqchZnSfAiES1v1mRnMeFfaHWIPULpwsYfvO+ZmMZz5tGCnjzDUQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"peer": true,
|
||||
"dependencies": {
|
||||
"cssstyle": "^4.0.1",
|
||||
"data-urls": "^5.0.0",
py/config.py (191 changes)
@@ -25,6 +25,31 @@ standalone_mode = (
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _resolve_valid_default_root(
|
||||
current: str, primary_paths: List[str], name: str
|
||||
) -> str:
|
||||
"""Return a valid default root from the current primary path set."""
|
||||
|
||||
valid_paths = [path for path in primary_paths if isinstance(path, str) and path.strip()]
|
||||
if not valid_paths:
|
||||
return ""
|
||||
|
||||
if current in valid_paths:
|
||||
return current
|
||||
|
||||
if current:
|
||||
logger.info(
|
||||
"Repaired stale %s from '%s' to '%s'",
|
||||
name,
|
||||
current,
|
||||
valid_paths[0],
|
||||
)
|
||||
else:
|
||||
logger.info("Auto-setting %s to '%s'", name, valid_paths[0])
|
||||
|
||||
return valid_paths[0]
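# A minimal sketch of the helper's behaviour, using hypothetical paths:
#
#     _resolve_valid_default_root(
#         current="D:/old/loras",                    # stale, no longer a primary root
#         primary_paths=["D:/ComfyUI/models/loras"],
#         name="default_lora_root",
#     )
#     # -> "D:/ComfyUI/models/loras" (the repair is logged at INFO level)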
|
||||
|
||||
|
||||
def _normalize_folder_paths_for_comparison(
|
||||
folder_paths: Mapping[str, Iterable[str]],
|
||||
) -> Dict[str, Set[str]]:
|
||||
@@ -109,6 +134,7 @@ class Config:
|
||||
self.extra_checkpoints_roots: List[str] = []
|
||||
self.extra_unet_roots: List[str] = []
|
||||
self.extra_embeddings_roots: List[str] = []
|
||||
self.recipes_path: str = ""
|
||||
# Scan symbolic links during initialization
|
||||
self._initialize_symlink_mappings()
|
||||
|
||||
@@ -197,25 +223,23 @@ class Config:
|
||||
"Failed to rename legacy 'default' library: %s", rename_error
|
||||
)
|
||||
|
||||
default_lora_root = comfy_library.get("default_lora_root", "")
|
||||
if not default_lora_root and len(self.loras_roots) == 1:
|
||||
default_lora_root = self.loras_roots[0]
|
||||
default_lora_root = _resolve_valid_default_root(
|
||||
comfy_library.get("default_lora_root", ""),
|
||||
list(self.loras_roots or []),
|
||||
"default_lora_root",
|
||||
)
|
||||
|
||||
default_checkpoint_root = comfy_library.get("default_checkpoint_root", "")
|
||||
if (
|
||||
not default_checkpoint_root
|
||||
and self.checkpoints_roots
|
||||
and len(self.checkpoints_roots) == 1
|
||||
):
|
||||
default_checkpoint_root = self.checkpoints_roots[0]
|
||||
default_checkpoint_root = _resolve_valid_default_root(
|
||||
comfy_library.get("default_checkpoint_root", ""),
|
||||
list(self.checkpoints_roots or []),
|
||||
"default_checkpoint_root",
|
||||
)
|
||||
|
||||
default_embedding_root = comfy_library.get("default_embedding_root", "")
|
||||
if (
|
||||
not default_embedding_root
|
||||
and self.embeddings_roots
|
||||
and len(self.embeddings_roots) == 1
|
||||
):
|
||||
default_embedding_root = self.embeddings_roots[0]
|
||||
default_embedding_root = _resolve_valid_default_root(
|
||||
comfy_library.get("default_embedding_root", ""),
|
||||
list(self.embeddings_roots or []),
|
||||
"default_embedding_root",
|
||||
)
|
||||
|
||||
metadata = dict(comfy_library.get("metadata", {}))
|
||||
metadata.setdefault("display_name", "ComfyUI")
|
||||
@@ -629,6 +653,8 @@ class Config:
|
||||
preview_roots.update(self._expand_preview_root(root))
|
||||
for root in self.extra_embeddings_roots or []:
|
||||
preview_roots.update(self._expand_preview_root(root))
|
||||
if self.recipes_path:
|
||||
preview_roots.update(self._expand_preview_root(self.recipes_path))
|
||||
|
||||
for target, link in self._path_mappings.items():
|
||||
preview_roots.update(self._expand_preview_root(target))
|
||||
@@ -705,6 +731,122 @@ class Config:
|
||||
|
||||
return unique_paths
|
||||
|
||||
@staticmethod
|
||||
def _normalize_path_for_comparison(
|
||||
path: str, *, resolve_realpath: bool = False
|
||||
) -> str:
|
||||
"""Normalize a path for equality checks across platforms."""
|
||||
candidate = os.path.realpath(path) if resolve_realpath else path
|
||||
return os.path.normcase(os.path.normpath(candidate)).replace(os.sep, "/")
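# A quick sketch of the normalization above with hypothetical Windows paths;
# both spellings compare equal after normalization:
#
#     _normalize_path_for_comparison("D:\\Models\\Loras")   # -> "d:/models/loras"
#     _normalize_path_for_comparison("D:/models/loras/")    # -> "d:/models/loras"
#
# On POSIX systems os.path.normcase is a no-op, so only separators and
# redundant components are normalized.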
|
||||
|
||||
def _filter_overlapping_extra_lora_paths(
|
||||
self,
|
||||
primary_paths: Iterable[str],
|
||||
extra_paths: Iterable[str],
|
||||
) -> List[str]:
|
||||
"""Drop extra LoRA paths that resolve to the same physical location as primary roots."""
|
||||
|
||||
primary_map = {
|
||||
self._normalize_path_for_comparison(path, resolve_realpath=True): path
|
||||
for path in primary_paths
|
||||
if isinstance(path, str) and path.strip() and os.path.exists(path)
|
||||
}
|
||||
primary_symlink_map = self._collect_first_level_symlink_targets(primary_paths)
|
||||
filtered: List[str] = []
|
||||
|
||||
for original_path in extra_paths:
|
||||
if not isinstance(original_path, str):
|
||||
continue
|
||||
|
||||
stripped = original_path.strip()
|
||||
if not stripped:
|
||||
continue
|
||||
if not os.path.exists(stripped):
|
||||
continue
|
||||
|
||||
real_path = self._normalize_path_for_comparison(
|
||||
stripped,
|
||||
resolve_realpath=True,
|
||||
)
|
||||
normalized_path = os.path.normpath(stripped).replace(os.sep, "/")
|
||||
primary_path = primary_map.get(real_path)
|
||||
if primary_path:
|
||||
# Config loading should stay tolerant of existing invalid state and warn.
|
||||
logger.warning(
|
||||
"Detected the same LoRA folder in both ComfyUI model paths and "
|
||||
"LoRA Manager Extra Folder Paths. This can cause duplicate items or "
|
||||
"other unexpected behavior, and it usually means the path setup is "
|
||||
"not doing what you intended. LoRA Manager will keep the ComfyUI "
|
||||
"path and ignore this Extra Folder Paths entry: '%s'. Please review "
|
||||
"your path settings and remove the duplicate entry.",
|
||||
normalized_path,
|
||||
)
|
||||
continue
|
||||
|
||||
symlink_path = primary_symlink_map.get(real_path)
|
||||
if symlink_path:
|
||||
# Config loading should stay tolerant of existing invalid state and warn.
|
||||
logger.warning(
|
||||
"Detected the same LoRA folder in both ComfyUI model paths and "
|
||||
"LoRA Manager Extra Folder Paths. This can cause duplicate items or "
|
||||
"other unexpected behavior, and it usually means the path setup is "
|
||||
"not doing what you intended. LoRA Manager will keep the ComfyUI "
|
||||
"path and ignore this Extra Folder Paths entry: '%s'. Please review "
|
||||
"your path settings and remove the duplicate entry.",
|
||||
normalized_path,
|
||||
)
|
||||
continue
|
||||
|
||||
filtered.append(stripped)
|
||||
|
||||
return filtered
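# An illustrative scenario for the filter above (paths are hypothetical):
#
#     primary_paths = ["D:/ComfyUI/models/loras"]
#     extra_paths   = ["D:/ComfyUI/models/loras", "E:/extra_loras"]
#
# The first extra entry resolves to the same physical location as a primary
# root, so it is logged as a duplicate and dropped; only "E:/extra_loras" is kept.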
|
||||
|
||||
def _collect_first_level_symlink_targets(
|
||||
self, roots: Iterable[str]
|
||||
) -> Dict[str, str]:
|
||||
"""Return real-path -> link-path mappings for first-level symlinks under the given roots."""
|
||||
|
||||
targets: Dict[str, str] = {}
|
||||
for root in roots:
|
||||
if not isinstance(root, str):
|
||||
continue
|
||||
stripped_root = root.strip()
|
||||
if not stripped_root or not os.path.isdir(stripped_root):
|
||||
continue
|
||||
|
||||
try:
|
||||
with os.scandir(stripped_root) as iterator:
|
||||
for entry in iterator:
|
||||
try:
|
||||
if not self._entry_is_symlink(entry):
|
||||
continue
|
||||
target_path = os.path.realpath(entry.path)
|
||||
if not os.path.isdir(target_path):
|
||||
continue
|
||||
|
||||
normalized_target = self._normalize_path_for_comparison(
|
||||
target_path,
|
||||
resolve_realpath=True,
|
||||
)
|
||||
normalized_link = os.path.normpath(entry.path).replace(
|
||||
os.sep, "/"
|
||||
)
|
||||
targets.setdefault(normalized_target, normalized_link)
|
||||
except Exception as inner_exc:
|
||||
logger.debug(
|
||||
"Error collecting LoRA symlink target for %s: %s",
|
||||
entry.path,
|
||||
inner_exc,
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.debug(
|
||||
"Error scanning first-level LoRA symlinks in %s: %s",
|
||||
stripped_root,
|
||||
exc,
|
||||
)
|
||||
|
||||
return targets
|
||||
|
||||
def _prepare_checkpoint_paths(
|
||||
self, checkpoint_paths: Iterable[str], unet_paths: Iterable[str]
|
||||
) -> Tuple[List[str], List[str], List[str]]:
|
||||
@@ -772,9 +914,11 @@ class Config:
|
||||
self,
|
||||
folder_paths: Mapping[str, Iterable[str]],
|
||||
extra_folder_paths: Optional[Mapping[str, Iterable[str]]] = None,
|
||||
recipes_path: str = "",
|
||||
) -> None:
|
||||
self._path_mappings.clear()
|
||||
self._preview_root_paths = set()
|
||||
self.recipes_path = recipes_path if isinstance(recipes_path, str) else ""
|
||||
|
||||
lora_paths = folder_paths.get("loras", []) or []
|
||||
checkpoint_paths = folder_paths.get("checkpoints", []) or []
|
||||
@@ -796,7 +940,11 @@ class Config:
|
||||
extra_unet_paths = extra_paths.get("unet", []) or []
|
||||
extra_embedding_paths = extra_paths.get("embeddings", []) or []
|
||||
|
||||
self.extra_loras_roots = self._prepare_lora_paths(extra_lora_paths)
|
||||
filtered_extra_lora_paths = self._filter_overlapping_extra_lora_paths(
|
||||
self.loras_roots,
|
||||
extra_lora_paths,
|
||||
)
|
||||
self.extra_loras_roots = self._prepare_lora_paths(filtered_extra_lora_paths)
|
||||
(
|
||||
_,
|
||||
self.extra_checkpoints_roots,
|
||||
@@ -1026,7 +1174,12 @@ class Config:
|
||||
if not isinstance(extra_folder_paths, Mapping):
|
||||
extra_folder_paths = None
|
||||
|
||||
self._apply_library_paths(folder_paths, extra_folder_paths)
|
||||
recipes_path = (
|
||||
str(library_config.get("recipes_path", ""))
|
||||
if isinstance(library_config, Mapping)
|
||||
else ""
|
||||
)
|
||||
self._apply_library_paths(folder_paths, extra_folder_paths, recipes_path)
|
||||
|
||||
logger.info(
|
||||
"Applied library settings with %d lora roots (%d extra), %d checkpoint roots (%d extra), and %d embedding roots (%d extra)",
|
||||
|
||||
@@ -222,6 +222,7 @@ class LoraManager:
|
||||
|
||||
# Register DownloadManager with ServiceRegistry
|
||||
await ServiceRegistry.get_download_manager()
|
||||
await ServiceRegistry.get_backup_service()
|
||||
|
||||
from .services.metadata_service import initialize_metadata_providers
|
||||
|
||||
|
||||
@@ -149,9 +149,12 @@ class MetadataHook:
|
||||
# Store the original _async_map_node_over_list function
|
||||
original_map_node_over_list = getattr(execution, map_node_func_name)
|
||||
|
||||
# Wrapped async function, compatible with both stable and nightly
|
||||
async def async_map_node_over_list_with_metadata(prompt_id, unique_id, obj, input_data_all, func, allow_interrupt=False, execution_block_cb=None, pre_execute_cb=None, *args, **kwargs):
|
||||
hidden_inputs = kwargs.get('hidden_inputs', None)
|
||||
# Wrapped async function - signature must exactly match _async_map_node_over_list
|
||||
async def async_map_node_over_list_with_metadata(
|
||||
prompt_id, unique_id, obj, input_data_all, func,
|
||||
allow_interrupt=False, execution_block_cb=None,
|
||||
pre_execute_cb=None, v3_data=None
|
||||
):
|
||||
# Only collect metadata when calling the main function of nodes
|
||||
if func == obj.FUNCTION and hasattr(obj, '__class__'):
|
||||
try:
|
||||
@@ -164,10 +167,10 @@ class MetadataHook:
|
||||
except Exception as e:
|
||||
logger.error(f"Error collecting metadata (pre-execution): {str(e)}")
|
||||
|
||||
# Call original function with all args/kwargs
|
||||
# Call original function with exact parameters
|
||||
results = await original_map_node_over_list(
|
||||
prompt_id, unique_id, obj, input_data_all, func,
|
||||
allow_interrupt, execution_block_cb, pre_execute_cb, *args, **kwargs
|
||||
allow_interrupt, execution_block_cb, pre_execute_cb, v3_data=v3_data
|
||||
)
|
||||
|
||||
if func == obj.FUNCTION and hasattr(obj, '__class__'):
|
||||
|
||||
@@ -595,6 +595,15 @@ class MetadataProcessor:
|
||||
if negative_node_id and negative_node_id in metadata.get(PROMPTS, {}):
|
||||
params["negative_prompt"] = metadata[PROMPTS][negative_node_id].get("text", "")
|
||||
else:
|
||||
# Generic guider nodes often expose separate positive/negative inputs.
|
||||
positive_node_id = MetadataProcessor.trace_node_input(prompt, guider_node_id, "positive", max_depth=10)
|
||||
if not positive_node_id:
|
||||
positive_node_id = MetadataProcessor.trace_node_input(prompt, guider_node_id, "conditioning", max_depth=10)
|
||||
if positive_node_id and positive_node_id in metadata.get(PROMPTS, {}):
|
||||
params["prompt"] = metadata[PROMPTS][positive_node_id].get("text", "")
|
||||
|
||||
negative_node_id = MetadataProcessor.trace_node_input(prompt, guider_node_id, "negative", max_depth=10)
|
||||
if not negative_node_id:
|
||||
negative_node_id = MetadataProcessor.trace_node_input(prompt, guider_node_id, "conditioning", max_depth=10)
|
||||
if negative_node_id and negative_node_id in metadata.get(PROMPTS, {}):
|
||||
params["negative_prompt"] = metadata[PROMPTS][negative_node_id].get("text", "")
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
|
||||
from .constants import MODELS, PROMPTS, SAMPLING, LORAS, SIZE, IMAGES, IS_SAMPLER
|
||||
|
||||
@@ -427,6 +429,75 @@ class ImageSizeExtractor(NodeMetadataExtractor):
|
||||
"node_id": node_id
|
||||
}
|
||||
|
||||
class RgthreePowerLoraLoaderExtractor(NodeMetadataExtractor):
|
||||
"""Extract LoRA metadata from rgthree Power Lora Loader.
|
||||
|
||||
The node passes LoRAs as dynamic kwargs: LORA_1, LORA_2, ... each containing
|
||||
{'on': bool, 'lora': filename, 'strength': float, 'strengthTwo': float}.
|
||||
"""
|
||||
@staticmethod
|
||||
def extract(node_id, inputs, outputs, metadata):
|
||||
if not inputs:
|
||||
return
|
||||
|
||||
active_loras = []
|
||||
for key, value in inputs.items():
|
||||
if not key.upper().startswith('LORA_'):
|
||||
continue
|
||||
if not isinstance(value, dict):
|
||||
continue
|
||||
if not value.get('on') or not value.get('lora'):
|
||||
continue
|
||||
lora_name = os.path.splitext(os.path.basename(value['lora']))[0]
|
||||
active_loras.append({
|
||||
"name": lora_name,
|
||||
"strength": round(float(value.get('strength', 1.0)), 2)
|
||||
})
|
||||
|
||||
if active_loras:
|
||||
metadata[LORAS][node_id] = {
|
||||
"lora_list": active_loras,
|
||||
"node_id": node_id
|
||||
}
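# A hypothetical `inputs` payload for the extractor above:
#
#     inputs = {
#         "LORA_1": {"on": True,  "lora": "styles/inkwash.safetensors", "strength": 0.8, "strengthTwo": 0.8},
#         "LORA_2": {"on": False, "lora": "unused.safetensors", "strength": 1.0},
#     }
#
# Only LORA_1 is active, so the extractor records:
#
#     metadata[LORAS][node_id] == {
#         "lora_list": [{"name": "inkwash", "strength": 0.8}],
#         "node_id": node_id,
#     }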
|
||||
|
||||
|
||||
class TensorRTLoaderExtractor(NodeMetadataExtractor):
|
||||
"""Extract checkpoint metadata from TensorRT Loader.
|
||||
|
||||
extract() parses the engine filename from 'unet_name' as a best-effort
|
||||
fallback (strips profile suffix after '_$' and counter suffix).
|
||||
|
||||
update() checks if the output MODEL has attachments["source_model"]
|
||||
set by the node (NubeBuster fork) and overrides with the real name.
|
||||
Vanilla TRT doesn't set this — the filename parse stands.
|
||||
"""
|
||||
@staticmethod
|
||||
def extract(node_id, inputs, outputs, metadata):
|
||||
if not inputs or "unet_name" not in inputs:
|
||||
return
|
||||
unet_name = inputs.get("unet_name")
|
||||
# Strip path and extension, then drop the $_profile suffix
|
||||
model_name = os.path.splitext(os.path.basename(unet_name))[0]
|
||||
if "_$" in model_name:
|
||||
model_name = model_name[:model_name.index("_$")]
|
||||
# Strip counter suffix (e.g. _00001_) left by ComfyUI's save path
|
||||
model_name = re.sub(r'_\d+_?$', '', model_name)
|
||||
_store_checkpoint_metadata(metadata, node_id, model_name)
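# Illustrative filename parses for the fallback above (engine names are hypothetical):
#
#     "engines/sdxl_base_$dyn-b-1-4-2.engine"  -> "sdxl_base"   (profile suffix stripped)
#     "flux_dev_00002_.engine"                 -> "flux_dev"    (counter suffix stripped)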
|
||||
|
||||
@staticmethod
|
||||
def update(node_id, outputs, metadata):
|
||||
if not outputs or not isinstance(outputs, list) or len(outputs) == 0:
|
||||
return
|
||||
first_output = outputs[0]
|
||||
if not isinstance(first_output, tuple) or len(first_output) < 1:
|
||||
return
|
||||
model = first_output[0]
|
||||
# NubeBuster fork sets attachments["source_model"] on the ModelPatcher
|
||||
source_model = getattr(model, 'attachments', {}).get("source_model")
|
||||
if source_model:
|
||||
_store_checkpoint_metadata(metadata, node_id, source_model)
|
||||
|
||||
|
||||
class LoraLoaderManagerExtractor(NodeMetadataExtractor):
|
||||
@staticmethod
|
||||
def extract(node_id, inputs, outputs, metadata):
|
||||
@@ -577,8 +648,6 @@ class SamplerCustomAdvancedExtractor(BaseSamplerExtractor):
|
||||
# Extract latent dimensions
|
||||
BaseSamplerExtractor.extract_latent_dimensions(node_id, inputs, metadata)
|
||||
|
||||
import json
|
||||
|
||||
class CLIPTextEncodeFluxExtractor(NodeMetadataExtractor):
|
||||
@staticmethod
|
||||
def extract(node_id, inputs, outputs, metadata):
|
||||
@@ -715,8 +784,11 @@ NODE_EXTRACTORS = {
|
||||
"UnetLoaderGGUF": UNETLoaderExtractor, # Updated to use dedicated extractor
|
||||
"LoraLoader": LoraLoaderExtractor,
|
||||
"LoraLoaderLM": LoraLoaderManagerExtractor,
|
||||
"RgthreePowerLoraLoader": RgthreePowerLoraLoaderExtractor,
|
||||
"TensorRTLoader": TensorRTLoaderExtractor,
|
||||
# Conditioning
|
||||
"CLIPTextEncode": CLIPTextEncodeExtractor,
|
||||
"CLIPTextEncodeAttentionBias": CLIPTextEncodeExtractor, # From https://github.com/silveroxides/ComfyUI_PromptAttention
|
||||
"PromptLM": CLIPTextEncodeExtractor,
|
||||
"CLIPTextEncodeFlux": CLIPTextEncodeFluxExtractor, # Add CLIPTextEncodeFlux
|
||||
"WAS_Text_to_Conditioning": CLIPTextEncodeExtractor,
|
||||
|
||||
@@ -4,15 +4,21 @@ from typing import Awaitable, Callable, Dict, List
|
||||
|
||||
from aiohttp import web
|
||||
|
||||
# Use wildcard for CivitAI to support their CDN subdomains (e.g., image-b2.civitai.com)
|
||||
# Security note: This is acceptable because:
|
||||
# 1. CSP img-src only controls image/video loading, not script execution
|
||||
# 2. All *.civitai.com subdomains are controlled by Civitai
|
||||
# 3. Explicit domain list would require constant updates as Civitai adds CDN nodes
|
||||
REMOTE_MEDIA_SOURCES = (
|
||||
"https://image.civitai.com",
|
||||
"https://*.civitai.com",
|
||||
"https://img.genur.art",
|
||||
)
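# Sketch of the intended effect (header values are illustrative): an upstream
# Content-Security-Policy such as
#
#     img-src 'self' data:
#
# is relaxed by this middleware to roughly
#
#     img-src 'self' data: https://image.civitai.com https://*.civitai.com https://img.genur.art
#
# so previews served from Civitai CDN subdomains (e.g. image-b2.civitai.com) can load.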
|
||||
|
||||
|
||||
@web.middleware
|
||||
async def relax_csp_for_remote_media(
|
||||
request: web.Request, handler: Callable[[web.Request], Awaitable[web.StreamResponse]]
|
||||
request: web.Request,
|
||||
handler: Callable[[web.Request], Awaitable[web.StreamResponse]],
|
||||
) -> web.StreamResponse:
|
||||
"""Allow LoRA Manager media previews to load from trusted remote domains.
|
||||
|
||||
@@ -43,7 +49,9 @@ async def relax_csp_for_remote_media(
|
||||
directive_order.append(name)
|
||||
directives[name] = values
|
||||
|
||||
def merge_sources(name: str, sources: List[str], defaults: List[str] | None = None) -> None:
|
||||
def merge_sources(
|
||||
name: str, sources: List[str], defaults: List[str] | None = None
|
||||
) -> None:
|
||||
existing = directives.get(name, list(defaults or []))
|
||||
|
||||
for source in sources:
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
import logging
|
||||
import os
|
||||
from typing import List, Tuple
|
||||
import comfy.sd
|
||||
import folder_paths
|
||||
import comfy.sd # type: ignore
|
||||
import folder_paths # type: ignore
|
||||
from ..utils.utils import get_checkpoint_info_absolute, _format_model_name_for_comfyui
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -15,7 +14,7 @@ class CheckpointLoaderLM:
|
||||
extra folder paths, providing a unified interface for checkpoint loading.
|
||||
"""
|
||||
|
||||
NAME = "CheckpointLoaderLM"
|
||||
NAME = "Checkpoint Loader (LoraManager)"
|
||||
CATEGORY = "Lora Manager/loaders"
|
||||
|
||||
@classmethod
|
||||
@@ -60,7 +59,7 @@ class CheckpointLoaderLM:
|
||||
if item.get("sub_type") == "checkpoint":
|
||||
file_path = item.get("file_path", "")
|
||||
if file_path:
|
||||
# Format as ComfyUI-style: "folder/model_name.ext"
|
||||
# Format using relative path with OS-native separator
|
||||
formatted_name = _format_model_name_for_comfyui(
|
||||
file_path, model_roots
|
||||
)
|
||||
@@ -94,7 +93,7 @@ class CheckpointLoaderLM:
|
||||
"""Load a checkpoint by name, supporting extra folder paths
|
||||
|
||||
Args:
|
||||
ckpt_name: The name of the checkpoint to load (format: "folder/model_name.ext")
|
||||
ckpt_name: The name of the checkpoint to load (relative path with extension)
|
||||
|
||||
Returns:
|
||||
Tuple of (MODEL, CLIP, VAE)
|
||||
@@ -108,10 +107,6 @@ class CheckpointLoaderLM:
|
||||
"Make sure the checkpoint is indexed and try again."
|
||||
)
|
||||
|
||||
# Check if it's a GGUF model
|
||||
if ckpt_path.endswith(".gguf"):
|
||||
return self._load_gguf_checkpoint(ckpt_path, ckpt_name)
|
||||
|
||||
# Load regular checkpoint using ComfyUI's API
|
||||
logger.info(f"Loading checkpoint from: {ckpt_path}")
|
||||
out = comfy.sd.load_checkpoint_guess_config(
|
||||
@@ -121,64 +116,3 @@ class CheckpointLoaderLM:
|
||||
embedding_directory=folder_paths.get_folder_paths("embeddings"),
|
||||
)
|
||||
return out[:3]
|
||||
|
||||
def _load_gguf_checkpoint(self, ckpt_path: str, ckpt_name: str) -> Tuple:
|
||||
"""Load a GGUF format checkpoint
|
||||
|
||||
Args:
|
||||
ckpt_path: Absolute path to the GGUF file
|
||||
ckpt_name: Name of the checkpoint for error messages
|
||||
|
||||
Returns:
|
||||
Tuple of (MODEL, CLIP, VAE) - CLIP and VAE may be None for GGUF
|
||||
"""
|
||||
try:
|
||||
# Try to import ComfyUI-GGUF modules
|
||||
from custom_nodes.ComfyUI_GGUF.loader import gguf_sd_loader
|
||||
from custom_nodes.ComfyUI_GGUF.ops import GGMLOps
|
||||
from custom_nodes.ComfyUI_GGUF.nodes import GGUFModelPatcher
|
||||
except ImportError:
|
||||
raise RuntimeError(
|
||||
f"Cannot load GGUF model '{ckpt_name}'. "
|
||||
"ComfyUI-GGUF is not installed. "
|
||||
"Please install ComfyUI-GGUF from https://github.com/city96/ComfyUI-GGUF "
|
||||
"to load GGUF format models."
|
||||
)
|
||||
|
||||
logger.info(f"Loading GGUF checkpoint from: {ckpt_path}")
|
||||
|
||||
try:
|
||||
# Load GGUF state dict
|
||||
sd, extra = gguf_sd_loader(ckpt_path)
|
||||
|
||||
# Prepare kwargs for metadata if supported
|
||||
kwargs = {}
|
||||
import inspect
|
||||
|
||||
valid_params = inspect.signature(
|
||||
comfy.sd.load_diffusion_model_state_dict
|
||||
).parameters
|
||||
if "metadata" in valid_params:
|
||||
kwargs["metadata"] = extra.get("metadata", {})
|
||||
|
||||
# Load the model
|
||||
model = comfy.sd.load_diffusion_model_state_dict(
|
||||
sd, model_options={"custom_operations": GGMLOps()}, **kwargs
|
||||
)
|
||||
|
||||
if model is None:
|
||||
raise RuntimeError(
|
||||
f"Could not detect model type for GGUF checkpoint: {ckpt_path}"
|
||||
)
|
||||
|
||||
# Wrap with GGUFModelPatcher
|
||||
model = GGUFModelPatcher.clone(model)
|
||||
|
||||
# GGUF checkpoints typically don't include CLIP/VAE
|
||||
return (model, None, None)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error loading GGUF checkpoint '{ckpt_name}': {e}")
|
||||
raise RuntimeError(
|
||||
f"Failed to load GGUF checkpoint '{ckpt_name}': {str(e)}"
|
||||
)
py/nodes/gguf_import_helper.py (new file, 161 lines)
@@ -0,0 +1,161 @@
|
||||
"""
|
||||
Helper module to safely import ComfyUI-GGUF modules.
|
||||
|
||||
This module provides a robust way to import ComfyUI-GGUF functionality
|
||||
regardless of how ComfyUI loaded it.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import importlib.util
|
||||
import logging
|
||||
from typing import Optional, Tuple, Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _get_gguf_path() -> str:
|
||||
"""Get the path to ComfyUI-GGUF based on this file's location.
|
||||
|
||||
Since ComfyUI-Lora-Manager and ComfyUI-GGUF are both in custom_nodes/,
|
||||
we can derive the GGUF path from our own location.
|
||||
"""
|
||||
# This file is at: custom_nodes/ComfyUI-Lora-Manager/py/nodes/gguf_import_helper.py
|
||||
# ComfyUI-GGUF is at: custom_nodes/ComfyUI-GGUF
|
||||
current_file = os.path.abspath(__file__)
|
||||
# Go up 4 levels: nodes -> py -> ComfyUI-Lora-Manager -> custom_nodes
|
||||
custom_nodes_dir = os.path.dirname(
|
||||
os.path.dirname(os.path.dirname(os.path.dirname(current_file)))
|
||||
)
|
||||
return os.path.join(custom_nodes_dir, "ComfyUI-GGUF")
|
||||
|
||||
|
||||
def _find_gguf_module() -> Optional[Any]:
|
||||
"""Find ComfyUI-GGUF module in sys.modules.
|
||||
|
||||
ComfyUI registers modules using the full path with dots replaced by _x_.
|
||||
"""
|
||||
gguf_path = _get_gguf_path()
|
||||
sys_module_name = gguf_path.replace(".", "_x_")
|
||||
|
||||
logger.debug(f"[GGUF Import] Looking for module '{sys_module_name}' in sys.modules")
|
||||
if sys_module_name in sys.modules:
|
||||
logger.info(f"[GGUF Import] Found module: '{sys_module_name}'")
|
||||
return sys.modules[sys_module_name]
|
||||
|
||||
logger.debug(f"[GGUF Import] Module not found: '{sys_module_name}'")
|
||||
return None
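# A hypothetical example of the sys.modules key computed above. ComfyUI registers
# custom node packages under their directory path with "." replaced by "_x_":
#
#     "/opt/ComfyUI/custom_nodes/ComfyUI-GGUF".replace(".", "_x_")
#     # -> unchanged (no dots in the path)
#
#     "/home/user/comfy.portable/custom_nodes/ComfyUI-GGUF".replace(".", "_x_")
#     # -> "/home/user/comfy_x_portable/custom_nodes/ComfyUI-GGUF"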
|
||||
|
||||
|
||||
def _load_gguf_modules_directly() -> Optional[Any]:
|
||||
"""Load ComfyUI-GGUF modules directly from file paths."""
|
||||
gguf_path = _get_gguf_path()
|
||||
|
||||
logger.info(f"[GGUF Import] Direct Load: Attempting to load from '{gguf_path}'")
|
||||
|
||||
if not os.path.exists(gguf_path):
|
||||
logger.warning(f"[GGUF Import] Path does not exist: {gguf_path}")
|
||||
return None
|
||||
|
||||
try:
|
||||
namespace = "ComfyUI_GGUF_Dynamic"
|
||||
init_path = os.path.join(gguf_path, "__init__.py")
|
||||
|
||||
if not os.path.exists(init_path):
|
||||
logger.warning(f"[GGUF Import] __init__.py not found at '{init_path}'")
|
||||
return None
|
||||
|
||||
logger.debug(f"[GGUF Import] Loading from '{init_path}'")
|
||||
spec = importlib.util.spec_from_file_location(namespace, init_path)
|
||||
if not spec or not spec.loader:
|
||||
logger.error(f"[GGUF Import] Failed to create spec for '{init_path}'")
|
||||
return None
|
||||
|
||||
package = importlib.util.module_from_spec(spec)
|
||||
package.__path__ = [gguf_path]
|
||||
sys.modules[namespace] = package
|
||||
spec.loader.exec_module(package)
|
||||
logger.debug(f"[GGUF Import] Loaded main package '{namespace}'")
|
||||
|
||||
# Load submodules
|
||||
loaded = []
|
||||
for submod_name in ["loader", "ops", "nodes"]:
|
||||
submod_path = os.path.join(gguf_path, f"{submod_name}.py")
|
||||
if os.path.exists(submod_path):
|
||||
submod_spec = importlib.util.spec_from_file_location(
|
||||
f"{namespace}.{submod_name}", submod_path
|
||||
)
|
||||
if submod_spec and submod_spec.loader:
|
||||
submod = importlib.util.module_from_spec(submod_spec)
|
||||
submod.__package__ = namespace
|
||||
sys.modules[f"{namespace}.{submod_name}"] = submod
|
||||
submod_spec.loader.exec_module(submod)
|
||||
setattr(package, submod_name, submod)
|
||||
loaded.append(submod_name)
|
||||
logger.debug(f"[GGUF Import] Loaded submodule '{submod_name}'")
|
||||
|
||||
logger.info(f"[GGUF Import] Direct Load success: {loaded}")
|
||||
return package
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"[GGUF Import] Direct Load failed: {e}", exc_info=True)
|
||||
return None
|
||||
|
||||
|
||||
def get_gguf_modules() -> Tuple[Any, Any, Any]:
|
||||
"""Get ComfyUI-GGUF modules (loader, ops, nodes).
|
||||
|
||||
Returns:
|
||||
Tuple of (loader_module, ops_module, nodes_module)
|
||||
|
||||
Raises:
|
||||
RuntimeError: If ComfyUI-GGUF cannot be found or loaded.
|
||||
"""
|
||||
logger.debug("[GGUF Import] Starting module search...")
|
||||
|
||||
# Try to find already loaded module first
|
||||
gguf_module = _find_gguf_module()
|
||||
|
||||
if gguf_module is None:
|
||||
logger.info("[GGUF Import] Not found in sys.modules, trying direct load...")
|
||||
gguf_module = _load_gguf_modules_directly()
|
||||
|
||||
if gguf_module is None:
|
||||
raise RuntimeError(
|
||||
"ComfyUI-GGUF is not installed. "
|
||||
"Please install from https://github.com/city96/ComfyUI-GGUF"
|
||||
)
|
||||
|
||||
# Extract submodules
|
||||
loader = getattr(gguf_module, "loader", None)
|
||||
ops = getattr(gguf_module, "ops", None)
|
||||
nodes = getattr(gguf_module, "nodes", None)
|
||||
|
||||
if loader is None or ops is None or nodes is None:
|
||||
missing = [
|
||||
name
|
||||
for name, mod in [("loader", loader), ("ops", ops), ("nodes", nodes)]
|
||||
if mod is None
|
||||
]
|
||||
raise RuntimeError(f"ComfyUI-GGUF missing submodules: {missing}")
|
||||
|
||||
logger.debug("[GGUF Import] All modules loaded successfully")
|
||||
return loader, ops, nodes
|
||||
|
||||
|
||||
def get_gguf_sd_loader():
|
||||
"""Get the gguf_sd_loader function from ComfyUI-GGUF."""
|
||||
loader, _, _ = get_gguf_modules()
|
||||
return getattr(loader, "gguf_sd_loader")
|
||||
|
||||
|
||||
def get_ggml_ops():
|
||||
"""Get the GGMLOps class from ComfyUI-GGUF."""
|
||||
_, ops, _ = get_gguf_modules()
|
||||
return getattr(ops, "GGMLOps")
|
||||
|
||||
|
||||
def get_gguf_model_patcher():
|
||||
"""Get the GGUFModelPatcher class from ComfyUI-GGUF."""
|
||||
_, _, nodes = get_gguf_modules()
|
||||
return getattr(nodes, "GGUFModelPatcher")
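# A minimal usage sketch for the helpers above (assumes ComfyUI-GGUF is installed;
# mirrors the GGUF loading path used elsewhere in this changeset):
#
#     from .gguf_import_helper import get_gguf_sd_loader, get_ggml_ops, get_gguf_model_patcher
#
#     gguf_sd_loader = get_gguf_sd_loader()
#     GGMLOps = get_ggml_ops()
#     GGUFModelPatcher = get_gguf_model_patcher()
#
#     sd, extra = gguf_sd_loader(ckpt_path)          # load the GGUF state dict
#     model = comfy.sd.load_diffusion_model_state_dict(
#         sd, model_options={"custom_operations": GGMLOps()}
#     )
#     model = GGUFModelPatcher.clone(model)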
|
||||
@@ -8,6 +8,7 @@ and tracks the cycle progress which persists across workflow save/load.
|
||||
|
||||
import logging
|
||||
import os
|
||||
|
||||
from ..utils.utils import get_lora_info
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -54,8 +55,14 @@ class LoraCyclerLM:
|
||||
current_index = cycler_config.get("current_index", 1) # 1-based
|
||||
model_strength = float(cycler_config.get("model_strength", 1.0))
|
||||
clip_strength = float(cycler_config.get("clip_strength", 1.0))
|
||||
use_same_clip_strength = cycler_config.get("use_same_clip_strength", True)
|
||||
use_preset_strength = cycler_config.get("use_preset_strength", False)
|
||||
preset_strength_scale = float(cycler_config.get("preset_strength_scale", 1.0))
|
||||
sort_by = "filename"
|
||||
|
||||
# Include "no lora" option
|
||||
include_no_lora = cycler_config.get("include_no_lora", False)
|
||||
|
||||
# Dual-index mechanism for batch queue synchronization
|
||||
execution_index = cycler_config.get("execution_index") # Can be None
|
||||
# next_index_from_config = cycler_config.get("next_index") # Not used on backend
|
||||
@@ -71,7 +78,10 @@ class LoraCyclerLM:
|
||||
|
||||
total_count = len(lora_list)
|
||||
|
||||
if total_count == 0:
|
||||
# Calculate effective total count (includes no lora option if enabled)
|
||||
effective_total_count = total_count + 1 if include_no_lora else total_count
|
||||
|
||||
if total_count == 0 and not include_no_lora:
|
||||
logger.warning("[LoraCyclerLM] No LoRAs available in pool")
|
||||
return {
|
||||
"result": ([],),
|
||||
@@ -93,15 +103,31 @@ class LoraCyclerLM:
|
||||
else:
|
||||
actual_index = current_index
|
||||
|
||||
# Clamp index to valid range (1-based)
|
||||
clamped_index = max(1, min(actual_index, total_count))
|
||||
# Clamp index to valid range (1-based, includes no lora if enabled)
|
||||
clamped_index = max(1, min(actual_index, effective_total_count))
|
||||
|
||||
# Check if current index is the "no lora" option (last position when include_no_lora is True)
|
||||
is_no_lora = include_no_lora and clamped_index == effective_total_count
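# Worked example of the index math above (counts are hypothetical):
#     3 LoRAs in the pool with include_no_lora=True -> effective_total_count == 4
#     clamped_index 1..3 selects a real LoRA; clamped_index 4 is the "No LoRA" slot
#     next_index then wraps from 4 back to 1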
|
||||
|
||||
if is_no_lora:
|
||||
# "No LoRA" option - return empty stack
|
||||
lora_stack = []
|
||||
current_lora_name = "No LoRA"
|
||||
current_lora_filename = "No LoRA"
|
||||
else:
|
||||
# Get LoRA at current index (convert to 0-based for list access)
|
||||
current_lora = lora_list[clamped_index - 1]
|
||||
current_lora_name = current_lora["file_name"]
|
||||
current_lora_filename = current_lora["file_name"]
|
||||
|
||||
# Build LORA_STACK with single LoRA
|
||||
if current_lora["file_name"] == "None":
|
||||
lora_path = None
|
||||
else:
|
||||
lora_path, _ = get_lora_info(current_lora["file_name"])
|
||||
|
||||
if not lora_path:
|
||||
if current_lora["file_name"] != "None":
|
||||
logger.warning(
|
||||
f"[LoraCyclerLM] Could not find path for LoRA: {current_lora['file_name']}"
|
||||
)
|
||||
@@ -109,26 +135,67 @@ class LoraCyclerLM:
|
||||
else:
|
||||
# Normalize path separators
|
||||
lora_path = lora_path.replace("/", os.sep)
|
||||
|
||||
if use_preset_strength:
|
||||
lora_metadata = await lora_service.get_lora_metadata_by_filename(
|
||||
current_lora["file_name"]
|
||||
)
|
||||
if lora_metadata:
|
||||
recommended_strength = (
|
||||
lora_service.get_recommended_strength_from_lora_data(
|
||||
lora_metadata
|
||||
)
|
||||
)
|
||||
if recommended_strength is not None:
|
||||
model_strength = round(
|
||||
recommended_strength * preset_strength_scale, 2
|
||||
)
|
||||
|
||||
if use_same_clip_strength:
|
||||
clip_strength = model_strength
|
||||
else:
|
||||
recommended_clip_strength = (
|
||||
lora_service.get_recommended_clip_strength_from_lora_data(
|
||||
lora_metadata
|
||||
)
|
||||
)
|
||||
if recommended_clip_strength is not None:
|
||||
clip_strength = round(
|
||||
recommended_clip_strength * preset_strength_scale, 2
|
||||
)
|
||||
elif use_same_clip_strength:
|
||||
clip_strength = model_strength
|
||||
elif use_same_clip_strength:
|
||||
clip_strength = model_strength
|
||||
|
||||
lora_stack = [(lora_path, model_strength, clip_strength)]
|
||||
|
||||
# Calculate next index (wrap to 1 if at end)
|
||||
next_index = clamped_index + 1
|
||||
if next_index > total_count:
|
||||
if next_index > effective_total_count:
|
||||
next_index = 1
|
||||
|
||||
# Get next LoRA for UI display (what will be used next generation)
|
||||
is_next_no_lora = include_no_lora and next_index == effective_total_count
|
||||
if is_next_no_lora:
|
||||
next_display_name = "No LoRA"
|
||||
next_lora_filename = "No LoRA"
|
||||
else:
|
||||
next_lora = lora_list[next_index - 1]
|
||||
next_display_name = next_lora["file_name"]
|
||||
next_lora_filename = next_lora["file_name"]
|
||||
|
||||
return {
|
||||
"result": (lora_stack,),
|
||||
"ui": {
|
||||
"current_index": [clamped_index],
|
||||
"next_index": [next_index],
|
||||
"total_count": [total_count],
|
||||
"current_lora_name": [current_lora["file_name"]],
|
||||
"current_lora_filename": [current_lora["file_name"]],
|
||||
"total_count": [
|
||||
total_count
|
||||
], # Return actual LoRA count, not effective_total_count
|
||||
"current_lora_name": [current_lora_name],
|
||||
"current_lora_filename": [current_lora_filename],
|
||||
"next_lora_name": [next_display_name],
|
||||
"next_lora_filename": [next_lora["file_name"]],
|
||||
"next_lora_filename": [next_lora_filename],
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,12 +1,129 @@
|
||||
import importlib
|
||||
import logging
|
||||
import re
|
||||
import comfy.utils # type: ignore
|
||||
|
||||
import comfy.sd # type: ignore
|
||||
import comfy.utils # type: ignore
|
||||
|
||||
from ..utils.utils import get_lora_info_absolute
|
||||
from .utils import FlexibleOptionalInputType, any_type, extract_lora_name, get_loras_list, nunchaku_load_lora
|
||||
from .utils import (
|
||||
FlexibleOptionalInputType,
|
||||
any_type,
|
||||
detect_nunchaku_model_kind,
|
||||
extract_lora_name,
|
||||
get_loras_list,
|
||||
nunchaku_load_lora,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _get_nunchaku_load_qwen_loras():
|
||||
try:
|
||||
module = importlib.import_module(".nunchaku_qwen", __package__)
|
||||
except ImportError as exc:
|
||||
raise RuntimeError(
|
||||
"Qwen-Image LoRA loading requires the ComfyUI runtime with its torch dependency available."
|
||||
) from exc
|
||||
return module.nunchaku_load_qwen_loras
|
||||
|
||||
|
||||
def _collect_stack_entries(lora_stack):
|
||||
entries = []
|
||||
if not lora_stack:
|
||||
return entries
|
||||
|
||||
for lora_path, model_strength, clip_strength in lora_stack:
|
||||
lora_name = extract_lora_name(lora_path)
|
||||
absolute_lora_path, trigger_words = get_lora_info_absolute(lora_name)
|
||||
entries.append({
|
||||
"name": lora_name,
|
||||
"absolute_path": absolute_lora_path,
|
||||
"input_path": lora_path,
|
||||
"model_strength": float(model_strength),
|
||||
"clip_strength": float(clip_strength),
|
||||
"trigger_words": trigger_words,
|
||||
})
|
||||
return entries
|
||||
|
||||
|
||||
def _collect_widget_entries(kwargs):
|
||||
entries = []
|
||||
for lora in get_loras_list(kwargs):
|
||||
if not lora.get("active", False):
|
||||
continue
|
||||
lora_name = lora["name"]
|
||||
model_strength = float(lora["strength"])
|
||||
clip_strength = float(lora.get("clipStrength", model_strength))
|
||||
lora_path, trigger_words = get_lora_info_absolute(lora_name)
|
||||
entries.append({
|
||||
"name": lora_name,
|
||||
"absolute_path": lora_path,
|
||||
"input_path": lora_path,
|
||||
"model_strength": model_strength,
|
||||
"clip_strength": clip_strength,
|
||||
"trigger_words": trigger_words,
|
||||
})
|
||||
return entries
|
||||
|
||||
|
||||
def _format_loaded_loras(loaded_loras):
|
||||
formatted_loras = []
|
||||
for item in loaded_loras:
|
||||
if item["include_clip_strength"]:
|
||||
formatted_loras.append(
|
||||
f"<lora:{item['name']}:{item['model_strength']}:{item['clip_strength']}>"
|
||||
)
|
||||
else:
|
||||
formatted_loras.append(f"<lora:{item['name']}:{item['model_strength']}>")
|
||||
return " ".join(formatted_loras)
|
||||
|
||||
|
||||
def _apply_entries(model, clip, lora_entries, nunchaku_model_kind):
|
||||
loaded_loras = []
|
||||
all_trigger_words = []
|
||||
|
||||
if nunchaku_model_kind == "qwen_image":
|
||||
nunchaku_load_qwen_loras = _get_nunchaku_load_qwen_loras()
|
||||
qwen_lora_configs = []
|
||||
for entry in lora_entries:
|
||||
qwen_lora_configs.append((entry["absolute_path"], entry["model_strength"]))
|
||||
loaded_loras.append({
|
||||
"name": entry["name"],
|
||||
"model_strength": entry["model_strength"],
|
||||
"clip_strength": entry["model_strength"],
|
||||
"include_clip_strength": False,
|
||||
})
|
||||
all_trigger_words.extend(entry["trigger_words"])
|
||||
if qwen_lora_configs:
|
||||
model = nunchaku_load_qwen_loras(model, qwen_lora_configs)
|
||||
return model, clip, loaded_loras, all_trigger_words
|
||||
|
||||
for entry in lora_entries:
|
||||
if nunchaku_model_kind == "flux":
|
||||
model = nunchaku_load_lora(model, entry["input_path"], entry["model_strength"])
|
||||
else:
|
||||
lora = comfy.utils.load_torch_file(entry["absolute_path"], safe_load=True)
|
||||
model, clip = comfy.sd.load_lora_for_models(
|
||||
model,
|
||||
clip,
|
||||
lora,
|
||||
entry["model_strength"],
|
||||
entry["clip_strength"],
|
||||
)
|
||||
|
||||
include_clip_strength = nunchaku_model_kind is None and abs(entry["model_strength"] - entry["clip_strength"]) > 0.001
|
||||
loaded_loras.append({
|
||||
"name": entry["name"],
|
||||
"model_strength": entry["model_strength"],
|
||||
"clip_strength": entry["clip_strength"],
|
||||
"include_clip_strength": include_clip_strength,
|
||||
})
|
||||
all_trigger_words.extend(entry["trigger_words"])
|
||||
|
||||
return model, clip, loaded_loras, all_trigger_words
|
||||
|
||||
|
||||
class LoraLoaderLM:
|
||||
NAME = "Lora Loader (LoraManager)"
|
||||
CATEGORY = "Lora Manager/loaders"
|
||||
@@ -16,7 +133,6 @@ class LoraLoaderLM:
|
||||
return {
|
||||
"required": {
|
||||
"model": ("MODEL",),
|
||||
# "clip": ("CLIP",),
|
||||
"text": ("AUTOCOMPLETE_TEXT_LORAS", {
|
||||
"placeholder": "Search LoRAs to add...",
|
||||
"tooltip": "Format: <lora:lora_name:strength> separated by spaces or punctuation",
|
||||
@@ -31,107 +147,23 @@ class LoraLoaderLM:
|
||||
|
||||
def load_loras(self, model, text, **kwargs):
|
||||
"""Loads multiple LoRAs based on the kwargs input and lora_stack."""
|
||||
loaded_loras = []
|
||||
all_trigger_words = []
|
||||
del text
|
||||
clip = kwargs.get("clip", None)
|
||||
lora_entries = _collect_stack_entries(kwargs.get("lora_stack", None))
|
||||
lora_entries.extend(_collect_widget_entries(kwargs))
|
||||
|
||||
clip = kwargs.get('clip', None)
|
||||
lora_stack = kwargs.get('lora_stack', None)
|
||||
|
||||
# Check if model is a Nunchaku Flux model - simplified approach
|
||||
is_nunchaku_model = False
|
||||
|
||||
try:
|
||||
model_wrapper = model.model.diffusion_model
|
||||
# Check if model is a Nunchaku Flux model using only class name
|
||||
if model_wrapper.__class__.__name__ == "ComfyFluxWrapper":
|
||||
is_nunchaku_model = True
|
||||
nunchaku_model_kind = detect_nunchaku_model_kind(model)
|
||||
if nunchaku_model_kind == "flux":
|
||||
logger.info("Detected Nunchaku Flux model")
|
||||
except (AttributeError, TypeError):
|
||||
# Not a model with the expected structure
|
||||
pass
|
||||
elif nunchaku_model_kind == "qwen_image":
|
||||
logger.info("Detected Nunchaku Qwen-Image model")
|
||||
|
||||
# First process lora_stack if available
|
||||
if lora_stack:
|
||||
for lora_path, model_strength, clip_strength in lora_stack:
|
||||
# Extract lora name and convert to absolute path
|
||||
# lora_stack stores relative paths, but load_torch_file needs absolute paths
|
||||
lora_name = extract_lora_name(lora_path)
|
||||
absolute_lora_path, trigger_words = get_lora_info_absolute(lora_name)
|
||||
|
||||
# Apply the LoRA using the appropriate loader
|
||||
if is_nunchaku_model:
|
||||
# Use our custom function for Flux models
|
||||
model = nunchaku_load_lora(model, lora_path, model_strength)
|
||||
# clip remains unchanged for Nunchaku models
|
||||
else:
|
||||
# Use lower-level API to load LoRA directly without folder_paths validation
|
||||
lora = comfy.utils.load_torch_file(absolute_lora_path, safe_load=True)
|
||||
model, clip = comfy.sd.load_lora_for_models(model, clip, lora, model_strength, clip_strength)
|
||||
|
||||
all_trigger_words.extend(trigger_words)
|
||||
# Add clip strength to output if different from model strength (except for Nunchaku models)
|
||||
if not is_nunchaku_model and abs(model_strength - clip_strength) > 0.001:
|
||||
loaded_loras.append(f"{lora_name}: {model_strength},{clip_strength}")
|
||||
else:
|
||||
loaded_loras.append(f"{lora_name}: {model_strength}")
|
||||
|
||||
# Then process loras from kwargs with support for both old and new formats
|
||||
loras_list = get_loras_list(kwargs)
|
||||
for lora in loras_list:
|
||||
if not lora.get('active', False):
|
||||
continue
|
||||
|
||||
lora_name = lora['name']
|
||||
model_strength = float(lora['strength'])
|
||||
# Get clip strength - use model strength as default if not specified
|
||||
clip_strength = float(lora.get('clipStrength', model_strength))
|
||||
|
||||
# Get lora path and trigger words
|
||||
lora_path, trigger_words = get_lora_info_absolute(lora_name)
|
||||
|
||||
# Apply the LoRA using the appropriate loader
|
||||
if is_nunchaku_model:
|
||||
# For Nunchaku models, use our custom function
|
||||
model = nunchaku_load_lora(model, lora_path, model_strength)
|
||||
# clip remains unchanged
|
||||
else:
|
||||
# Use lower-level API to load LoRA directly without folder_paths validation
|
||||
lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
|
||||
model, clip = comfy.sd.load_lora_for_models(model, clip, lora, model_strength, clip_strength)
|
||||
|
||||
# Include clip strength in output if different from model strength and not a Nunchaku model
|
||||
if not is_nunchaku_model and abs(model_strength - clip_strength) > 0.001:
|
||||
loaded_loras.append(f"{lora_name}: {model_strength},{clip_strength}")
|
||||
else:
|
||||
loaded_loras.append(f"{lora_name}: {model_strength}")
|
||||
|
||||
# Add trigger words to collection
|
||||
all_trigger_words.extend(trigger_words)
|
||||
|
||||
# use ',, ' to separate trigger words for group mode
|
||||
model, clip, loaded_loras, all_trigger_words = _apply_entries(model, clip, lora_entries, nunchaku_model_kind)
|
||||
trigger_words_text = ",, ".join(all_trigger_words) if all_trigger_words else ""
|
||||
|
||||
# Format loaded_loras with support for both formats
|
||||
formatted_loras = []
|
||||
for item in loaded_loras:
|
||||
parts = item.split(":")
|
||||
lora_name = parts[0]
|
||||
strength_parts = parts[1].strip().split(",")
|
||||
|
||||
if len(strength_parts) > 1:
|
||||
# Different model and clip strengths
|
||||
model_str = strength_parts[0].strip()
|
||||
clip_str = strength_parts[1].strip()
|
||||
formatted_loras.append(f"<lora:{lora_name}:{model_str}:{clip_str}>")
|
||||
else:
|
||||
# Same strength for both
|
||||
model_str = strength_parts[0].strip()
|
||||
formatted_loras.append(f"<lora:{lora_name}:{model_str}>")
|
||||
|
||||
formatted_loras_text = " ".join(formatted_loras)
|
||||
|
||||
formatted_loras_text = _format_loaded_loras(loaded_loras)
|
||||
return (model, clip, trigger_words_text, formatted_loras_text)
|
||||
|
||||
|
||||
class LoraTextLoaderLM:
|
||||
NAME = "LoRA Text Loader (LoraManager)"
|
||||
CATEGORY = "Lora Manager/loaders"
|
||||
@@ -143,13 +175,13 @@ class LoraTextLoaderLM:
|
||||
"model": ("MODEL",),
|
||||
"lora_syntax": ("STRING", {
|
||||
"forceInput": True,
|
||||
"tooltip": "Format: <lora:lora_name:strength> separated by spaces or punctuation"
|
||||
"tooltip": "Format: <lora:lora_name:strength> separated by spaces or punctuation",
|
||||
}),
|
||||
},
|
||||
"optional": {
|
||||
"clip": ("CLIP",),
|
||||
"lora_stack": ("LORA_STACK",),
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("MODEL", "CLIP", "STRING", "STRING")
|
||||
@@ -158,116 +190,40 @@ class LoraTextLoaderLM:
|
||||
|
||||
def parse_lora_syntax(self, text):
|
||||
"""Parse LoRA syntax from text input."""
|
||||
# Pattern to match <lora:name:strength> or <lora:name:model_strength:clip_strength>
|
||||
pattern = r'<lora:([^:>]+):([^:>]+)(?::([^:>]+))?>'
|
||||
pattern = r"<lora:([^:>]+):([^:>]+)(?::([^:>]+))?>"
|
||||
matches = re.findall(pattern, text, re.IGNORECASE)
|
||||
|
||||
loras = []
|
||||
for match in matches:
|
||||
lora_name = match[0]
|
||||
model_strength = float(match[1])
|
||||
clip_strength = float(match[2]) if match[2] else model_strength
|
||||
|
||||
loras.append({
|
||||
'name': lora_name,
|
||||
'model_strength': model_strength,
|
||||
'clip_strength': clip_strength
|
||||
"name": match[0],
|
||||
"model_strength": model_strength,
|
||||
"clip_strength": float(match[2]) if match[2] else model_strength,
|
||||
})
|
||||
|
||||
return loras
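# An illustrative parse of the syntax above (names are hypothetical):
#
#     parse_lora_syntax("<lora:inkwash:0.8> portrait <lora:detail_tweaker:0.6:0.4>")
#     # -> [
#     #   {"name": "inkwash", "model_strength": 0.8, "clip_strength": 0.8},
#     #   {"name": "detail_tweaker", "model_strength": 0.6, "clip_strength": 0.4},
#     # ]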
|
||||
|
||||
def load_loras_from_text(self, model, lora_syntax, clip=None, lora_stack=None):
|
||||
"""Load LoRAs based on text syntax input."""
|
||||
loaded_loras = []
|
||||
all_trigger_words = []
|
||||
lora_entries = _collect_stack_entries(lora_stack)
|
||||
for lora in self.parse_lora_syntax(lora_syntax):
|
||||
lora_path, trigger_words = get_lora_info_absolute(lora["name"])
|
||||
lora_entries.append({
|
||||
"name": lora["name"],
|
||||
"absolute_path": lora_path,
|
||||
"input_path": lora_path,
|
||||
"model_strength": lora["model_strength"],
|
||||
"clip_strength": lora["clip_strength"],
|
||||
"trigger_words": trigger_words,
|
||||
})
|
||||
|
||||
# Check if model is a Nunchaku Flux model - simplified approach
|
||||
is_nunchaku_model = False
|
||||
|
||||
try:
|
||||
model_wrapper = model.model.diffusion_model
|
||||
# Check if model is a Nunchaku Flux model using only class name
|
||||
if model_wrapper.__class__.__name__ == "ComfyFluxWrapper":
|
||||
is_nunchaku_model = True
|
||||
nunchaku_model_kind = detect_nunchaku_model_kind(model)
|
||||
if nunchaku_model_kind == "flux":
|
||||
logger.info("Detected Nunchaku Flux model")
|
||||
except (AttributeError, TypeError):
|
||||
# Not a model with the expected structure
|
||||
pass
|
||||
elif nunchaku_model_kind == "qwen_image":
|
||||
logger.info("Detected Nunchaku Qwen-Image model")
|
||||
|
||||
# First process lora_stack if available
|
||||
if lora_stack:
|
||||
for lora_path, model_strength, clip_strength in lora_stack:
|
||||
# Extract lora name and convert to absolute path
# lora_stack stores relative paths, but load_torch_file needs absolute paths
lora_name = extract_lora_name(lora_path)
absolute_lora_path, trigger_words = get_lora_info_absolute(lora_name)

# Apply the LoRA using the appropriate loader
if is_nunchaku_model:
# Use our custom function for Flux models
model = nunchaku_load_lora(model, lora_path, model_strength)
# clip remains unchanged for Nunchaku models
else:
# Use lower-level API to load LoRA directly without folder_paths validation
lora = comfy.utils.load_torch_file(absolute_lora_path, safe_load=True)
model, clip = comfy.sd.load_lora_for_models(model, clip, lora, model_strength, clip_strength)

all_trigger_words.extend(trigger_words)
# Add clip strength to output if different from model strength (except for Nunchaku models)
if not is_nunchaku_model and abs(model_strength - clip_strength) > 0.001:
loaded_loras.append(f"{lora_name}: {model_strength},{clip_strength}")
else:
loaded_loras.append(f"{lora_name}: {model_strength}")

# Parse and process LoRAs from text syntax
parsed_loras = self.parse_lora_syntax(lora_syntax)
for lora in parsed_loras:
lora_name = lora['name']
model_strength = lora['model_strength']
clip_strength = lora['clip_strength']

# Get lora path and trigger words
lora_path, trigger_words = get_lora_info_absolute(lora_name)

# Apply the LoRA using the appropriate loader
if is_nunchaku_model:
# For Nunchaku models, use our custom function
model = nunchaku_load_lora(model, lora_path, model_strength)
# clip remains unchanged
else:
# Use lower-level API to load LoRA directly without folder_paths validation
lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
model, clip = comfy.sd.load_lora_for_models(model, clip, lora, model_strength, clip_strength)

# Include clip strength in output if different from model strength and not a Nunchaku model
if not is_nunchaku_model and abs(model_strength - clip_strength) > 0.001:
loaded_loras.append(f"{lora_name}: {model_strength},{clip_strength}")
else:
loaded_loras.append(f"{lora_name}: {model_strength}")

# Add trigger words to collection
all_trigger_words.extend(trigger_words)

# use ',, ' to separate trigger words for group mode
model, clip, loaded_loras, all_trigger_words = _apply_entries(model, clip, lora_entries, nunchaku_model_kind)
trigger_words_text = ",, ".join(all_trigger_words) if all_trigger_words else ""

# Format loaded_loras with support for both formats
formatted_loras = []
for item in loaded_loras:
parts = item.split(":")
lora_name = parts[0].strip()
strength_parts = parts[1].strip().split(",")

if len(strength_parts) > 1:
# Different model and clip strengths
model_str = strength_parts[0].strip()
clip_str = strength_parts[1].strip()
formatted_loras.append(f"<lora:{lora_name}:{model_str}:{clip_str}>")
else:
# Same strength for both
model_str = strength_parts[0].strip()
formatted_loras.append(f"<lora:{lora_name}:{model_str}>")

formatted_loras_text = " ".join(formatted_loras)

formatted_loras_text = _format_loaded_loras(loaded_loras)
return (model, clip, trigger_words_text, formatted_loras_text)
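A minimal sketch (invented entry values, not from the diff) of what the formatting step above produces: each recorded "name: model[,clip]" entry is rendered back into a prompt-style <lora:...> tag.

# Hedged illustration mirroring the formatting logic above.
loaded_loras = ["styleA: 0.8", "charB: 1.0,0.6"]
formatted = []
for item in loaded_loras:
    name, strengths = (part.strip() for part in item.split(":"))
    parts = [s.strip() for s in strengths.split(",")]
    formatted.append(f"<lora:{name}:{':'.join(parts)}>")
print(" ".join(formatted))  # <lora:styleA:0.8> <lora:charB:1.0:0.6>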
@@ -82,6 +82,7 @@ class LoraPoolLM:
|
||||
"folders": {"include": [], "exclude": []},
|
||||
"favoritesOnly": False,
|
||||
"license": {"noCreditRequired": False, "allowSelling": False},
|
||||
"namePatterns": {"include": [], "exclude": [], "useRegex": False},
|
||||
},
|
||||
"preview": {"matchCount": 0, "lastUpdated": 0},
|
||||
}
|
||||
|
||||
@@ -7,10 +7,8 @@ and tracks the last used combination for reuse.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import random
|
||||
import os
|
||||
from ..utils.utils import get_lora_info
|
||||
from .utils import extract_lora_name
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
26
py/nodes/lora_stack_combiner.py
Normal file
@@ -0,0 +1,26 @@
class LoraStackCombinerLM:
NAME = "Lora Stack Combiner (LoraManager)"
CATEGORY = "Lora Manager/stackers"

@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"lora_stack_a": ("LORA_STACK",),
"lora_stack_b": ("LORA_STACK",),
},
}

RETURN_TYPES = ("LORA_STACK",)
RETURN_NAMES = ("LORA_STACK",)
FUNCTION = "combine_stacks"

def combine_stacks(self, lora_stack_a, lora_stack_b):
combined_stack = []

if lora_stack_a:
combined_stack.extend(lora_stack_a)
if lora_stack_b:
combined_stack.extend(lora_stack_b)

return (combined_stack,)
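A small usage sketch (file names invented) assuming the usual ComfyUI LORA_STACK convention of (lora_name, model_strength, clip_strength) tuples:

stack_a = [("styles/watercolor.safetensors", 0.8, 0.8)]
stack_b = [("characters/alice.safetensors", 1.0, 0.7)]
(combined,) = LoraStackCombinerLM().combine_stacks(stack_a, stack_b)
assert combined == stack_a + stack_b  # entries are simply concatenated, order preserved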
570
py/nodes/nunchaku_qwen.py
Normal file
@@ -0,0 +1,570 @@
|
||||
from __future__ import annotations
|
||||
|
||||
"""Qwen-Image LoRA support for Nunchaku models.
|
||||
|
||||
Portions of the LoRA mapping/application logic in this file are adapted from
|
||||
ComfyUI-QwenImageLoraLoader by GitHub user ussoewwin:
|
||||
https://github.com/ussoewwin/ComfyUI-QwenImageLoraLoader
|
||||
|
||||
The upstream project is licensed under Apache License 2.0.
|
||||
"""
|
||||
|
||||
import copy
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
from collections import defaultdict
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Tuple, Union
|
||||
|
||||
import comfy.utils # type: ignore
|
||||
import folder_paths # type: ignore
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from safetensors import safe_open
|
||||
|
||||
from nunchaku.lora.flux.nunchaku_converter import (
|
||||
pack_lowrank_weight,
|
||||
unpack_lowrank_weight,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
KEY_MAPPING = [
|
||||
(re.compile(r"^(layers)[._](\d+)[._]attention[._]to[._]([qkv])$"), r"\1.\2.attention.to_qkv", "qkv", lambda m: m.group(3).upper()),
|
||||
(re.compile(r"^(layers)[._](\d+)[._]feed_forward[._](w1|w3)$"), r"\1.\2.feed_forward.net.0.proj", "glu", lambda m: m.group(3)),
|
||||
(re.compile(r"^(layers)[._](\d+)[._]feed_forward[._]w2$"), r"\1.\2.feed_forward.net.2", "regular", None),
|
||||
(re.compile(r"^(layers)[._](\d+)[._](.*)$"), r"\1.\2.\3", "regular", None),
|
||||
(re.compile(r"^(transformer_blocks)[._](\d+)[._]attn[._]to[._]([qkv])$"), r"\1.\2.attn.to_qkv", "qkv", lambda m: m.group(3).upper()),
|
||||
(re.compile(r"^(transformer_blocks)[._](\d+)[._]attn[._](q|k|v)[._]proj$"), r"\1.\2.attn.to_qkv", "qkv", lambda m: m.group(3).upper()),
|
||||
(re.compile(r"^(transformer_blocks)[._](\d+)[._]attn[._]add[._](q|k|v)[._]proj$"), r"\1.\2.attn.add_qkv_proj", "add_qkv", lambda m: m.group(3).upper()),
|
||||
(re.compile(r"^(transformer_blocks)[._](\d+)[._]out[._]proj[._]context$"), r"\1.\2.attn.to_add_out", "regular", None),
|
||||
(re.compile(r"^(transformer_blocks)[._](\d+)[._]out[._]proj$"), r"\1.\2.attn.to_out.0", "regular", None),
|
||||
(re.compile(r"^(transformer_blocks)[._](\d+)[._]attn[._]to[._]out$"), r"\1.\2.attn.to_out.0", "regular", None),
|
||||
(re.compile(r"^(single_transformer_blocks)[._](\d+)[._]attn[._]to[._]([qkv])$"), r"\1.\2.attn.to_qkv", "qkv", lambda m: m.group(3).upper()),
|
||||
(re.compile(r"^(single_transformer_blocks)[._](\d+)[._]attn[._]to[._]out$"), r"\1.\2.attn.to_out", "regular", None),
|
||||
(re.compile(r"^(transformer_blocks)[._](\d+)[._]ff[._]net[._]0(?:[._]proj)?$"), r"\1.\2.mlp_fc1", "regular", None),
|
||||
(re.compile(r"^(transformer_blocks)[._](\d+)[._]ff[._]net[._]2$"), r"\1.\2.mlp_fc2", "regular", None),
|
||||
(re.compile(r"^(transformer_blocks)[._](\d+)[._]ff_context[._]net[._]0(?:[._]proj)?$"), r"\1.\2.mlp_context_fc1", "regular", None),
|
||||
(re.compile(r"^(transformer_blocks)[._](\d+)[._]ff_context[._]net[._]2$"), r"\1.\2.mlp_context_fc2", "regular", None),
|
||||
(re.compile(r"^(transformer_blocks)[._](\d+)[._](img_mlp)[._](net)[._](0)[._](proj)$"), r"\1.\2.\3.\4.\5.\6", "regular", None),
|
||||
(re.compile(r"^(transformer_blocks)[._](\d+)[._](img_mlp)[._](net)[._](2)$"), r"\1.\2.\3.\4.\5", "regular", None),
|
||||
(re.compile(r"^(transformer_blocks)[._](\d+)[._](txt_mlp)[._](net)[._](0)[._](proj)$"), r"\1.\2.\3.\4.\5.\6", "regular", None),
|
||||
(re.compile(r"^(transformer_blocks)[._](\d+)[._](txt_mlp)[._](net)[._](2)$"), r"\1.\2.\3.\4.\5", "regular", None),
|
||||
(re.compile(r"^(transformer_blocks)[._](\d+)[._](img_mod)[._](1)$"), r"\1.\2.\3.\4", "regular", None),
|
||||
(re.compile(r"^(transformer_blocks)[._](\d+)[._](txt_mod)[._](1)$"), r"\1.\2.\3.\4", "regular", None),
|
||||
(re.compile(r"^(single_transformer_blocks)[._](\d+)[._]proj[._]out$"), r"\1.\2.proj_out", "single_proj_out", None),
|
||||
(re.compile(r"^(single_transformer_blocks)[._](\d+)[._]proj[._]mlp$"), r"\1.\2.mlp_fc1", "regular", None),
|
||||
(re.compile(r"^(single_transformer_blocks)[._](\d+)[._]norm[._]linear$"), r"\1.\2.norm.linear", "regular", None),
|
||||
(re.compile(r"^(transformer_blocks)[._](\d+)[._]norm1[._]linear$"), r"\1.\2.norm1.linear", "regular", None),
|
||||
(re.compile(r"^(transformer_blocks)[._](\d+)[._]norm1_context[._]linear$"), r"\1.\2.norm1_context.linear", "regular", None),
|
||||
(re.compile(r"^(img_in)$"), r"\1", "regular", None),
|
||||
(re.compile(r"^(txt_in)$"), r"\1", "regular", None),
|
||||
(re.compile(r"^(proj_out)$"), r"\1", "regular", None),
|
||||
(re.compile(r"^(norm_out)[._](linear)$"), r"\1.\2", "regular", None),
|
||||
(re.compile(r"^(time_text_embed)[._](timestep_embedder)[._](linear_1)$"), r"\1.\2.\3", "regular", None),
|
||||
(re.compile(r"^(time_text_embed)[._](timestep_embedder)[._](linear_2)$"), r"\1.\2.\3", "regular", None),
|
||||
]
|
||||
|
||||
_RE_LORA_SUFFIX = re.compile(r"\.(?P<tag>lora(?:[._](?:A|B|down|up)))(?:\.[^.]+)*\.weight$")
|
||||
_RE_ALPHA_SUFFIX = re.compile(r"\.(?:alpha|lora_alpha)(?:\.[^.]+)*$")
|
||||
|
||||
|
||||
def _rename_layer_underscore_layer_name(old_name: str) -> str:
|
||||
rules = [
|
||||
(r"_(\d+)_attn_to_out_(\d+)", r".\1.attn.to_out.\2"),
|
||||
(r"_(\d+)_img_mlp_net_(\d+)_proj", r".\1.img_mlp.net.\2.proj"),
|
||||
(r"_(\d+)_txt_mlp_net_(\d+)_proj", r".\1.txt_mlp.net.\2.proj"),
|
||||
(r"_(\d+)_img_mlp_net_(\d+)", r".\1.img_mlp.net.\2"),
|
||||
(r"_(\d+)_txt_mlp_net_(\d+)", r".\1.txt_mlp.net.\2"),
|
||||
(r"_(\d+)_img_mod_(\d+)", r".\1.img_mod.\2"),
|
||||
(r"_(\d+)_txt_mod_(\d+)", r".\1.txt_mod.\2"),
|
||||
(r"_(\d+)_attn_", r".\1.attn."),
|
||||
]
|
||||
new_name = old_name
|
||||
for pattern, replacement in rules:
|
||||
new_name = re.sub(pattern, replacement, new_name)
|
||||
return new_name
|
||||
|
||||
|
||||
def _is_indexable_module(module):
|
||||
return isinstance(module, (nn.ModuleList, nn.Sequential, list, tuple))
|
||||
|
||||
|
||||
def _get_module_by_name(model: nn.Module, name: str) -> Optional[nn.Module]:
|
||||
if not name:
|
||||
return model
|
||||
module = model
|
||||
for part in name.split("."):
|
||||
if not part:
|
||||
continue
|
||||
if hasattr(module, part):
|
||||
module = getattr(module, part)
|
||||
elif part.isdigit() and _is_indexable_module(module):
|
||||
try:
|
||||
module = module[int(part)]
|
||||
except (IndexError, TypeError):
|
||||
return None
|
||||
else:
|
||||
return None
|
||||
return module
|
||||
|
||||
|
||||
def _resolve_module_name(model: nn.Module, name: str) -> Tuple[str, Optional[nn.Module]]:
|
||||
module = _get_module_by_name(model, name)
|
||||
if module is not None:
|
||||
return name, module
|
||||
|
||||
replacements = [
|
||||
(".attn.to_out.0", ".attn.to_out"),
|
||||
(".attention.to_qkv", ".attention.qkv"),
|
||||
(".attention.to_out.0", ".attention.out"),
|
||||
(".feed_forward.net.0.proj", ".feed_forward.w13"),
|
||||
(".feed_forward.net.2", ".feed_forward.w2"),
|
||||
(".ff.net.0.proj", ".mlp_fc1"),
|
||||
(".ff.net.2", ".mlp_fc2"),
|
||||
(".ff_context.net.0.proj", ".mlp_context_fc1"),
|
||||
(".ff_context.net.2", ".mlp_context_fc2"),
|
||||
]
|
||||
for src, dst in replacements:
|
||||
if src in name:
|
||||
alt = name.replace(src, dst)
|
||||
module = _get_module_by_name(model, alt)
|
||||
if module is not None:
|
||||
return alt, module
|
||||
return name, None
|
||||
|
||||
|
||||
def _classify_and_map_key(key: str) -> Optional[Tuple[str, str, Optional[str], str]]:
|
||||
normalized = key
|
||||
if normalized.startswith("transformer."):
|
||||
normalized = normalized[len("transformer."):]
|
||||
if normalized.startswith("diffusion_model."):
|
||||
normalized = normalized[len("diffusion_model."):]
|
||||
if normalized.startswith("lora_unet_"):
|
||||
normalized = _rename_layer_underscore_layer_name(normalized[len("lora_unet_"):])
|
||||
|
||||
match = _RE_LORA_SUFFIX.search(normalized)
|
||||
if match:
|
||||
tag = match.group("tag")
|
||||
base = normalized[:match.start()]
|
||||
ab = "A" if ("lora_A" in tag or tag.endswith(".A") or "down" in tag) else "B"
|
||||
else:
|
||||
match = _RE_ALPHA_SUFFIX.search(normalized)
|
||||
if not match:
|
||||
return None
|
||||
base = normalized[:match.start()]
|
||||
ab = "alpha"
|
||||
|
||||
for pattern, template, group, comp_fn in KEY_MAPPING:
|
||||
key_match = pattern.match(base)
|
||||
if key_match:
|
||||
return group, key_match.expand(template), comp_fn(key_match) if comp_fn else None, ab
|
||||
return None
|
||||
|
||||
|
||||
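For orientation, a hedged example of how the mapping table resolves a diffusers-style key (the input key is invented but follows the patterns above):

key = "transformer.transformer_blocks.3.attn.to_q.lora_A.weight"
print(_classify_and_map_key(key))
# expected: ('qkv', 'transformer_blocks.3.attn.to_qkv', 'Q', 'A')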
def _detect_lora_format(lora_state_dict: Dict[str, torch.Tensor]) -> bool:
|
||||
standard_patterns = (
|
||||
".lora_up.",
|
||||
".lora_down.",
|
||||
".lora_A.",
|
||||
".lora_B.",
|
||||
".lora.up.",
|
||||
".lora.down.",
|
||||
".lora.A.",
|
||||
".lora.B.",
|
||||
)
|
||||
return any(pattern in key for key in lora_state_dict for pattern in standard_patterns)
|
||||
|
||||
|
||||
def _load_lora_state_dict(path_or_dict: Union[str, Path, Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
|
||||
if isinstance(path_or_dict, dict):
|
||||
return path_or_dict
|
||||
path = Path(path_or_dict)
|
||||
if path.suffix == ".safetensors":
|
||||
state_dict: Dict[str, torch.Tensor] = {}
|
||||
with safe_open(path, framework="pt", device="cpu") as handle:
|
||||
for key in handle.keys():
|
||||
state_dict[key] = handle.get_tensor(key)
|
||||
return state_dict
|
||||
return comfy.utils.load_torch_file(str(path), safe_load=True)
|
||||
|
||||
|
||||
def _fuse_glu_lora(glu_weights: Dict[str, torch.Tensor]) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
|
||||
if "w1_A" not in glu_weights or "w3_A" not in glu_weights:
|
||||
return None, None, None
|
||||
a_w1, b_w1 = glu_weights["w1_A"], glu_weights["w1_B"]
|
||||
a_w3, b_w3 = glu_weights["w3_A"], glu_weights["w3_B"]
|
||||
if a_w1.shape[1] != a_w3.shape[1]:
|
||||
return None, None, None
|
||||
a_fused = torch.cat([a_w1, a_w3], dim=0)
|
||||
out1, out3 = b_w1.shape[0], b_w3.shape[0]
|
||||
rank1, rank3 = b_w1.shape[1], b_w3.shape[1]
|
||||
b_fused = torch.zeros(out1 + out3, rank1 + rank3, dtype=b_w1.dtype, device=b_w1.device)
|
||||
b_fused[:out1, :rank1] = b_w1
|
||||
b_fused[out1:, rank1:] = b_w3
|
||||
return a_fused, b_fused, glu_weights.get("w1_alpha")
|
||||
|
||||
|
||||
def _fuse_qkv_lora(qkv_weights: Dict[str, torch.Tensor], model: Optional[nn.Module] = None, base_key: Optional[str] = None) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
|
||||
required_keys = ["Q_A", "Q_B", "K_A", "K_B", "V_A", "V_B"]
|
||||
if not all(key in qkv_weights for key in required_keys):
|
||||
return None, None, None
|
||||
a_q, a_k, a_v = qkv_weights["Q_A"], qkv_weights["K_A"], qkv_weights["V_A"]
|
||||
b_q, b_k, b_v = qkv_weights["Q_B"], qkv_weights["K_B"], qkv_weights["V_B"]
|
||||
if not (a_q.shape == a_k.shape == a_v.shape):
|
||||
return None, None, None
|
||||
if not (b_q.shape[1] == b_k.shape[1] == b_v.shape[1]):
|
||||
return None, None, None
|
||||
|
||||
out_features = None
|
||||
if model is not None and base_key is not None:
|
||||
_, module = _resolve_module_name(model, base_key)
|
||||
out_features = getattr(module, "out_features", None) if module is not None else None
|
||||
|
||||
alpha_fused = None
|
||||
alpha_q = qkv_weights.get("Q_alpha")
|
||||
alpha_k = qkv_weights.get("K_alpha")
|
||||
alpha_v = qkv_weights.get("V_alpha")
|
||||
if alpha_q is not None and alpha_k is not None and alpha_v is not None and alpha_q.item() == alpha_k.item() == alpha_v.item():
|
||||
alpha_fused = alpha_q
|
||||
|
||||
a_fused = torch.cat([a_q, a_k, a_v], dim=0)
|
||||
rank = b_q.shape[1]
|
||||
out_q, out_k, out_v = b_q.shape[0], b_k.shape[0], b_v.shape[0]
|
||||
total_out = out_features if out_features is not None else out_q + out_k + out_v
|
||||
b_fused = torch.zeros(total_out, 3 * rank, dtype=b_q.dtype, device=b_q.device)
|
||||
b_fused[:out_q, :rank] = b_q
|
||||
b_fused[out_q:out_q + out_k, rank:2 * rank] = b_k
|
||||
b_fused[out_q + out_k:out_q + out_k + out_v, 2 * rank:] = b_v
|
||||
return a_fused, b_fused, alpha_fused
|
||||
|
||||
|
||||
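A quick shape check with synthetic tensors (not from the diff) illustrating the fusion: the A factors are stacked row-wise while the B factors are placed block-diagonally, so each projection keeps its own rank.

import torch
r, d_in, d_out = 4, 16, 16
weights = {f"{c}_{ab}": torch.randn((r, d_in) if ab == "A" else (d_out, r))
           for c in "QKV" for ab in "AB"}
a_fused, b_fused, alpha = _fuse_qkv_lora(weights)
print(a_fused.shape, b_fused.shape)  # torch.Size([12, 16]) torch.Size([48, 12])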
def _handle_proj_out_split(lora_dict: Dict[str, Dict[str, torch.Tensor]], base_key: str, model: nn.Module) -> Tuple[Dict[str, Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]], List[str]]:
|
||||
result: Dict[str, Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]] = {}
|
||||
consumed: List[str] = []
|
||||
match = re.search(r"single_transformer_blocks\.(\d+)", base_key)
|
||||
if not match or base_key not in lora_dict:
|
||||
return result, consumed
|
||||
block_idx = match.group(1)
|
||||
block = _get_module_by_name(model, f"single_transformer_blocks.{block_idx}")
|
||||
if block is None:
|
||||
return result, consumed
|
||||
a_full = lora_dict[base_key].get("A")
|
||||
b_full = lora_dict[base_key].get("B")
|
||||
alpha = lora_dict[base_key].get("alpha")
|
||||
attn_to_out = getattr(getattr(block, "attn", None), "to_out", None)
|
||||
mlp_fc2 = getattr(block, "mlp_fc2", None)
|
||||
if a_full is None or b_full is None or attn_to_out is None or mlp_fc2 is None:
|
||||
return result, consumed
|
||||
attn_in = getattr(attn_to_out, "in_features", None)
|
||||
mlp_in = getattr(mlp_fc2, "in_features", None)
|
||||
if attn_in is None or mlp_in is None or a_full.shape[1] != attn_in + mlp_in:
|
||||
return result, consumed
|
||||
result[f"single_transformer_blocks.{block_idx}.attn.to_out"] = (a_full[:, :attn_in], b_full.clone(), alpha)
|
||||
result[f"single_transformer_blocks.{block_idx}.mlp_fc2"] = (a_full[:, attn_in:], b_full.clone(), alpha)
|
||||
consumed.append(base_key)
|
||||
return result, consumed
|
||||
|
||||
|
||||
def _apply_lora_to_module(module: nn.Module, a_tensor: torch.Tensor, b_tensor: torch.Tensor, module_name: str, model: nn.Module) -> None:
|
||||
if not hasattr(module, "in_features") or not hasattr(module, "out_features"):
|
||||
raise ValueError(f"{module_name}: unsupported module without in/out features")
|
||||
if a_tensor.shape[1] != module.in_features or b_tensor.shape[0] != module.out_features:
|
||||
raise ValueError(f"{module_name}: LoRA shape mismatch")
|
||||
|
||||
if module.__class__.__name__ == "AWQW4A16Linear" and hasattr(module, "qweight"):
|
||||
if not hasattr(module, "_lora_original_forward"):
|
||||
module._lora_original_forward = module.forward
|
||||
if not hasattr(module, "_nunchaku_lora_bundle"):
|
||||
module._nunchaku_lora_bundle = []
|
||||
module._nunchaku_lora_bundle.append((a_tensor, b_tensor))
|
||||
|
||||
def _awq_lora_forward(x, *args, **kwargs):
|
||||
out = module._lora_original_forward(x, *args, **kwargs)
|
||||
x_flat = x.reshape(-1, module.in_features)
|
||||
for local_a, local_b in module._nunchaku_lora_bundle:
|
||||
local_a = local_a.to(device=out.device, dtype=out.dtype)
|
||||
local_b = local_b.to(device=out.device, dtype=out.dtype)
|
||||
lora_term = (x_flat @ local_a.transpose(0, 1)) @ local_b.transpose(0, 1)
|
||||
try:
|
||||
out = out + lora_term.reshape(out.shape)
|
||||
except Exception:
|
||||
pass
|
||||
return out
|
||||
|
||||
module.forward = _awq_lora_forward
|
||||
if not hasattr(model, "_lora_slots"):
|
||||
model._lora_slots = {}
|
||||
model._lora_slots[module_name] = {"type": "awq_w4a16"}
|
||||
return
|
||||
|
||||
if hasattr(module, "proj_down") and hasattr(module, "proj_up"):
|
||||
proj_down = unpack_lowrank_weight(module.proj_down.data, down=True)
|
||||
proj_up = unpack_lowrank_weight(module.proj_up.data, down=False)
|
||||
base_rank = proj_down.shape[0] if proj_down.shape[1] == module.in_features else proj_down.shape[1]
|
||||
if proj_down.shape[1] == module.in_features:
|
||||
updated_down = torch.cat([proj_down, a_tensor], dim=0)
|
||||
axis_down = 0
|
||||
else:
|
||||
updated_down = torch.cat([proj_down, a_tensor.T], dim=1)
|
||||
axis_down = 1
|
||||
updated_up = torch.cat([proj_up, b_tensor], dim=1)
|
||||
module.proj_down.data = pack_lowrank_weight(updated_down, down=True)
|
||||
module.proj_up.data = pack_lowrank_weight(updated_up, down=False)
|
||||
module.rank = base_rank + a_tensor.shape[0]
|
||||
if not hasattr(model, "_lora_slots"):
|
||||
model._lora_slots = {}
|
||||
model._lora_slots[module_name] = {
|
||||
"type": "nunchaku",
|
||||
"base_rank": base_rank,
|
||||
"axis_down": axis_down,
|
||||
}
|
||||
return
|
||||
|
||||
if isinstance(module, nn.Linear):
|
||||
if not hasattr(model, "_lora_slots"):
|
||||
model._lora_slots = {}
|
||||
if module_name not in model._lora_slots:
|
||||
model._lora_slots[module_name] = {
|
||||
"type": "linear",
|
||||
"original_weight": module.weight.detach().cpu().clone(),
|
||||
}
|
||||
module.weight.data.add_((b_tensor @ a_tensor).to(dtype=module.weight.dtype, device=module.weight.device))
|
||||
return
|
||||
|
||||
raise ValueError(f"{module_name}: unsupported module type {type(module)}")
|
||||
|
||||
|
||||
def reset_lora_v2(model: nn.Module) -> None:
|
||||
slots = getattr(model, "_lora_slots", None)
|
||||
if not slots:
|
||||
return
|
||||
for name, info in list(slots.items()):
|
||||
module = _get_module_by_name(model, name)
|
||||
if module is None:
|
||||
continue
|
||||
module_type = info.get("type", "nunchaku")
|
||||
if module_type == "nunchaku":
|
||||
base_rank = info["base_rank"]
|
||||
proj_down = unpack_lowrank_weight(module.proj_down.data, down=True)
|
||||
proj_up = unpack_lowrank_weight(module.proj_up.data, down=False)
|
||||
if info.get("axis_down", 0) == 0:
|
||||
proj_down = proj_down[:base_rank, :].clone()
|
||||
else:
|
||||
proj_down = proj_down[:, :base_rank].clone()
|
||||
proj_up = proj_up[:, :base_rank].clone()
|
||||
module.proj_down.data = pack_lowrank_weight(proj_down, down=True)
|
||||
module.proj_up.data = pack_lowrank_weight(proj_up, down=False)
|
||||
module.rank = base_rank
|
||||
elif module_type == "linear" and "original_weight" in info:
|
||||
module.weight.data.copy_(info["original_weight"].to(device=module.weight.device, dtype=module.weight.dtype))
|
||||
elif module_type == "awq_w4a16":
|
||||
if hasattr(module, "_lora_original_forward"):
|
||||
module.forward = module._lora_original_forward
|
||||
for attr in ("_lora_original_forward", "_nunchaku_lora_bundle"):
|
||||
if hasattr(module, attr):
|
||||
delattr(module, attr)
|
||||
model._lora_slots = {}
|
||||
|
||||
|
||||
def compose_loras_v2(model: nn.Module, lora_configs: List[Tuple[Union[str, Path, Dict[str, torch.Tensor]], float]], apply_awq_mod: bool = True) -> bool:
|
||||
del apply_awq_mod # retained for interface compatibility
|
||||
reset_lora_v2(model)
|
||||
aggregated_weights: Dict[str, List[Dict[str, object]]] = defaultdict(list)
|
||||
saw_supported_format = False
|
||||
unresolved_targets = 0
|
||||
|
||||
for index, (path_or_dict, strength) in enumerate(lora_configs):
|
||||
if abs(strength) < 1e-5:
|
||||
continue
|
||||
lora_name = str(path_or_dict) if not isinstance(path_or_dict, dict) else f"lora_{index}"
|
||||
lora_state_dict = _load_lora_state_dict(path_or_dict)
|
||||
if not lora_state_dict or not _detect_lora_format(lora_state_dict):
|
||||
logger.warning("Skipping unsupported Qwen LoRA: %s", lora_name)
|
||||
continue
|
||||
saw_supported_format = True
|
||||
|
||||
grouped_weights: Dict[str, Dict[str, torch.Tensor]] = defaultdict(dict)
|
||||
for key, value in lora_state_dict.items():
|
||||
parsed = _classify_and_map_key(key)
|
||||
if parsed is None:
|
||||
continue
|
||||
group, base_key, component, ab = parsed
|
||||
if component and ab:
|
||||
grouped_weights[base_key][f"{component}_{ab}"] = value
|
||||
else:
|
||||
grouped_weights[base_key][ab] = value
|
||||
|
||||
processed_groups: Dict[str, Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]] = {}
|
||||
handled: set[str] = set()
|
||||
for base_key, weights in grouped_weights.items():
|
||||
if base_key in handled:
|
||||
continue
|
||||
a_tensor = b_tensor = alpha = None
|
||||
if "qkv" in base_key or "add_qkv_proj" in base_key:
|
||||
a_tensor, b_tensor, alpha = _fuse_qkv_lora(weights, model=model, base_key=base_key)
|
||||
elif "w1_A" in weights or "w3_A" in weights:
|
||||
a_tensor, b_tensor, alpha = _fuse_glu_lora(weights)
|
||||
elif ".proj_out" in base_key and "single_transformer_blocks" in base_key:
|
||||
split_map, consumed = _handle_proj_out_split(grouped_weights, base_key, model)
|
||||
processed_groups.update(split_map)
|
||||
handled.update(consumed)
|
||||
continue
|
||||
else:
|
||||
a_tensor, b_tensor, alpha = weights.get("A"), weights.get("B"), weights.get("alpha")
|
||||
if a_tensor is not None and b_tensor is not None:
|
||||
processed_groups[base_key] = (a_tensor, b_tensor, alpha)
|
||||
|
||||
for module_name, (a_tensor, b_tensor, alpha) in processed_groups.items():
|
||||
aggregated_weights[module_name].append({
|
||||
"A": a_tensor,
|
||||
"B": b_tensor,
|
||||
"alpha": alpha,
|
||||
"strength": strength,
|
||||
})
|
||||
|
||||
for module_name, weight_list in aggregated_weights.items():
|
||||
resolved_name, module = _resolve_module_name(model, module_name)
|
||||
if module is None:
|
||||
logger.warning("Skipping unresolved Qwen LoRA target: %s", module_name)
|
||||
unresolved_targets += 1
|
||||
continue
|
||||
all_a = []
|
||||
all_b_scaled = []
|
||||
for item in weight_list:
|
||||
a_tensor = item["A"]
|
||||
b_tensor = item["B"]
|
||||
alpha = item["alpha"]
|
||||
strength = float(item["strength"])
|
||||
rank = a_tensor.shape[0]
|
||||
scale = strength * ((alpha / rank) if alpha is not None else 1.0)
|
||||
if module.__class__.__name__ == "AWQW4A16Linear" and hasattr(module, "qweight"):
|
||||
target_dtype = torch.float16
|
||||
target_device = module.qweight.device
|
||||
elif hasattr(module, "proj_down"):
|
||||
target_dtype = module.proj_down.dtype
|
||||
target_device = module.proj_down.device
|
||||
elif hasattr(module, "weight"):
|
||||
target_dtype = module.weight.dtype
|
||||
target_device = module.weight.device
|
||||
else:
|
||||
target_dtype = torch.float16
|
||||
target_device = "cuda" if torch.cuda.is_available() else "cpu"
|
||||
all_a.append(a_tensor.to(dtype=target_dtype, device=target_device))
|
||||
all_b_scaled.append((b_tensor * scale).to(dtype=target_dtype, device=target_device))
|
||||
if not all_a:
|
||||
continue
|
||||
_apply_lora_to_module(module, torch.cat(all_a, dim=0), torch.cat(all_b_scaled, dim=1), resolved_name, model)
|
||||
|
||||
slot_count = len(getattr(model, "_lora_slots", {}) or {})
|
||||
logger.info(
|
||||
"Qwen LoRA composition finished: requested=%d supported=%s applied_targets=%d unresolved=%d",
|
||||
len(lora_configs),
|
||||
saw_supported_format,
|
||||
slot_count,
|
||||
unresolved_targets,
|
||||
)
|
||||
return saw_supported_format
|
||||
|
||||
|
||||
class ComfyQwenImageWrapperLM(nn.Module):
|
||||
def __init__(self, model: nn.Module, config=None, apply_awq_mod: bool = True):
|
||||
super().__init__()
|
||||
self.model = model
|
||||
self.config = {} if config is None else config
|
||||
self.dtype = next(model.parameters()).dtype
|
||||
self.loras: List[Tuple[Union[str, Path, Dict[str, torch.Tensor]], float]] = []
|
||||
self._applied_loras: Optional[List[Tuple[Union[str, Path, Dict[str, torch.Tensor]], float]]] = None
|
||||
self.apply_awq_mod = apply_awq_mod
|
||||
|
||||
def __getattr__(self, name):
|
||||
try:
|
||||
inner = object.__getattribute__(self, "_modules").get("model")
|
||||
except (AttributeError, KeyError):
|
||||
inner = None
|
||||
if inner is None:
|
||||
raise AttributeError(f"{type(self).__name__!s} has no attribute {name}")
|
||||
if name == "model":
|
||||
return inner
|
||||
return getattr(inner, name)
|
||||
|
||||
def process_img(self, *args, **kwargs):
|
||||
return self.model.process_img(*args, **kwargs)
|
||||
|
||||
def _ensure_composed(self):
|
||||
if self._applied_loras != self.loras or (not self.loras and getattr(self.model, "_lora_slots", None)):
|
||||
is_supported_format = compose_loras_v2(self.model, self.loras, apply_awq_mod=self.apply_awq_mod)
|
||||
self._applied_loras = self.loras.copy()
|
||||
has_slots = bool(getattr(self.model, "_lora_slots", None))
|
||||
if self.loras and is_supported_format and not has_slots:
|
||||
logger.warning("Qwen LoRA compose produced 0 target modules. Resetting and retrying once.")
|
||||
reset_lora_v2(self.model)
|
||||
compose_loras_v2(self.model, self.loras, apply_awq_mod=self.apply_awq_mod)
|
||||
has_slots = bool(getattr(self.model, "_lora_slots", None))
|
||||
logger.info("Qwen LoRA retry result: applied_targets=%d", len(getattr(self.model, "_lora_slots", {}) or {}))
|
||||
|
||||
offload_manager = getattr(self.model, "offload_manager", None)
|
||||
if offload_manager is not None:
|
||||
offload_settings = {
|
||||
"num_blocks_on_gpu": getattr(offload_manager, "num_blocks_on_gpu", 1),
|
||||
"use_pin_memory": getattr(offload_manager, "use_pin_memory", False),
|
||||
}
|
||||
logger.info(
|
||||
"Rebuilding Qwen offload manager after LoRA compose: num_blocks_on_gpu=%s use_pin_memory=%s",
|
||||
offload_settings["num_blocks_on_gpu"],
|
||||
offload_settings["use_pin_memory"],
|
||||
)
|
||||
self.model.set_offload(False)
|
||||
self.model.set_offload(True, **offload_settings)
|
||||
|
||||
def forward(self, *args, **kwargs):
|
||||
self._ensure_composed()
|
||||
return self.model(*args, **kwargs)
|
||||
|
||||
|
||||
def _get_qwen_wrapper_and_transformer(model):
|
||||
model_wrapper = model.model.diffusion_model
|
||||
if hasattr(model_wrapper, "model") and hasattr(model_wrapper, "loras"):
|
||||
transformer = model_wrapper.model
|
||||
if transformer.__class__.__name__.endswith("NunchakuQwenImageTransformer2DModel"):
|
||||
return model_wrapper, transformer
|
||||
if model_wrapper.__class__.__name__.endswith("NunchakuQwenImageTransformer2DModel"):
|
||||
wrapped_model = ComfyQwenImageWrapperLM(model_wrapper, getattr(model_wrapper, "config", {}))
|
||||
model.model.diffusion_model = wrapped_model
|
||||
return wrapped_model, wrapped_model.model
|
||||
raise TypeError(f"This LoRA loader only works with Nunchaku Qwen Image models, but got {type(model_wrapper).__name__}.")
|
||||
|
||||
|
||||
def nunchaku_load_qwen_loras(model, lora_configs: List[Tuple[str, float]], apply_awq_mod: bool = True):
|
||||
model_wrapper, transformer = _get_qwen_wrapper_and_transformer(model)
|
||||
model_wrapper.apply_awq_mod = apply_awq_mod
|
||||
|
||||
saved_config = None
|
||||
if hasattr(model, "model") and hasattr(model.model, "model_config"):
|
||||
saved_config = model.model.model_config
|
||||
model.model.model_config = None
|
||||
|
||||
model_wrapper.model = None
|
||||
try:
|
||||
ret_model = copy.deepcopy(model)
|
||||
finally:
|
||||
if saved_config is not None:
|
||||
model.model.model_config = saved_config
|
||||
model_wrapper.model = transformer
|
||||
|
||||
ret_model_wrapper = ret_model.model.diffusion_model
|
||||
if saved_config is not None:
|
||||
ret_model.model.model_config = saved_config
|
||||
ret_model_wrapper.model = transformer
|
||||
ret_model_wrapper.apply_awq_mod = apply_awq_mod
|
||||
ret_model_wrapper.loras = list(getattr(model_wrapper, "loras", []))
|
||||
|
||||
for lora_name, lora_strength in lora_configs:
|
||||
lora_path = lora_name if os.path.isfile(lora_name) else folder_paths.get_full_path("loras", lora_name)
|
||||
if not lora_path or not os.path.isfile(lora_path):
|
||||
logger.warning("Skipping Qwen LoRA '%s' because it could not be found", lora_name)
|
||||
continue
|
||||
ret_model_wrapper.loras.append((lora_path, lora_strength))
|
||||
|
||||
return ret_model
|
||||
@@ -1,15 +1,38 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
import inspect
|
||||
|
||||
from ..services.wildcard_service import (
|
||||
contains_dynamic_syntax,
|
||||
get_wildcard_service,
|
||||
is_trigger_words_input,
|
||||
)
|
||||
|
||||
class _AllContainer:
|
||||
"""Container that accepts any key for dynamic input validation."""
|
||||
|
||||
def __contains__(self, item):
|
||||
return True
|
||||
class _PromptOptionalInputs:
|
||||
"""Lookup that preserves explicit optional inputs and dynamic trigger slots."""
|
||||
|
||||
def __getitem__(self, key):
|
||||
return ("STRING", {"forceInput": True})
|
||||
def __init__(self, explicit_inputs: dict[str, tuple[str, dict[str, Any]]]) -> None:
|
||||
self._explicit_inputs = explicit_inputs
|
||||
|
||||
def __contains__(self, item: object) -> bool:
|
||||
if not isinstance(item, str):
|
||||
return False
|
||||
return item in self._explicit_inputs or is_trigger_words_input(item)
|
||||
|
||||
def __getitem__(self, key: str) -> tuple[str, dict[str, Any]]:
|
||||
if key in self._explicit_inputs:
|
||||
return self._explicit_inputs[key]
|
||||
if is_trigger_words_input(key):
|
||||
return (
|
||||
"STRING",
|
||||
{
|
||||
"forceInput": True,
|
||||
"tooltip": "Trigger words to prepend. Connect to add more inputs.",
|
||||
},
|
||||
)
|
||||
raise KeyError(key)
|
||||
|
||||
|
||||
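A hedged sketch of how this lookup behaves during input validation, assuming is_trigger_words_input accepts names like "trigger_words2":

opts = _PromptOptionalInputs({"seed": ("INT", {"forceInput": True})})
"trigger_words2" in opts   # True: dynamic slots validate and resolve to a STRING input
"unknown_socket" in opts   # False: anything else is rejected instead of silently accepted
opts["seed"]               # ("INT", {"forceInput": True}) from the explicit inputs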
class PromptLM:
|
||||
@@ -20,12 +43,19 @@ class PromptLM:
|
||||
DESCRIPTION = (
|
||||
"Encodes a text prompt using a CLIP model into an embedding that can be used "
|
||||
"to guide the diffusion model towards generating specific images. "
|
||||
"Supports dynamic trigger words inputs."
|
||||
"Supports dynamic trigger words inputs and runtime wildcard expansion."
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
dyn_inputs = {
|
||||
optional_inputs: dict[str, tuple[str, dict[str, Any]]] = {
|
||||
"seed": (
|
||||
"INT",
|
||||
{
|
||||
"forceInput": True,
|
||||
"tooltip": "Optional seed for wildcard generation. Leave unconnected for non-deterministic wildcard expansion.",
|
||||
},
|
||||
),
|
||||
"trigger_words1": (
|
||||
"STRING",
|
||||
{
|
||||
@@ -35,10 +65,9 @@ class PromptLM:
|
||||
),
|
||||
}
|
||||
|
||||
# Bypass validation for dynamic inputs during graph execution
|
||||
stack = inspect.stack()
|
||||
if len(stack) > 2 and stack[2].function == "get_input_info":
|
||||
dyn_inputs = _AllContainer()
|
||||
optional_inputs = _PromptOptionalInputs(optional_inputs) # type: ignore[assignment]
|
||||
|
||||
return {
|
||||
"required": {
|
||||
@@ -46,8 +75,8 @@ class PromptLM:
|
||||
"AUTOCOMPLETE_TEXT_PROMPT,STRING",
|
||||
{
|
||||
"widgetType": "AUTOCOMPLETE_TEXT_PROMPT",
|
||||
"placeholder": "Enter prompt... /char, /artist for quick tag search",
|
||||
"tooltip": "The text to be encoded.",
|
||||
"placeholder": "Enter prompt... /character, /artist, /wildcard for quick search",
|
||||
"tooltip": "The text to be encoded. Wildcard references inserted with /wildcard are expanded at runtime.",
|
||||
},
|
||||
),
|
||||
"clip": (
|
||||
@@ -55,7 +84,7 @@ class PromptLM:
|
||||
{"tooltip": "The CLIP model used for encoding the text."},
|
||||
),
|
||||
},
|
||||
"optional": dyn_inputs,
|
||||
"optional": optional_inputs,
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("CONDITIONING", "STRING")
|
||||
@@ -65,18 +94,37 @@ class PromptLM:
|
||||
)
|
||||
FUNCTION = "encode"
|
||||
|
||||
def encode(self, text: str, clip: Any, **kwargs):
|
||||
# Collect all trigger words from dynamic inputs
|
||||
@classmethod
|
||||
def IS_CHANGED(
|
||||
cls,
|
||||
text: str,
|
||||
clip: Any | None = None,
|
||||
seed: int | None = None,
|
||||
**kwargs: Any,
|
||||
):
|
||||
del clip, kwargs
|
||||
if contains_dynamic_syntax(text) and seed is None:
|
||||
return float("NaN")
|
||||
return False
|
||||
|
||||
def encode(
|
||||
self,
|
||||
text: str,
|
||||
clip: Any,
|
||||
seed: int | None = None,
|
||||
**kwargs: Any,
|
||||
):
|
||||
expanded_text = get_wildcard_service().expand_text(text, seed=seed)
|
||||
|
||||
trigger_words = []
|
||||
for key, value in kwargs.items():
|
||||
if key.startswith("trigger_words") and value:
|
||||
if is_trigger_words_input(key) and value:
|
||||
trigger_words.append(value)
|
||||
|
||||
# Build final prompt
|
||||
if trigger_words:
|
||||
prompt = ", ".join(trigger_words + [text])
|
||||
prompt = ", ".join(trigger_words + [expanded_text])
|
||||
else:
|
||||
prompt = text
|
||||
prompt = expanded_text
|
||||
|
||||
from nodes import CLIPTextEncode # type: ignore
|
||||
|
||||
|
||||
@@ -72,6 +72,13 @@ class SaveImageLM:
|
||||
"tooltip": "Embeds the complete workflow data into the image metadata. Only works with PNG and WebP formats.",
|
||||
},
|
||||
),
|
||||
"save_with_metadata": (
|
||||
"BOOLEAN",
|
||||
{
|
||||
"default": True,
|
||||
"tooltip": "When enabled, embeds generation parameters into the saved image metadata. Disable to skip writing generation metadata.",
|
||||
},
|
||||
),
|
||||
"add_counter_to_filename": (
|
||||
"BOOLEAN",
|
||||
{
|
||||
@@ -350,6 +357,7 @@ class SaveImageLM:
|
||||
lossless_webp=True,
|
||||
quality=100,
|
||||
embed_workflow=False,
|
||||
save_with_metadata=True,
|
||||
add_counter_to_filename=True,
|
||||
):
|
||||
"""Save images with metadata"""
|
||||
@@ -421,7 +429,7 @@ class SaveImageLM:
|
||||
try:
|
||||
if file_format == "png":
|
||||
assert pnginfo is not None
|
||||
if metadata:
|
||||
if save_with_metadata and metadata:
|
||||
pnginfo.add_text("parameters", metadata)
|
||||
if embed_workflow and extra_pnginfo is not None:
|
||||
workflow_json = json.dumps(extra_pnginfo["workflow"])
|
||||
@@ -430,7 +438,7 @@ class SaveImageLM:
|
||||
img.save(file_path, format="PNG", **save_kwargs)
|
||||
elif file_format == "jpeg":
|
||||
# For JPEG, use piexif
|
||||
if metadata:
|
||||
if save_with_metadata and metadata:
|
||||
try:
|
||||
exif_dict = {
|
||||
"Exif": {
|
||||
@@ -448,7 +456,7 @@ class SaveImageLM:
|
||||
# For WebP, use piexif for metadata
|
||||
exif_dict = {}
|
||||
|
||||
if metadata:
|
||||
if save_with_metadata and metadata:
|
||||
exif_dict["Exif"] = {
|
||||
piexif.ExifIFD.UserComment: b"UNICODE\0"
|
||||
+ metadata.encode("utf-16be")
|
||||
@@ -489,6 +497,7 @@ class SaveImageLM:
|
||||
lossless_webp=True,
|
||||
quality=100,
|
||||
embed_workflow=False,
|
||||
save_with_metadata=True,
|
||||
add_counter_to_filename=True,
|
||||
):
|
||||
"""Process and save image with metadata"""
|
||||
@@ -516,7 +525,11 @@ class SaveImageLM:
|
||||
lossless_webp,
|
||||
quality,
|
||||
embed_workflow,
|
||||
save_with_metadata,
|
||||
add_counter_to_filename,
|
||||
)
|
||||
|
||||
return (images,)
|
||||
return {
|
||||
"result": (images,),
|
||||
"ui": {"images": results},
|
||||
}
|
||||
|
||||
@@ -1,10 +1,15 @@
from __future__ import annotations

from ..services.wildcard_service import contains_dynamic_syntax, get_wildcard_service


class TextLM:
"""A simple text node with autocomplete support."""

NAME = "Text (LoraManager)"
CATEGORY = "Lora Manager/utils"
DESCRIPTION = (
"A simple text input node with autocomplete support for tags and styles."
"A simple text input node with autocomplete support for tags, styles, and wildcard expansion."
)

@classmethod
@@ -15,8 +20,17 @@ class TextLM:
"AUTOCOMPLETE_TEXT_PROMPT,STRING",
{
"widgetType": "AUTOCOMPLETE_TEXT_PROMPT",
"placeholder": "Enter text... /char, /artist for quick tag search",
"tooltip": "The text output.",
"placeholder": "Enter text... /character, /artist, /wildcard for quick search",
"tooltip": "The text output. Wildcard references inserted with /wildcard are expanded at runtime.",
},
),
},
"optional": {
"seed": (
"INT",
{
"forceInput": True,
"tooltip": "Optional seed for wildcard generation. Leave unconnected for non-deterministic wildcard expansion.",
},
),
},
@@ -24,10 +38,14 @@ class TextLM:

RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("STRING",)
OUTPUT_TOOLTIPS = (
"The text output.",
)
OUTPUT_TOOLTIPS = ("The text output.",)
FUNCTION = "process"

def process(self, text: str):
return (text,)
@classmethod
def IS_CHANGED(cls, text: str, seed: int | None = None):
if contains_dynamic_syntax(text) and seed is None:
return float("NaN")
return False

def process(self, text: str, seed: int | None = None):
return (get_wildcard_service().expand_text(text, seed=seed),)
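A hedged illustration of the caching contract (assuming __color__ is recognized by contains_dynamic_syntax): returning NaN marks the node as always changed, so ComfyUI re-runs it whenever wildcards are present and no seed is pinned.

import math
math.isnan(TextLM.IS_CHANGED("a __color__ car"))   # True: re-expand on every run
TextLM.IS_CHANGED("a __color__ car", seed=123)     # False: pinned seed, output is cacheable
TextLM.IS_CHANGED("a plain prompt")                # False: nothing dynamic to expand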
@@ -1,8 +1,7 @@
|
||||
import logging
|
||||
import os
|
||||
from typing import List, Tuple
|
||||
import torch
|
||||
import comfy.sd
|
||||
import comfy.sd # type: ignore
|
||||
from ..utils.utils import get_checkpoint_info_absolute, _format_model_name_for_comfyui
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -16,7 +15,7 @@ class UNETLoaderLM:
|
||||
Supports both regular diffusion models and GGUF format models.
|
||||
"""
|
||||
|
||||
NAME = "UNETLoaderLM"
|
||||
NAME = "Unet Loader (LoraManager)"
|
||||
CATEGORY = "Lora Manager/loaders"
|
||||
|
||||
@classmethod
|
||||
@@ -61,7 +60,7 @@ class UNETLoaderLM:
|
||||
if item.get("sub_type") == "diffusion_model":
|
||||
file_path = item.get("file_path", "")
|
||||
if file_path:
|
||||
# Format as ComfyUI-style: "folder/model_name.ext"
|
||||
# Format using relative path with OS-native separator
|
||||
formatted_name = _format_model_name_for_comfyui(
|
||||
file_path, model_roots
|
||||
)
|
||||
@@ -95,12 +94,14 @@ class UNETLoaderLM:
|
||||
"""Load a diffusion model by name, supporting extra folder paths
|
||||
|
||||
Args:
|
||||
unet_name: The name of the diffusion model to load (format: "folder/model_name.ext")
|
||||
unet_name: The name of the diffusion model to load (relative path with extension)
|
||||
weight_dtype: The dtype to use for model weights
|
||||
|
||||
Returns:
|
||||
Tuple of (MODEL,)
|
||||
"""
|
||||
import torch
|
||||
|
||||
# Get absolute path from cache using ComfyUI-style name
|
||||
unet_path, metadata = get_checkpoint_info_absolute(unet_name)
|
||||
|
||||
@@ -143,18 +144,17 @@ class UNETLoaderLM:
|
||||
Returns:
|
||||
Tuple of (MODEL,)
|
||||
"""
|
||||
import torch
|
||||
from .gguf_import_helper import get_gguf_modules
|
||||
|
||||
# Get ComfyUI-GGUF modules using helper (handles various import scenarios)
|
||||
try:
|
||||
# Try to import ComfyUI-GGUF modules
|
||||
from custom_nodes.ComfyUI_GGUF.loader import gguf_sd_loader
|
||||
from custom_nodes.ComfyUI_GGUF.ops import GGMLOps
|
||||
from custom_nodes.ComfyUI_GGUF.nodes import GGUFModelPatcher
|
||||
except ImportError:
|
||||
raise RuntimeError(
|
||||
f"Cannot load GGUF model '{unet_name}'. "
|
||||
"ComfyUI-GGUF is not installed. "
|
||||
"Please install ComfyUI-GGUF from https://github.com/city96/ComfyUI-GGUF "
|
||||
"to load GGUF format models."
|
||||
)
|
||||
loader_module, ops_module, nodes_module = get_gguf_modules()
|
||||
gguf_sd_loader = getattr(loader_module, "gguf_sd_loader")
|
||||
GGMLOps = getattr(ops_module, "GGMLOps")
|
||||
GGUFModelPatcher = getattr(nodes_module, "GGUFModelPatcher")
|
||||
except RuntimeError as e:
|
||||
raise RuntimeError(f"Cannot load GGUF model '{unet_name}'. {str(e)}")
|
||||
|
||||
logger.info(f"Loading GGUF diffusion model from: {unet_path}")
|
||||
|
||||
|
||||
@@ -158,3 +158,24 @@ def nunchaku_load_lora(model, lora_name, lora_strength):
ret_model.model.model_config.unet_config["in_channels"] = new_in_channels

return ret_model


def detect_nunchaku_model_kind(model):
"""Return the supported Nunchaku model kind for a Comfy model, if any."""
try:
model_wrapper = model.model.diffusion_model
except (AttributeError, TypeError):
return None

wrapper_name = model_wrapper.__class__.__name__
if wrapper_name == "ComfyFluxWrapper":
return "flux"

inner_model = getattr(model_wrapper, "model", None)
inner_name = inner_model.__class__.__name__ if inner_model is not None else ""
if wrapper_name.endswith("NunchakuQwenImageTransformer2DModel"):
return "qwen_image"
if inner_name.endswith("NunchakuQwenImageTransformer2DModel"):
return "qwen_image"

return None
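A hedged dispatch sketch (variable names invented) showing how the detected kind is meant to route LoRA loading, mirroring the branches used elsewhere in this changeset:

kind = detect_nunchaku_model_kind(model)
if kind == "flux":
    model = nunchaku_load_lora(model, lora_path, strength)
elif kind == "qwen_image":
    model = nunchaku_load_qwen_loras(model, [(lora_path, strength)])
else:
    # non-Nunchaku models fall back to the standard ComfyUI loader
    lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
    model, clip = comfy.sd.load_lora_for_models(model, clip, lora, strength, strength)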
@@ -13,4 +13,5 @@ GEN_PARAM_KEYS = [
'seed',
'size',
'clip_skip',
'denoising_strength',
]

@@ -7,6 +7,7 @@ from .parsers import (
MetaFormatParser,
AutomaticMetadataParser,
CivitaiApiMetadataParser,
SuiImageParamsParser,
)
from .base import RecipeMetadataParser

@@ -55,6 +56,13 @@ class RecipeParserFactory:
# If JSON parsing fails, move on to other parsers
pass

# Try SuiImageParamsParser for SuiImage metadata format
try:
if SuiImageParamsParser().is_metadata_matching(metadata_str):
return SuiImageParamsParser()
except Exception:
pass

# Check other parsers that expect string input
if RecipeFormatParser().is_metadata_matching(metadata_str):
return RecipeFormatParser()

@@ -1,27 +1,33 @@
|
||||
from typing import Any, Dict, Optional
|
||||
import logging
|
||||
|
||||
from .constants import GEN_PARAM_KEYS
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class GenParamsMerger:
|
||||
"""Utility to merge generation parameters from multiple sources with priority."""
|
||||
|
||||
ALLOWED_KEYS = set(GEN_PARAM_KEYS)
|
||||
|
||||
BLACKLISTED_KEYS = {
|
||||
"id", "url", "userId", "username", "createdAt", "updatedAt", "hash", "meta",
|
||||
"draft", "extra", "width", "height", "process", "quantity", "workflow",
|
||||
"baseModel", "resources", "disablePoi", "aspectRatio", "Created Date",
|
||||
"experimental", "civitaiResources", "civitai_resources", "Civitai resources",
|
||||
"modelVersionId", "modelId", "hashes", "Model", "Model hash", "checkpoint_hash",
|
||||
"checkpoint", "checksum", "model_checksum"
|
||||
"checkpoint", "checksum", "model_checksum", "raw_metadata",
|
||||
}
|
||||
|
||||
NORMALIZATION_MAPPING = {
|
||||
# Civitai specific
|
||||
"cfg": "cfg_scale",
|
||||
"cfgScale": "cfg_scale",
|
||||
"clipSkip": "clip_skip",
|
||||
"negativePrompt": "negative_prompt",
|
||||
# Case variations
|
||||
"Sampler": "sampler",
|
||||
"sampler_name": "sampler",
|
||||
"scheduler": "sampler",
|
||||
"Steps": "steps",
|
||||
"Seed": "seed",
|
||||
"Size": "size",
|
||||
@@ -36,63 +42,40 @@ class GenParamsMerger:
|
||||
def merge(
|
||||
request_params: Optional[Dict[str, Any]] = None,
|
||||
civitai_meta: Optional[Dict[str, Any]] = None,
|
||||
embedded_metadata: Optional[Dict[str, Any]] = None
|
||||
embedded_metadata: Optional[Dict[str, Any]] = None,
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Merge generation parameters from three sources.
|
||||
|
||||
Priority: request_params > civitai_meta > embedded_metadata
|
||||
|
||||
Args:
|
||||
request_params: Params provided directly in the import request
|
||||
civitai_meta: Params from Civitai Image API 'meta' field
|
||||
embedded_metadata: Params extracted from image EXIF/embedded metadata
|
||||
|
||||
Returns:
|
||||
Merged parameters dictionary
|
||||
"""
|
||||
result = {}
|
||||
result: Dict[str, Any] = {}
|
||||
|
||||
# 1. Start with embedded metadata (lowest priority)
|
||||
if embedded_metadata:
|
||||
# If it's a full recipe metadata, we use its gen_params
|
||||
if "gen_params" in embedded_metadata and isinstance(embedded_metadata["gen_params"], dict):
|
||||
if "gen_params" in embedded_metadata and isinstance(
|
||||
embedded_metadata["gen_params"], dict
|
||||
):
|
||||
GenParamsMerger._update_normalized(result, embedded_metadata["gen_params"])
|
||||
else:
|
||||
# Otherwise assume the dict itself contains gen_params
|
||||
GenParamsMerger._update_normalized(result, embedded_metadata)
|
||||
|
||||
# 2. Layer Civitai meta (medium priority)
|
||||
if civitai_meta:
|
||||
GenParamsMerger._update_normalized(result, civitai_meta)
|
||||
|
||||
# 3. Layer request params (highest priority)
|
||||
if request_params:
|
||||
GenParamsMerger._update_normalized(result, request_params)
|
||||
|
||||
# Filter out blacklisted keys and also the original camelCase keys if they were normalized
|
||||
final_result = {}
|
||||
for k, v in result.items():
|
||||
if k in GenParamsMerger.BLACKLISTED_KEYS:
|
||||
continue
|
||||
if k in GenParamsMerger.NORMALIZATION_MAPPING:
|
||||
continue
|
||||
final_result[k] = v
|
||||
|
||||
return final_result
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def _update_normalized(target: Dict[str, Any], source: Dict[str, Any]) -> None:
|
||||
"""Update target dict with normalized keys from source."""
|
||||
for k, v in source.items():
|
||||
normalized_key = GenParamsMerger.NORMALIZATION_MAPPING.get(k, k)
|
||||
target[normalized_key] = v
|
||||
# Also keep the original key for now if it's not the same,
|
||||
# so we can filter at the end or avoid losing it if it wasn't supposed to be renamed?
|
||||
# Actually, if we rename it, we should probably NOT keep both in 'target'
|
||||
# because we want to filter them out at the end anyway.
|
||||
if normalized_key != k:
|
||||
# If we are overwriting an existing snake_case key with a camelCase one's value,
|
||||
# that's fine because of the priority order of calls to _update_normalized.
|
||||
pass
|
||||
target[k] = v
|
||||
"""Update target dict with normalized, persistence-safe keys from source."""
|
||||
for key, value in source.items():
|
||||
if key in GenParamsMerger.BLACKLISTED_KEYS:
|
||||
continue
|
||||
|
||||
normalized_key = GenParamsMerger.NORMALIZATION_MAPPING.get(key, key)
|
||||
if normalized_key not in GenParamsMerger.ALLOWED_KEYS:
|
||||
continue
|
||||
|
||||
target[normalized_key] = value
|
||||
|
||||
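A hedged example of the priority and filtering (assuming merge is exposed as a staticmethod, as its signature suggests, and that prompt, steps, sampler and cfg_scale are among GEN_PARAM_KEYS):

merged = GenParamsMerger.merge(
    request_params={"steps": 30},
    civitai_meta={"cfgScale": 7, "steps": 25, "hash": "abc123"},  # "hash" is blacklisted
    embedded_metadata={"gen_params": {"prompt": "a castle", "sampler_name": "euler"}},
)
# expected: {"prompt": "a castle", "sampler": "euler", "cfg_scale": 7, "steps": 30}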
@@ -5,6 +5,7 @@ from .comfy import ComfyMetadataParser
from .meta_format import MetaFormatParser
from .automatic import AutomaticMetadataParser
from .civitai_image import CivitaiApiMetadataParser
from .sui_image_params import SuiImageParamsParser

__all__ = [
'RecipeFormatParser',
@@ -12,4 +13,5 @@ __all__ = [
'MetaFormatParser',
'AutomaticMetadataParser',
'CivitaiApiMetadataParser',
'SuiImageParamsParser',
]

@@ -42,6 +42,7 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
|
||||
"height",
|
||||
"Model",
|
||||
"Model hash",
|
||||
"modelVersionIds",
|
||||
)
|
||||
return any(key in payload for key in civitai_image_fields)
|
||||
|
||||
@@ -429,6 +430,65 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
|
||||
|
||||
result["loras"].append(lora_entry)
|
||||
|
||||
# Process modelVersionIds from Civitai image API
|
||||
# These are model version IDs returned at root level when meta doesn't contain resources
|
||||
if "modelVersionIds" in metadata and isinstance(
|
||||
metadata["modelVersionIds"], list
|
||||
):
|
||||
for version_id in metadata["modelVersionIds"]:
|
||||
version_id_str = str(version_id)
|
||||
|
||||
# Skip if we've already added this LoRA by version ID
|
||||
if version_id_str in added_loras:
|
||||
continue
|
||||
|
||||
# Initialize lora entry with version ID
|
||||
lora_entry = {
|
||||
"id": version_id,
|
||||
"modelId": 0,
|
||||
"name": "Unknown LoRA",
|
||||
"version": "",
|
||||
"type": "lora",
|
||||
"weight": 1.0,
|
||||
"existsLocally": False,
|
||||
"thumbnailUrl": "/loras_static/images/no-preview.png",
|
||||
"baseModel": "",
|
||||
"size": 0,
|
||||
"downloadUrl": "",
|
||||
"isDeleted": False,
|
||||
}
|
||||
|
||||
# Fetch model info from Civitai
|
||||
if metadata_provider and version_id_str:
|
||||
try:
|
||||
civitai_info = (
|
||||
await metadata_provider.get_model_version_info(
|
||||
version_id_str
|
||||
)
|
||||
)
|
||||
|
||||
populated_entry = await self.populate_lora_from_civitai(
|
||||
lora_entry,
|
||||
civitai_info,
|
||||
recipe_scanner,
|
||||
base_model_counts,
|
||||
)
|
||||
|
||||
if populated_entry is None:
|
||||
continue # Skip invalid LoRA types
|
||||
|
||||
lora_entry = populated_entry
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error fetching Civitai info for model version {version_id}: {e}"
|
||||
)
|
||||
|
||||
# Track this LoRA for deduplication
|
||||
if version_id_str:
|
||||
added_loras[version_id_str] = len(result["loras"])
|
||||
|
||||
result["loras"].append(lora_entry)
|
||||
|
||||
# If we found LoRA hashes in the metadata but haven't already
|
||||
# populated entries for them, fall back to creating LoRAs from
|
||||
# the hashes section. Some Civitai image responses only include
|
||||
|
||||
188
py/recipes/parsers/sui_image_params.py
Normal file
@@ -0,0 +1,188 @@
|
||||
"""Parser for SuiImage (Stable Diffusion WebUI) metadata format."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Dict, Any, Optional, List
|
||||
from ..base import RecipeMetadataParser
|
||||
from ...services.metadata_service import get_default_metadata_provider
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SuiImageParamsParser(RecipeMetadataParser):
|
||||
"""Parser for SuiImage metadata JSON format.
|
||||
|
||||
This format is used by some Stable Diffusion WebUI variants.
|
||||
Structure:
|
||||
{
|
||||
"sui_image_params": {
|
||||
"prompt": "...",
|
||||
"negativeprompt": "...",
|
||||
"model": "...",
|
||||
"seed": ...,
|
||||
"steps": ...,
|
||||
...
|
||||
},
|
||||
"sui_models": [
|
||||
{"name": "...", "param": "model", "hash": "..."},
|
||||
...
|
||||
],
|
||||
"sui_extra_data": {...}
|
||||
}
|
||||
"""
|
||||
|
||||
def is_metadata_matching(self, user_comment: str) -> bool:
|
||||
"""Check if the user comment matches the SuiImage metadata format"""
|
||||
try:
|
||||
data = json.loads(user_comment)
|
||||
return isinstance(data, dict) and 'sui_image_params' in data
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
return False
|
||||
|
||||
async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
|
||||
"""Parse metadata from SuiImage metadata format"""
|
||||
try:
|
||||
metadata_provider = await get_default_metadata_provider()
|
||||
|
||||
data = json.loads(user_comment)
|
||||
params = data.get('sui_image_params', {})
|
||||
models = data.get('sui_models', [])
|
||||
|
||||
# Extract prompt and negative prompt
|
||||
prompt = params.get('prompt', '')
|
||||
negative_prompt = params.get('negativeprompt', '') or params.get('negative_prompt', '')
|
||||
|
||||
# Extract generation parameters
|
||||
gen_params = {}
|
||||
if prompt:
|
||||
gen_params['prompt'] = prompt
|
||||
if negative_prompt:
|
||||
gen_params['negative_prompt'] = negative_prompt
|
||||
|
||||
# Map standard parameters
|
||||
param_mapping = {
|
||||
'steps': 'steps',
|
||||
'seed': 'seed',
|
||||
'cfgscale': 'cfg_scale',
|
||||
'cfg_scale': 'cfg_scale',
|
||||
'width': 'width',
|
||||
'height': 'height',
|
||||
'sampler': 'sampler',
|
||||
'scheduler': 'scheduler',
|
||||
'model': 'model',
|
||||
'vae': 'vae',
|
||||
}
|
||||
|
||||
for src_key, dest_key in param_mapping.items():
|
||||
if src_key in params and params[src_key] is not None:
|
||||
gen_params[dest_key] = params[src_key]
|
||||
|
||||
# Add size info if available
|
||||
if 'width' in gen_params and 'height' in gen_params:
|
||||
gen_params['size'] = f"{gen_params['width']}x{gen_params['height']}"
|
||||
|
||||
# Process models - extract checkpoint and loras
|
||||
loras: List[Dict[str, Any]] = []
|
||||
checkpoint: Optional[Dict[str, Any]] = None
|
||||
|
||||
for model in models:
|
||||
model_name = model.get('name', '')
|
||||
param_type = model.get('param', '')
|
||||
model_hash = model.get('hash', '')
|
||||
|
||||
# Remove .safetensors extension for cleaner name
|
||||
clean_name = model_name.replace('.safetensors', '') if model_name else ''
|
||||
|
||||
# Check if this is a LoRA by looking at the name or param type
|
||||
is_lora = 'lora' in model_name.lower() or param_type.lower().startswith('lora')
|
||||
|
||||
if is_lora:
|
||||
lora_entry = {
|
||||
'id': 0,
|
||||
'modelId': 0,
|
||||
'name': clean_name,
|
||||
'version': '',
|
||||
'type': 'lora',
|
||||
'weight': 1.0,
|
||||
'existsLocally': False,
|
||||
'localPath': None,
|
||||
'file_name': model_name,
|
||||
'hash': model_hash.replace('0x', '') if model_hash.startswith('0x') else model_hash,
|
||||
'thumbnailUrl': '/loras_static/images/no-preview.png',
|
||||
'baseModel': '',
|
||||
'size': 0,
|
||||
'downloadUrl': '',
|
||||
'isDeleted': False
|
||||
}
|
||||
|
||||
# Try to get additional info from metadata provider
|
||||
if metadata_provider and model_hash:
|
||||
try:
|
||||
civitai_info = await metadata_provider.get_model_by_hash(
|
||||
model_hash.replace('0x', '') if model_hash.startswith('0x') else model_hash
|
||||
)
|
||||
if civitai_info:
|
||||
lora_entry = await self.populate_lora_from_civitai(
|
||||
lora_entry, civitai_info, recipe_scanner
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug(f"Error fetching info for LoRA {clean_name}: {e}")
|
||||
|
||||
if lora_entry:
|
||||
loras.append(lora_entry)
|
||||
elif param_type == 'model' or 'lora' not in model_name.lower():
|
||||
# This is likely a checkpoint
|
||||
checkpoint_entry = {
|
||||
'id': 0,
|
||||
'modelId': 0,
|
||||
'name': clean_name,
|
||||
'version': '',
|
||||
'type': 'checkpoint',
|
||||
'hash': model_hash.replace('0x', '') if model_hash.startswith('0x') else model_hash,
|
||||
'existsLocally': False,
|
||||
'localPath': None,
|
||||
'file_name': model_name,
|
||||
'thumbnailUrl': '/loras_static/images/no-preview.png',
|
||||
'baseModel': '',
|
||||
'size': 0,
|
||||
'downloadUrl': '',
|
||||
'isDeleted': False
|
||||
}
|
||||
|
||||
# Try to get additional info from metadata provider
|
||||
if metadata_provider and model_hash:
|
||||
try:
|
||||
civitai_info = await metadata_provider.get_model_by_hash(
|
||||
model_hash.replace('0x', '') if model_hash.startswith('0x') else model_hash
|
||||
)
|
||||
if civitai_info:
|
||||
checkpoint_entry = await self.populate_checkpoint_from_civitai(
|
||||
checkpoint_entry, civitai_info
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug(f"Error fetching info for checkpoint {clean_name}: {e}")
|
||||
|
||||
checkpoint = checkpoint_entry
|
||||
|
||||
# Determine base model from loras or checkpoint
|
||||
base_model = None
|
||||
if loras:
|
||||
base_models = [lora.get('baseModel') for lora in loras if lora.get('baseModel')]
|
||||
if base_models:
|
||||
from collections import Counter
|
||||
base_model_counts = Counter(base_models)
|
||||
base_model = base_model_counts.most_common(1)[0][0]
|
||||
elif checkpoint and checkpoint.get('baseModel'):
|
||||
base_model = checkpoint['baseModel']
|
||||
|
||||
return {
|
||||
'base_model': base_model,
|
||||
'loras': loras,
|
||||
'checkpoint': checkpoint,
|
||||
'gen_params': gen_params,
|
||||
'from_sui_image_params': True
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error parsing SuiImage metadata: {e}", exc_info=True)
|
||||
return {"error": str(e), "loras": []}
|
||||
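A hedged detection check with a fabricated payload in the structure documented by the parser's docstring above:

import json
sample = json.dumps({
    "sui_image_params": {"prompt": "a cat", "seed": 42, "steps": 20, "cfgscale": 7,
                         "width": 512, "height": 512, "model": "sdxl_base.safetensors"},
    "sui_models": [{"name": "sdxl_base.safetensors", "param": "model", "hash": "0xdeadbeef"}],
})
assert SuiImageParamsParser().is_metadata_matching(sample)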
141
py/routes/handlers/base_model_handlers.py
Normal file
@@ -0,0 +1,141 @@
"""Handlers for base model related endpoints."""

from __future__ import annotations

import logging
from typing import Any, Awaitable, Callable, Dict

from aiohttp import web

from ...services.civitai_base_model_service import get_civitai_base_model_service

logger = logging.getLogger(__name__)


class BaseModelHandlerSet:
    """Collection of handlers for base model operations."""

    def __init__(
        self,
        base_model_service_factory: Callable[[], Any] = get_civitai_base_model_service,
    ) -> None:
        self._base_model_service_factory = base_model_service_factory

    def to_route_mapping(
        self,
    ) -> Dict[str, Callable[[web.Request], Awaitable[web.StreamResponse]]]:
        """Return mapping of route names to handler methods."""
        return {
            "get_base_models": self.get_base_models,
            "refresh_base_models": self.refresh_base_models,
            "get_base_model_categories": self.get_base_model_categories,
            "get_base_model_cache_status": self.get_base_model_cache_status,
        }

    async def get_base_models(self, request: web.Request) -> web.Response:
        """Get merged base models (hardcoded + remote from Civitai).

        Query Parameters:
            refresh: If 'true', force refresh from API

        Returns:
            JSON response with:
            - models: List of base model names
            - source: 'cache', 'api', or 'fallback'
            - last_updated: ISO timestamp
            - counts: hardcoded_count, remote_count, merged_count
        """
        try:
            service = await self._base_model_service_factory()

            # Check for refresh parameter
            force_refresh = request.query.get("refresh", "").lower() == "true"

            result = await service.get_base_models(force_refresh=force_refresh)

            return web.json_response(
                {
                    "success": True,
                    "data": result,
                }
            )

        except Exception as e:
            logger.error(f"Error in get_base_models: {e}")
            return web.json_response(
                {"success": False, "error": str(e)},
                status=500,
            )

    async def refresh_base_models(self, request: web.Request) -> web.Response:
        """Force refresh base models from Civitai API.

        Returns:
            JSON response with refreshed data
        """
        try:
            service = await self._base_model_service_factory()
            result = await service.refresh_cache()

            return web.json_response(
                {
                    "success": True,
                    "data": result,
                    "message": "Base models cache refreshed successfully",
                }
            )

        except Exception as e:
            logger.error(f"Error in refresh_base_models: {e}")
            return web.json_response(
                {"success": False, "error": str(e)},
                status=500,
            )

    async def get_base_model_categories(self, request: web.Request) -> web.Response:
        """Get categorized base models.

        Returns:
            JSON response with categorized models
        """
        try:
            service = await self._base_model_service_factory()
            categories = service.get_model_categories()

            return web.json_response(
                {
                    "success": True,
                    "data": categories,
                }
            )

        except Exception as e:
            logger.error(f"Error in get_base_model_categories: {e}")
            return web.json_response(
                {"success": False, "error": str(e)},
                status=500,
            )

    async def get_base_model_cache_status(self, request: web.Request) -> web.Response:
        """Get cache status for base models.

        Returns:
            JSON response with cache status
        """
        try:
            service = await self._base_model_service_factory()
            status = service.get_cache_status()

            return web.json_response(
                {
                    "success": True,
                    "data": status,
                }
            )

        except Exception as e:
            logger.error(f"Error in get_base_model_cache_status: {e}")
            return web.json_response(
                {"success": False, "error": str(e)},
                status=500,
            )
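For orientation, here is a minimal client-side sketch of how these handlers can be exercised once the /api/lm/base-models routes added later in this diff are registered. The response envelope ({"success": ..., "data": ...}) follows the handler code above; the host and port, the use of an aiohttp client, and the helper name are assumptions for illustration only.

# Illustrative only: assumes a local server on 127.0.0.1:8188 and aiohttp installed.
import asyncio
import aiohttp

async def fetch_base_models(force_refresh: bool = False) -> list[str]:
    params = {"refresh": "true"} if force_refresh else {}
    async with aiohttp.ClientSession() as session:
        async with session.get(
            "http://127.0.0.1:8188/api/lm/base-models", params=params
        ) as resp:
            payload = await resp.json()
    if not payload.get("success"):
        raise RuntimeError(payload.get("error", "unknown error"))
    data = payload["data"]
    # data carries models, source ('cache' / 'api' / 'fallback'), last_updated and the counts
    return data["models"]

if __name__ == "__main__":
    print(asyncio.run(fetch_base_models()))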
File diff suppressed because it is too large
@@ -19,6 +19,7 @@ from ...services.download_coordinator import DownloadCoordinator
|
||||
from ...services.metadata_sync_service import MetadataSyncService
|
||||
from ...services.model_file_service import ModelMoveService
|
||||
from ...services.preview_asset_service import PreviewAssetService
|
||||
from ...services.service_registry import ServiceRegistry
|
||||
from ...services.settings_manager import SettingsManager, get_settings_manager
|
||||
from ...services.tag_update_service import TagUpdateService
|
||||
from ...services.use_cases import (
|
||||
@@ -64,7 +65,6 @@ class ModelPageView:
|
||||
self._settings = settings_service
|
||||
self._server_i18n = server_i18n
|
||||
self._logger = logger
|
||||
self._app_version = self._get_app_version()
|
||||
|
||||
def _load_supporters(self) -> dict:
|
||||
"""Load supporters data from JSON file."""
|
||||
@@ -155,7 +155,7 @@ class ModelPageView:
|
||||
"request": request,
|
||||
"folders": [],
|
||||
"t": self._server_i18n.get_translation,
|
||||
"version": self._app_version,
|
||||
"version": self._get_app_version(),
|
||||
}
|
||||
|
||||
if not is_initializing:
|
||||
@@ -309,6 +309,13 @@ class ModelListingHandler:
|
||||
else:
|
||||
allow_selling_generated_content = None # None means no filter applied
|
||||
|
||||
# Name pattern filters for LoRA Pool
|
||||
name_pattern_include = request.query.getall("name_pattern_include", [])
|
||||
name_pattern_exclude = request.query.getall("name_pattern_exclude", [])
|
||||
name_pattern_use_regex = (
|
||||
request.query.get("name_pattern_use_regex", "false").lower() == "true"
|
||||
)
|
||||
|
||||
return {
|
||||
"page": page,
|
||||
"page_size": page_size,
|
||||
@@ -328,6 +335,9 @@ class ModelListingHandler:
|
||||
"credit_required": credit_required,
|
||||
"allow_selling_generated_content": allow_selling_generated_content,
|
||||
"model_types": model_types,
|
||||
"name_pattern_include": name_pattern_include,
|
||||
"name_pattern_exclude": name_pattern_exclude,
|
||||
"name_pattern_use_regex": name_pattern_use_regex,
|
||||
**self._parse_specific_params(request),
|
||||
}
|
||||
|
||||
@@ -1522,6 +1532,20 @@ class ModelCivitaiHandler:
|
||||
|
||||
cache = await self._service.scanner.get_cached_data()
|
||||
version_index = cache.version_index
|
||||
downloaded_version_ids: set[int] = set()
|
||||
try:
|
||||
history_service = await ServiceRegistry.get_downloaded_version_history_service()
|
||||
downloaded_version_ids = set(
|
||||
await history_service.get_downloaded_version_ids(
|
||||
self._service.model_type,
|
||||
model_id,
|
||||
)
|
||||
)
|
||||
except Exception as exc: # pragma: no cover - defensive logging
|
||||
self._logger.debug(
|
||||
"Failed to load download history for CivitAI versions: %s",
|
||||
exc,
|
||||
)
|
||||
|
||||
for version in versions:
|
||||
version_id = None
|
||||
@@ -1538,6 +1562,9 @@ class ModelCivitaiHandler:
|
||||
else None
|
||||
)
|
||||
version["existsLocally"] = cache_entry is not None
|
||||
version["hasBeenDownloaded"] = (
|
||||
version_id in downloaded_version_ids if version_id is not None else False
|
||||
)
|
||||
if cache_entry and isinstance(cache_entry, Mapping):
|
||||
local_path = cache_entry.get("file_path")
|
||||
if local_path:
|
||||
@@ -2256,7 +2283,7 @@ class ModelUpdateHandler:
|
||||
self,
|
||||
record,
|
||||
*,
|
||||
version_context: Optional[Dict[int, Dict[str, Optional[str]]]] = None,
|
||||
version_context: Optional[Dict[int, Dict[str, Any]]] = None,
|
||||
) -> Dict:
|
||||
context = version_context or {}
|
||||
# Check user setting for hiding early access versions
|
||||
@@ -2285,7 +2312,7 @@ class ModelUpdateHandler:
|
||||
|
||||
@staticmethod
|
||||
def _serialize_version(
|
||||
version, context: Optional[Dict[str, Optional[str]]]
|
||||
version, context: Optional[Dict[str, Any]]
|
||||
) -> Dict:
|
||||
context = context or {}
|
||||
preview_override = context.get("preview_override")
|
||||
@@ -2319,6 +2346,7 @@ class ModelUpdateHandler:
|
||||
"sizeBytes": version.size_bytes,
|
||||
"previewUrl": preview_url,
|
||||
"isInLibrary": version.is_in_library,
|
||||
"hasBeenDownloaded": bool(context.get("has_been_downloaded", False)),
|
||||
"shouldIgnore": version.should_ignore,
|
||||
"earlyAccessEndsAt": version.early_access_ends_at,
|
||||
"isEarlyAccess": is_early_access,
|
||||
@@ -2328,8 +2356,31 @@ class ModelUpdateHandler:
|
||||
|
||||
async def _build_version_context(
|
||||
self, record
|
||||
) -> Dict[int, Dict[str, Optional[str]]]:
|
||||
context: Dict[int, Dict[str, Optional[str]]] = {}
|
||||
) -> Dict[int, Dict[str, Any]]:
|
||||
context: Dict[int, Dict[str, Any]] = {}
|
||||
downloaded_version_ids: set[int] = set()
|
||||
try:
|
||||
history_service = await ServiceRegistry.get_downloaded_version_history_service()
|
||||
downloaded_version_ids = set(
|
||||
await history_service.get_downloaded_version_ids(
|
||||
record.model_type,
|
||||
record.model_id,
|
||||
)
|
||||
)
|
||||
except Exception as exc: # pragma: no cover - defensive logging
|
||||
self._logger.debug(
|
||||
"Failed to load download history while building version context: %s",
|
||||
exc,
|
||||
)
|
||||
|
||||
for version in record.versions:
|
||||
context[version.version_id] = {
|
||||
"file_path": None,
|
||||
"file_name": None,
|
||||
"preview_override": None,
|
||||
"has_been_downloaded": version.version_id in downloaded_version_ids,
|
||||
}
|
||||
|
||||
try:
|
||||
cache = await self._service.scanner.get_cached_data()
|
||||
except Exception as exc: # pragma: no cover - defensive logging
|
||||
@@ -2348,16 +2399,21 @@ class ModelUpdateHandler:
|
||||
cache_entry = version_index.get(version.version_id)
|
||||
if isinstance(cache_entry, Mapping):
|
||||
preview = cache_entry.get("preview_url")
|
||||
context_entry: Dict[str, Optional[str]] = {
|
||||
"file_path": cache_entry.get("file_path"),
|
||||
"file_name": cache_entry.get("file_name"),
|
||||
context_entry = context.setdefault(
|
||||
version.version_id,
|
||||
{
|
||||
"file_path": None,
|
||||
"file_name": None,
|
||||
"preview_override": None,
|
||||
}
|
||||
"has_been_downloaded": version.version_id in downloaded_version_ids,
|
||||
},
|
||||
)
|
||||
context_entry["file_path"] = cache_entry.get("file_path")
|
||||
context_entry["file_name"] = cache_entry.get("file_name")
|
||||
if isinstance(preview, str) and preview:
|
||||
context_entry["preview_override"] = config.get_preview_static_url(
|
||||
preview
|
||||
)
|
||||
context[version.version_id] = context_entry
|
||||
return context
|
||||
|
||||
|
||||
|
||||
@@ -81,6 +81,7 @@ class RecipeHandlerSet:
|
||||
"bulk_delete": self.management.bulk_delete,
|
||||
"save_recipe_from_widget": self.management.save_recipe_from_widget,
|
||||
"get_recipes_for_lora": self.query.get_recipes_for_lora,
|
||||
"get_recipes_for_checkpoint": self.query.get_recipes_for_checkpoint,
|
||||
"scan_recipes": self.query.scan_recipes,
|
||||
"move_recipe": self.management.move_recipe,
|
||||
"repair_recipes": self.management.repair_recipes,
|
||||
@@ -218,6 +219,7 @@ class RecipeListingHandler:
|
||||
filters["tags"] = tag_filters
|
||||
|
||||
lora_hash = request.query.get("lora_hash")
|
||||
checkpoint_hash = request.query.get("checkpoint_hash")
|
||||
|
||||
result = await recipe_scanner.get_paginated_data(
|
||||
page=page,
|
||||
@@ -227,6 +229,7 @@ class RecipeListingHandler:
|
||||
filters=filters,
|
||||
search_options=search_options,
|
||||
lora_hash=lora_hash,
|
||||
checkpoint_hash=checkpoint_hash,
|
||||
folder=folder,
|
||||
recursive=recursive,
|
||||
)
|
||||
@@ -423,6 +426,28 @@ class RecipeQueryHandler:
|
||||
self._logger.error("Error getting recipes for Lora: %s", exc)
|
||||
return web.json_response({"success": False, "error": str(exc)}, status=500)
|
||||
|
||||
async def get_recipes_for_checkpoint(self, request: web.Request) -> web.Response:
|
||||
try:
|
||||
await self._ensure_dependencies_ready()
|
||||
recipe_scanner = self._recipe_scanner_getter()
|
||||
if recipe_scanner is None:
|
||||
raise RuntimeError("Recipe scanner unavailable")
|
||||
|
||||
checkpoint_hash = request.query.get("hash")
|
||||
if not checkpoint_hash:
|
||||
return web.json_response(
|
||||
{"success": False, "error": "Checkpoint hash is required"},
|
||||
status=400,
|
||||
)
|
||||
|
||||
matching_recipes = await recipe_scanner.get_recipes_for_checkpoint(
|
||||
checkpoint_hash
|
||||
)
|
||||
return web.json_response({"success": True, "recipes": matching_recipes})
|
||||
except Exception as exc:
|
||||
self._logger.error("Error getting recipes for checkpoint: %s", exc)
|
||||
return web.json_response({"success": False, "error": str(exc)}, status=500)
|
||||
|
||||
async def scan_recipes(self, request: web.Request) -> web.Response:
|
||||
try:
|
||||
await self._ensure_dependencies_ready()
|
||||
@@ -731,6 +756,14 @@ class RecipeManagementHandler:
|
||||
)
|
||||
gen_params_request = self._parse_gen_params(params.get("gen_params"))
|
||||
|
||||
self._logger.info(
|
||||
"Remote recipe import received: url=%s, request_gen_params_keys=%s, lora_count=%d, checkpoint_keys=%s",
|
||||
image_url,
|
||||
sorted(gen_params_request.keys()) if gen_params_request else [],
|
||||
len(lora_entries),
|
||||
sorted(checkpoint_entry.keys()) if isinstance(checkpoint_entry, dict) else [],
|
||||
)
|
||||
|
||||
# 2. Initial Metadata Construction
|
||||
metadata: Dict[str, Any] = {
|
||||
"base_model": params.get("base_model", "") or "",
|
||||
|
||||
@@ -22,11 +22,16 @@ class RouteDefinition:
|
||||
MISC_ROUTE_DEFINITIONS: tuple[RouteDefinition, ...] = (
|
||||
RouteDefinition("GET", "/api/lm/settings", "get_settings"),
|
||||
RouteDefinition("POST", "/api/lm/settings", "update_settings"),
|
||||
RouteDefinition("GET", "/api/lm/doctor/diagnostics", "get_doctor_diagnostics"),
|
||||
RouteDefinition("POST", "/api/lm/doctor/repair-cache", "repair_doctor_cache"),
|
||||
RouteDefinition("POST", "/api/lm/doctor/export-bundle", "export_doctor_bundle"),
|
||||
RouteDefinition("GET", "/api/lm/priority-tags", "get_priority_tags"),
|
||||
RouteDefinition("GET", "/api/lm/settings/libraries", "get_settings_libraries"),
|
||||
RouteDefinition("POST", "/api/lm/settings/libraries/activate", "activate_library"),
|
||||
RouteDefinition("GET", "/api/lm/health-check", "health_check"),
|
||||
RouteDefinition("GET", "/api/lm/supporters", "get_supporters"),
|
||||
RouteDefinition("GET", "/api/lm/wildcards/search", "search_wildcards"),
|
||||
RouteDefinition("POST", "/api/lm/wildcards/open-location", "open_wildcards_location"),
|
||||
RouteDefinition("POST", "/api/lm/open-file-location", "open_file_location"),
|
||||
RouteDefinition("POST", "/api/lm/update-usage-stats", "update_usage_stats"),
|
||||
RouteDefinition("GET", "/api/lm/get-usage-stats", "get_usage_stats"),
|
||||
@@ -37,6 +42,21 @@ MISC_ROUTE_DEFINITIONS: tuple[RouteDefinition, ...] = (
|
||||
RouteDefinition("POST", "/api/lm/update-node-widget", "update_node_widget"),
|
||||
RouteDefinition("GET", "/api/lm/get-registry", "get_registry"),
|
||||
RouteDefinition("GET", "/api/lm/check-model-exists", "check_model_exists"),
|
||||
RouteDefinition(
|
||||
"GET",
|
||||
"/api/lm/model-version-download-status",
|
||||
"get_model_version_download_status",
|
||||
),
|
||||
RouteDefinition(
|
||||
"POST",
|
||||
"/api/lm/model-version-download-status",
|
||||
"set_model_version_download_status",
|
||||
),
|
||||
RouteDefinition(
|
||||
"GET",
|
||||
"/api/lm/set-model-version-download-status",
|
||||
"set_model_version_download_status",
|
||||
),
|
||||
RouteDefinition("GET", "/api/lm/civitai/user-models", "get_civitai_user_models"),
|
||||
RouteDefinition(
|
||||
"POST", "/api/lm/download-metadata-archive", "download_metadata_archive"
|
||||
@@ -47,6 +67,10 @@ MISC_ROUTE_DEFINITIONS: tuple[RouteDefinition, ...] = (
|
||||
RouteDefinition(
|
||||
"GET", "/api/lm/metadata-archive-status", "get_metadata_archive_status"
|
||||
),
|
||||
RouteDefinition("GET", "/api/lm/backup/status", "get_backup_status"),
|
||||
RouteDefinition("POST", "/api/lm/backup/export", "export_backup"),
|
||||
RouteDefinition("POST", "/api/lm/backup/import", "import_backup"),
|
||||
RouteDefinition("POST", "/api/lm/backup/open-location", "open_backup_location"),
|
||||
RouteDefinition(
|
||||
"GET", "/api/lm/model-versions-status", "get_model_versions_status"
|
||||
),
|
||||
@@ -56,6 +80,15 @@ MISC_ROUTE_DEFINITIONS: tuple[RouteDefinition, ...] = (
|
||||
RouteDefinition(
|
||||
"GET", "/api/lm/example-workflows/{filename}", "get_example_workflow"
|
||||
),
|
||||
# Base model management routes
|
||||
RouteDefinition("GET", "/api/lm/base-models", "get_base_models"),
|
||||
RouteDefinition("POST", "/api/lm/base-models/refresh", "refresh_base_models"),
|
||||
RouteDefinition(
|
||||
"GET", "/api/lm/base-models/categories", "get_base_model_categories"
|
||||
),
|
||||
RouteDefinition(
|
||||
"GET", "/api/lm/base-models/cache-status", "get_base_model_cache_status"
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -19,10 +19,12 @@ from ..services.downloader import get_downloader
|
||||
from ..utils.usage_stats import UsageStats
|
||||
from .handlers.misc_handlers import (
|
||||
CustomWordsHandler,
|
||||
DoctorHandler,
|
||||
ExampleWorkflowsHandler,
|
||||
FileSystemHandler,
|
||||
HealthCheckHandler,
|
||||
LoraCodeHandler,
|
||||
BackupHandler,
|
||||
MetadataArchiveHandler,
|
||||
MiscHandlerSet,
|
||||
ModelExampleFilesHandler,
|
||||
@@ -33,8 +35,10 @@ from .handlers.misc_handlers import (
|
||||
SupportersHandler,
|
||||
TrainedWordsHandler,
|
||||
UsageStatsHandler,
|
||||
WildcardsHandler,
|
||||
build_service_registry_adapter,
|
||||
)
|
||||
from .handlers.base_model_handlers import BaseModelHandlerSet
|
||||
from .misc_route_registrar import MiscRouteRegistrar
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -115,6 +119,7 @@ class MiscRoutes:
|
||||
settings_service=self._settings,
|
||||
metadata_provider_updater=self._metadata_provider_updater,
|
||||
)
|
||||
backup = BackupHandler()
|
||||
filesystem = FileSystemHandler(settings_service=self._settings)
|
||||
node_registry_handler = NodeRegistryHandler(
|
||||
node_registry=self._node_registry,
|
||||
@@ -126,8 +131,11 @@ class MiscRoutes:
|
||||
metadata_provider_factory=self._metadata_provider_factory,
|
||||
)
|
||||
custom_words = CustomWordsHandler()
|
||||
wildcards = WildcardsHandler()
|
||||
supporters = SupportersHandler()
|
||||
doctor = DoctorHandler(settings_service=self._settings)
|
||||
example_workflows = ExampleWorkflowsHandler()
|
||||
base_model = BaseModelHandlerSet()
|
||||
|
||||
return self._handler_set_factory(
|
||||
health=health,
|
||||
@@ -139,10 +147,14 @@ class MiscRoutes:
|
||||
node_registry=node_registry_handler,
|
||||
model_library=model_library,
|
||||
metadata_archive=metadata_archive,
|
||||
backup=backup,
|
||||
filesystem=filesystem,
|
||||
custom_words=custom_words,
|
||||
wildcards=wildcards,
|
||||
supporters=supporters,
|
||||
doctor=doctor,
|
||||
example_workflows=example_workflows,
|
||||
base_model=base_model,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -51,6 +51,9 @@ ROUTE_DEFINITIONS: tuple[RouteDefinition, ...] = (
        "POST", "/api/lm/recipes/save-from-widget", "save_recipe_from_widget"
    ),
    RouteDefinition("GET", "/api/lm/recipes/for-lora", "get_recipes_for_lora"),
    RouteDefinition(
        "GET", "/api/lm/recipes/for-checkpoint", "get_recipes_for_checkpoint"
    ),
    RouteDefinition("GET", "/api/lm/recipes/scan", "scan_recipes"),
    RouteDefinition("POST", "/api/lm/recipes/repair", "repair_recipes"),
    RouteDefinition("POST", "/api/lm/recipes/cancel-repair", "cancel_repair"),
py/services/backup_service.py (new file, 411 lines)
@@ -0,0 +1,411 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import contextlib
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
import time
|
||||
import zipfile
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Iterable, Optional
|
||||
|
||||
from ..utils.cache_paths import CacheType, get_cache_base_dir, get_cache_file_path
|
||||
from ..utils.settings_paths import get_settings_dir
|
||||
from .settings_manager import get_settings_manager
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
BACKUP_MANIFEST_VERSION = 1
|
||||
DEFAULT_BACKUP_RETENTION_COUNT = 5
|
||||
DEFAULT_BACKUP_INTERVAL_SECONDS = 24 * 60 * 60
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class BackupEntry:
|
||||
kind: str
|
||||
archive_path: str
|
||||
target_path: str
|
||||
sha256: str
|
||||
size: int
|
||||
mtime: float
|
||||
|
||||
|
||||
class BackupService:
|
||||
"""Create and restore user-state backup archives."""
|
||||
|
||||
_instance: "BackupService | None" = None
|
||||
_instance_lock = asyncio.Lock()
|
||||
|
||||
def __init__(self, *, settings_manager=None, backup_dir: str | None = None) -> None:
|
||||
self._settings = settings_manager or get_settings_manager()
|
||||
self._backup_dir = Path(backup_dir or self._resolve_backup_dir())
|
||||
self._backup_dir.mkdir(parents=True, exist_ok=True)
|
||||
self._lock = asyncio.Lock()
|
||||
self._auto_task: asyncio.Task[None] | None = None
|
||||
|
||||
@classmethod
|
||||
async def get_instance(cls) -> "BackupService":
|
||||
async with cls._instance_lock:
|
||||
if cls._instance is None:
|
||||
cls._instance = cls()
|
||||
cls._instance._ensure_auto_snapshot_task()
|
||||
return cls._instance
|
||||
|
||||
@staticmethod
|
||||
def _resolve_backup_dir() -> str:
|
||||
return os.path.join(get_settings_dir(create=True), "backups")
|
||||
|
||||
def get_backup_dir(self) -> str:
|
||||
return str(self._backup_dir)
|
||||
|
||||
def _ensure_auto_snapshot_task(self) -> None:
|
||||
if self._auto_task is not None and not self._auto_task.done():
|
||||
return
|
||||
|
||||
try:
|
||||
loop = asyncio.get_running_loop()
|
||||
except RuntimeError:
|
||||
return
|
||||
|
||||
self._auto_task = loop.create_task(self._auto_backup_loop())
|
||||
|
||||
def _get_setting_bool(self, key: str, default: bool) -> bool:
|
||||
try:
|
||||
return bool(self._settings.get(key, default))
|
||||
except Exception:
|
||||
return default
|
||||
|
||||
def _get_setting_int(self, key: str, default: int) -> int:
|
||||
try:
|
||||
value = self._settings.get(key, default)
|
||||
return max(1, int(value))
|
||||
except Exception:
|
||||
return default
|
||||
|
||||
def _settings_file_path(self) -> str:
|
||||
settings_file = getattr(self._settings, "settings_file", None)
|
||||
if settings_file:
|
||||
return str(settings_file)
|
||||
return os.path.join(get_settings_dir(create=True), "settings.json")
|
||||
|
||||
def _download_history_path(self) -> str:
|
||||
base_dir = get_cache_base_dir(create=True)
|
||||
history_dir = os.path.join(base_dir, "download_history")
|
||||
os.makedirs(history_dir, exist_ok=True)
|
||||
return os.path.join(history_dir, "downloaded_versions.sqlite")
|
||||
|
||||
def _model_update_dir(self) -> str:
|
||||
return str(Path(get_cache_file_path(CacheType.MODEL_UPDATE, create_dir=True)).parent)
|
||||
|
||||
def _model_update_targets(self) -> list[tuple[str, str, str]]:
|
||||
"""Return (kind, archive_path, target_path) tuples for backup."""
|
||||
|
||||
targets: list[tuple[str, str, str]] = []
|
||||
|
||||
settings_path = self._settings_file_path()
|
||||
targets.append(("settings", "settings/settings.json", settings_path))
|
||||
|
||||
history_path = self._download_history_path()
|
||||
targets.append(
|
||||
(
|
||||
"download_history",
|
||||
"cache/download_history/downloaded_versions.sqlite",
|
||||
history_path,
|
||||
)
|
||||
)
|
||||
|
||||
symlink_path = get_cache_file_path(CacheType.SYMLINK, create_dir=True)
|
||||
targets.append(
|
||||
(
|
||||
"symlink_map",
|
||||
"cache/symlink/symlink_map.json",
|
||||
symlink_path,
|
||||
)
|
||||
)
|
||||
|
||||
model_update_dir = Path(self._model_update_dir())
|
||||
if model_update_dir.exists():
|
||||
for sqlite_file in sorted(model_update_dir.glob("*.sqlite")):
|
||||
targets.append(
|
||||
(
|
||||
"model_update",
|
||||
f"cache/model_update/{sqlite_file.name}",
|
||||
str(sqlite_file),
|
||||
)
|
||||
)
|
||||
|
||||
return targets
|
||||
|
||||
@staticmethod
|
||||
def _hash_file(path: str) -> tuple[str, int, float]:
|
||||
digest = hashlib.sha256()
|
||||
total = 0
|
||||
with open(path, "rb") as handle:
|
||||
for chunk in iter(lambda: handle.read(1024 * 1024), b""):
|
||||
total += len(chunk)
|
||||
digest.update(chunk)
|
||||
mtime = os.path.getmtime(path)
|
||||
return digest.hexdigest(), total, mtime
|
||||
|
||||
def _build_manifest(self, entries: Iterable[BackupEntry], *, snapshot_type: str) -> dict[str, Any]:
|
||||
created_at = datetime.now(timezone.utc).isoformat()
|
||||
active_library = None
|
||||
try:
|
||||
active_library = self._settings.get_active_library_name()
|
||||
except Exception:
|
||||
active_library = None
|
||||
|
||||
return {
|
||||
"manifest_version": BACKUP_MANIFEST_VERSION,
|
||||
"created_at": created_at,
|
||||
"snapshot_type": snapshot_type,
|
||||
"active_library": active_library,
|
||||
"files": [
|
||||
{
|
||||
"kind": entry.kind,
|
||||
"archive_path": entry.archive_path,
|
||||
"target_path": entry.target_path,
|
||||
"sha256": entry.sha256,
|
||||
"size": entry.size,
|
||||
"mtime": entry.mtime,
|
||||
}
|
||||
for entry in entries
|
||||
],
|
||||
}
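The manifest built here is exactly what _write_archive serializes into manifest.json at the root of the backup ZIP. As a rough sketch of its shape (field values below are illustrative, not taken from a real archive):

# Illustrative manifest contents; paths, hashes and timestamps are made up for the example.
example_manifest = {
    "manifest_version": 1,               # BACKUP_MANIFEST_VERSION
    "created_at": "2025-01-01T00:00:00+00:00",
    "snapshot_type": "manual",            # or "auto"
    "active_library": "default",          # None if the settings manager cannot report it
    "files": [
        {
            "kind": "settings",
            "archive_path": "settings/settings.json",
            "target_path": "/path/to/settings/settings.json",
            "sha256": "<hex digest>",
            "size": 2048,
            "mtime": 1735689600.0,
        },
        # plus download_history, symlink_map and model_update entries when present
    ],
}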
|
||||
|
||||
def _write_archive(self, archive_path: str, entries: list[BackupEntry], manifest: dict[str, Any]) -> None:
|
||||
with zipfile.ZipFile(
|
||||
archive_path,
|
||||
mode="w",
|
||||
compression=zipfile.ZIP_DEFLATED,
|
||||
compresslevel=6,
|
||||
) as zf:
|
||||
zf.writestr(
|
||||
"manifest.json",
|
||||
json.dumps(manifest, indent=2, ensure_ascii=False).encode("utf-8"),
|
||||
)
|
||||
for entry in entries:
|
||||
zf.write(entry.target_path, arcname=entry.archive_path)
|
||||
|
||||
async def create_snapshot(self, *, snapshot_type: str = "manual", persist: bool = False) -> dict[str, Any]:
|
||||
"""Create a backup archive.
|
||||
|
||||
If ``persist`` is true, the archive is stored in the backup directory
|
||||
and retained according to the configured retention policy.
|
||||
"""
|
||||
|
||||
async with self._lock:
|
||||
raw_targets = self._model_update_targets()
|
||||
entries: list[BackupEntry] = []
|
||||
for kind, archive_path, target_path in raw_targets:
|
||||
if not os.path.exists(target_path):
|
||||
continue
|
||||
sha256, size, mtime = self._hash_file(target_path)
|
||||
entries.append(
|
||||
BackupEntry(
|
||||
kind=kind,
|
||||
archive_path=archive_path,
|
||||
target_path=target_path,
|
||||
sha256=sha256,
|
||||
size=size,
|
||||
mtime=mtime,
|
||||
)
|
||||
)
|
||||
|
||||
if not entries:
|
||||
raise FileNotFoundError("No backupable files were found")
|
||||
|
||||
manifest = self._build_manifest(entries, snapshot_type=snapshot_type)
|
||||
archive_name = self._build_archive_name(snapshot_type=snapshot_type)
|
||||
fd, temp_path = tempfile.mkstemp(suffix=".zip", dir=str(self._backup_dir))
|
||||
os.close(fd)
|
||||
|
||||
try:
|
||||
self._write_archive(temp_path, entries, manifest)
|
||||
if persist:
|
||||
final_path = self._backup_dir / archive_name
|
||||
os.replace(temp_path, final_path)
|
||||
self._prune_snapshots()
|
||||
return {
|
||||
"archive_path": str(final_path),
|
||||
"archive_name": final_path.name,
|
||||
"manifest": manifest,
|
||||
}
|
||||
|
||||
with open(temp_path, "rb") as handle:
|
||||
data = handle.read()
|
||||
return {
|
||||
"archive_name": archive_name,
|
||||
"archive_bytes": data,
|
||||
"manifest": manifest,
|
||||
}
|
||||
finally:
|
||||
with contextlib.suppress(FileNotFoundError):
|
||||
os.remove(temp_path)
|
||||
|
||||
def _build_archive_name(self, *, snapshot_type: str) -> str:
|
||||
timestamp = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
|
||||
return f"lora-manager-backup-{timestamp}-{snapshot_type}.zip"
|
||||
|
||||
def _prune_snapshots(self) -> None:
|
||||
retention = self._get_setting_int(
|
||||
"backup_retention_count", DEFAULT_BACKUP_RETENTION_COUNT
|
||||
)
|
||||
archives = sorted(
|
||||
self._backup_dir.glob("lora-manager-backup-*-auto.zip"),
|
||||
key=lambda path: path.stat().st_mtime,
|
||||
reverse=True,
|
||||
)
|
||||
for path in archives[retention:]:
|
||||
with contextlib.suppress(OSError):
|
||||
path.unlink()
|
||||
|
||||
async def restore_snapshot(self, archive_path: str) -> dict[str, Any]:
|
||||
"""Restore backup contents from a ZIP archive."""
|
||||
|
||||
async with self._lock:
|
||||
try:
|
||||
zf = zipfile.ZipFile(archive_path, mode="r")
|
||||
except zipfile.BadZipFile as exc:
|
||||
raise ValueError("Backup archive is not a valid ZIP file") from exc
|
||||
|
||||
with zf:
|
||||
try:
|
||||
manifest = json.loads(zf.read("manifest.json").decode("utf-8"))
|
||||
except KeyError as exc:
|
||||
raise ValueError("Backup archive is missing manifest.json") from exc
|
||||
|
||||
if not isinstance(manifest, dict):
|
||||
raise ValueError("Backup manifest is invalid")
|
||||
if manifest.get("manifest_version") != BACKUP_MANIFEST_VERSION:
|
||||
raise ValueError("Backup manifest version is not supported")
|
||||
|
||||
files = manifest.get("files", [])
|
||||
if not isinstance(files, list):
|
||||
raise ValueError("Backup manifest file list is invalid")
|
||||
|
||||
extracted_paths: list[tuple[str, str]] = []
|
||||
temp_dir = Path(tempfile.mkdtemp(prefix="lora-manager-restore-"))
|
||||
try:
|
||||
for item in files:
|
||||
if not isinstance(item, dict):
|
||||
continue
|
||||
archive_member = item.get("archive_path")
|
||||
if not isinstance(archive_member, str) or not archive_member:
|
||||
continue
|
||||
archive_member_path = Path(archive_member)
|
||||
if archive_member_path.is_absolute() or ".." in archive_member_path.parts:
|
||||
raise ValueError(f"Invalid archive member path: {archive_member}")
|
||||
|
||||
kind = item.get("kind")
|
||||
target_path = self._resolve_restore_target(kind, archive_member)
|
||||
if target_path is None:
|
||||
continue
|
||||
|
||||
extracted_path = temp_dir / archive_member_path
|
||||
extracted_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
with zf.open(archive_member) as source, open(
|
||||
extracted_path, "wb"
|
||||
) as destination:
|
||||
shutil.copyfileobj(source, destination)
|
||||
|
||||
expected_hash = item.get("sha256")
|
||||
if isinstance(expected_hash, str) and expected_hash:
|
||||
actual_hash, _, _ = self._hash_file(str(extracted_path))
|
||||
if actual_hash != expected_hash:
|
||||
raise ValueError(
|
||||
f"Checksum mismatch for {archive_member}"
|
||||
)
|
||||
|
||||
extracted_paths.append((str(extracted_path), target_path))
|
||||
|
||||
for extracted_path, target_path in extracted_paths:
|
||||
os.makedirs(os.path.dirname(target_path), exist_ok=True)
|
||||
os.replace(extracted_path, target_path)
|
||||
finally:
|
||||
shutil.rmtree(temp_dir, ignore_errors=True)
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"restored_files": len(extracted_paths),
|
||||
"snapshot_type": manifest.get("snapshot_type"),
|
||||
}
|
||||
|
||||
def _resolve_restore_target(self, kind: Any, archive_member: str) -> str | None:
|
||||
if kind == "settings":
|
||||
return self._settings_file_path()
|
||||
if kind == "download_history":
|
||||
return self._download_history_path()
|
||||
if kind == "symlink_map":
|
||||
return get_cache_file_path(CacheType.SYMLINK, create_dir=True)
|
||||
if kind == "model_update":
|
||||
filename = os.path.basename(archive_member)
|
||||
return str(Path(get_cache_file_path(CacheType.MODEL_UPDATE, create_dir=True)).parent / filename)
|
||||
return None
|
||||
|
||||
async def create_auto_snapshot_if_due(self) -> Optional[dict[str, Any]]:
|
||||
if not self._get_setting_bool("backup_auto_enabled", True):
|
||||
return None
|
||||
|
||||
latest = self.get_latest_auto_snapshot()
|
||||
now = time.time()
|
||||
if latest and now - latest["mtime"] < DEFAULT_BACKUP_INTERVAL_SECONDS:
|
||||
return None
|
||||
|
||||
return await self.create_snapshot(snapshot_type="auto", persist=True)
|
||||
|
||||
async def _auto_backup_loop(self) -> None:
|
||||
while True:
|
||||
try:
|
||||
await self.create_auto_snapshot_if_due()
|
||||
await asyncio.sleep(DEFAULT_BACKUP_INTERVAL_SECONDS)
|
||||
except asyncio.CancelledError:
|
||||
raise
|
||||
except Exception as exc: # pragma: no cover - defensive guard
|
||||
logger.warning("Automatic backup snapshot failed: %s", exc, exc_info=True)
|
||||
await asyncio.sleep(60)
|
||||
|
||||
def get_available_snapshots(self) -> list[dict[str, Any]]:
|
||||
snapshots: list[dict[str, Any]] = []
|
||||
for path in sorted(self._backup_dir.glob("lora-manager-backup-*.zip")):
|
||||
try:
|
||||
stat = path.stat()
|
||||
except OSError:
|
||||
continue
|
||||
snapshots.append(
|
||||
{
|
||||
"name": path.name,
|
||||
"path": str(path),
|
||||
"size": stat.st_size,
|
||||
"mtime": stat.st_mtime,
|
||||
"is_auto": path.name.endswith("-auto.zip"),
|
||||
}
|
||||
)
|
||||
snapshots.sort(key=lambda item: item["mtime"], reverse=True)
|
||||
return snapshots
|
||||
|
||||
def get_latest_auto_snapshot(self) -> Optional[dict[str, Any]]:
|
||||
autos = [snapshot for snapshot in self.get_available_snapshots() if snapshot["is_auto"]]
|
||||
if not autos:
|
||||
return None
|
||||
return autos[0]
|
||||
|
||||
def get_status(self) -> dict[str, Any]:
|
||||
snapshots = self.get_available_snapshots()
|
||||
return {
|
||||
"backupDir": self.get_backup_dir(),
|
||||
"enabled": self._get_setting_bool("backup_auto_enabled", True),
|
||||
"retentionCount": self._get_setting_int(
|
||||
"backup_retention_count", DEFAULT_BACKUP_RETENTION_COUNT
|
||||
),
|
||||
"snapshotCount": len(snapshots),
|
||||
"latestSnapshot": snapshots[0] if snapshots else None,
|
||||
"latestAutoSnapshot": self.get_latest_auto_snapshot(),
|
||||
}
|
||||
@@ -58,6 +58,7 @@ class CacheEntryValidator:
|
||||
'preview_nsfw_level': (0, False),
|
||||
'notes': ('', False),
|
||||
'usage_tips': ('', False),
|
||||
'hash_status': ('completed', False),
|
||||
}
|
||||
|
||||
@classmethod
|
||||
@@ -90,13 +91,31 @@ class CacheEntryValidator:
|
||||
|
||||
errors: List[str] = []
|
||||
repaired = False
|
||||
|
||||
# If auto_repair is on, we work on a copy. If not, we still need a safe way to check fields.
|
||||
working_entry = dict(entry) if auto_repair else entry
|
||||
|
||||
# Determine effective hash_status for validation logic
|
||||
hash_status = entry.get('hash_status')
|
||||
if hash_status is None:
|
||||
if auto_repair:
|
||||
working_entry['hash_status'] = 'completed'
|
||||
repaired = True
|
||||
hash_status = 'completed'
|
||||
|
||||
for field_name, (default_value, is_required) in cls.CORE_FIELDS.items():
|
||||
value = working_entry.get(field_name)
|
||||
# Get current value from the original entry to avoid side effects during validation
|
||||
value = entry.get(field_name)
|
||||
|
||||
# Check if field is missing or None
|
||||
if value is None:
|
||||
# Special case: sha256 can be None/empty if hash_status is pending
|
||||
if field_name == 'sha256' and hash_status == 'pending':
|
||||
if auto_repair:
|
||||
working_entry[field_name] = ''
|
||||
repaired = True
|
||||
continue
|
||||
|
||||
if is_required:
|
||||
errors.append(f"Required field '{field_name}' is missing or None")
|
||||
if auto_repair:
|
||||
@@ -107,6 +126,10 @@ class CacheEntryValidator:
|
||||
# Validate field type and value
|
||||
field_error = cls._validate_field(field_name, value, default_value)
|
||||
if field_error:
|
||||
# Special case: allow empty string for sha256 if pending
|
||||
if field_name == 'sha256' and hash_status == 'pending' and value == '':
|
||||
continue
|
||||
|
||||
errors.append(field_error)
|
||||
if auto_repair:
|
||||
working_entry[field_name] = cls._get_default_copy(default_value)
|
||||
@@ -127,7 +150,7 @@ class CacheEntryValidator:
|
||||
# Special validation: sha256 must not be empty for required field
|
||||
# BUT allow empty sha256 when hash_status is pending (lazy hash calculation)
|
||||
sha256 = working_entry.get('sha256', '')
|
||||
hash_status = working_entry.get('hash_status', 'completed')
|
||||
# Use the effective hash_status we determined earlier
|
||||
if not sha256 or (isinstance(sha256, str) and not sha256.strip()):
|
||||
# Allow empty sha256 for lazy hash calculation (checkpoints)
|
||||
if hash_status != 'pending':
|
||||
@@ -144,8 +167,13 @@ class CacheEntryValidator:
|
||||
if isinstance(sha256, str):
|
||||
normalized_sha = sha256.lower().strip()
|
||||
if normalized_sha != sha256:
|
||||
if auto_repair:
|
||||
working_entry['sha256'] = normalized_sha
|
||||
repaired = True
|
||||
else:
|
||||
# If not auto-repairing, we don't consider case difference as a "critical error"
|
||||
# that invalidates the entry, but we also don't mark it repaired.
|
||||
pass
|
||||
|
||||
# Determine if entry is valid
|
||||
# Entry is valid if no critical required field errors remain after repair
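To make the pending-hash special case concrete, here is a hedged sketch of the two situations the validator now distinguishes. The real CORE_FIELDS set is wider than what appears in this hunk, so the dictionaries below are fragments, not complete cache entries.

# Sketch only: real cache entries contain more CORE_FIELDS than listed here.
pending_entry = {
    "sha256": "",                  # allowed to be empty...
    "hash_status": "pending",      # ...because the hash is calculated lazily
    "notes": "",
    "usage_tips": "",
    "preview_nsfw_level": 0,
}

legacy_entry = {
    "sha256": "ABCDEF0123",        # mixed case gets lower-cased when auto_repair=True
    # no hash_status at all: treated as 'completed' (and backfilled under auto_repair)
}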
|
||||
|
||||
@@ -105,12 +105,18 @@ class CheckpointScanner(ModelScanner):
            return None

        # Load current metadata
        metadata, _ = await MetadataManager.load_metadata(
        metadata, should_skip = await MetadataManager.load_metadata(
            file_path, self.model_class
        )
        if metadata is None:
        if should_skip:
            logger.error(f"Invalid metadata found for {file_path}")
            return None
        created_metadata = await self._create_default_metadata(file_path)
        if created_metadata is None:
            logger.error(f"No metadata found for {file_path}")
            return None
        metadata = created_metadata

        # Check if hash is already calculated
        if metadata.hash_status == "completed" and metadata.sha256:
py/services/civitai_base_model_service.py (new file, 430 lines)
@@ -0,0 +1,430 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any, Dict, List, Optional, Set, Tuple
|
||||
|
||||
from ..utils.constants import SUPPORTED_DOWNLOAD_SKIP_BASE_MODELS
|
||||
from .downloader import get_downloader
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CivitaiBaseModelService:
|
||||
"""Service for fetching and managing Civitai base models.
|
||||
|
||||
This service provides:
|
||||
- Fetching base models from Civitai API
|
||||
- Caching with TTL (7 days default)
|
||||
- Merging hardcoded and remote base models
|
||||
- Generating abbreviations for new/unknown models
|
||||
"""
|
||||
|
||||
_instance: Optional[CivitaiBaseModelService] = None
|
||||
_lock = asyncio.Lock()
|
||||
|
||||
# Default TTL for cache in seconds (7 days)
|
||||
DEFAULT_CACHE_TTL = 7 * 24 * 60 * 60
|
||||
|
||||
# Civitai API endpoint for enums
|
||||
CIVITAI_ENUMS_URL = "https://civitai.com/api/v1/enums"
|
||||
|
||||
@classmethod
|
||||
async def get_instance(cls) -> CivitaiBaseModelService:
|
||||
"""Get singleton instance of the service."""
|
||||
async with cls._lock:
|
||||
if cls._instance is None:
|
||||
cls._instance = cls()
|
||||
return cls._instance
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize the service."""
|
||||
if hasattr(self, "_initialized"):
|
||||
return
|
||||
self._initialized = True
|
||||
|
||||
# Cache storage
|
||||
self._cache: Optional[Dict[str, Any]] = None
|
||||
self._cache_timestamp: Optional[datetime] = None
|
||||
self._cache_ttl = self.DEFAULT_CACHE_TTL
|
||||
|
||||
# Hardcoded models for fallback
|
||||
self._hardcoded_models = set(SUPPORTED_DOWNLOAD_SKIP_BASE_MODELS)
|
||||
|
||||
logger.info("CivitaiBaseModelService initialized")
|
||||
|
||||
async def get_base_models(self, force_refresh: bool = False) -> Dict[str, Any]:
|
||||
"""Get merged base models (hardcoded + remote).
|
||||
|
||||
Args:
|
||||
force_refresh: If True, fetch from API regardless of cache state.
|
||||
|
||||
Returns:
|
||||
Dictionary containing:
|
||||
- models: List of merged base model names
|
||||
- source: 'cache', 'api', or 'fallback'
|
||||
- last_updated: ISO timestamp of last successful API fetch
|
||||
- hardcoded_count: Number of hardcoded models
|
||||
- remote_count: Number of remote models
|
||||
- merged_count: Total unique models
|
||||
"""
|
||||
# Check if cache is valid
|
||||
if not force_refresh and self._is_cache_valid():
|
||||
logger.debug("Returning cached base models")
|
||||
return self._build_response("cache")
|
||||
|
||||
# Try to fetch from API
|
||||
try:
|
||||
remote_models = await self._fetch_from_civitai()
|
||||
if remote_models:
|
||||
self._update_cache(remote_models)
|
||||
return self._build_response("api")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to fetch base models from Civitai: {e}")
|
||||
|
||||
# Fallback to hardcoded models
|
||||
return self._build_response("fallback")
|
||||
|
||||
async def refresh_cache(self) -> Dict[str, Any]:
|
||||
"""Force refresh the cache from Civitai API.
|
||||
|
||||
Returns:
|
||||
Response dict same as get_base_models()
|
||||
"""
|
||||
return await self.get_base_models(force_refresh=True)
|
||||
|
||||
def get_cache_status(self) -> Dict[str, Any]:
|
||||
"""Get current cache status.
|
||||
|
||||
Returns:
|
||||
Dictionary containing:
|
||||
- has_cache: Whether cache exists
|
||||
- last_updated: ISO timestamp or None
|
||||
- is_expired: Whether cache is expired
|
||||
- ttl_seconds: TTL in seconds
|
||||
- age_seconds: Age of cache in seconds (if exists)
|
||||
"""
|
||||
if self._cache is None or self._cache_timestamp is None:
|
||||
return {
|
||||
"has_cache": False,
|
||||
"last_updated": None,
|
||||
"is_expired": True,
|
||||
"ttl_seconds": self._cache_ttl,
|
||||
"age_seconds": None,
|
||||
}
|
||||
|
||||
age = (datetime.now(timezone.utc) - self._cache_timestamp).total_seconds()
|
||||
return {
|
||||
"has_cache": True,
|
||||
"last_updated": self._cache_timestamp.isoformat(),
|
||||
"is_expired": age > self._cache_ttl,
|
||||
"ttl_seconds": self._cache_ttl,
|
||||
"age_seconds": int(age),
|
||||
}
|
||||
|
||||
def generate_abbreviation(self, model_name: str) -> str:
|
||||
"""Generate abbreviation for a base model name.
|
||||
|
||||
Algorithm:
|
||||
1. Extract version patterns (e.g., "2.5" from "Wan Video 2.5")
|
||||
2. Extract main acronym (e.g., "SD" from "SD 1.5")
|
||||
3. Handle special cases (Flux, Wan, etc.)
|
||||
4. Fallback to first letters of words (max 4 chars)
|
||||
|
||||
Args:
|
||||
model_name: Full base model name
|
||||
|
||||
Returns:
|
||||
Generated abbreviation (max 4 characters)
|
||||
"""
|
||||
if not model_name or not isinstance(model_name, str):
|
||||
return "OTH"
|
||||
|
||||
name = model_name.strip()
|
||||
if not name:
|
||||
return "OTH"
|
||||
|
||||
# Check if it's already in hardcoded abbreviations
|
||||
# This is a simplified check - in practice you'd have a mapping
|
||||
lower_name = name.lower()
|
||||
|
||||
# Special cases
|
||||
special_cases = {
|
||||
"sd 1.4": "SD1",
|
||||
"sd 1.5": "SD1",
|
||||
"sd 1.5 lcm": "SD1",
|
||||
"sd 1.5 hyper": "SD1",
|
||||
"sd 2.0": "SD2",
|
||||
"sd 2.1": "SD2",
|
||||
"sd 3": "SD3",
|
||||
"sd 3.5": "SD3",
|
||||
"sd 3.5 medium": "SD3",
|
||||
"sd 3.5 large": "SD3",
|
||||
"sd 3.5 large turbo": "SD3",
|
||||
"sdxl 1.0": "XL",
|
||||
"sdxl lightning": "XL",
|
||||
"sdxl hyper": "XL",
|
||||
"flux.1 d": "F1D",
|
||||
"flux.1 s": "F1S",
|
||||
"flux.1 krea": "F1KR",
|
||||
"flux.1 kontext": "F1KX",
|
||||
"flux.2 d": "F2D",
|
||||
"flux.2 klein 9b": "FK9",
|
||||
"flux.2 klein 9b-base": "FK9B",
|
||||
"flux.2 klein 4b": "FK4",
|
||||
"flux.2 klein 4b-base": "FK4B",
|
||||
"auraflow": "AF",
|
||||
"chroma": "CHR",
|
||||
"pixart a": "PXA",
|
||||
"pixart e": "PXE",
|
||||
"hunyuan 1": "HY",
|
||||
"hunyuan video": "HYV",
|
||||
"lumina": "L",
|
||||
"kolors": "KLR",
|
||||
"noobai": "NAI",
|
||||
"illustrious": "IL",
|
||||
"pony": "PONY",
|
||||
"pony v7": "PNY7",
|
||||
"hidream": "HID",
|
||||
"qwen": "QWEN",
|
||||
"zimageturbo": "ZIT",
|
||||
"zimagebase": "ZIB",
|
||||
"anima": "ANI",
|
||||
"svd": "SVD",
|
||||
"ltxv": "LTXV",
|
||||
"ltxv2": "LTV2",
|
||||
"ltxv 2.3": "LTX",
|
||||
"cogvideox": "CVX",
|
||||
"mochi": "MCHI",
|
||||
"wan video": "WAN",
|
||||
"wan video 1.3b t2v": "WAN",
|
||||
"wan video 14b t2v": "WAN",
|
||||
"wan video 14b i2v 480p": "WAN",
|
||||
"wan video 14b i2v 720p": "WAN",
|
||||
"wan video 2.2 ti2v-5b": "WAN",
|
||||
"wan video 2.2 t2v-a14b": "WAN",
|
||||
"wan video 2.2 i2v-a14b": "WAN",
|
||||
"wan video 2.5 t2v": "WAN",
|
||||
"wan video 2.5 i2v": "WAN",
|
||||
}
|
||||
|
||||
if lower_name in special_cases:
|
||||
return special_cases[lower_name]
|
||||
|
||||
# Try to extract acronym from version pattern
|
||||
# e.g., "Model Name 2.5" -> "MN25"
|
||||
version_match = re.search(r"(\d+(?:\.\d+)?)", name)
|
||||
version = version_match.group(1) if version_match else ""
|
||||
|
||||
# Remove version and common words
|
||||
words = re.sub(r"\d+(?:\.\d+)?", "", name)
|
||||
words = re.sub(
|
||||
r"\b(model|video|diffusion|checkpoint|textualinversion)\b",
|
||||
"",
|
||||
words,
|
||||
flags=re.I,
|
||||
)
|
||||
words = words.strip()
|
||||
|
||||
# Get first letters of remaining words
|
||||
tokens = re.findall(r"[A-Za-z]+", words)
|
||||
if tokens:
|
||||
# Build abbreviation from first letters
|
||||
abbrev = "".join(token[0].upper() for token in tokens)
|
||||
# Add version if present
|
||||
if version:
|
||||
# Clean version (remove dots for abbreviation)
|
||||
version_clean = version.replace(".", "")
|
||||
abbrev = abbrev[: 4 - len(version_clean)] + version_clean
|
||||
return abbrev[:4]
|
||||
|
||||
# Final fallback: just take first 4 alphanumeric chars
|
||||
alphanumeric = re.sub(r"[^A-Za-z0-9]", "", name)
|
||||
if alphanumeric:
|
||||
return alphanumeric[:4].upper()
|
||||
|
||||
return "OTH"
|
||||
|
||||
async def _fetch_from_civitai(self) -> Optional[Set[str]]:
|
||||
"""Fetch base models from Civitai API.
|
||||
|
||||
Returns:
|
||||
Set of base model names, or None if failed
|
||||
"""
|
||||
try:
|
||||
downloader = await get_downloader()
|
||||
success, result = await downloader.make_request(
|
||||
"GET",
|
||||
self.CIVITAI_ENUMS_URL,
|
||||
use_auth=False, # enums endpoint doesn't require auth
|
||||
)
|
||||
|
||||
if not success:
|
||||
logger.warning(f"Failed to fetch enums from Civitai: {result}")
|
||||
return None
|
||||
|
||||
if isinstance(result, str):
|
||||
data = json.loads(result)
|
||||
else:
|
||||
data = result
|
||||
|
||||
# Extract base models from response
|
||||
base_models = set()
|
||||
|
||||
# Use ActiveBaseModel if available (recommended active models)
|
||||
if "ActiveBaseModel" in data:
|
||||
base_models.update(data["ActiveBaseModel"])
|
||||
logger.info(f"Fetched {len(base_models)} models from ActiveBaseModel")
|
||||
# Fallback to full BaseModel list
|
||||
elif "BaseModel" in data:
|
||||
base_models.update(data["BaseModel"])
|
||||
logger.info(f"Fetched {len(base_models)} models from BaseModel")
|
||||
else:
|
||||
logger.warning("No base model data found in Civitai response")
|
||||
return None
|
||||
|
||||
return base_models
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching from Civitai: {e}")
|
||||
return None
|
||||
|
||||
def _update_cache(self, remote_models: Set[str]) -> None:
|
||||
"""Update internal cache with fetched models.
|
||||
|
||||
Args:
|
||||
remote_models: Set of base model names from API
|
||||
"""
|
||||
self._cache = {
|
||||
"remote_models": sorted(remote_models),
|
||||
"hardcoded_models": sorted(self._hardcoded_models),
|
||||
}
|
||||
self._cache_timestamp = datetime.now(timezone.utc)
|
||||
logger.info(f"Cache updated with {len(remote_models)} remote models")
|
||||
|
||||
def _is_cache_valid(self) -> bool:
|
||||
"""Check if current cache is valid (not expired).
|
||||
|
||||
Returns:
|
||||
True if cache exists and is not expired
|
||||
"""
|
||||
if self._cache is None or self._cache_timestamp is None:
|
||||
return False
|
||||
|
||||
age = (datetime.now(timezone.utc) - self._cache_timestamp).total_seconds()
|
||||
return age <= self._cache_ttl
|
||||
|
||||
def _build_response(self, source: str) -> Dict[str, Any]:
|
||||
"""Build response dictionary.
|
||||
|
||||
Args:
|
||||
source: 'cache', 'api', or 'fallback'
|
||||
|
||||
Returns:
|
||||
Response dictionary
|
||||
"""
|
||||
if source == "fallback" or self._cache is None:
|
||||
# Use only hardcoded models
|
||||
merged = sorted(self._hardcoded_models)
|
||||
return {
|
||||
"models": merged,
|
||||
"source": source,
|
||||
"last_updated": None,
|
||||
"hardcoded_count": len(self._hardcoded_models),
|
||||
"remote_count": 0,
|
||||
"merged_count": len(merged),
|
||||
}
|
||||
|
||||
# Merge hardcoded and remote models
|
||||
remote_set = set(self._cache.get("remote_models", []))
|
||||
merged = sorted(self._hardcoded_models | remote_set)
|
||||
|
||||
return {
|
||||
"models": merged,
|
||||
"source": source,
|
||||
"last_updated": self._cache_timestamp.isoformat()
|
||||
if self._cache_timestamp
|
||||
else None,
|
||||
"hardcoded_count": len(self._hardcoded_models),
|
||||
"remote_count": len(remote_set),
|
||||
"merged_count": len(merged),
|
||||
}
|
||||
|
||||
def get_model_categories(self) -> Dict[str, List[str]]:
|
||||
"""Get categorized base models.
|
||||
|
||||
Returns:
|
||||
Dictionary mapping category names to lists of model names
|
||||
"""
|
||||
# Define category patterns
|
||||
categories = {
|
||||
"Stable Diffusion 1.x": ["SD 1.4", "SD 1.5", "SD 1.5 LCM", "SD 1.5 Hyper"],
|
||||
"Stable Diffusion 2.x": ["SD 2.0", "SD 2.1"],
|
||||
"Stable Diffusion 3.x": [
|
||||
"SD 3",
|
||||
"SD 3.5",
|
||||
"SD 3.5 Medium",
|
||||
"SD 3.5 Large",
|
||||
"SD 3.5 Large Turbo",
|
||||
],
|
||||
"SDXL": ["SDXL 1.0", "SDXL Lightning", "SDXL Hyper"],
|
||||
"Flux Models": [
|
||||
"Flux.1 D",
|
||||
"Flux.1 S",
|
||||
"Flux.1 Krea",
|
||||
"Flux.1 Kontext",
|
||||
"Flux.2 D",
|
||||
"Flux.2 Klein 9B",
|
||||
"Flux.2 Klein 9B-base",
|
||||
"Flux.2 Klein 4B",
|
||||
"Flux.2 Klein 4B-base",
|
||||
],
|
||||
"Video Models": [
|
||||
"SVD",
|
||||
"LTXV",
|
||||
"LTXV2",
|
||||
"LTXV 2.3",
|
||||
"CogVideoX",
|
||||
"Mochi",
|
||||
"Hunyuan Video",
|
||||
"Wan Video",
|
||||
"Wan Video 1.3B t2v",
|
||||
"Wan Video 14B t2v",
|
||||
"Wan Video 14B i2v 480p",
|
||||
"Wan Video 14B i2v 720p",
|
||||
"Wan Video 2.2 TI2V-5B",
|
||||
"Wan Video 2.2 T2V-A14B",
|
||||
"Wan Video 2.2 I2V-A14B",
|
||||
"Wan Video 2.5 T2V",
|
||||
"Wan Video 2.5 I2V",
|
||||
],
|
||||
"Other Models": [
|
||||
"Illustrious",
|
||||
"Pony",
|
||||
"Pony V7",
|
||||
"HiDream",
|
||||
"Qwen",
|
||||
"AuraFlow",
|
||||
"Chroma",
|
||||
"ZImageTurbo",
|
||||
"ZImageBase",
|
||||
"PixArt a",
|
||||
"PixArt E",
|
||||
"Hunyuan 1",
|
||||
"Lumina",
|
||||
"Kolors",
|
||||
"NoobAI",
|
||||
"Anima",
|
||||
],
|
||||
}
|
||||
|
||||
return categories
|
||||
|
||||
|
||||
# Convenience function for getting the singleton instance
|
||||
async def get_civitai_base_model_service() -> CivitaiBaseModelService:
|
||||
"""Get the singleton instance of CivitaiBaseModelService."""
|
||||
return await CivitaiBaseModelService.get_instance()
|
||||
@@ -490,14 +490,33 @@ class CivitaiClient:
|
||||
"""
|
||||
try:
|
||||
url = f"{self.base_url}/images?imageId={image_id}&nsfw=X"
|
||||
requested_id = int(image_id)
|
||||
|
||||
logger.debug(f"Fetching image info for ID: {image_id}")
|
||||
success, result = await self._make_request("GET", url, use_auth=True)
|
||||
|
||||
if success:
|
||||
if result and "items" in result and len(result["items"]) > 0:
|
||||
if result and "items" in result and isinstance(result["items"], list):
|
||||
items = result["items"]
|
||||
|
||||
# First, try to find the item with matching ID
|
||||
for item in items:
|
||||
if isinstance(item, dict) and item.get("id") == requested_id:
|
||||
logger.debug(f"Successfully fetched image info for ID: {image_id}")
|
||||
return result["items"][0]
|
||||
return item
|
||||
|
||||
# No matching ID found - log warning with details about returned items
|
||||
returned_ids = [
|
||||
item.get("id") for item in items
|
||||
if isinstance(item, dict) and "id" in item
|
||||
]
|
||||
logger.warning(
|
||||
f"CivitAI API returned no matching image for requested ID {image_id}. "
|
||||
f"Returned {len(items)} item(s) with IDs: {returned_ids}. "
|
||||
f"This may indicate the image was deleted, hidden, or there is a database lag."
|
||||
)
|
||||
return None
|
||||
|
||||
logger.warning(f"No image found with ID: {image_id}")
|
||||
return None
|
||||
|
||||
@@ -505,6 +524,10 @@ class CivitaiClient:
|
||||
return None
|
||||
except RateLimitError:
|
||||
raise
|
||||
except ValueError as e:
|
||||
error_msg = f"Invalid image ID format: {image_id}"
|
||||
logger.error(error_msg)
|
||||
return None
|
||||
except Exception as e:
|
||||
error_msg = f"Error fetching image info: {e}"
|
||||
logger.error(error_msg)
|
||||
|
||||
@@ -7,11 +7,13 @@ with category filtering and enriched results including post counts.
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import re
|
||||
from typing import List, Dict, Any, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
_EMBEDDED_COMMAND_PATTERN = re.compile(r"\s/\w")
|
||||
class CustomWordsService:
|
||||
"""Service for autocomplete via TagFTSIndex.
|
||||
|
||||
@@ -77,12 +79,28 @@ class CustomWordsService:
|
||||
Returns:
|
||||
List of dicts with tag_name, category, and post_count.
|
||||
"""
|
||||
normalized_search = search_term.strip()
|
||||
if not normalized_search:
|
||||
return []
|
||||
|
||||
# Prompt widgets should only send the active token, but guard against
|
||||
# accidental full-prompt queries reaching the FTS path.
|
||||
if (
|
||||
"__" in normalized_search
|
||||
or "," in normalized_search
|
||||
or ">" in normalized_search
|
||||
or "\n" in normalized_search
|
||||
or "\r" in normalized_search
|
||||
or _EMBEDDED_COMMAND_PATTERN.search(normalized_search)
|
||||
):
|
||||
logger.debug("Skipping prompt-like custom words query: %s", normalized_search)
|
||||
return []
|
||||
|
||||
tag_index = self._get_tag_index()
|
||||
if tag_index is not None:
|
||||
results = tag_index.search(
|
||||
search_term, categories=categories, limit=limit, offset=offset
|
||||
return tag_index.search(
|
||||
normalized_search, categories=categories, limit=limit, offset=offset
|
||||
)
|
||||
return results
|
||||
|
||||
logger.debug("TagFTSIndex not available, returning empty results")
|
||||
return []
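The guard added above only has to decide whether the incoming term still looks like a single autocomplete token. A minimal reproduction of that predicate, useful for reasoning about which queries get short-circuited (the example strings are hypothetical prompts, not taken from the repo):

import re

_EMBEDDED_COMMAND_PATTERN = re.compile(r"\s/\w")

def is_prompt_like(term: str) -> bool:
    """True when the query looks like a full prompt rather than one token."""
    return (
        "__" in term
        or "," in term
        or ">" in term
        or "\n" in term
        or "\r" in term
        or bool(_EMBEDDED_COMMAND_PATTERN.search(term))
    )

assert not is_prompt_like("masterp")                 # normal partial tag, still searched
assert is_prompt_like("1girl, masterpiece")          # comma-separated prompt, skipped
assert is_prompt_like("__character__")               # wildcard syntax, skipped
assert is_prompt_like("portrait /lora cute")         # embedded slash command, skipped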
|
||||
|
||||
@@ -13,13 +13,13 @@ from ..utils.models import LoraMetadata, CheckpointMetadata, EmbeddingMetadata
|
||||
from ..utils.constants import (
|
||||
CARD_PREVIEW_WIDTH,
|
||||
DIFFUSION_MODEL_BASE_MODELS,
|
||||
SUPPORTED_DOWNLOAD_SKIP_BASE_MODELS,
|
||||
VALID_LORA_TYPES,
|
||||
)
|
||||
from ..utils.civitai_utils import rewrite_preview_url
|
||||
from ..utils.preview_selection import select_preview_media
|
||||
from ..utils.preview_selection import resolve_mature_threshold, select_preview_media
|
||||
from ..utils.utils import sanitize_folder_name
|
||||
from ..utils.exif_utils import ExifUtils
|
||||
from ..utils.file_utils import calculate_sha256
|
||||
from ..utils.metadata_manager import MetadataManager
|
||||
from .service_registry import ServiceRegistry
|
||||
from .settings_manager import get_settings_manager
|
||||
@@ -64,6 +64,19 @@ class DownloadManager:
|
||||
"""Get the checkpoint scanner from registry"""
|
||||
return await ServiceRegistry.get_checkpoint_scanner()
|
||||
|
||||
async def _has_been_downloaded(self, model_type: str, model_version_id: int) -> bool:
|
||||
try:
|
||||
history_service = await ServiceRegistry.get_downloaded_version_history_service()
|
||||
return await history_service.has_been_downloaded(model_type, model_version_id)
|
||||
except Exception as exc:
|
||||
logger.debug(
|
||||
"Failed to read download history for %s version %s: %s",
|
||||
model_type,
|
||||
model_version_id,
|
||||
exc,
|
||||
)
|
||||
return False
|
||||
|
||||
async def download_from_civitai(
|
||||
self,
|
||||
model_id: int = None,
|
||||
@@ -229,7 +242,9 @@ class DownloadManager:
|
||||
# Update status based on result
|
||||
if task_id in self._active_downloads:
|
||||
self._active_downloads[task_id]["status"] = (
|
||||
"completed" if result["success"] else "failed"
|
||||
result.get("status", "completed")
|
||||
if result["success"]
|
||||
else "failed"
|
||||
)
|
||||
if not result["success"]:
|
||||
self._active_downloads[task_id]["error"] = result.get(
|
||||
@@ -353,10 +368,105 @@ class DownloadManager:
|
||||
"error": f'Model type "{model_type_from_info}" is not supported for download',
|
||||
}
|
||||
|
||||
resolved_version_id = model_version_id
|
||||
raw_version_id = version_info.get("id")
|
||||
if resolved_version_id is None and raw_version_id is not None:
|
||||
try:
|
||||
resolved_version_id = int(raw_version_id)
|
||||
except (TypeError, ValueError):
|
||||
resolved_version_id = None
|
||||
|
||||
if (
|
||||
get_settings_manager().get_skip_previously_downloaded_model_versions()
|
||||
and resolved_version_id is not None
|
||||
and await self._has_been_downloaded(model_type, resolved_version_id)
|
||||
):
|
||||
file_name = ""
|
||||
files = version_info.get("files")
|
||||
if isinstance(files, list):
|
||||
primary_file = next(
|
||||
(
|
||||
file_info
|
||||
for file_info in files
|
||||
if isinstance(file_info, dict) and file_info.get("primary")
|
||||
),
|
||||
None,
|
||||
)
|
||||
selected_file = primary_file
|
||||
if selected_file is None:
|
||||
selected_file = next(
|
||||
(file_info for file_info in files if isinstance(file_info, dict)),
|
||||
None,
|
||||
)
|
||||
if isinstance(selected_file, dict):
|
||||
raw_file_name = selected_file.get("name", "")
|
||||
if isinstance(raw_file_name, str):
|
||||
file_name = raw_file_name.strip()
|
||||
|
||||
message = (
|
||||
f"Skipped download for '{file_name or version_info.get('name') or f'model_version:{resolved_version_id}'}' "
|
||||
f"because version {resolved_version_id} was already downloaded before"
|
||||
)
|
||||
logger.info(message)
|
||||
return {
|
||||
"success": True,
|
||||
"skipped": True,
|
||||
"status": "skipped",
|
||||
"reason": "previously_downloaded_version",
|
||||
"message": message,
|
||||
"model_version_id": resolved_version_id,
|
||||
"file_name": file_name,
|
||||
"download_id": download_id,
|
||||
}
|
||||
|
||||
excluded_base_models = get_settings_manager().get_download_skip_base_models()
|
||||
base_model_value = version_info.get("baseModel", "")
|
||||
if (
|
||||
isinstance(base_model_value, str)
|
||||
and base_model_value in SUPPORTED_DOWNLOAD_SKIP_BASE_MODELS
|
||||
and base_model_value in excluded_base_models
|
||||
):
|
||||
file_name = ""
|
||||
files = version_info.get("files")
|
||||
if isinstance(files, list):
|
||||
primary_file = next(
|
||||
(
|
||||
file_info
|
||||
for file_info in files
|
||||
if isinstance(file_info, dict) and file_info.get("primary")
|
||||
),
|
||||
None,
|
||||
)
|
||||
selected_file = primary_file
|
||||
if selected_file is None:
|
||||
selected_file = next(
|
||||
(file_info for file_info in files if isinstance(file_info, dict)),
|
||||
None,
|
||||
)
|
||||
if isinstance(selected_file, dict):
|
||||
raw_file_name = selected_file.get("name", "")
|
||||
if isinstance(raw_file_name, str):
|
||||
file_name = raw_file_name.strip()
|
||||
|
||||
message = (
|
||||
f"Skipped download for '{file_name or version_info.get('name') or f'model_version:{model_version_id or model_id}'}' "
|
||||
f"because base model '{base_model_value}' is excluded in settings"
|
||||
)
|
||||
logger.info(message)
|
||||
return {
|
||||
"success": True,
|
||||
"skipped": True,
|
||||
"status": "skipped",
|
||||
"reason": "base_model_excluded",
|
||||
"message": message,
|
||||
"base_model": base_model_value,
|
||||
"file_name": file_name,
|
||||
"download_id": download_id,
|
||||
}
|
||||
|
||||
# Check if this checkpoint should be treated as a diffusion model based on baseModel
|
||||
is_diffusion_model = False
|
||||
if model_type == "checkpoint":
|
||||
base_model_value = version_info.get("baseModel", "")
|
||||
if base_model_value in DIFFUSION_MODEL_BASE_MODELS:
|
||||
is_diffusion_model = True
|
||||
logger.info(
|
||||
@@ -594,6 +704,13 @@ class DownloadManager:
|
||||
or version_info.get("modelId")
|
||||
or (version_info.get("model") or {}).get("id")
|
||||
)
|
||||
await self._record_downloaded_version_history(
|
||||
model_type,
|
||||
resolved_model_id,
|
||||
version_info,
|
||||
model_version_id,
|
||||
save_path,
|
||||
)
|
||||
await self._sync_downloaded_version(
|
||||
model_type,
|
||||
resolved_model_id,
|
||||
@@ -623,6 +740,55 @@ class DownloadManager:
|
||||
}
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
async def _record_downloaded_version_history(
|
||||
self,
|
||||
model_type: str,
|
||||
model_id_value,
|
||||
version_info: Dict,
|
||||
fallback_version_id=None,
|
||||
file_path: str | None = None,
|
||||
) -> None:
|
||||
try:
|
||||
history_service = await ServiceRegistry.get_downloaded_version_history_service()
|
||||
except Exception as exc:
|
||||
logger.debug(
|
||||
"Skipping download history sync; failed to acquire history service: %s",
|
||||
exc,
|
||||
)
|
||||
return
|
||||
|
||||
if history_service is None:
|
||||
return
|
||||
|
||||
resolved_model_id = model_id_value
|
||||
if resolved_model_id is None:
|
||||
resolved_model_id = version_info.get("modelId")
|
||||
if resolved_model_id is None:
|
||||
model_info = version_info.get("model")
|
||||
if isinstance(model_info, dict):
|
||||
resolved_model_id = model_info.get("id")
|
||||
|
||||
version_id = version_info.get("id")
|
||||
if version_id is None:
|
||||
version_id = fallback_version_id
|
||||
|
||||
try:
|
||||
await history_service.mark_downloaded(
|
||||
model_type,
|
||||
int(version_id),
|
||||
model_id=int(resolved_model_id) if resolved_model_id is not None else None,
|
||||
source="download",
|
||||
file_path=file_path,
|
||||
)
|
||||
except (TypeError, ValueError):
|
||||
logger.debug(
|
||||
"Skipping download history sync; invalid identifiers model=%s version=%s",
|
||||
resolved_model_id,
|
||||
version_id,
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.debug("Failed to sync download history for %s: %s", model_type, exc)
|
||||
|
||||
async def _sync_downloaded_version(
|
||||
self,
|
||||
model_type: str,
|
||||
@@ -847,9 +1013,13 @@ class DownloadManager:
|
||||
blur_mature_content = bool(
|
||||
settings_manager.get("blur_mature_content", True)
|
||||
)
|
||||
mature_threshold = resolve_mature_threshold(
|
||||
{"mature_blur_level": settings_manager.get("mature_blur_level", "R")}
|
||||
)
|
||||
selected_image, nsfw_level = select_preview_media(
|
||||
images,
|
||||
blur_mature_content=blur_mature_content,
|
||||
mature_threshold=mature_threshold,
|
||||
)
|
||||
|
||||
preview_url = selected_image.get("url") if selected_image else None
|
||||
@@ -965,11 +1135,12 @@ class DownloadManager:
|
||||
for download_url in download_urls:
|
||||
use_auth = download_url.startswith("https://civitai.com/api/download/")
|
||||
download_kwargs = {
|
||||
"progress_callback": lambda progress,
|
||||
snapshot=None: self._handle_download_progress(
|
||||
"progress_callback": lambda progress, snapshot=None: (
|
||||
self._handle_download_progress(
|
||||
progress,
|
||||
progress_callback,
|
||||
snapshot,
|
||||
)
|
||||
),
|
||||
"use_auth": use_auth, # Only use authentication for Civitai downloads
|
||||
}
|
||||
@@ -1238,7 +1409,8 @@ class DownloadManager:
|
||||
entry.file_name = os.path.splitext(os.path.basename(file_path))[0]
|
||||
# Update size to actual downloaded file size
|
||||
entry.size = os.path.getsize(file_path)
|
||||
entry.sha256 = await calculate_sha256(file_path)
|
||||
# Use SHA256 from API metadata (already set in from_civitai_info)
|
||||
# Do not recalculate to avoid blocking during ComfyUI execution
|
||||
entries.append(entry)
|
||||
|
||||
return entries
|
||||
|
||||
313
py/services/downloaded_version_history_service.py
Normal file
313
py/services/downloaded_version_history_service.py
Normal file
@@ -0,0 +1,313 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
import sqlite3
|
||||
import time
|
||||
from typing import Iterable, Mapping, Optional, Sequence
|
||||
|
||||
from ..utils.cache_paths import get_cache_base_dir
|
||||
from .settings_manager import get_settings_manager
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _normalize_model_type(model_type: str | None) -> Optional[str]:
|
||||
if not isinstance(model_type, str):
|
||||
return None
|
||||
normalized = model_type.strip().lower()
|
||||
if normalized in {"lora", "locon", "dora"}:
|
||||
return "lora"
|
||||
if normalized == "checkpoint":
|
||||
return "checkpoint"
|
||||
if normalized in {"embedding", "textualinversion"}:
|
||||
return "embedding"
|
||||
return None
|
||||
|
||||
|
||||
def _normalize_int(value) -> Optional[int]:
|
||||
try:
|
||||
if value is None:
|
||||
return None
|
||||
return int(value)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
|
||||
|
||||
def _resolve_database_path() -> str:
|
||||
base_dir = get_cache_base_dir(create=True)
|
||||
history_dir = os.path.join(base_dir, "download_history")
|
||||
os.makedirs(history_dir, exist_ok=True)
|
||||
return os.path.join(history_dir, "downloaded_versions.sqlite")
|
||||
|
||||
|
||||
class DownloadedVersionHistoryService:
|
||||
_SCHEMA = """
|
||||
CREATE TABLE IF NOT EXISTS downloaded_model_versions (
|
||||
model_type TEXT NOT NULL,
|
||||
version_id INTEGER NOT NULL,
|
||||
model_id INTEGER,
|
||||
first_seen_at REAL NOT NULL,
|
||||
last_seen_at REAL NOT NULL,
|
||||
source TEXT NOT NULL,
|
||||
last_file_path TEXT,
|
||||
last_library_name TEXT,
|
||||
is_deleted_override INTEGER NOT NULL DEFAULT 0,
|
||||
PRIMARY KEY (model_type, version_id)
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_downloaded_model_versions_model
|
||||
ON downloaded_model_versions(model_type, model_id);
|
||||
"""
|
||||
|
||||
def __init__(self, db_path: str | None = None, *, settings_manager=None) -> None:
|
||||
self._db_path = db_path or _resolve_database_path()
|
||||
self._settings = settings_manager or get_settings_manager()
|
||||
self._lock = asyncio.Lock()
|
||||
self._schema_initialized = False
|
||||
self._ensure_directory()
|
||||
self._initialize_schema()
|
||||
|
||||
def _ensure_directory(self) -> None:
|
||||
directory = os.path.dirname(self._db_path)
|
||||
if directory:
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
|
||||
def _connect(self) -> sqlite3.Connection:
|
||||
conn = sqlite3.connect(self._db_path, check_same_thread=False)
|
||||
conn.row_factory = sqlite3.Row
|
||||
return conn
|
||||
|
||||
def _initialize_schema(self) -> None:
|
||||
if self._schema_initialized:
|
||||
return
|
||||
with self._connect() as conn:
|
||||
conn.executescript(self._SCHEMA)
|
||||
conn.commit()
|
||||
self._schema_initialized = True
|
||||
|
||||
def get_database_path(self) -> str:
|
||||
return self._db_path
|
||||
|
||||
def _get_active_library_name(self) -> str | None:
|
||||
try:
|
||||
value = self._settings.get_active_library_name()
|
||||
except Exception:
|
||||
return None
|
||||
return value or None
|
||||
|
||||
async def mark_downloaded(
|
||||
self,
|
||||
model_type: str,
|
||||
version_id: int,
|
||||
*,
|
||||
model_id: int | None = None,
|
||||
source: str = "manual",
|
||||
file_path: str | None = None,
|
||||
library_name: str | None = None,
|
||||
) -> None:
|
||||
normalized_type = _normalize_model_type(model_type)
|
||||
normalized_version_id = _normalize_int(version_id)
|
||||
normalized_model_id = _normalize_int(model_id)
|
||||
if normalized_type is None or normalized_version_id is None:
|
||||
return
|
||||
|
||||
active_library_name = library_name or self._get_active_library_name()
|
||||
timestamp = time.time()
|
||||
|
||||
async with self._lock:
|
||||
with self._connect() as conn:
|
||||
conn.execute(
|
||||
"""
|
||||
INSERT INTO downloaded_model_versions (
|
||||
model_type, version_id, model_id, first_seen_at, last_seen_at,
|
||||
source, last_file_path, last_library_name, is_deleted_override
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, 0)
|
||||
ON CONFLICT(model_type, version_id) DO UPDATE SET
|
||||
model_id = COALESCE(excluded.model_id, downloaded_model_versions.model_id),
|
||||
last_seen_at = excluded.last_seen_at,
|
||||
source = excluded.source,
|
||||
last_file_path = COALESCE(excluded.last_file_path, downloaded_model_versions.last_file_path),
|
||||
last_library_name = COALESCE(excluded.last_library_name, downloaded_model_versions.last_library_name),
|
||||
is_deleted_override = 0
|
||||
""",
|
||||
(
|
||||
normalized_type,
|
||||
normalized_version_id,
|
||||
normalized_model_id,
|
||||
timestamp,
|
||||
timestamp,
|
||||
source,
|
||||
file_path,
|
||||
active_library_name,
|
||||
),
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
async def mark_downloaded_bulk(
|
||||
self,
|
||||
model_type: str,
|
||||
records: Sequence[Mapping[str, object]],
|
||||
*,
|
||||
source: str = "scan",
|
||||
library_name: str | None = None,
|
||||
) -> None:
|
||||
normalized_type = _normalize_model_type(model_type)
|
||||
if normalized_type is None or not records:
|
||||
return
|
||||
|
||||
timestamp = time.time()
|
||||
active_library_name = library_name or self._get_active_library_name()
|
||||
payload: list[tuple[object, ...]] = []
|
||||
for record in records:
|
||||
version_id = _normalize_int(record.get("version_id"))
|
||||
if version_id is None:
|
||||
continue
|
||||
payload.append(
|
||||
(
|
||||
normalized_type,
|
||||
version_id,
|
||||
_normalize_int(record.get("model_id")),
|
||||
timestamp,
|
||||
timestamp,
|
||||
source,
|
||||
record.get("file_path"),
|
||||
active_library_name,
|
||||
)
|
||||
)
|
||||
|
||||
if not payload:
|
||||
return
|
||||
|
||||
async with self._lock:
|
||||
with self._connect() as conn:
|
||||
conn.executemany(
|
||||
"""
|
||||
INSERT INTO downloaded_model_versions (
|
||||
model_type, version_id, model_id, first_seen_at, last_seen_at,
|
||||
source, last_file_path, last_library_name, is_deleted_override
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, 0)
|
||||
ON CONFLICT(model_type, version_id) DO UPDATE SET
|
||||
model_id = COALESCE(excluded.model_id, downloaded_model_versions.model_id),
|
||||
last_seen_at = excluded.last_seen_at,
|
||||
source = excluded.source,
|
||||
last_file_path = COALESCE(excluded.last_file_path, downloaded_model_versions.last_file_path),
|
||||
last_library_name = COALESCE(excluded.last_library_name, downloaded_model_versions.last_library_name),
|
||||
is_deleted_override = 0
|
||||
""",
|
||||
payload,
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
async def mark_not_downloaded(self, model_type: str, version_id: int) -> None:
|
||||
normalized_type = _normalize_model_type(model_type)
|
||||
normalized_version_id = _normalize_int(version_id)
|
||||
if normalized_type is None or normalized_version_id is None:
|
||||
return
|
||||
|
||||
timestamp = time.time()
|
||||
|
||||
async with self._lock:
|
||||
with self._connect() as conn:
|
||||
conn.execute(
|
||||
"""
|
||||
INSERT INTO downloaded_model_versions (
|
||||
model_type, version_id, model_id, first_seen_at, last_seen_at,
|
||||
source, last_file_path, last_library_name, is_deleted_override
|
||||
) VALUES (?, ?, NULL, ?, ?, 'manual', NULL, ?, 1)
|
||||
ON CONFLICT(model_type, version_id) DO UPDATE SET
|
||||
last_seen_at = excluded.last_seen_at,
|
||||
source = excluded.source,
|
||||
last_library_name = COALESCE(excluded.last_library_name, downloaded_model_versions.last_library_name),
|
||||
is_deleted_override = 1
|
||||
""",
|
||||
(
|
||||
normalized_type,
|
||||
normalized_version_id,
|
||||
timestamp,
|
||||
timestamp,
|
||||
self._get_active_library_name(),
|
||||
),
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
async def has_been_downloaded(self, model_type: str, version_id: int) -> bool:
|
||||
normalized_type = _normalize_model_type(model_type)
|
||||
normalized_version_id = _normalize_int(version_id)
|
||||
if normalized_type is None or normalized_version_id is None:
|
||||
return False
|
||||
|
||||
async with self._lock:
|
||||
with self._connect() as conn:
|
||||
row = conn.execute(
|
||||
"""
|
||||
SELECT is_deleted_override
|
||||
FROM downloaded_model_versions
|
||||
WHERE model_type = ? AND version_id = ?
|
||||
""",
|
||||
(normalized_type, normalized_version_id),
|
||||
).fetchone()
|
||||
return bool(row) and not bool(row["is_deleted_override"])
|
||||
|
||||
async def get_downloaded_version_ids(
|
||||
self, model_type: str, model_id: int
|
||||
) -> list[int]:
|
||||
normalized_type = _normalize_model_type(model_type)
|
||||
normalized_model_id = _normalize_int(model_id)
|
||||
if normalized_type is None or normalized_model_id is None:
|
||||
return []
|
||||
|
||||
async with self._lock:
|
||||
with self._connect() as conn:
|
||||
rows = conn.execute(
|
||||
"""
|
||||
SELECT version_id
|
||||
FROM downloaded_model_versions
|
||||
WHERE model_type = ? AND model_id = ? AND is_deleted_override = 0
|
||||
ORDER BY version_id ASC
|
||||
""",
|
||||
(normalized_type, normalized_model_id),
|
||||
).fetchall()
|
||||
return [int(row["version_id"]) for row in rows]
|
||||
|
||||
async def get_downloaded_version_ids_bulk(
|
||||
self, model_type: str, model_ids: Iterable[int]
|
||||
) -> dict[int, set[int]]:
|
||||
normalized_type = _normalize_model_type(model_type)
|
||||
if normalized_type is None:
|
||||
return {}
|
||||
|
||||
normalized_model_ids = sorted(
|
||||
{
|
||||
value
|
||||
for value in (_normalize_int(model_id) for model_id in model_ids)
|
||||
if value is not None
|
||||
}
|
||||
)
|
||||
if not normalized_model_ids:
|
||||
return {}
|
||||
|
||||
placeholders = ", ".join(["?"] * len(normalized_model_ids))
|
||||
params: list[object] = [normalized_type, *normalized_model_ids]
|
||||
|
||||
async with self._lock:
|
||||
with self._connect() as conn:
|
||||
rows = conn.execute(
|
||||
f"""
|
||||
SELECT model_id, version_id
|
||||
FROM downloaded_model_versions
|
||||
WHERE model_type = ?
|
||||
AND model_id IN ({placeholders})
|
||||
AND is_deleted_override = 0
|
||||
""",
|
||||
params,
|
||||
).fetchall()
|
||||
|
||||
result: dict[int, set[int]] = {}
|
||||
for row in rows:
|
||||
model_id = _normalize_int(row["model_id"])
|
||||
version_id = _normalize_int(row["version_id"])
|
||||
if model_id is None or version_id is None:
|
||||
continue
|
||||
result.setdefault(model_id, set()).add(version_id)
|
||||
return result
|
||||
@@ -44,7 +44,9 @@ class DownloadStreamControl:
|
||||
self._event.set()
|
||||
self._reconnect_requested = False
|
||||
self.last_progress_timestamp: Optional[float] = None
|
||||
self.stall_timeout: float = float(stall_timeout) if stall_timeout is not None else 120.0
|
||||
self.stall_timeout: float = (
|
||||
float(stall_timeout) if stall_timeout is not None else 120.0
|
||||
)
|
||||
|
||||
def is_set(self) -> bool:
|
||||
return self._event.is_set()
|
||||
@@ -85,7 +87,9 @@ class DownloadStreamControl:
|
||||
self.last_progress_timestamp = timestamp or datetime.now().timestamp()
|
||||
self._reconnect_requested = False
|
||||
|
||||
def time_since_last_progress(self, *, now: Optional[float] = None) -> Optional[float]:
|
||||
def time_since_last_progress(
|
||||
self, *, now: Optional[float] = None
|
||||
) -> Optional[float]:
|
||||
if self.last_progress_timestamp is None:
|
||||
return None
|
||||
reference = now if now is not None else datetime.now().timestamp()
|
||||
@@ -120,7 +124,7 @@ class Downloader:
|
||||
def __init__(self):
|
||||
"""Initialize the downloader with optimal settings"""
|
||||
# Check if already initialized for singleton pattern
|
||||
if hasattr(self, '_initialized'):
|
||||
if hasattr(self, "_initialized"):
|
||||
return
|
||||
self._initialized = True
|
||||
|
||||
@@ -131,7 +135,9 @@ class Downloader:
|
||||
self._session_lock = asyncio.Lock()
|
||||
|
||||
# Configuration
|
||||
self.chunk_size = 4 * 1024 * 1024 # 4MB chunks for better throughput
|
||||
self.chunk_size = (
|
||||
16 * 1024 * 1024
|
||||
) # 16MB chunks to balance I/O reduction and memory usage
|
||||
self.max_retries = 5
|
||||
self.base_delay = 2.0 # Base delay for exponential backoff
|
||||
self.session_timeout = 300 # 5 minutes
|
||||
@@ -139,10 +145,10 @@ class Downloader:
|
||||
|
||||
# Default headers
|
||||
self.default_headers = {
|
||||
'User-Agent': 'ComfyUI-LoRA-Manager/1.0',
|
||||
"User-Agent": "ComfyUI-LoRA-Manager/1.0",
|
||||
# Explicitly request uncompressed payloads so aiohttp doesn't need optional
|
||||
# decoders (e.g. zstandard) that may be missing in runtime environments.
|
||||
'Accept-Encoding': 'identity',
|
||||
"Accept-Encoding": "identity",
|
||||
}
|
||||
|
||||
@property
|
||||
@@ -158,7 +164,7 @@ class Downloader:
|
||||
@property
|
||||
def proxy_url(self) -> Optional[str]:
|
||||
"""Get the current proxy URL (initialize if needed)"""
|
||||
if not hasattr(self, '_proxy_url'):
|
||||
if not hasattr(self, "_proxy_url"):
|
||||
self._proxy_url = None
|
||||
return self._proxy_url
|
||||
|
||||
@@ -169,14 +175,14 @@ class Downloader:
|
||||
|
||||
try:
|
||||
settings_manager = get_settings_manager()
|
||||
settings_timeout = settings_manager.get('download_stall_timeout_seconds')
|
||||
settings_timeout = settings_manager.get("download_stall_timeout_seconds")
|
||||
except Exception as exc: # pragma: no cover - defensive guard
|
||||
logger.debug("Failed to read stall timeout from settings: %s", exc)
|
||||
|
||||
raw_value = (
|
||||
settings_timeout
|
||||
if settings_timeout not in (None, "")
|
||||
else os.environ.get('COMFYUI_DOWNLOAD_STALL_TIMEOUT')
|
||||
else os.environ.get("COMFYUI_DOWNLOAD_STALL_TIMEOUT")
|
||||
)
|
||||
|
||||
try:
|
||||
@@ -191,11 +197,13 @@ class Downloader:
|
||||
if self._session is None:
|
||||
return True
|
||||
|
||||
if not hasattr(self, '_session_created_at') or self._session_created_at is None:
|
||||
if not hasattr(self, "_session_created_at") or self._session_created_at is None:
|
||||
return True
|
||||
|
||||
# Refresh if session is older than timeout
|
||||
if (datetime.now() - self._session_created_at).total_seconds() > self.session_timeout:
|
||||
if (
|
||||
datetime.now() - self._session_created_at
|
||||
).total_seconds() > self.session_timeout:
|
||||
return True
|
||||
|
||||
return False
|
||||
@@ -217,12 +225,12 @@ class Downloader:
|
||||
# Check for app-level proxy settings
|
||||
proxy_url = None
|
||||
settings_manager = get_settings_manager()
|
||||
if settings_manager.get('proxy_enabled', False):
|
||||
proxy_host = settings_manager.get('proxy_host', '').strip()
|
||||
proxy_port = settings_manager.get('proxy_port', '').strip()
|
||||
proxy_type = settings_manager.get('proxy_type', 'http').lower()
|
||||
proxy_username = settings_manager.get('proxy_username', '').strip()
|
||||
proxy_password = settings_manager.get('proxy_password', '').strip()
|
||||
if settings_manager.get("proxy_enabled", False):
|
||||
proxy_host = settings_manager.get("proxy_host", "").strip()
|
||||
proxy_port = settings_manager.get("proxy_port", "").strip()
|
||||
proxy_type = settings_manager.get("proxy_type", "http").lower()
|
||||
proxy_username = settings_manager.get("proxy_username", "").strip()
|
||||
proxy_password = settings_manager.get("proxy_password", "").strip()
|
||||
|
||||
if proxy_host and proxy_port:
|
||||
# Build proxy URL
|
||||
@@ -231,37 +239,46 @@ class Downloader:
|
||||
else:
|
||||
proxy_url = f"{proxy_type}://{proxy_host}:{proxy_port}"
|
||||
|
||||
logger.debug(f"Using app-level proxy: {proxy_type}://{proxy_host}:{proxy_port}")
|
||||
logger.debug(
|
||||
f"Using app-level proxy: {proxy_type}://{proxy_host}:{proxy_port}"
|
||||
)
|
||||
logger.debug("Proxy mode: app-level proxy is active.")
|
||||
else:
|
||||
logger.debug("Proxy mode: system-level proxy (trust_env) will be used if configured in environment.")
|
||||
logger.debug(
|
||||
"Proxy mode: system-level proxy (trust_env) will be used if configured in environment."
|
||||
)
|
||||
# Optimize TCP connection parameters
|
||||
connector = aiohttp.TCPConnector(
|
||||
ssl=True,
|
||||
limit=8, # Concurrent connections
|
||||
ttl_dns_cache=300, # DNS cache timeout
|
||||
force_close=False, # Keep connections for reuse
|
||||
enable_cleanup_closed=True
|
||||
enable_cleanup_closed=True,
|
||||
)
|
||||
|
||||
# Configure timeout parameters
|
||||
timeout = aiohttp.ClientTimeout(
|
||||
total=None, # No total timeout for large downloads
|
||||
connect=60, # Connection timeout
|
||||
sock_read=300 # 5 minute socket read timeout
|
||||
sock_read=300, # 5 minute socket read timeout
|
||||
)
|
||||
|
||||
self._session = aiohttp.ClientSession(
|
||||
connector=connector,
|
||||
trust_env=proxy_url is None, # Only use system proxy if no app-level proxy is set
|
||||
timeout=timeout
|
||||
trust_env=proxy_url
|
||||
is None, # Only use system proxy if no app-level proxy is set
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
# Store proxy URL for use in requests
|
||||
self._proxy_url = proxy_url
|
||||
self._session_created_at = datetime.now()
|
||||
|
||||
logger.debug("Created new HTTP session with proxy settings. App-level proxy: %s, System-level proxy (trust_env): %s", bool(proxy_url), proxy_url is None)
|
||||
logger.debug(
|
||||
"Created new HTTP session with proxy settings. App-level proxy: %s, System-level proxy (trust_env): %s",
|
||||
bool(proxy_url),
|
||||
proxy_url is None,
|
||||
)
|
||||
|
||||
def _get_auth_headers(self, use_auth: bool = False) -> Dict[str, str]:
|
||||
"""Get headers with optional authentication"""
|
||||
@@ -270,10 +287,10 @@ class Downloader:
|
||||
if use_auth:
|
||||
# Add CivitAI API key if available
|
||||
settings_manager = get_settings_manager()
|
||||
api_key = settings_manager.get('civitai_api_key')
|
||||
api_key = settings_manager.get("civitai_api_key")
|
||||
if api_key:
|
||||
headers['Authorization'] = f'Bearer {api_key}'
|
||||
headers['Content-Type'] = 'application/json'
|
||||
headers["Authorization"] = f"Bearer {api_key}"
|
||||
headers["Content-Type"] = "application/json"
|
||||
|
||||
return headers
|
||||
|
||||
@@ -303,7 +320,7 @@ class Downloader:
|
||||
Tuple[bool, str]: (success, save_path or error message)
|
||||
"""
|
||||
retry_count = 0
|
||||
part_path = save_path + '.part' if allow_resume else save_path
|
||||
part_path = save_path + ".part" if allow_resume else save_path
|
||||
|
||||
# Prepare headers
|
||||
headers = self._get_auth_headers(use_auth)
|
||||
@@ -323,50 +340,71 @@ class Downloader:
|
||||
session = await self.session
|
||||
# Debug log for proxy mode at request time
|
||||
if self.proxy_url:
|
||||
logger.debug(f"[download_file] Using app-level proxy: {self.proxy_url}")
|
||||
logger.debug(
|
||||
f"[download_file] Using app-level proxy: {self.proxy_url}"
|
||||
)
|
||||
else:
|
||||
logger.debug("[download_file] Using system-level proxy (trust_env) if configured.")
|
||||
logger.debug(
|
||||
"[download_file] Using system-level proxy (trust_env) if configured."
|
||||
)
|
||||
|
||||
# Add Range header for resume if we have partial data
|
||||
request_headers = headers.copy()
|
||||
if allow_resume and resume_offset > 0:
|
||||
request_headers['Range'] = f'bytes={resume_offset}-'
|
||||
request_headers["Range"] = f"bytes={resume_offset}-"
|
||||
|
||||
# Disable compression for better chunked downloads
|
||||
request_headers['Accept-Encoding'] = 'identity'
|
||||
request_headers["Accept-Encoding"] = "identity"
|
||||
|
||||
logger.debug(f"Download attempt {retry_count + 1}/{self.max_retries + 1} from: {url}")
|
||||
logger.debug(
|
||||
f"Download attempt {retry_count + 1}/{self.max_retries + 1} from: {url}"
|
||||
)
|
||||
if resume_offset > 0:
|
||||
logger.debug(f"Requesting range from byte {resume_offset}")
|
||||
|
||||
async with session.get(url, headers=request_headers, allow_redirects=True, proxy=self.proxy_url) as response:
|
||||
async with session.get(
|
||||
url,
|
||||
headers=request_headers,
|
||||
allow_redirects=True,
|
||||
proxy=self.proxy_url,
|
||||
) as response:
|
||||
# Handle different response codes
|
||||
if response.status == 200:
|
||||
# Full content response
|
||||
if resume_offset > 0:
|
||||
# Server doesn't support ranges, restart from beginning
|
||||
logger.warning("Server doesn't support range requests, restarting download")
|
||||
logger.warning(
|
||||
"Server doesn't support range requests, restarting download"
|
||||
)
|
||||
resume_offset = 0
|
||||
if os.path.exists(part_path):
|
||||
os.remove(part_path)
|
||||
elif response.status == 206:
|
||||
# Partial content response (resume successful)
|
||||
content_range = response.headers.get('Content-Range')
|
||||
content_range = response.headers.get("Content-Range")
|
||||
if content_range:
|
||||
# Parse total size from Content-Range header (e.g., "bytes 1024-2047/2048")
|
||||
range_parts = content_range.split('/')
|
||||
range_parts = content_range.split("/")
|
||||
if len(range_parts) == 2:
|
||||
total_size = int(range_parts[1])
|
||||
logger.info(f"Successfully resumed download from byte {resume_offset}")
|
||||
logger.info(
|
||||
f"Successfully resumed download from byte {resume_offset}"
|
||||
)
|
||||
elif response.status == 416:
|
||||
# Range not satisfiable - file might be complete or corrupted
|
||||
if allow_resume and os.path.exists(part_path):
|
||||
part_size = os.path.getsize(part_path)
|
||||
logger.warning(f"Range not satisfiable. Part file size: {part_size}")
|
||||
logger.warning(
|
||||
f"Range not satisfiable. Part file size: {part_size}"
|
||||
)
|
||||
# Try to get actual file size
|
||||
head_response = await session.head(url, headers=headers, proxy=self.proxy_url)
|
||||
head_response = await session.head(
|
||||
url, headers=headers, proxy=self.proxy_url
|
||||
)
|
||||
if head_response.status == 200:
|
||||
actual_size = int(head_response.headers.get('content-length', 0))
|
||||
actual_size = int(
|
||||
head_response.headers.get("content-length", 0)
|
||||
)
|
||||
if part_size == actual_size:
|
||||
# File is complete, just rename it
|
||||
if allow_resume:
|
||||
@@ -388,21 +426,36 @@ class Downloader:
|
||||
resume_offset = 0
|
||||
continue
|
||||
elif response.status == 401:
|
||||
logger.warning(f"Unauthorized access to resource: {url} (Status 401)")
|
||||
return False, "Invalid or missing API key, or early access restriction."
|
||||
logger.warning(
|
||||
f"Unauthorized access to resource: {url} (Status 401)"
|
||||
)
|
||||
return (
|
||||
False,
|
||||
"Invalid or missing API key, or early access restriction.",
|
||||
)
|
||||
elif response.status == 403:
|
||||
logger.warning(f"Forbidden access to resource: {url} (Status 403)")
|
||||
return False, "Access forbidden: You don't have permission to download this file."
|
||||
logger.warning(
|
||||
f"Forbidden access to resource: {url} (Status 403)"
|
||||
)
|
||||
return (
|
||||
False,
|
||||
"Access forbidden: You don't have permission to download this file.",
|
||||
)
|
||||
elif response.status == 404:
|
||||
logger.warning(f"Resource not found: {url} (Status 404)")
|
||||
return False, "File not found - the download link may be invalid or expired."
|
||||
return (
|
||||
False,
|
||||
"File not found - the download link may be invalid or expired.",
|
||||
)
|
||||
else:
|
||||
logger.error(f"Download failed for {url} with status {response.status}")
|
||||
logger.error(
|
||||
f"Download failed for {url} with status {response.status}"
|
||||
)
|
||||
return False, f"Download failed with status {response.status}"
|
||||
|
||||
# Get total file size for progress calculation (if not set from Content-Range)
|
||||
if total_size == 0:
|
||||
total_size = int(response.headers.get('content-length', 0))
|
||||
total_size = int(response.headers.get("content-length", 0))
|
||||
if response.status == 206:
|
||||
# For partial content, add the offset to get total file size
|
||||
total_size += resume_offset
|
||||
@@ -417,7 +470,7 @@ class Downloader:
|
||||
|
||||
# Stream download to file with progress updates
|
||||
loop = asyncio.get_running_loop()
|
||||
mode = 'ab' if (allow_resume and resume_offset > 0) else 'wb'
|
||||
mode = "ab" if (allow_resume and resume_offset > 0) else "wb"
|
||||
control = pause_event
|
||||
|
||||
if control is not None:
|
||||
@@ -425,7 +478,9 @@ class Downloader:
|
||||
|
||||
with open(part_path, mode) as f:
|
||||
while True:
|
||||
active_stall_timeout = control.stall_timeout if control else self.stall_timeout
|
||||
active_stall_timeout = (
|
||||
control.stall_timeout if control else self.stall_timeout
|
||||
)
|
||||
|
||||
if control is not None:
|
||||
if control.is_paused():
|
||||
@@ -437,7 +492,9 @@ class Downloader:
|
||||
"Reconnect requested after resume"
|
||||
)
|
||||
elif control.consume_reconnect_request():
|
||||
raise DownloadRestartRequested("Reconnect requested")
|
||||
raise DownloadRestartRequested(
|
||||
"Reconnect requested"
|
||||
)
|
||||
|
||||
try:
|
||||
chunk = await asyncio.wait_for(
|
||||
@@ -466,22 +523,32 @@ class Downloader:
|
||||
control.mark_progress(timestamp=now.timestamp())
|
||||
|
||||
# Limit progress update frequency to reduce overhead
|
||||
time_diff = (now - last_progress_report_time).total_seconds()
|
||||
time_diff = (
|
||||
now - last_progress_report_time
|
||||
).total_seconds()
|
||||
|
||||
if progress_callback and time_diff >= 1.0:
|
||||
progress_samples.append((now, current_size))
|
||||
cutoff = now - timedelta(seconds=5)
|
||||
while progress_samples and progress_samples[0][0] < cutoff:
|
||||
while (
|
||||
progress_samples and progress_samples[0][0] < cutoff
|
||||
):
|
||||
progress_samples.popleft()
|
||||
|
||||
percent = (current_size / total_size) * 100 if total_size else 0.0
|
||||
percent = (
|
||||
(current_size / total_size) * 100
|
||||
if total_size
|
||||
else 0.0
|
||||
)
|
||||
bytes_per_second = 0.0
|
||||
if len(progress_samples) >= 2:
|
||||
first_time, first_bytes = progress_samples[0]
|
||||
last_time, last_bytes = progress_samples[-1]
|
||||
elapsed = (last_time - first_time).total_seconds()
|
||||
if elapsed > 0:
|
||||
bytes_per_second = (last_bytes - first_bytes) / elapsed
|
||||
bytes_per_second = (
|
||||
last_bytes - first_bytes
|
||||
) / elapsed
|
||||
|
||||
progress_snapshot = DownloadProgress(
|
||||
percent_complete=percent,
|
||||
@@ -491,21 +558,23 @@ class Downloader:
|
||||
timestamp=now.timestamp(),
|
||||
)
|
||||
|
||||
await self._dispatch_progress_callback(progress_callback, progress_snapshot)
|
||||
await self._dispatch_progress_callback(
|
||||
progress_callback, progress_snapshot
|
||||
)
|
||||
last_progress_report_time = now
|
||||
|
||||
# Download completed successfully
|
||||
# Verify file size integrity before finalizing
|
||||
final_size = os.path.getsize(part_path) if os.path.exists(part_path) else 0
|
||||
final_size = (
|
||||
os.path.getsize(part_path) if os.path.exists(part_path) else 0
|
||||
)
|
||||
expected_size = total_size if total_size > 0 else None
|
||||
|
||||
integrity_error: Optional[str] = None
|
||||
if final_size <= 0:
|
||||
integrity_error = "Downloaded file is empty"
|
||||
elif expected_size is not None and final_size != expected_size:
|
||||
integrity_error = (
|
||||
f"File size mismatch. Expected: {expected_size}, Got: {final_size}"
|
||||
)
|
||||
integrity_error = f"File size mismatch. Expected: {expected_size}, Got: {final_size}"
|
||||
|
||||
if integrity_error is not None:
|
||||
logger.error(
|
||||
@@ -555,7 +624,9 @@ class Downloader:
|
||||
rename_attempt = 0
|
||||
rename_success = False
|
||||
|
||||
while rename_attempt < max_rename_attempts and not rename_success:
|
||||
while (
|
||||
rename_attempt < max_rename_attempts and not rename_success
|
||||
):
|
||||
try:
|
||||
# If the destination file exists, remove it first (Windows safe)
|
||||
if os.path.exists(save_path):
|
||||
@@ -566,11 +637,18 @@ class Downloader:
|
||||
except PermissionError as e:
|
||||
rename_attempt += 1
|
||||
if rename_attempt < max_rename_attempts:
|
||||
logger.info(f"File still in use, retrying rename in 2 seconds (attempt {rename_attempt}/{max_rename_attempts})")
|
||||
logger.info(
|
||||
f"File still in use, retrying rename in 2 seconds (attempt {rename_attempt}/{max_rename_attempts})"
|
||||
)
|
||||
await asyncio.sleep(2)
|
||||
else:
|
||||
logger.error(f"Failed to rename file after {max_rename_attempts} attempts: {e}")
|
||||
return False, f"Failed to finalize download: {str(e)}"
|
||||
logger.error(
|
||||
f"Failed to rename file after {max_rename_attempts} attempts: {e}"
|
||||
)
|
||||
return (
|
||||
False,
|
||||
f"Failed to finalize download: {str(e)}",
|
||||
)
|
||||
|
||||
final_size = os.path.getsize(save_path)
|
||||
|
||||
@@ -583,8 +661,9 @@ class Downloader:
|
||||
bytes_per_second=0.0,
|
||||
timestamp=datetime.now().timestamp(),
|
||||
)
|
||||
await self._dispatch_progress_callback(progress_callback, final_snapshot)
|
||||
|
||||
await self._dispatch_progress_callback(
|
||||
progress_callback, final_snapshot
|
||||
)
|
||||
|
||||
return True, save_path
|
||||
|
||||
@@ -597,7 +676,9 @@ class Downloader:
|
||||
DownloadRestartRequested,
|
||||
) as e:
|
||||
retry_count += 1
|
||||
logger.warning(f"Network error during download (attempt {retry_count}/{self.max_retries + 1}): {e}")
|
||||
logger.warning(
|
||||
f"Network error during download (attempt {retry_count}/{self.max_retries + 1}): {e}"
|
||||
)
|
||||
|
||||
if retry_count <= self.max_retries:
|
||||
# Calculate delay with exponential backoff
|
||||
@@ -615,7 +696,10 @@ class Downloader:
|
||||
continue
|
||||
else:
|
||||
logger.error(f"Max retries exceeded for download: {e}")
|
||||
return False, f"Network error after {self.max_retries + 1} attempts: {str(e)}"
|
||||
return (
|
||||
False,
|
||||
f"Network error after {self.max_retries + 1} attempts: {str(e)}",
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected download error: {e}")
|
||||
@@ -645,7 +729,7 @@ class Downloader:
|
||||
url: str,
|
||||
use_auth: bool = False,
|
||||
custom_headers: Optional[Dict[str, str]] = None,
|
||||
return_headers: bool = False
|
||||
return_headers: bool = False,
|
||||
) -> Tuple[bool, Union[bytes, str], Optional[Dict]]:
|
||||
"""
|
||||
Download a file to memory (for small files like preview images)
|
||||
@@ -663,16 +747,22 @@ class Downloader:
|
||||
session = await self.session
|
||||
# Debug log for proxy mode at request time
|
||||
if self.proxy_url:
|
||||
logger.debug(f"[download_to_memory] Using app-level proxy: {self.proxy_url}")
|
||||
logger.debug(
|
||||
f"[download_to_memory] Using app-level proxy: {self.proxy_url}"
|
||||
)
|
||||
else:
|
||||
logger.debug("[download_to_memory] Using system-level proxy (trust_env) if configured.")
|
||||
logger.debug(
|
||||
"[download_to_memory] Using system-level proxy (trust_env) if configured."
|
||||
)
|
||||
|
||||
# Prepare headers
|
||||
headers = self._get_auth_headers(use_auth)
|
||||
if custom_headers:
|
||||
headers.update(custom_headers)
|
||||
|
||||
async with session.get(url, headers=headers, proxy=self.proxy_url) as response:
|
||||
async with session.get(
|
||||
url, headers=headers, proxy=self.proxy_url
|
||||
) as response:
|
||||
if response.status == 200:
|
||||
content = await response.read()
|
||||
if return_headers:
|
||||
@@ -700,7 +790,7 @@ class Downloader:
|
||||
self,
|
||||
url: str,
|
||||
use_auth: bool = False,
|
||||
custom_headers: Optional[Dict[str, str]] = None
|
||||
custom_headers: Optional[Dict[str, str]] = None,
|
||||
) -> Tuple[bool, Union[Dict, str]]:
|
||||
"""
|
||||
Get response headers without downloading the full content
|
||||
@@ -717,16 +807,22 @@ class Downloader:
|
||||
session = await self.session
|
||||
# Debug log for proxy mode at request time
|
||||
if self.proxy_url:
|
||||
logger.debug(f"[get_response_headers] Using app-level proxy: {self.proxy_url}")
|
||||
logger.debug(
|
||||
f"[get_response_headers] Using app-level proxy: {self.proxy_url}"
|
||||
)
|
||||
else:
|
||||
logger.debug("[get_response_headers] Using system-level proxy (trust_env) if configured.")
|
||||
logger.debug(
|
||||
"[get_response_headers] Using system-level proxy (trust_env) if configured."
|
||||
)
|
||||
|
||||
# Prepare headers
|
||||
headers = self._get_auth_headers(use_auth)
|
||||
if custom_headers:
|
||||
headers.update(custom_headers)
|
||||
|
||||
async with session.head(url, headers=headers, proxy=self.proxy_url) as response:
|
||||
async with session.head(
|
||||
url, headers=headers, proxy=self.proxy_url
|
||||
) as response:
|
||||
if response.status == 200:
|
||||
return True, dict(response.headers)
|
||||
else:
|
||||
@@ -742,7 +838,7 @@ class Downloader:
|
||||
url: str,
|
||||
use_auth: bool = False,
|
||||
custom_headers: Optional[Dict[str, str]] = None,
|
||||
**kwargs
|
||||
**kwargs,
|
||||
) -> Tuple[bool, Union[Dict, str]]:
|
||||
"""
|
||||
Make a generic HTTP request and return JSON response
|
||||
@@ -763,7 +859,9 @@ class Downloader:
|
||||
if self.proxy_url:
|
||||
logger.debug(f"[make_request] Using app-level proxy: {self.proxy_url}")
|
||||
else:
|
||||
logger.debug("[make_request] Using system-level proxy (trust_env) if configured.")
|
||||
logger.debug(
|
||||
"[make_request] Using system-level proxy (trust_env) if configured."
|
||||
)
|
||||
|
||||
# Prepare headers
|
||||
headers = self._get_auth_headers(use_auth)
|
||||
@@ -771,10 +869,12 @@ class Downloader:
|
||||
headers.update(custom_headers)
|
||||
|
||||
# Add proxy to kwargs if not already present
|
||||
if 'proxy' not in kwargs:
|
||||
kwargs['proxy'] = self.proxy_url
|
||||
if "proxy" not in kwargs:
|
||||
kwargs["proxy"] = self.proxy_url
|
||||
|
||||
async with session.request(method, url, headers=headers, **kwargs) as response:
|
||||
async with session.request(
|
||||
method, url, headers=headers, **kwargs
|
||||
) as response:
|
||||
if response.status == 200:
|
||||
# Try to parse as JSON, fall back to text
|
||||
try:
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import os
|
||||
import logging
|
||||
import json
|
||||
import os
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
from .base_model_service import BaseModelService
|
||||
@@ -48,7 +49,9 @@ class LoraService(BaseModelService):
|
||||
"notes": lora_data.get("notes", ""),
|
||||
"favorite": lora_data.get("favorite", False),
|
||||
"update_available": bool(lora_data.get("update_available", False)),
|
||||
"skip_metadata_refresh": bool(lora_data.get("skip_metadata_refresh", False)),
|
||||
"skip_metadata_refresh": bool(
|
||||
lora_data.get("skip_metadata_refresh", False)
|
||||
),
|
||||
"sub_type": sub_type,
|
||||
"civitai": self.filter_civitai_data(
|
||||
lora_data.get("civitai", {}), minimal=True
|
||||
@@ -62,6 +65,68 @@ class LoraService(BaseModelService):
|
||||
if first_letter:
|
||||
data = self._filter_by_first_letter(data, first_letter)
|
||||
|
||||
# Handle name pattern filters
|
||||
name_pattern_include = kwargs.get("name_pattern_include", [])
|
||||
name_pattern_exclude = kwargs.get("name_pattern_exclude", [])
|
||||
name_pattern_use_regex = kwargs.get("name_pattern_use_regex", False)
|
||||
|
||||
if name_pattern_include or name_pattern_exclude:
|
||||
import re
|
||||
|
||||
def matches_pattern(name, pattern, use_regex):
|
||||
"""Check if name matches pattern (regex or substring)"""
|
||||
if not name:
|
||||
return False
|
||||
if use_regex:
|
||||
try:
|
||||
return bool(re.search(pattern, name, re.IGNORECASE))
|
||||
except re.error:
|
||||
# Invalid regex, fall back to substring match
|
||||
return pattern.lower() in name.lower()
|
||||
else:
|
||||
return pattern.lower() in name.lower()
|
||||
|
||||
def matches_any_pattern(name, patterns, use_regex):
|
||||
"""Check if name matches any of the patterns"""
|
||||
if not patterns:
|
||||
return True
|
||||
return any(matches_pattern(name, p, use_regex) for p in patterns)
|
||||
|
||||
filtered = []
|
||||
for lora in data:
|
||||
model_name = lora.get("model_name", "")
|
||||
file_name = lora.get("file_name", "")
|
||||
names_to_check = [n for n in [model_name, file_name] if n]
|
||||
|
||||
# Check exclude patterns first
|
||||
excluded = False
|
||||
if name_pattern_exclude:
|
||||
for name in names_to_check:
|
||||
if matches_any_pattern(
|
||||
name, name_pattern_exclude, name_pattern_use_regex
|
||||
):
|
||||
excluded = True
|
||||
break
|
||||
|
||||
if excluded:
|
||||
continue
|
||||
|
||||
# Check include patterns
|
||||
if name_pattern_include:
|
||||
included = False
|
||||
for name in names_to_check:
|
||||
if matches_any_pattern(
|
||||
name, name_pattern_include, name_pattern_use_regex
|
||||
):
|
||||
included = True
|
||||
break
|
||||
if not included:
|
||||
continue
|
||||
|
||||
filtered.append(lora)
|
||||
|
||||
data = filtered
|
||||
|
||||
return data
|
||||
|
||||
def _filter_by_first_letter(self, data: List[Dict], letter: str) -> List[Dict]:
|
||||
@@ -214,6 +279,42 @@ class LoraService(BaseModelService):
|
||||
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def get_recommended_strength_from_lora_data(lora_data: Dict) -> Optional[float]:
|
||||
"""Parse usage_tips JSON and extract recommended model strength."""
|
||||
try:
|
||||
usage_tips = lora_data.get("usage_tips", "")
|
||||
if not usage_tips:
|
||||
return None
|
||||
tips_data = json.loads(usage_tips)
|
||||
return tips_data.get("strength")
|
||||
except (json.JSONDecodeError, TypeError, AttributeError):
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def get_recommended_clip_strength_from_lora_data(
|
||||
lora_data: Dict,
|
||||
) -> Optional[float]:
|
||||
"""Parse usage_tips JSON and extract recommended clip strength."""
|
||||
try:
|
||||
usage_tips = lora_data.get("usage_tips", "")
|
||||
if not usage_tips:
|
||||
return None
|
||||
tips_data = json.loads(usage_tips)
|
||||
return tips_data.get("clipStrength")
|
||||
except (json.JSONDecodeError, TypeError, AttributeError):
|
||||
return None
|
||||
|
||||
async def get_lora_metadata_by_filename(self, filename: str) -> Optional[Dict]:
|
||||
"""Return cached raw metadata for a LoRA matching the given filename."""
|
||||
cache = await self.scanner.get_cached_data(force_refresh=False)
|
||||
|
||||
for lora in cache.raw_data if cache else []:
|
||||
if lora.get("file_name") == filename:
|
||||
return lora
|
||||
|
||||
return None
|
||||
|
||||
def find_duplicate_hashes(self) -> Dict:
|
||||
"""Find LoRAs with duplicate SHA256 hashes"""
|
||||
return self.scanner._hash_index.get_duplicate_hashes()
|
||||
@@ -264,34 +365,10 @@ class LoraService(BaseModelService):
|
||||
List of LoRA dicts with randomized strengths
|
||||
"""
|
||||
import random
|
||||
import json
|
||||
|
||||
# Use a local Random instance to avoid affecting global random state
|
||||
# This ensures each execution with a different seed produces different results
|
||||
rng = random.Random(seed)
|
||||
|
||||
def get_recommended_strength(lora_data: Dict) -> Optional[float]:
|
||||
"""Parse usage_tips JSON and extract recommended strength"""
|
||||
try:
|
||||
usage_tips = lora_data.get("usage_tips", "")
|
||||
if not usage_tips:
|
||||
return None
|
||||
tips_data = json.loads(usage_tips)
|
||||
return tips_data.get("strength")
|
||||
except (json.JSONDecodeError, TypeError, AttributeError):
|
||||
return None
|
||||
|
||||
def get_recommended_clip_strength(lora_data: Dict) -> Optional[float]:
|
||||
"""Parse usage_tips JSON and extract recommended clip strength"""
|
||||
try:
|
||||
usage_tips = lora_data.get("usage_tips", "")
|
||||
if not usage_tips:
|
||||
return None
|
||||
tips_data = json.loads(usage_tips)
|
||||
return tips_data.get("clipStrength")
|
||||
except (json.JSONDecodeError, TypeError, AttributeError):
|
||||
return None
|
||||
|
||||
if locked_loras is None:
|
||||
locked_loras = []
|
||||
|
||||
@@ -339,7 +416,9 @@ class LoraService(BaseModelService):
|
||||
result_loras = []
|
||||
for lora in selected:
|
||||
if use_recommended_strength:
|
||||
recommended_strength = get_recommended_strength(lora)
|
||||
recommended_strength = self.get_recommended_strength_from_lora_data(
|
||||
lora
|
||||
)
|
||||
if recommended_strength is not None:
|
||||
scale = rng.uniform(
|
||||
recommended_strength_scale_min, recommended_strength_scale_max
|
||||
@@ -357,7 +436,9 @@ class LoraService(BaseModelService):
|
||||
if use_same_clip_strength:
|
||||
clip_str = model_str
|
||||
elif use_recommended_strength:
|
||||
recommended_clip_strength = get_recommended_clip_strength(lora)
|
||||
recommended_clip_strength = (
|
||||
self.get_recommended_clip_strength_from_lora_data(lora)
|
||||
)
|
||||
if recommended_clip_strength is not None:
|
||||
scale = rng.uniform(
|
||||
recommended_strength_scale_min, recommended_strength_scale_max
|
||||
@@ -368,9 +449,7 @@ class LoraService(BaseModelService):
|
||||
rng.uniform(clip_strength_min, clip_strength_max), 2
|
||||
)
|
||||
else:
|
||||
clip_str = round(
|
||||
rng.uniform(clip_strength_min, clip_strength_max), 2
|
||||
)
|
||||
clip_str = round(rng.uniform(clip_strength_min, clip_strength_max), 2)
|
||||
|
||||
result_loras.append(
|
||||
{
|
||||
@@ -485,12 +564,69 @@ class LoraService(BaseModelService):
|
||||
if bool(lora.get("license_flags", 127) & (1 << 1))
|
||||
]
|
||||
|
||||
# Apply name pattern filters
|
||||
name_patterns = filter_section.get("namePatterns", {})
|
||||
include_patterns = name_patterns.get("include", [])
|
||||
exclude_patterns = name_patterns.get("exclude", [])
|
||||
use_regex = name_patterns.get("useRegex", False)
|
||||
|
||||
if include_patterns or exclude_patterns:
|
||||
import re
|
||||
|
||||
def matches_pattern(name, pattern, use_regex):
|
||||
"""Check if name matches pattern (regex or substring)"""
|
||||
if not name:
|
||||
return False
|
||||
if use_regex:
|
||||
try:
|
||||
return bool(re.search(pattern, name, re.IGNORECASE))
|
||||
except re.error:
|
||||
# Invalid regex, fall back to substring match
|
||||
return pattern.lower() in name.lower()
|
||||
else:
|
||||
return pattern.lower() in name.lower()
|
||||
|
||||
def matches_any_pattern(name, patterns, use_regex):
|
||||
"""Check if name matches any of the patterns"""
|
||||
if not patterns:
|
||||
return True
|
||||
return any(matches_pattern(name, p, use_regex) for p in patterns)
|
||||
|
||||
filtered = []
|
||||
for lora in available_loras:
|
||||
model_name = lora.get("model_name", "")
|
||||
file_name = lora.get("file_name", "")
|
||||
names_to_check = [n for n in [model_name, file_name] if n]
|
||||
|
||||
# Check exclude patterns first
|
||||
excluded = False
|
||||
if exclude_patterns:
|
||||
for name in names_to_check:
|
||||
if matches_any_pattern(name, exclude_patterns, use_regex):
|
||||
excluded = True
|
||||
break
|
||||
|
||||
if excluded:
|
||||
continue
|
||||
|
||||
# Check include patterns
|
||||
if include_patterns:
|
||||
included = False
|
||||
for name in names_to_check:
|
||||
if matches_any_pattern(name, include_patterns, use_regex):
|
||||
included = True
|
||||
break
|
||||
if not included:
|
||||
continue
|
||||
|
||||
filtered.append(lora)
|
||||
|
||||
available_loras = filtered
|
||||
|
||||
return available_loras
|
||||
|
||||
async def get_cycler_list(
|
||||
self,
|
||||
pool_config: Optional[Dict] = None,
|
||||
sort_by: str = "filename"
|
||||
self, pool_config: Optional[Dict] = None, sort_by: str = "filename"
|
||||
) -> List[Dict]:
|
||||
"""
|
||||
Get filtered and sorted LoRA list for cycling.
|
||||
@@ -518,16 +654,16 @@ class LoraService(BaseModelService):
|
||||
available_loras,
|
||||
key=lambda x: (
|
||||
(x.get("model_name") or x.get("file_name", "")).lower(),
|
||||
x.get("file_path", "").lower()
|
||||
)
|
||||
x.get("file_path", "").lower(),
|
||||
),
|
||||
)
|
||||
else: # Default to filename
|
||||
available_loras = sorted(
|
||||
available_loras,
|
||||
key=lambda x: (
|
||||
x.get("file_name", "").lower(),
|
||||
x.get("file_path", "").lower()
|
||||
)
|
||||
x.get("file_path", "").lower(),
|
||||
),
|
||||
)
|
||||
|
||||
# Return minimal data needed for cycling
|
||||
|
||||
@@ -122,11 +122,25 @@ async def get_metadata_provider(provider_name: str = None):
|
||||
|
||||
provider_manager = await ModelMetadataProviderManager.get_instance()
|
||||
|
||||
try:
|
||||
provider = (
|
||||
provider_manager._get_provider(provider_name)
|
||||
if provider_name
|
||||
else provider_manager._get_provider()
|
||||
)
|
||||
except ValueError as e:
|
||||
# Provider not initialized, attempt to initialize
|
||||
if "No default provider set" in str(e) or "not registered" in str(e):
|
||||
logger.warning(f"Metadata provider not initialized ({e}), initializing now...")
|
||||
await initialize_metadata_providers()
|
||||
provider_manager = await ModelMetadataProviderManager.get_instance()
|
||||
provider = (
|
||||
provider_manager._get_provider(provider_name)
|
||||
if provider_name
|
||||
else provider_manager._get_provider()
|
||||
)
|
||||
else:
|
||||
raise
|
||||
|
||||
return _wrap_provider_with_rate_limit(provider_name, provider)
|
||||
|
||||
|
||||
@@ -14,7 +14,6 @@ from ..utils.metadata_manager import MetadataManager
|
||||
from ..utils.civitai_utils import resolve_license_info
|
||||
from .model_cache import ModelCache
|
||||
from .model_hash_index import ModelHashIndex
|
||||
from ..utils.constants import PREVIEW_EXTENSIONS
|
||||
from .model_lifecycle_service import delete_model_artifacts
|
||||
from .service_registry import ServiceRegistry
|
||||
from .websocket_manager import ws_manager
|
||||
@@ -412,6 +411,7 @@ class ModelScanner:
|
||||
if scan_result:
|
||||
await self._apply_scan_result(scan_result)
|
||||
await self._save_persistent_cache(scan_result)
|
||||
await self._sync_download_history(scan_result.raw_data, source='scan')
|
||||
|
||||
# Send final progress update
|
||||
await ws_manager.broadcast_init_progress({
|
||||
@@ -517,6 +517,7 @@ class ModelScanner:
|
||||
)
|
||||
|
||||
await self._apply_scan_result(scan_result)
|
||||
await self._sync_download_history(adjusted_raw_data, source='scan')
|
||||
|
||||
await ws_manager.broadcast_init_progress({
|
||||
'stage': 'loading_cache',
|
||||
@@ -577,6 +578,7 @@ class ModelScanner:
|
||||
excluded_models=list(self._excluded_models)
|
||||
)
|
||||
await self._save_persistent_cache(snapshot)
|
||||
await self._sync_download_history(snapshot.raw_data, source='scan')
|
||||
def _count_model_files(self) -> int:
|
||||
"""Count all model files with supported extensions in all roots
|
||||
|
||||
@@ -705,6 +707,7 @@ class ModelScanner:
|
||||
scan_result = await self._gather_model_data()
|
||||
await self._apply_scan_result(scan_result)
|
||||
await self._save_persistent_cache(scan_result)
|
||||
await self._sync_download_history(scan_result.raw_data, source='scan')
|
||||
|
||||
logger.info(
|
||||
f"{self.model_type.capitalize()} Scanner: Cache initialization completed in {time.time() - start_time:.2f} seconds, "
|
||||
@@ -733,19 +736,24 @@ class ModelScanner:
|
||||
# Get current cached file paths
|
||||
cached_paths = {item['file_path'] for item in self._cache.raw_data}
|
||||
path_to_item = {item['file_path']: item for item in self._cache.raw_data}
|
||||
cached_real_paths = {}
|
||||
for cached_path in cached_paths:
|
||||
try:
|
||||
cached_real_paths.setdefault(os.path.realpath(cached_path), cached_path)
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
# Track found files and new files
|
||||
found_paths = set()
|
||||
new_files = []
|
||||
visited_real_paths = set()
|
||||
discovered_real_files = set()
|
||||
|
||||
# Scan all model roots
|
||||
for root_path in self.get_model_roots():
|
||||
if not os.path.exists(root_path):
|
||||
continue
|
||||
|
||||
# Track visited real paths to avoid symlink loops
|
||||
visited_real_paths = set()
|
||||
|
||||
# Recursively scan directory
|
||||
for root, _, files in os.walk(root_path, followlinks=True):
|
||||
real_root = os.path.realpath(root)
|
||||
@@ -758,12 +766,18 @@ class ModelScanner:
|
||||
if ext in self.file_extensions:
|
||||
# Construct paths exactly as they would be in cache
|
||||
file_path = os.path.join(root, file).replace(os.sep, '/')
|
||||
real_file_path = os.path.realpath(os.path.join(root, file))
|
||||
|
||||
# Check if this file is already in cache
|
||||
if file_path in cached_paths:
|
||||
found_paths.add(file_path)
|
||||
continue
|
||||
|
||||
cached_real_match = cached_real_paths.get(real_file_path)
|
||||
if cached_real_match:
|
||||
found_paths.add(cached_real_match)
|
||||
continue
|
||||
|
||||
if file_path in self._excluded_models:
|
||||
continue
|
||||
|
||||
@@ -779,6 +793,10 @@ class ModelScanner:
|
||||
if matched:
|
||||
continue
|
||||
|
||||
if real_file_path in discovered_real_files:
|
||||
continue
|
||||
|
||||
discovered_real_files.add(real_file_path)
|
||||
# This is a new file to process
|
||||
new_files.append(file_path)
|
||||
|
||||
@@ -1087,6 +1105,49 @@ class ModelScanner:
|
||||
|
||||
await self._cache.resort()
|
||||
|
||||
async def _sync_download_history(
|
||||
self,
|
||||
raw_data: List[Mapping[str, Any]],
|
||||
*,
|
||||
source: str,
|
||||
) -> None:
|
||||
records: List[Dict[str, Any]] = []
|
||||
for item in raw_data or []:
|
||||
if not isinstance(item, Mapping):
|
||||
continue
|
||||
civitai = item.get('civitai')
|
||||
if not isinstance(civitai, Mapping):
|
||||
continue
|
||||
|
||||
version_id = civitai.get('id')
|
||||
if version_id in (None, ''):
|
||||
continue
|
||||
|
||||
records.append(
|
||||
{
|
||||
'version_id': version_id,
|
||||
'model_id': civitai.get('modelId'),
|
||||
'file_path': item.get('file_path'),
|
||||
}
|
||||
)
|
||||
|
||||
if not records:
|
||||
return
|
||||
|
||||
try:
|
||||
history_service = await ServiceRegistry.get_downloaded_version_history_service()
|
||||
await history_service.mark_downloaded_bulk(
|
||||
self.model_type,
|
||||
records,
|
||||
source=source,
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.debug(
|
||||
"%s Scanner: Failed to sync download history: %s",
|
||||
self.model_type.capitalize(),
|
||||
exc,
|
||||
)
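# Hedged illustration of what this helper forwards (paths and IDs below are
# made up): only items whose "civitai" block carries an id are synced, e.g.
#   raw_data = [{"file_path": "loras/example.safetensors",
#                "civitai": {"id": 12345, "modelId": 678}}]
# produces
#   records == [{"version_id": 12345, "model_id": 678,
#                "file_path": "loras/example.safetensors"}]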
|
||||
|
||||
async def _gather_model_data(
|
||||
self,
|
||||
*,
|
||||
@@ -1100,6 +1161,8 @@ class ModelScanner:
|
||||
tags_count: Dict[str, int] = {}
|
||||
excluded_models: List[str] = []
|
||||
processed_files = 0
|
||||
processed_real_files: Set[str] = set()
|
||||
visited_real_dirs: Set[str] = set()
|
||||
|
||||
async def handle_progress() -> None:
|
||||
if progress_callback is None:
|
||||
@@ -1116,9 +1179,10 @@ class ModelScanner:
|
||||
|
||||
try:
|
||||
real_path = os.path.realpath(current_path)
|
||||
if real_path in visited_paths:
|
||||
if real_path in visited_paths or real_path in visited_real_dirs:
|
||||
return
|
||||
visited_paths.add(real_path)
|
||||
visited_real_dirs.add(real_path)
|
||||
|
||||
with os.scandir(current_path) as iterator:
|
||||
entries = list(iterator)
|
||||
@@ -1131,6 +1195,11 @@ class ModelScanner:
|
||||
continue
|
||||
|
||||
file_path = entry.path.replace(os.sep, "/")
|
||||
real_file_path = os.path.realpath(entry.path)
|
||||
if real_file_path in processed_real_files:
|
||||
continue
|
||||
|
||||
processed_real_files.add(real_file_path)
|
||||
result = await self._process_model_file(
|
||||
file_path,
|
||||
root_path,
|
||||
@@ -1443,11 +1512,10 @@ class ModelScanner:
|
||||
if not file_path:
|
||||
return None
|
||||
|
||||
base_name = os.path.splitext(file_path)[0]
|
||||
|
||||
for ext in PREVIEW_EXTENSIONS:
|
||||
preview_path = f"{base_name}{ext}"
|
||||
if os.path.exists(preview_path):
|
||||
dir_path = os.path.dirname(file_path)
|
||||
base_name = os.path.splitext(os.path.basename(file_path))[0]
|
||||
preview_path = find_preview_file(base_name, dir_path)
|
||||
if preview_path:
|
||||
return config.get_preview_static_url(preview_path)
|
||||
|
||||
return None
|
||||
|
||||
@@ -12,8 +12,9 @@ from typing import Any, Dict, Iterable, List, Mapping, Optional, Sequence
|
||||
|
||||
from .errors import RateLimitError, ResourceNotFoundError
|
||||
from .settings_manager import get_settings_manager
|
||||
from ..utils.cache_paths import CacheType, resolve_cache_path_with_migration
|
||||
from ..utils.civitai_utils import rewrite_preview_url
|
||||
from ..utils.preview_selection import select_preview_media
|
||||
from ..utils.preview_selection import resolve_mature_threshold, select_preview_media
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -234,12 +235,52 @@ class ModelUpdateService:
|
||||
ON model_update_versions(model_id);
|
||||
"""
|
||||
|
||||
def __init__(self, db_path: str, *, ttl_seconds: int = 24 * 60 * 60, settings_manager=None) -> None:
|
||||
self._db_path = db_path
|
||||
def __init__(
|
||||
self,
|
||||
db_path: str | None = None,
|
||||
*,
|
||||
ttl_seconds: int = 24 * 60 * 60,
|
||||
settings_manager=None,
|
||||
) -> None:
|
||||
self._settings = settings_manager or get_settings_manager()
|
||||
self._library_name = self._get_active_library_name()
|
||||
self._db_path = db_path or self._resolve_default_path(self._library_name)
|
||||
self._ttl_seconds = ttl_seconds
|
||||
self._lock = asyncio.Lock()
|
||||
self._schema_initialized = False
|
||||
self._settings = settings_manager or get_settings_manager()
|
||||
self._custom_db_path = db_path is not None
|
||||
self._ensure_directory()
|
||||
self._initialize_schema()
|
||||
|
||||
def _get_active_library_name(self) -> str:
|
||||
try:
|
||||
value = self._settings.get_active_library_name()
|
||||
except Exception:
|
||||
value = None
|
||||
return value or "default"
|
||||
|
||||
def _resolve_default_path(self, library_name: str) -> str:
|
||||
env_override = os.environ.get("LORA_MANAGER_MODEL_UPDATE_DB")
|
||||
return resolve_cache_path_with_migration(
|
||||
CacheType.MODEL_UPDATE,
|
||||
library_name=library_name,
|
||||
env_override=env_override,
|
||||
)
|
||||
|
||||
def on_library_changed(self) -> None:
|
||||
"""Switch to the database for the active library."""
|
||||
|
||||
if self._custom_db_path:
|
||||
return
|
||||
|
||||
library_name = self._get_active_library_name()
|
||||
new_path = self._resolve_default_path(library_name)
|
||||
if new_path == self._db_path:
|
||||
return
|
||||
|
||||
self._library_name = library_name
|
||||
self._db_path = new_path
|
||||
self._schema_initialized = False
|
||||
self._ensure_directory()
|
||||
self._initialize_schema()
|
||||
|
||||
@@ -262,11 +303,114 @@ class ModelUpdateService:
|
||||
conn.execute("PRAGMA foreign_keys = ON")
|
||||
conn.executescript(self._SCHEMA)
|
||||
self._apply_migrations(conn)
|
||||
self._migrate_from_legacy_snapshot(conn)
|
||||
self._schema_initialized = True
|
||||
except Exception as exc: # pragma: no cover - defensive guard
|
||||
logger.error("Failed to initialize update schema: %s", exc, exc_info=True)
|
||||
raise
|
||||
|
||||
def _migrate_from_legacy_snapshot(self, conn: sqlite3.Connection) -> None:
|
||||
"""Copy update tracking data out of the legacy model snapshot database."""
|
||||
|
||||
if self._custom_db_path:
|
||||
return
|
||||
|
||||
try:
|
||||
from .persistent_model_cache import get_persistent_cache
|
||||
|
||||
legacy_path = get_persistent_cache(self._library_name).get_database_path()
|
||||
except Exception:
|
||||
return
|
||||
|
||||
if not legacy_path or os.path.abspath(legacy_path) == os.path.abspath(self._db_path):
|
||||
return
|
||||
if not os.path.exists(legacy_path):
|
||||
return
|
||||
|
||||
try:
|
||||
existing_row = conn.execute(
|
||||
"SELECT 1 FROM model_update_status LIMIT 1"
|
||||
).fetchone()
|
||||
if existing_row:
|
||||
return
|
||||
except Exception:
|
||||
return
|
||||
|
||||
try:
|
||||
with sqlite3.connect(legacy_path, check_same_thread=False) as legacy_conn:
|
||||
legacy_conn.row_factory = sqlite3.Row
|
||||
status_rows = legacy_conn.execute(
|
||||
"""
|
||||
SELECT model_id, model_type, last_checked_at, should_ignore_model
|
||||
FROM model_update_status
|
||||
"""
|
||||
).fetchall()
|
||||
if not status_rows:
|
||||
return
|
||||
|
||||
version_rows = legacy_conn.execute(
|
||||
"""
|
||||
SELECT model_id, version_id, sort_index, name, base_model, released_at,
|
||||
size_bytes, preview_url, is_in_library, should_ignore,
|
||||
early_access_ends_at, is_early_access
|
||||
FROM model_update_versions
|
||||
ORDER BY model_id ASC, sort_index ASC, version_id ASC
|
||||
"""
|
||||
).fetchall()
|
||||
|
||||
conn.execute("BEGIN")
|
||||
conn.executemany(
|
||||
"""
|
||||
INSERT OR REPLACE INTO model_update_status (
|
||||
model_id, model_type, last_checked_at, should_ignore_model
|
||||
) VALUES (?, ?, ?, ?)
|
||||
""",
|
||||
[
|
||||
(
|
||||
int(row["model_id"]),
|
||||
row["model_type"],
|
||||
row["last_checked_at"],
|
||||
int(row["should_ignore_model"] or 0),
|
||||
)
|
||||
for row in status_rows
|
||||
],
|
||||
)
|
||||
conn.executemany(
|
||||
"""
|
||||
INSERT OR REPLACE INTO model_update_versions (
|
||||
model_id, version_id, sort_index, name, base_model, released_at,
|
||||
size_bytes, preview_url, is_in_library, should_ignore,
|
||||
early_access_ends_at, is_early_access
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
[
|
||||
(
|
||||
int(row["model_id"]),
|
||||
int(row["version_id"]),
|
||||
int(row["sort_index"] or 0),
|
||||
row["name"],
|
||||
row["base_model"],
|
||||
row["released_at"],
|
||||
row["size_bytes"],
|
||||
row["preview_url"],
|
||||
int(row["is_in_library"] or 0),
|
||||
int(row["should_ignore"] or 0),
|
||||
row["early_access_ends_at"],
|
||||
int(row["is_early_access"] or 0),
|
||||
)
|
||||
for row in version_rows
|
||||
],
|
||||
)
|
||||
conn.commit()
|
||||
logger.info(
|
||||
"Migrated model update tracking data from legacy snapshot DB for %s",
|
||||
self._library_name,
|
||||
)
|
||||
except sqlite3.OperationalError as exc:
|
||||
logger.debug("Legacy model update migration skipped: %s", exc)
|
||||
except Exception as exc: # pragma: no cover - defensive guard
|
||||
logger.warning("Failed to migrate model update data: %s", exc, exc_info=True)
|
||||
|
||||
def _apply_migrations(self, conn: sqlite3.Connection) -> None:
|
||||
"""Ensure legacy databases match the current schema without dropping data."""
|
||||
|
||||
@@ -1252,14 +1396,23 @@ class ModelUpdateService:
|
||||
return None
|
||||
|
||||
blur_mature_content = True
|
||||
mature_threshold = resolve_mature_threshold({"mature_blur_level": "R"})
|
||||
settings = getattr(self, "_settings", None)
|
||||
if settings is not None and hasattr(settings, "get"):
|
||||
try:
|
||||
blur_mature_content = bool(settings.get("blur_mature_content", True))
|
||||
mature_threshold = resolve_mature_threshold(
|
||||
{"mature_blur_level": settings.get("mature_blur_level", "R")}
|
||||
)
|
||||
except Exception: # pragma: no cover - defensive guard
|
||||
blur_mature_content = True
|
||||
mature_threshold = resolve_mature_threshold({"mature_blur_level": "R"})
|
||||
|
||||
selected, _ = select_preview_media(candidates, blur_mature_content=blur_mature_content)
|
||||
selected, _ = select_preview_media(
|
||||
candidates,
|
||||
blur_mature_content=blur_mature_content,
|
||||
mature_threshold=mature_threshold,
|
||||
)
|
||||
if not selected:
|
||||
return None
|
||||
|
||||
|
||||
@@ -56,6 +56,7 @@ class PersistentModelCache:
|
||||
"exclude",
|
||||
"db_checked",
|
||||
"last_checked_at",
|
||||
"hash_status",
|
||||
)
|
||||
_MODEL_UPDATE_COLUMNS: Tuple[str, ...] = _MODEL_COLUMNS[2:]
|
||||
_instances: Dict[str, "PersistentModelCache"] = {}
|
||||
@@ -186,6 +187,7 @@ class PersistentModelCache:
|
||||
"civitai_deleted": bool(row["civitai_deleted"]),
|
||||
"skip_metadata_refresh": bool(row["skip_metadata_refresh"]),
|
||||
"license_flags": int(license_value),
|
||||
"hash_status": row["hash_status"] or "completed",
|
||||
}
|
||||
raw_data.append(item)
|
||||
|
||||
@@ -449,6 +451,7 @@ class PersistentModelCache:
|
||||
exclude INTEGER,
|
||||
db_checked INTEGER,
|
||||
last_checked_at REAL,
|
||||
hash_status TEXT,
|
||||
PRIMARY KEY (model_type, file_path)
|
||||
);
|
||||
|
||||
@@ -496,6 +499,7 @@ class PersistentModelCache:
|
||||
"skip_metadata_refresh": "INTEGER DEFAULT 0",
|
||||
# Persisting without explicit flags should assume CivitAI's documented defaults (0b111001 == 57).
|
||||
"license_flags": f"INTEGER DEFAULT {DEFAULT_LICENSE_FLAGS}",
|
||||
"hash_status": "TEXT DEFAULT 'completed'",
|
||||
}
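# Hedged aside on the bitset default referenced above: DEFAULT_LICENSE_FLAGS is
# 0b111001 == 57, i.e. bits 0, 3, 4 and 5 set. A generic membership check is
#     bool(flags & (1 << bit))
# The meaning of each individual bit follows CivitAI's license payload and is
# not spelled out here.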
|
||||
|
||||
for column, definition in required_columns.items():
|
||||
@@ -570,6 +574,7 @@ class PersistentModelCache:
|
||||
1 if item.get("exclude") else 0,
|
||||
1 if item.get("db_checked") else 0,
|
||||
float(item.get("last_checked_at") or 0.0),
|
||||
item.get("hash_status", "completed"),
|
||||
)
|
||||
|
||||
def _insert_model_sql(self) -> str:
|
||||
|
||||
@@ -9,7 +9,7 @@ from urllib.parse import urlparse
|
||||
|
||||
from ..utils.constants import CARD_PREVIEW_WIDTH, PREVIEW_EXTENSIONS
|
||||
from ..utils.civitai_utils import rewrite_preview_url
|
||||
from ..utils.preview_selection import select_preview_media
|
||||
from ..utils.preview_selection import resolve_mature_threshold, select_preview_media
|
||||
from .settings_manager import get_settings_manager
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -49,9 +49,13 @@ class PreviewAssetService:
|
||||
blur_mature_content = bool(
|
||||
settings_manager.get("blur_mature_content", True)
|
||||
)
|
||||
mature_threshold = resolve_mature_threshold(
|
||||
{"mature_blur_level": settings_manager.get("mature_blur_level", "R")}
|
||||
)
|
||||
first_preview, nsfw_level = select_preview_media(
|
||||
images,
|
||||
blur_mature_content=blur_mature_content,
|
||||
mature_threshold=mature_threshold,
|
||||
)
|
||||
|
||||
if not first_preview:
|
||||
@@ -216,4 +220,3 @@ class PreviewAssetService:
|
||||
if "webm" in content_type:
|
||||
return ".webm"
|
||||
return ".mp4"
|
||||
|
||||
|
||||
@@ -18,6 +18,7 @@ from .service_registry import ServiceRegistry
|
||||
from .lora_scanner import LoraScanner
|
||||
from .metadata_service import get_default_metadata_provider
|
||||
from .checkpoint_scanner import CheckpointScanner
|
||||
from .settings_manager import get_settings_manager
|
||||
from .recipes.errors import RecipeNotFoundError
|
||||
from ..utils.utils import calculate_recipe_fingerprint, fuzzy_match
|
||||
from natsort import natsorted
|
||||
@@ -951,6 +952,30 @@ class RecipeScanner:
|
||||
except Exception as exc:
|
||||
logger.debug("Failed to update FTS index for recipe: %s", exc)
|
||||
|
||||
@staticmethod
|
||||
def _normalize_recipe_gen_params(recipe_data: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Return a recipe copy with normalized generation parameter aliases added."""
|
||||
|
||||
normalized_recipe = dict(recipe_data)
|
||||
gen_params = recipe_data.get("gen_params")
|
||||
if not isinstance(gen_params, dict):
|
||||
return normalized_recipe
|
||||
|
||||
normalized_gen_params = dict(gen_params)
|
||||
for key, value in gen_params.items():
|
||||
if value in (None, ""):
|
||||
continue
|
||||
|
||||
normalized_key = GenParamsMerger.NORMALIZATION_MAPPING.get(key, key)
|
||||
if normalized_key not in GenParamsMerger.ALLOWED_KEYS:
|
||||
continue
|
||||
|
||||
if normalized_gen_params.get(normalized_key) in (None, ""):
|
||||
normalized_gen_params[normalized_key] = value
|
||||
|
||||
normalized_recipe["gen_params"] = normalized_gen_params
|
||||
return normalized_recipe
|
||||
|
||||
async def _enrich_cache_metadata(self) -> None:
|
||||
"""Perform remote metadata enrichment after the initial scan."""
|
||||
|
||||
@@ -1090,6 +1115,14 @@ class RecipeScanner:
|
||||
@property
|
||||
def recipes_dir(self) -> str:
|
||||
"""Get path to recipes directory"""
|
||||
custom_recipes_dir = get_settings_manager().get("recipes_path", "")
|
||||
if isinstance(custom_recipes_dir, str) and custom_recipes_dir.strip():
|
||||
recipes_dir = os.path.abspath(
|
||||
os.path.normpath(os.path.expanduser(custom_recipes_dir.strip()))
|
||||
)
|
||||
os.makedirs(recipes_dir, exist_ok=True)
|
||||
return recipes_dir
|
||||
|
||||
if not config.loras_roots:
|
||||
return ""
|
||||
|
||||
@@ -1336,6 +1369,7 @@ class RecipeScanner:
|
||||
# Ensure gen_params exists
|
||||
if "gen_params" not in recipe_data:
|
||||
recipe_data["gen_params"] = {}
|
||||
recipe_data = self._normalize_recipe_gen_params(recipe_data)
|
||||
|
||||
# Update lora information with local paths and availability
|
||||
lora_metadata_updated = await self._update_lora_information(recipe_data)
|
||||
@@ -1615,6 +1649,9 @@ class RecipeScanner:
|
||||
) -> Optional[Dict[str, Any]]:
|
||||
"""Coerce legacy or malformed checkpoint entries into a dict."""
|
||||
|
||||
if checkpoint_raw is None:
|
||||
return None
|
||||
|
||||
if isinstance(checkpoint_raw, dict):
|
||||
return dict(checkpoint_raw)
|
||||
|
||||
@@ -1632,9 +1669,6 @@ class RecipeScanner:
|
||||
"file_name": file_name,
|
||||
}
|
||||
|
||||
logger.warning(
|
||||
"Unexpected checkpoint payload type %s", type(checkpoint_raw).__name__
|
||||
)
|
||||
return None
|
||||
|
||||
def _enrich_checkpoint_entry(self, checkpoint: Dict[str, Any]) -> Dict[str, Any]:
|
||||
@@ -1790,6 +1824,7 @@ class RecipeScanner:
|
||||
filters: dict = None,
|
||||
search_options: dict = None,
|
||||
lora_hash: str = None,
|
||||
checkpoint_hash: str = None,
|
||||
bypass_filters: bool = True,
|
||||
folder: str | None = None,
|
||||
recursive: bool = True,
|
||||
@@ -1804,7 +1839,8 @@ class RecipeScanner:
|
||||
filters: Dictionary of filters to apply
|
||||
search_options: Dictionary of search options to apply
|
||||
lora_hash: Optional SHA256 hash of a LoRA to filter recipes by
|
||||
bypass_filters: If True, ignore other filters when a lora_hash is provided
|
||||
checkpoint_hash: Optional SHA256 hash of a checkpoint to filter recipes by
|
||||
bypass_filters: If True, ignore other filters when a hash filter is provided
|
||||
folder: Optional folder filter relative to recipes directory
|
||||
recursive: Whether to include recipes in subfolders of the selected folder
|
||||
"""
|
||||
@@ -1852,9 +1888,23 @@ class RecipeScanner:
|
||||
# Skip other filters if bypass_filters is True
|
||||
pass
|
||||
# Otherwise continue with normal filtering after applying LoRA hash filter
|
||||
elif checkpoint_hash:
|
||||
normalized_checkpoint_hash = checkpoint_hash.lower()
|
||||
filtered_data = [
|
||||
item
|
||||
for item in filtered_data
|
||||
if isinstance(item.get("checkpoint"), dict)
|
||||
and (item["checkpoint"].get("hash", "") or "").lower()
|
||||
== normalized_checkpoint_hash
|
||||
]
|
||||
|
||||
# Skip further filtering if we're only filtering by LoRA hash with bypass enabled
|
||||
if not (lora_hash and bypass_filters):
|
||||
if bypass_filters:
|
||||
pass
|
||||
|
||||
has_hash_filter = bool(lora_hash or checkpoint_hash)
|
||||
|
||||
# Skip further filtering if we're only filtering by model hash with bypass enabled
|
||||
if not (has_hash_filter and bypass_filters):
|
||||
# Apply folder filter before other criteria
|
||||
if folder is not None:
|
||||
normalized_folder = folder.strip("/")
|
||||
@@ -2030,7 +2080,10 @@ class RecipeScanner:
|
||||
end_idx = min(start_idx + page_size, total_items)
|
||||
|
||||
# Get paginated items
|
||||
paginated_items = filtered_data[start_idx:end_idx]
|
||||
paginated_items = [
|
||||
self._normalize_recipe_gen_params(item)
|
||||
for item in filtered_data[start_idx:end_idx]
|
||||
]
|
||||
|
||||
# Add inLibrary information and URLs for each recipe
|
||||
for item in paginated_items:
|
||||
@@ -2089,8 +2142,18 @@ class RecipeScanner:
|
||||
if not recipe:
|
||||
return None
|
||||
|
||||
# Prefer the on-disk recipe JSON for fields that are not persisted in the
|
||||
# SQLite cache yet, such as source_path.
|
||||
merged_recipe = self._normalize_recipe_gen_params({**recipe})
|
||||
recipe_json = await self._load_recipe_json(recipe_id)
|
||||
if recipe_json:
|
||||
for field in ("source_path", "checkpoint", "loras", "gen_params"):
|
||||
if field not in recipe_json:
|
||||
merged_recipe.pop(field, None)
|
||||
merged_recipe.update(recipe_json)
|
||||
|
||||
# Format the recipe with all needed information
|
||||
formatted_recipe = {**recipe} # Copy all fields
|
||||
formatted_recipe = {**merged_recipe}
|
||||
|
||||
# Format file path to URL
|
||||
if "file_path" in formatted_recipe:
|
||||
@@ -2124,6 +2187,30 @@ class RecipeScanner:
|
||||
|
||||
return formatted_recipe
|
||||
|
||||
async def _load_recipe_json(self, recipe_id: str) -> Optional[Dict[str, Any]]:
|
||||
"""Load the raw recipe JSON payload for a recipe ID if it exists."""
|
||||
|
||||
recipe_json_path = await self.get_recipe_json_path(recipe_id)
|
||||
if not recipe_json_path or not os.path.exists(recipe_json_path):
|
||||
return None
|
||||
|
||||
try:
|
||||
with open(recipe_json_path, "r", encoding="utf-8") as f:
|
||||
recipe_data = json.load(f)
|
||||
except Exception as exc:
|
||||
logger.debug(
|
||||
"Failed to load recipe JSON for %s from %s: %s",
|
||||
recipe_id,
|
||||
recipe_json_path,
|
||||
exc,
|
||||
)
|
||||
return None
|
||||
|
||||
if not isinstance(recipe_data, dict):
|
||||
return None
|
||||
|
||||
return self._normalize_recipe_gen_params(recipe_data)
|
||||
|
||||
def _format_file_url(self, file_path: str) -> str:
|
||||
"""Format file path as URL for serving in web UI"""
|
||||
if not file_path:
|
||||
@@ -2334,6 +2421,38 @@ class RecipeScanner:
|
||||
|
||||
return matching_recipes
|
||||
|
||||
async def get_recipes_for_checkpoint(
|
||||
self, checkpoint_hash: str
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Return recipes that reference a given checkpoint hash."""
|
||||
|
||||
if not checkpoint_hash:
|
||||
return []
|
||||
|
||||
normalized_hash = checkpoint_hash.lower()
|
||||
cache = await self.get_cached_data()
|
||||
matching_recipes: List[Dict[str, Any]] = []
|
||||
|
||||
for recipe in cache.raw_data:
|
||||
checkpoint = self._normalize_checkpoint_entry(recipe.get("checkpoint"))
|
||||
if not checkpoint:
|
||||
continue
|
||||
|
||||
enriched_checkpoint = self._enrich_checkpoint_entry(dict(checkpoint))
|
||||
if (enriched_checkpoint.get("hash") or "").lower() != normalized_hash:
|
||||
continue
|
||||
|
||||
recipe_copy = {**recipe}
|
||||
recipe_copy["checkpoint"] = enriched_checkpoint
|
||||
recipe_copy["loras"] = [
|
||||
self._enrich_lora_entry(dict(entry))
|
||||
for entry in recipe.get("loras", [])
|
||||
]
|
||||
recipe_copy["file_url"] = self._format_file_url(recipe.get("file_path"))
|
||||
matching_recipes.append(recipe_copy)
|
||||
|
||||
return matching_recipes
|
||||
|
||||
async def get_recipe_syntax_tokens(self, recipe_id: str) -> List[str]:
|
||||
"""Build LoRA syntax tokens for a recipe."""
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
"""Services responsible for recipe metadata analysis."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
@@ -69,7 +70,9 @@ class RecipeAnalysisService:
|
||||
try:
|
||||
metadata = self._exif_utils.extract_image_metadata(temp_path)
|
||||
if not metadata:
|
||||
return AnalysisResult({"error": "No metadata found in this image", "loras": []})
|
||||
return AnalysisResult(
|
||||
{"error": "No metadata found in this image", "loras": []}
|
||||
)
|
||||
|
||||
return await self._parse_metadata(
|
||||
metadata,
|
||||
@@ -105,7 +108,9 @@ class RecipeAnalysisService:
|
||||
if civitai_match:
|
||||
image_info = await civitai_client.get_image_info(civitai_match.group(1))
|
||||
if not image_info:
|
||||
raise RecipeDownloadError("Failed to fetch image information from Civitai")
|
||||
raise RecipeDownloadError(
|
||||
"Failed to fetch image information from Civitai"
|
||||
)
|
||||
|
||||
image_url = image_info.get("url")
|
||||
if not image_url:
|
||||
@@ -114,13 +119,15 @@ class RecipeAnalysisService:
|
||||
is_video = image_info.get("type") == "video"
|
||||
|
||||
# Use optimized preview URLs if possible
|
||||
rewritten_url, _ = rewrite_preview_url(image_url, media_type=image_info.get("type"))
|
||||
rewritten_url, _ = rewrite_preview_url(
|
||||
image_url, media_type=image_info.get("type")
|
||||
)
|
||||
if rewritten_url:
|
||||
image_url = rewritten_url
|
||||
|
||||
if is_video:
|
||||
# Extract extension from URL
|
||||
url_path = image_url.split('?')[0].split('#')[0]
|
||||
url_path = image_url.split("?")[0].split("#")[0]
|
||||
extension = os.path.splitext(url_path)[1].lower() or ".mp4"
|
||||
else:
|
||||
extension = ".jpg"
|
||||
@@ -135,9 +142,23 @@ class RecipeAnalysisService:
|
||||
and isinstance(metadata["meta"], dict)
|
||||
):
|
||||
metadata = metadata["meta"]
|
||||
|
||||
# Include modelVersionIds from root level if available
|
||||
# Civitai API returns modelVersionIds at root level, not in meta
|
||||
model_version_ids = image_info.get("modelVersionIds")
|
||||
if model_version_ids and isinstance(metadata, dict):
|
||||
metadata["modelVersionIds"] = model_version_ids
|
||||
|
||||
# Validate that metadata contains meaningful recipe fields
|
||||
# If not, treat as None to trigger EXIF extraction from downloaded image
|
||||
if isinstance(metadata, dict) and not self._has_recipe_fields(metadata):
|
||||
self._logger.debug(
|
||||
"Civitai API metadata lacks recipe fields, will extract from EXIF"
|
||||
)
|
||||
metadata = None
|
||||
else:
|
||||
# Basic extension detection for non-Civitai URLs
|
||||
url_path = url.split('?')[0].split('#')[0]
|
||||
url_path = url.split("?")[0].split("#")[0]
|
||||
extension = os.path.splitext(url_path)[1].lower()
|
||||
if extension in [".mp4", ".webm"]:
|
||||
is_video = True
|
||||
@@ -211,7 +232,9 @@ class RecipeAnalysisService:
|
||||
|
||||
image_bytes = self._convert_tensor_to_png_bytes(latest_image)
|
||||
if image_bytes is None:
|
||||
raise RecipeValidationError("Cannot handle this data shape from metadata registry")
|
||||
raise RecipeValidationError(
|
||||
"Cannot handle this data shape from metadata registry"
|
||||
)
|
||||
|
||||
return AnalysisResult(
|
||||
{
|
||||
@@ -222,6 +245,22 @@ class RecipeAnalysisService:
|
||||
|
||||
# Internal helpers -------------------------------------------------
|
||||
|
||||
def _has_recipe_fields(self, metadata: dict[str, Any]) -> bool:
|
||||
"""Check if metadata contains meaningful recipe-related fields."""
|
||||
recipe_fields = {
|
||||
"prompt",
|
||||
"negative_prompt",
|
||||
"resources",
|
||||
"hashes",
|
||||
"params",
|
||||
"generationData",
|
||||
"Workflow",
|
||||
"prompt_type",
|
||||
"positive",
|
||||
"negative",
|
||||
}
|
||||
return any(field in metadata for field in recipe_fields)
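# Hedged illustration (payloads are made up): metadata that only carries image
# bookkeeping fails the check, while any generation-related field passes.
#   self._has_recipe_fields({"width": 512})               -> False
#   self._has_recipe_fields({"prompt": "a cat <lora:x>"})  -> True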
|
||||
|
||||
async def _parse_metadata(
|
||||
self,
|
||||
metadata: dict[str, Any],
|
||||
@@ -234,7 +273,12 @@ class RecipeAnalysisService:
|
||||
) -> AnalysisResult:
|
||||
parser = self._recipe_parser_factory.create_parser(metadata)
|
||||
if parser is None:
|
||||
payload = {"error": "No parser found for this image", "loras": []}
|
||||
# Provide more specific error message based on metadata source
|
||||
if not metadata:
|
||||
error_msg = "This image does not contain any generation metadata (prompt, models, or parameters)"
|
||||
else:
|
||||
error_msg = "No parser found for this image"
|
||||
payload = {"error": error_msg, "loras": []}
|
||||
if include_image_base64 and image_path:
|
||||
payload["image_base64"] = self._encode_file(image_path)
|
||||
payload["is_video"] = is_video
|
||||
@@ -257,7 +301,9 @@ class RecipeAnalysisService:
|
||||
|
||||
matching_recipes: list[str] = []
|
||||
if fingerprint:
|
||||
matching_recipes = await recipe_scanner.find_recipes_by_fingerprint(fingerprint)
|
||||
matching_recipes = await recipe_scanner.find_recipes_by_fingerprint(
|
||||
fingerprint
|
||||
)
|
||||
result["matching_recipes"] = matching_recipes
|
||||
|
||||
return AnalysisResult(result)
|
||||
@@ -269,7 +315,10 @@ class RecipeAnalysisService:
|
||||
raise RecipeDownloadError(f"Failed to download image from URL: {result}")
|
||||
|
||||
def _metadata_not_found_response(self, path: str) -> AnalysisResult:
|
||||
payload: dict[str, Any] = {"error": "No metadata found in this image", "loras": []}
|
||||
payload: dict[str, Any] = {
|
||||
"error": "No metadata found in this image",
|
||||
"loras": [],
|
||||
}
|
||||
if os.path.exists(path):
|
||||
payload["image_base64"] = self._encode_file(path)
|
||||
return AnalysisResult(payload)
|
||||
@@ -305,7 +354,9 @@ class RecipeAnalysisService:
|
||||
|
||||
if hasattr(tensor_image, "shape"):
|
||||
self._logger.debug(
|
||||
"Tensor shape: %s, dtype: %s", tensor_image.shape, getattr(tensor_image, "dtype", None)
|
||||
"Tensor shape: %s, dtype: %s",
|
||||
tensor_image.shape,
|
||||
getattr(tensor_image, "dtype", None),
|
||||
)
|
||||
|
||||
import torch # type: ignore[import-not-found]
|
||||
|
||||
@@ -12,6 +12,7 @@ from dataclasses import dataclass
|
||||
from typing import Any, Dict, Iterable, Optional
|
||||
|
||||
from ...config import config
|
||||
from ...recipes.constants import GEN_PARAM_KEYS
|
||||
from ...utils.utils import calculate_recipe_fingerprint
|
||||
from .errors import RecipeNotFoundError, RecipeValidationError
|
||||
|
||||
@@ -90,23 +91,7 @@ class RecipePersistenceService:
|
||||
current_time = time.time()
|
||||
loras_data = [self._normalise_lora_entry(lora) for lora in (metadata.get("loras") or [])]
|
||||
checkpoint_entry = self._sanitize_checkpoint_entry(self._extract_checkpoint_entry(metadata))
|
||||
|
||||
gen_params = metadata.get("gen_params") or {}
|
||||
if not gen_params and "raw_metadata" in metadata:
|
||||
raw_metadata = metadata.get("raw_metadata", {})
|
||||
gen_params = {
|
||||
"prompt": raw_metadata.get("prompt", ""),
|
||||
"negative_prompt": raw_metadata.get("negative_prompt", ""),
|
||||
"steps": raw_metadata.get("steps", ""),
|
||||
"sampler": raw_metadata.get("sampler", ""),
|
||||
"cfg_scale": raw_metadata.get("cfg_scale", ""),
|
||||
"seed": raw_metadata.get("seed", ""),
|
||||
"size": raw_metadata.get("size", ""),
|
||||
"clip_skip": raw_metadata.get("clip_skip", ""),
|
||||
}
|
||||
|
||||
# Drop checkpoint duplication from generation parameters to store it only at top level
|
||||
gen_params.pop("checkpoint", None)
|
||||
gen_params = self._sanitize_gen_params_for_storage(metadata)
|
||||
|
||||
fingerprint = calculate_recipe_fingerprint(loras_data)
|
||||
recipe_data: Dict[str, Any] = {
|
||||
@@ -133,6 +118,7 @@ class RecipePersistenceService:
|
||||
json_filename = f"{recipe_id}.recipe.json"
|
||||
json_path = os.path.join(recipes_dir, json_filename)
|
||||
json_path = os.path.normpath(json_path)
|
||||
|
||||
with open(json_path, "w", encoding="utf-8") as file_obj:
|
||||
json.dump(recipe_data, file_obj, indent=4, ensure_ascii=False)
|
||||
|
||||
@@ -152,6 +138,30 @@ class RecipePersistenceService:
|
||||
}
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _sanitize_gen_params_for_storage(metadata: dict[str, Any]) -> dict[str, Any]:
|
||||
gen_params = metadata.get("gen_params")
|
||||
if isinstance(gen_params, dict) and gen_params:
|
||||
source = gen_params
|
||||
else:
|
||||
source = metadata.get("raw_metadata")
|
||||
|
||||
if not isinstance(source, dict):
|
||||
return {}
|
||||
|
||||
allowed_keys = set(GEN_PARAM_KEYS)
|
||||
sanitized: dict[str, Any] = {}
|
||||
for key in allowed_keys:
|
||||
if key not in source:
|
||||
continue
|
||||
value = source.get(key)
|
||||
if value in (None, ""):
|
||||
continue
|
||||
sanitized[key] = value
|
||||
|
||||
sanitized.pop("checkpoint", None)
|
||||
return sanitized
|
||||
|
||||
async def delete_recipe(self, *, recipe_scanner, recipe_id: str) -> PersistenceResult:
|
||||
"""Delete an existing recipe."""
|
||||
|
||||
@@ -173,11 +183,23 @@ class RecipePersistenceService:
|
||||
async def update_recipe(self, *, recipe_scanner, recipe_id: str, updates: dict[str, Any]) -> PersistenceResult:
|
||||
"""Update persisted metadata for a recipe."""
|
||||
|
||||
if not any(key in updates for key in ("title", "tags", "source_path", "preview_nsfw_level", "favorite")):
|
||||
raise RecipeValidationError(
|
||||
"At least one field to update must be provided (title or tags or source_path or preview_nsfw_level or favorite)"
|
||||
allowed_fields = (
|
||||
"title",
|
||||
"tags",
|
||||
"source_path",
|
||||
"preview_nsfw_level",
|
||||
"favorite",
|
||||
"gen_params",
|
||||
)
|
||||
|
||||
if not any(key in updates for key in allowed_fields):
|
||||
raise RecipeValidationError(
|
||||
"At least one field to update must be provided (title or tags or source_path or preview_nsfw_level or favorite or gen_params)"
|
||||
)
|
||||
|
||||
if "gen_params" in updates and not isinstance(updates["gen_params"], dict):
|
||||
raise RecipeValidationError("gen_params must be an object")
|
||||
|
||||
success = await recipe_scanner.update_recipe_metadata(recipe_id, updates)
|
||||
if not success:
|
||||
raise RecipeNotFoundError("Recipe not found or update failed")
|
||||
|
||||
@@ -159,10 +159,51 @@ class ServiceRegistry:
|
||||
return cls._services[service_name]
|
||||
|
||||
from .model_update_service import ModelUpdateService
|
||||
from .persistent_model_cache import get_persistent_cache
|
||||
from .settings_manager import get_settings_manager
|
||||
|
||||
cache = get_persistent_cache()
|
||||
service = ModelUpdateService(cache.get_database_path())
|
||||
service = ModelUpdateService(settings_manager=get_settings_manager())
|
||||
cls._services[service_name] = service
|
||||
logger.debug(f"Created and registered {service_name}")
|
||||
return service
|
||||
|
||||
@classmethod
|
||||
async def get_downloaded_version_history_service(cls):
|
||||
"""Get or create the downloaded-version history service."""
|
||||
|
||||
service_name = "downloaded_version_history_service"
|
||||
|
||||
if service_name in cls._services:
|
||||
return cls._services[service_name]
|
||||
|
||||
async with cls._get_lock(service_name):
|
||||
if service_name in cls._services:
|
||||
return cls._services[service_name]
|
||||
|
||||
from .downloaded_version_history_service import (
|
||||
DownloadedVersionHistoryService,
|
||||
)
|
||||
|
||||
service = DownloadedVersionHistoryService()
|
||||
cls._services[service_name] = service
|
||||
logger.debug(f"Created and registered {service_name}")
|
||||
return service
|
||||
|
||||
@classmethod
|
||||
async def get_backup_service(cls):
|
||||
"""Get or create the backup service."""
|
||||
|
||||
service_name = "backup_service"
|
||||
|
||||
if service_name in cls._services:
|
||||
return cls._services[service_name]
|
||||
|
||||
async with cls._get_lock(service_name):
|
||||
if service_name in cls._services:
|
||||
return cls._services[service_name]
|
||||
|
||||
from .backup_service import BackupService
|
||||
|
||||
service = await BackupService.get_instance()
|
||||
cls._services[service_name] = service
|
||||
logger.debug(f"Created and registered {service_name}")
|
||||
return service
|
||||
|
||||
File diff suppressed because it is too large
@@ -450,9 +450,9 @@ class TagFTSIndex:
|
||||
the tag_name, the result will include a "matched_alias" field.
|
||||
|
||||
Ranking is based on a combination of:
|
||||
1. FTS5 bm25 relevance score (how well the text matches)
|
||||
2. Post count (popularity)
|
||||
3. Exact prefix match boost (tag_name starts with query)
|
||||
1. Exact prefix match boost (tag_name starts with query)
|
||||
2. Post count to preserve expected autocomplete ordering
|
||||
3. FTS5 bm25 relevance score as a deterministic tie-breaker
|
||||
|
||||
Args:
|
||||
query: The search query string.
|
||||
@@ -484,65 +484,17 @@ class TagFTSIndex:
|
||||
with self._lock:
|
||||
conn = self._connect(readonly=True)
|
||||
try:
|
||||
# Build the SQL query with bm25 ranking
|
||||
# FTS5 bm25() returns negative scores, lower is better
|
||||
# We use -bm25() to get higher=better scores
|
||||
# Weights: -100.0 for exact matches, 1.0 for others
|
||||
# Add LOG10(post_count) weighting to boost popular tags
|
||||
# Use CASE to boost tag_name prefix matches above alias matches
|
||||
if categories:
|
||||
placeholders = ",".join("?" * len(categories))
|
||||
sql = f"""
|
||||
SELECT t.tag_name, t.category, t.post_count, t.aliases,
|
||||
CASE
|
||||
WHEN t.tag_name LIKE ? ESCAPE '\\' THEN 1
|
||||
ELSE 0
|
||||
END AS is_tag_name_match,
|
||||
bm25(tag_fts, -100.0, 1.0, 1.0) + LOG10(t.post_count + 1) * 10.0 AS rank_score
|
||||
FROM tag_fts
|
||||
JOIN tags t ON tag_fts.rowid = t.rowid
|
||||
WHERE tag_fts.searchable_text MATCH ?
|
||||
AND t.category IN ({placeholders})
|
||||
ORDER BY is_tag_name_match DESC, rank_score DESC
|
||||
LIMIT ? OFFSET ?
|
||||
"""
|
||||
# Escape special LIKE characters and add wildcard
|
||||
query_escaped = (
|
||||
query_lower.lstrip("/")
|
||||
.replace("\\", "\\\\")
|
||||
.replace("%", "\\%")
|
||||
.replace("_", "\\_")
|
||||
sql, params = self._build_search_statement(
|
||||
query_lower=query_lower,
|
||||
fts_query=fts_query,
|
||||
categories=categories,
|
||||
limit=limit,
|
||||
offset=offset,
|
||||
)
|
||||
params = (
|
||||
[query_escaped + "%", fts_query]
|
||||
+ categories
|
||||
+ [limit, offset]
|
||||
)
|
||||
else:
|
||||
sql = """
|
||||
SELECT t.tag_name, t.category, t.post_count, t.aliases,
|
||||
CASE
|
||||
WHEN t.tag_name LIKE ? ESCAPE '\\' THEN 1
|
||||
ELSE 0
|
||||
END AS is_tag_name_match,
|
||||
bm25(tag_fts, -100.0, 1.0, 1.0) + LOG10(t.post_count + 1) * 10.0 AS rank_score
|
||||
FROM tag_fts
|
||||
JOIN tags t ON tag_fts.rowid = t.rowid
|
||||
WHERE tag_fts.searchable_text MATCH ?
|
||||
ORDER BY is_tag_name_match DESC, rank_score DESC
|
||||
LIMIT ? OFFSET ?
|
||||
"""
|
||||
query_escaped = (
|
||||
query_lower.lstrip("/")
|
||||
.replace("\\", "\\\\")
|
||||
.replace("%", "\\%")
|
||||
.replace("_", "\\_")
|
||||
)
|
||||
params = [query_escaped + "%", fts_query, limit, offset]
|
||||
|
||||
cursor = conn.execute(sql, params)
|
||||
rows = cursor.fetchall()
|
||||
results = []
|
||||
for row in cursor.fetchall():
|
||||
for row in rows:
|
||||
result = {
|
||||
"tag_name": row[0],
|
||||
"category": row[1],
|
||||
@@ -571,6 +523,62 @@ class TagFTSIndex:
|
||||
logger.debug("Tag FTS search error for query '%s': %s", query, exc)
|
||||
return []
|
||||
|
||||
def _build_search_statement(
|
||||
self,
|
||||
query_lower: str,
|
||||
fts_query: str,
|
||||
categories: Optional[List[int]],
|
||||
limit: int,
|
||||
offset: int,
|
||||
) -> tuple[str, list[object]]:
|
||||
"""Build the SQL statement and params for a tag search."""
|
||||
# Escape special LIKE characters and add wildcard
|
||||
query_escaped = (
|
||||
query_lower.lstrip("/")
|
||||
.replace("\\", "\\\\")
|
||||
.replace("%", "\\%")
|
||||
.replace("_", "\\_")
|
||||
)
|
||||
|
||||
# FTS5 bm25() returns negative scores, lower is better.
|
||||
# We use -bm25() to get higher=better scores, but keep post_count as the
|
||||
# primary sort within tag-name prefix matches so autocomplete ordering
|
||||
# remains aligned with the existing popularity-first behavior.
|
||||
if categories:
|
||||
placeholders = ",".join("?" * len(categories))
|
||||
sql = f"""
|
||||
SELECT t.tag_name, t.category, t.post_count, t.aliases,
|
||||
CASE
|
||||
WHEN t.tag_name LIKE ? ESCAPE '\\' THEN 1
|
||||
ELSE 0
|
||||
END AS is_tag_name_match,
|
||||
bm25(tag_fts, -100.0, 1.0, 1.0) AS rank_score
|
||||
FROM tag_fts
|
||||
CROSS JOIN tags t ON t.rowid = tag_fts.rowid
|
||||
WHERE tag_fts.searchable_text MATCH ?
|
||||
AND t.category IN ({placeholders})
|
||||
ORDER BY is_tag_name_match DESC, t.post_count DESC, rank_score DESC
|
||||
LIMIT ? OFFSET ?
|
||||
"""
|
||||
params = [query_escaped + "%", fts_query] + categories + [limit, offset]
|
||||
else:
|
||||
sql = """
|
||||
SELECT t.tag_name, t.category, t.post_count, t.aliases,
|
||||
CASE
|
||||
WHEN t.tag_name LIKE ? ESCAPE '\\' THEN 1
|
||||
ELSE 0
|
||||
END AS is_tag_name_match,
|
||||
bm25(tag_fts, -100.0, 1.0, 1.0) AS rank_score
|
||||
FROM tag_fts
|
||||
JOIN tags t ON tag_fts.rowid = t.rowid
|
||||
WHERE tag_fts.searchable_text MATCH ?
|
||||
ORDER BY is_tag_name_match DESC, t.post_count DESC, rank_score DESC
|
||||
LIMIT ? OFFSET ?
|
||||
"""
|
||||
params = [query_escaped + "%", fts_query, limit, offset]
|
||||
|
||||
return sql, params
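# Hedged usage sketch (the query strings below are illustrative): the generated
# statement ranks tag-name prefix matches first, then post_count, with bm25
# acting only as a deterministic tie-breaker.
#   sql, params = index._build_search_statement(
#       query_lower="blue", fts_query='"blue" *', categories=None,
#       limit=10, offset=0,
#   )
#   rows = connection.execute(sql, params).fetchall()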
|
||||
|
||||
def _find_matched_alias(
|
||||
self, query: str, tag_name: str, aliases_str: str
|
||||
) -> Optional[str]:
|
||||
|
||||
428
py/services/wildcard_service.py
Normal file
@@ -0,0 +1,428 @@
|
||||
"""Managed wildcard loading, search, and text expansion."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import re
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Optional
|
||||
|
||||
import yaml
|
||||
|
||||
from ..utils.settings_paths import get_settings_dir
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_WILDCARD_PATTERN = re.compile(r"__([\w\s.\-+/*\\]+?)__")
|
||||
_OPTION_PATTERN = re.compile(r"{([^{}]*?)}")
|
||||
_TRIGGER_WORD_PATTERN = re.compile(r"^trigger_words\d+$")
|
||||
_WEIGHTED_OPTION_PATTERN = re.compile(r"^\s*([0-9.]+)::")
|
||||
_NUMERIC_PATTERN = re.compile(r"^-?\d+(\.\d+)?$")
|
||||
|
||||
|
||||
def _normalize_wildcard_key(value: str) -> str:
|
||||
return value.replace("\\", "/").strip("/").lower()
|
||||
|
||||
|
||||
def _is_numeric_string(value: str) -> bool:
|
||||
return bool(_NUMERIC_PATTERN.match(value))
|
||||
|
||||
|
||||
def contains_dynamic_syntax(text: str) -> bool:
|
||||
"""Return True when text contains supported wildcard or option syntax."""
|
||||
|
||||
return isinstance(text, str) and bool(
|
||||
_WILDCARD_PATTERN.search(text) or _OPTION_PATTERN.search(text)
|
||||
)
|
||||
|
||||
|
||||
def get_wildcards_dir(create: bool = False) -> str:
|
||||
"""Return the managed wildcard directory inside the settings folder."""
|
||||
|
||||
settings_dir = get_settings_dir(create=create)
|
||||
wildcards_dir = os.path.join(settings_dir, "wildcards")
|
||||
if create:
|
||||
os.makedirs(wildcards_dir, exist_ok=True)
|
||||
return wildcards_dir
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class WildcardEntry:
|
||||
key: str
|
||||
values_count: int
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class WildcardMetadata:
|
||||
has_wildcards: bool
|
||||
wildcards_dir: str
|
||||
supported_formats: tuple[str, ...]
|
||||
|
||||
|
||||
class WildcardService:
|
||||
"""Discover wildcard keys and expand wildcard syntax."""
|
||||
|
||||
_instance: Optional["WildcardService"] = None
|
||||
|
||||
def __new__(cls) -> "WildcardService":
|
||||
if cls._instance is None:
|
||||
cls._instance = super().__new__(cls)
|
||||
return cls._instance
|
||||
|
||||
def __init__(self) -> None:
|
||||
if getattr(self, "_initialized", False):
|
||||
return
|
||||
self._initialized = True
|
||||
self._cached_signature: tuple[tuple[str, int, int], ...] | None = None
|
||||
self._wildcard_dict: dict[str, list[str]] = {}
|
||||
|
||||
@classmethod
|
||||
def get_instance(cls) -> "WildcardService":
|
||||
return cls()
|
||||
|
||||
def search_keys(
|
||||
self, search_term: str, limit: int = 20, offset: int = 0
|
||||
) -> list[str]:
|
||||
"""Search wildcard keys for autocomplete."""
|
||||
|
||||
normalized_term = _normalize_wildcard_key(search_term).strip()
|
||||
if not normalized_term:
|
||||
return []
|
||||
|
||||
ranked: list[tuple[int, str]] = []
|
||||
compact_term = normalized_term.replace("/", "")
|
||||
for key in self.get_wildcard_dict().keys():
|
||||
score = self._score_entry(key, normalized_term, compact_term)
|
||||
if score is not None:
|
||||
ranked.append((score, key))
|
||||
|
||||
ranked.sort(key=lambda item: (-item[0], item[1]))
|
||||
keys = [key for _, key in ranked]
|
||||
return keys[offset : offset + limit]
|
||||
|
||||
def expand_text(self, text: str, seed: int | None = None) -> str:
|
||||
"""Expand wildcard and dynamic prompt syntax for a text value."""
|
||||
|
||||
if not isinstance(text, str) or not text:
|
||||
return text
|
||||
|
||||
rng = random.Random(seed) if seed is not None else random.Random()
|
||||
wildcard_dict = self.get_wildcard_dict()
|
||||
if not wildcard_dict:
|
||||
return self._expand_options_only(text, rng)
|
||||
|
||||
current = text
|
||||
remaining_depth = 100
|
||||
|
||||
while remaining_depth > 0:
|
||||
remaining_depth -= 1
|
||||
after_options, options_replaced = self._replace_options(current, rng)
|
||||
current, wildcards_replaced = self._replace_wildcards(
|
||||
after_options, rng, wildcard_dict
|
||||
)
|
||||
if not options_replaced and not wildcards_replaced:
|
||||
break
|
||||
|
||||
return current
|
||||
|
||||
def get_wildcard_dict(self) -> dict[str, list[str]]:
|
||||
signature = self._build_signature()
|
||||
if signature != self._cached_signature:
|
||||
self._wildcard_dict = self._scan_wildcard_dict()
|
||||
self._cached_signature = signature
|
||||
return self._wildcard_dict
|
||||
|
||||
def get_entries(self) -> list[WildcardEntry]:
|
||||
return [
|
||||
WildcardEntry(key=key, values_count=len(values))
|
||||
for key, values in sorted(self.get_wildcard_dict().items())
|
||||
]
|
||||
|
||||
def get_metadata(self, *, create_dir: bool = False) -> WildcardMetadata:
|
||||
wildcards_dir = get_wildcards_dir(create=create_dir)
|
||||
return WildcardMetadata(
|
||||
has_wildcards=bool(self.get_wildcard_dict()),
|
||||
wildcards_dir=wildcards_dir,
|
||||
supported_formats=(".txt", ".yaml", ".yml", ".json"),
|
||||
)
|
||||
|
||||
def _build_signature(self) -> tuple[tuple[str, int, int], ...]:
|
||||
root = get_wildcards_dir(create=False)
|
||||
if not os.path.isdir(root):
|
||||
return ()
|
||||
|
||||
signature: list[tuple[str, int, int]] = []
|
||||
for current_root, _dirs, files in os.walk(root, followlinks=True):
|
||||
for file_name in sorted(files):
|
||||
if not file_name.lower().endswith((".txt", ".yaml", ".yml", ".json")):
|
||||
continue
|
||||
file_path = os.path.join(current_root, file_name)
|
||||
try:
|
||||
stat = os.stat(file_path)
|
||||
except OSError:
|
||||
continue
|
||||
rel_path = os.path.relpath(file_path, root).replace("\\", "/")
|
||||
signature.append((rel_path, int(stat.st_mtime_ns), int(stat.st_size)))
|
||||
signature.sort()
|
||||
return tuple(signature)
|
||||
|
||||
def _scan_wildcard_dict(self) -> dict[str, list[str]]:
|
||||
root = get_wildcards_dir(create=False)
|
||||
if not os.path.isdir(root):
|
||||
return {}
|
||||
|
||||
collected: dict[str, list[str]] = {}
|
||||
for current_root, _dirs, files in os.walk(root, followlinks=True):
|
||||
for file_name in sorted(files):
|
||||
file_path = os.path.join(current_root, file_name)
|
||||
lower_name = file_name.lower()
|
||||
try:
|
||||
if lower_name.endswith(".txt"):
|
||||
rel_path = os.path.relpath(file_path, root)
|
||||
key = _normalize_wildcard_key(os.path.splitext(rel_path)[0])
|
||||
values = self._read_txt(file_path)
|
||||
if values:
|
||||
collected[key] = values
|
||||
elif lower_name.endswith((".yaml", ".yml")):
|
||||
payload = self._read_yaml(file_path)
|
||||
self._merge_nested_entries(collected, payload)
|
||||
elif lower_name.endswith(".json"):
|
||||
payload = self._read_json(file_path)
|
||||
self._merge_nested_entries(collected, payload)
|
||||
except Exception as exc: # pragma: no cover - defensive logging
|
||||
logger.warning("Failed to load wildcard file %s: %s", file_path, exc)
|
||||
|
||||
return collected
|
||||
|
||||
def _read_txt(self, file_path: str) -> list[str]:
|
||||
try:
|
||||
with open(file_path, "r", encoding="utf-8", errors="ignore") as handle:
|
||||
return [line.strip() for line in handle.read().splitlines() if line.strip()]
|
||||
except OSError as exc:
|
||||
logger.warning("Failed to read wildcard txt file %s: %s", file_path, exc)
|
||||
return []
|
||||
|
||||
def _read_yaml(self, file_path: str) -> Any:
|
||||
with open(file_path, "r", encoding="utf-8") as handle:
|
||||
return yaml.safe_load(handle) or {}
|
||||
|
||||
def _read_json(self, file_path: str) -> Any:
|
||||
with open(file_path, "r", encoding="utf-8") as handle:
|
||||
return json.load(handle)
|
||||
|
||||
def _merge_nested_entries(
|
||||
self, collected: dict[str, list[str]], payload: Any
|
||||
) -> None:
|
||||
for key, values in self._flatten_payload(payload):
|
||||
collected[key] = values
|
||||
|
||||
def _flatten_payload(
|
||||
self, payload: Any, prefix: str = ""
|
||||
) -> list[tuple[str, list[str]]]:
|
||||
entries: list[tuple[str, list[str]]] = []
|
||||
|
||||
if isinstance(payload, dict):
|
||||
for key, value in payload.items():
|
||||
next_prefix = f"{prefix}/{key}" if prefix else str(key)
|
||||
entries.extend(self._flatten_payload(value, next_prefix))
|
||||
return entries
|
||||
|
||||
if isinstance(payload, list):
|
||||
normalized_prefix = _normalize_wildcard_key(prefix)
|
||||
values = [value.strip() for value in payload if isinstance(value, str) and value.strip()]
|
||||
if normalized_prefix and values:
|
||||
entries.append((normalized_prefix, values))
|
||||
return entries
|
||||
|
||||
return entries
|
||||
|
||||
def _score_entry(
|
||||
self, key: str, normalized_term: str, compact_term: str
|
||||
) -> int | None:
|
||||
key_compact = key.replace("/", "")
|
||||
if key == normalized_term:
|
||||
return 5000
|
||||
if key.startswith(normalized_term):
|
||||
return 4000
|
||||
if f"/{normalized_term}" in key:
|
||||
return 3500
|
||||
if normalized_term in key:
|
||||
return 3000
|
||||
if compact_term and key_compact.startswith(compact_term):
|
||||
return 2500
|
||||
if compact_term and compact_term in key_compact:
|
||||
return 2000
|
||||
return None
|
||||
|
||||
def _expand_options_only(self, text: str, rng: random.Random) -> str:
|
||||
current = text
|
||||
remaining_depth = 100
|
||||
while remaining_depth > 0:
|
||||
remaining_depth -= 1
|
||||
current, replaced = self._replace_options(current, rng)
|
||||
if not replaced:
|
||||
break
|
||||
return current
|
||||
|
||||
def _replace_options(
|
||||
self, text: str, rng: random.Random
|
||||
) -> tuple[str, bool]:
|
||||
replaced_any = False
|
||||
|
||||
def replace_option(match: re.Match[str]) -> str:
|
||||
nonlocal replaced_any
|
||||
replacement = self._resolve_option_group(match.group(1), rng)
|
||||
replaced_any = True
|
||||
return replacement
|
||||
|
||||
return _OPTION_PATTERN.sub(replace_option, text), replaced_any
|
||||
|
||||
def _resolve_option_group(self, group_text: str, rng: random.Random) -> str:
|
||||
options = group_text.split("|")
|
||||
multi_select_pattern = options[0].split("$$")
|
||||
select_range: tuple[int, int] | None = None
|
||||
select_separator = " "
|
||||
|
||||
if len(multi_select_pattern) > 1:
|
||||
count_spec = multi_select_pattern[0]
|
||||
range_match = re.match(r"(\d+)(-(\d+))?$", count_spec)
|
||||
shorthand_match = re.match(r"-(\d+)$", count_spec)
|
||||
if range_match:
|
||||
start_text = range_match.group(1)
|
||||
end_text = range_match.group(3)
|
||||
if end_text is not None and _is_numeric_string(start_text) and _is_numeric_string(end_text):
|
||||
select_range = (int(start_text), int(end_text))
|
||||
elif _is_numeric_string(start_text):
|
||||
value = int(start_text)
|
||||
select_range = (value, value)
|
||||
elif shorthand_match:
|
||||
end_text = shorthand_match.group(1)
|
||||
if _is_numeric_string(end_text):
|
||||
select_range = (1, int(end_text))
|
||||
|
||||
if select_range is not None and len(multi_select_pattern) == 2:
|
||||
options[0] = multi_select_pattern[1]
|
||||
elif select_range is not None and len(multi_select_pattern) >= 3:
|
||||
select_separator = multi_select_pattern[1]
|
||||
options[0] = multi_select_pattern[2]
|
||||
|
||||
weighted_options: list[tuple[float, str]] = []
|
||||
for option in options:
|
||||
weight = 1.0
|
||||
parts = option.split("::", 1)
|
||||
if len(parts) == 2 and _is_numeric_string(parts[0].strip()):
|
||||
weight = float(parts[0].strip())
|
||||
weighted_options.append((weight, option))
|
||||
|
||||
if select_range is None:
|
||||
selection_count = 1
|
||||
else:
|
||||
selection_count = rng.randint(select_range[0], select_range[1])
|
||||
|
||||
if selection_count <= 1:
|
||||
return self._strip_weight_prefix(self._weighted_choice(weighted_options, rng))
|
||||
|
||||
selection_count = min(selection_count, len(weighted_options))
|
||||
selected: list[str] = []
|
||||
used_indexes: set[int] = set()
|
||||
while len(selected) < selection_count:
|
||||
picked_index = self._weighted_choice_index(weighted_options, rng)
|
||||
if picked_index in used_indexes:
|
||||
if len(used_indexes) == len(weighted_options):
|
||||
break
|
||||
continue
|
||||
used_indexes.add(picked_index)
|
||||
selected.append(
|
||||
self._strip_weight_prefix(weighted_options[picked_index][1])
|
||||
)
|
||||
|
||||
return select_separator.join(selected)
|
||||
|
||||
def _weighted_choice(
|
||||
self, weighted_options: list[tuple[float, str]], rng: random.Random
|
||||
) -> str:
|
||||
return weighted_options[self._weighted_choice_index(weighted_options, rng)][1]
|
||||
|
||||
def _weighted_choice_index(
|
||||
self, weighted_options: list[tuple[float, str]], rng: random.Random
|
||||
) -> int:
|
||||
total_weight = sum(max(weight, 0.0) for weight, _value in weighted_options)
|
||||
if total_weight <= 0:
|
||||
return rng.randrange(len(weighted_options))
|
||||
|
||||
threshold = rng.uniform(0, total_weight)
|
||||
cumulative = 0.0
|
||||
for index, (weight, _value) in enumerate(weighted_options):
|
||||
cumulative += max(weight, 0.0)
|
||||
if threshold <= cumulative:
|
||||
return index
|
||||
return len(weighted_options) - 1
|
||||
|
||||
def _strip_weight_prefix(self, value: str) -> str:
|
||||
return _WEIGHTED_OPTION_PATTERN.sub("", value, count=1)
|
||||
|
||||
def _replace_wildcards(
|
||||
self,
|
||||
text: str,
|
||||
rng: random.Random,
|
||||
wildcard_dict: dict[str, list[str]],
|
||||
) -> tuple[str, bool]:
|
||||
replaced_any = False
|
||||
|
||||
def replace_match(match: re.Match[str]) -> str:
|
||||
nonlocal replaced_any
|
||||
replacement = self._resolve_wildcard_match(match.group(1), rng, wildcard_dict)
|
||||
if replacement is None:
|
||||
return match.group(0)
|
||||
replaced_any = True
|
||||
return replacement
|
||||
|
||||
return _WILDCARD_PATTERN.sub(replace_match, text), replaced_any
|
||||
|
||||
def _resolve_wildcard_match(
|
||||
self,
|
||||
raw_key: str,
|
||||
rng: random.Random,
|
||||
wildcard_dict: dict[str, list[str]],
|
||||
) -> str | None:
|
||||
keyword = _normalize_wildcard_key(raw_key)
|
||||
if keyword in wildcard_dict:
|
||||
return rng.choice(wildcard_dict[keyword])
|
||||
|
||||
if "*" in keyword:
|
||||
regex_pattern = keyword.replace("*", ".*").replace("+", r"\+")
|
||||
compiled = re.compile(f"^{regex_pattern}$")
|
||||
aggregated: list[str] = []
|
||||
for key, values in wildcard_dict.items():
|
||||
if compiled.match(key):
|
||||
aggregated.extend(values)
|
||||
if aggregated:
|
||||
return rng.choice(aggregated)
|
||||
|
||||
if "/" not in keyword:
|
||||
fallback_keyword = _normalize_wildcard_key(f"*/{keyword}")
|
||||
if fallback_keyword != keyword:
|
||||
return self._resolve_wildcard_match(fallback_keyword, rng, wildcard_dict)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def is_trigger_words_input(name: str) -> bool:
|
||||
return bool(_TRIGGER_WORD_PATTERN.match(name))
|
||||
|
||||
|
||||
def get_wildcard_service() -> WildcardService:
|
||||
return WildcardService.get_instance()
|
||||
|
||||
|
||||
__all__ = [
|
||||
"WildcardService",
|
||||
"WildcardMetadata",
|
||||
"contains_dynamic_syntax",
|
||||
"get_wildcard_service",
|
||||
"get_wildcards_dir",
|
||||
"is_trigger_words_input",
|
||||
]
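# Hedged usage sketch for the wildcard service above. The wildcard key assumes
# a managed file such as <settings_dir>/wildcards/animals/cats.txt exists, and
# the import path assumes the repository layout (py/services/...).
#
#   from py.services.wildcard_service import (
#       contains_dynamic_syntax, get_wildcard_service,
#   )
#
#   prompt = "a photo of a __animals/cats__, {2$$, $$red|blue|0.5::green}"
#   if contains_dynamic_syntax(prompt):
#       # Same seed -> same expansion, which keeps re-runs reproducible.
#       print(get_wildcard_service().expand_text(prompt, seed=42))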
|
||||
@@ -11,6 +11,8 @@ Target structure:
|
||||
│ └── symlink_map.json
|
||||
├── model/
|
||||
│ └── {library_name}.sqlite
|
||||
├── model_update/
|
||||
│ └── {library_name}.sqlite
|
||||
├── recipe/
|
||||
│ └── {library_name}.sqlite
|
||||
└── fts/
|
||||
@@ -36,6 +38,7 @@ class CacheType(Enum):
|
||||
"""Types of cache files managed by the cache path resolver."""
|
||||
|
||||
MODEL = "model"
|
||||
MODEL_UPDATE = "model_update"
|
||||
RECIPE = "recipe"
|
||||
RECIPE_FTS = "recipe_fts"
|
||||
TAG_FTS = "tag_fts"
|
||||
@@ -45,6 +48,7 @@ class CacheType(Enum):
|
||||
# Subdirectory structure for each cache type
|
||||
_CACHE_SUBDIRS = {
|
||||
CacheType.MODEL: "model",
|
||||
CacheType.MODEL_UPDATE: "model_update",
|
||||
CacheType.RECIPE: "recipe",
|
||||
CacheType.RECIPE_FTS: "fts",
|
||||
CacheType.TAG_FTS: "fts",
|
||||
@@ -54,6 +58,7 @@ _CACHE_SUBDIRS = {
|
||||
# Filename patterns for each cache type
|
||||
_CACHE_FILENAMES = {
|
||||
CacheType.MODEL: "{library_name}.sqlite",
|
||||
CacheType.MODEL_UPDATE: "{library_name}.sqlite",
|
||||
CacheType.RECIPE: "{library_name}.sqlite",
|
||||
CacheType.RECIPE_FTS: "recipe_fts.sqlite",
|
||||
CacheType.TAG_FTS: "tag_fts.sqlite",
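# Hedged sketch of how the new MODEL_UPDATE cache type resolves. The settings
# directory in the trailing comment is illustrative, and the import path
# assumes the repository layout (py/utils/cache_paths.py).
#   from py.utils.cache_paths import CacheType, resolve_cache_path_with_migration
#   db_path = resolve_cache_path_with_migration(
#       CacheType.MODEL_UPDATE, library_name="default", env_override=None,
#   )
#   # -> "<settings_dir>/model_update/default.sqlite"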
|
||||
|
||||
@@ -22,7 +22,9 @@ def _normalize_commercial_values(value: Any) -> Sequence[str]:

def _split_aggregate(value_str: str) -> list[str]:
    stripped = value_str.strip()
    looks_aggregate = "," in stripped or (stripped.startswith("{") and stripped.endswith("}"))
    looks_aggregate = "," in stripped or (
        stripped.startswith("{") and stripped.endswith("}")
    )
    if not looks_aggregate:
        return [value_str]

@@ -141,14 +143,18 @@ def build_license_flags(payload: Mapping[str, Any] | None) -> int:
    return flags


def resolve_license_info(model_data: Mapping[str, Any] | None) -> tuple[Dict[str, Any], int]:
def resolve_license_info(
    model_data: Mapping[str, Any] | None,
) -> tuple[Dict[str, Any], int]:
    """Return normalized license payload and its encoded bitset."""

    payload = resolve_license_payload(model_data)
    return payload, build_license_flags(payload)


def rewrite_preview_url(source_url: str | None, media_type: str | None = None) -> tuple[str | None, bool]:
def rewrite_preview_url(
    source_url: str | None, media_type: str | None = None
) -> tuple[str | None, bool]:
    """Rewrite Civitai preview URLs to use optimized renditions.

    Args:
@@ -168,7 +174,12 @@ def rewrite_preview_url(source_url: str | None, media_type: str | None = None) -
    except ValueError:
        return source_url, False

    if parsed.netloc.lower() != "image.civitai.com":
    hostname = parsed.hostname
    if hostname is None:
        return source_url, False

    hostname = hostname.lower()
    if hostname == "civitai.com" or not hostname.endswith(".civitai.com"):
        return source_url, False

    replacement = "/width=450,optimized=true"
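Aside: a rough sketch of the hostname gate described in the hunk above; only subdomains of civitai.com (and not the bare domain) are rewritten. The regex used here to locate the rendition segment is an assumption for illustration, not the function's actual implementation.

import re
from urllib.parse import urlparse

def sketch_rewrite(source_url):
    parsed = urlparse(source_url)
    hostname = (parsed.hostname or "").lower()
    if hostname == "civitai.com" or not hostname.endswith(".civitai.com"):
        return source_url, False
    # Swap whatever width segment is present for the optimized 450px rendition.
    rewritten = re.sub(r"/width=\d+[^/]*", "/width=450,optimized=true", source_url)
    return rewritten, rewritten != source_url

print(sketch_rewrite("https://image.civitai.com/xG1n/width=1024/preview.jpeg"))
print(sketch_rewrite("https://example.com/width=1024/preview.jpeg"))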
@@ -101,6 +101,7 @@ DEFAULT_PRIORITY_TAG_CONFIG = {
DIFFUSION_MODEL_BASE_MODELS = frozenset(
    [
        "ZImageTurbo",
        "ZImageBase",
        "Wan Video 1.3B t2v",
        "Wan Video 14B t2v",
        "Wan Video 14B i2v 480p",
@@ -110,6 +111,71 @@ DIFFUSION_MODEL_BASE_MODELS = frozenset(
        "Wan Video 2.2 T2V-A14B",
        "Wan Video 2.5 T2V",
        "Wan Video 2.5 I2V",
        "CogVideoX",
        "Mochi",
        "Qwen",
    ]
)

# Supported baseModel values for download exclusion settings.
# Keep this aligned with static/js/utils/constants.js, excluding the generic "Other" value.
SUPPORTED_DOWNLOAD_SKIP_BASE_MODELS = frozenset(
    [
        "SD 1.4",
        "SD 1.5",
        "SD 1.5 LCM",
        "SD 1.5 Hyper",
        "SD 2.0",
        "SD 2.1",
        "SD 3",
        "SD 3.5",
        "SD 3.5 Medium",
        "SD 3.5 Large",
        "SD 3.5 Large Turbo",
        "SDXL 1.0",
        "SDXL Lightning",
        "SDXL Hyper",
        "Flux.1 D",
        "Flux.1 S",
        "Flux.1 Krea",
        "Flux.1 Kontext",
        "Flux.2 D",
        "Flux.2 Klein 9B",
        "Flux.2 Klein 9B-base",
        "Flux.2 Klein 4B",
        "Flux.2 Klein 4B-base",
        "AuraFlow",
        "Chroma",
        "PixArt a",
        "PixArt E",
        "Hunyuan 1",
        "Lumina",
        "Kolors",
        "NoobAI",
        "Illustrious",
        "Pony",
        "Pony V7",
        "HiDream",
        "Qwen",
        "ZImageTurbo",
        "ZImageBase",
        "SVD",
        "LTXV",
        "LTXV2",
        "LTXV 2.3",
        "CogVideoX",
        "Mochi",
        "Wan Video",
        "Wan Video 1.3B t2v",
        "Wan Video 14B t2v",
        "Wan Video 14B i2v 480p",
        "Wan Video 14B i2v 720p",
        "Wan Video 2.2 TI2V-5B",
        "Wan Video 2.2 T2V-A14B",
        "Wan Video 2.2 I2V-A14B",
        "Wan Video 2.5 T2V",
        "Wan Video 2.5 I2V",
        "Hunyuan Video",
        "Anima",
    ]
)
@@ -40,49 +40,39 @@ async def calculate_sha256(file_path: str) -> str:
    return sha256_hash.hexdigest()

def find_preview_file(base_name: str, dir_path: str) -> str:
    """Find preview file for given base name in directory"""
    """Find preview file for given base name in directory.

    Performs an exact-case check first (fast path), then falls back to a
    case-insensitive scan so that files like ``model.WEBP`` or ``model.Png``
    are discovered on case-sensitive filesystems.
    """

    temp_extensions = PREVIEW_EXTENSIONS.copy()
    # Add example extension for compatibility
    # https://github.com/willmiao/ComfyUI-Lora-Manager/issues/225
    # The preview image will be optimized to lora-name.webp, so it won't affect other logic
    temp_extensions.append(".example.0.jpeg")

    # Fast path: exact-case match
    for ext in temp_extensions:
        full_pattern = os.path.join(dir_path, f"{base_name}{ext}")
        if os.path.exists(full_pattern):
            # Check if this is an image and not already webp
            # TODO: disable the optimization for now, maybe add a config option later
            # if ext.lower().endswith(('.jpg', '.jpeg', '.png')) and not ext.lower().endswith('.webp'):
            #     try:
            #         # Optimize the image to webp format
            #         webp_path = os.path.join(dir_path, f"{base_name}.webp")

            #         # Use ExifUtils to optimize the image
            #         with open(full_pattern, 'rb') as f:
            #             image_data = f.read()

            #         optimized_data, _ = ExifUtils.optimize_image(
            #             image_data=image_data,
            #             target_width=CARD_PREVIEW_WIDTH,
            #             format='webp',
            #             quality=85,
            #             preserve_metadata=False
            #         )

            #         # Save the optimized webp file
            #         with open(webp_path, 'wb') as f:
            #             f.write(optimized_data)

            #         logger.debug(f"Optimized preview image from {full_pattern} to {webp_path}")
            #         return webp_path.replace(os.sep, "/")
            #     except Exception as e:
            #         logger.error(f"Error optimizing preview image {full_pattern}: {e}")
            #         # Fall back to original file if optimization fails
            #         return full_pattern.replace(os.sep, "/")

            # Return the original path for webp images or non-image files
            return full_pattern.replace(os.sep, "/")

    # Slow path: case-insensitive match for systems with mixed-case extensions
    # (e.g. .WEBP, .Png, .JPG placed manually or by external tools)
    try:
        dir_entries = os.listdir(dir_path)
    except OSError:
        return ""

    base_lower = base_name.lower()
    for ext in temp_extensions:
        target = f"{base_lower}{ext}"  # ext is already lowercase
        for entry in dir_entries:
            if entry.lower() == target:
                return os.path.join(dir_path, entry).replace(os.sep, "/")

    return ""

def get_preview_extension(preview_path: str) -> str:
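Aside: the case-insensitive fallback above matters on case-sensitive filesystems where a preview was saved as, say, my-lora.WEBP. A self-contained sketch of that slow path (it mirrors the scan rather than importing the real helper):

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    open(os.path.join(tmp, "my-lora.WEBP"), "wb").close()

    found = ""
    for entry in os.listdir(tmp):
        # Compare lower-cased names, as the fallback scan does.
        if entry.lower() == "my-lora.webp":
            found = os.path.join(tmp, entry).replace(os.sep, "/")
    print(found or "<no preview>")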
@@ -2,11 +2,12 @@

from __future__ import annotations

from typing import Mapping, Optional, Sequence, Tuple
from typing import Any, Mapping, Optional, Sequence, Tuple

from .constants import NSFW_LEVELS

PreviewMedia = Mapping[str, object]
VALID_MATURE_BLUR_LEVELS = ("PG13", "R", "X", "XXX")


def _extract_nsfw_level(entry: Mapping[str, object]) -> int:
@@ -19,17 +20,36 @@ def _extract_nsfw_level(entry: Mapping[str, object]) -> int:
    return 0


def resolve_mature_threshold(settings: Mapping[str, Any] | None) -> int:
    """Resolve the configured mature blur threshold from settings.

    Allowed values are ``PG13``, ``R``, ``X``, and ``XXX``. Any invalid or
    missing value falls back to ``R``.
    """

    if not isinstance(settings, Mapping):
        return NSFW_LEVELS.get("R", 4)

    raw_level = settings.get("mature_blur_level", "R")
    normalized = str(raw_level).strip().upper()
    if normalized not in VALID_MATURE_BLUR_LEVELS:
        normalized = "R"
    return NSFW_LEVELS.get(normalized, NSFW_LEVELS.get("R", 4))


def select_preview_media(
    images: Sequence[Mapping[str, object]] | None,
    *,
    blur_mature_content: bool,
    mature_threshold: int | None = None,
) -> Tuple[Optional[PreviewMedia], int]:
    """Select the most appropriate preview media entry.

    When ``blur_mature_content`` is enabled we first try to return the first media
    item with an ``nsfwLevel`` lower than :pydata:`NSFW_LEVELS["R"]`. If none are
    available we return the media entry with the lowest NSFW level. When the
    setting is disabled we simply return the first entry.
    item with an ``nsfwLevel`` lower than the configured mature threshold
    (defaults to :pydata:`NSFW_LEVELS["R"]`). If none are available we return
    the media entry with the lowest NSFW level. When the setting is disabled we
    simply return the first entry.
    """

    if not images:
@@ -45,7 +65,9 @@ def select_preview_media(
    if not blur_mature_content:
        return selected, selected_level

    safe_threshold = NSFW_LEVELS.get("R", 4)
    safe_threshold = (
        mature_threshold if isinstance(mature_threshold, int) else NSFW_LEVELS.get("R", 4)
    )
    for candidate in candidates:
        level = _extract_nsfw_level(candidate)
        if level < safe_threshold:
@@ -60,4 +82,4 @@ def select_preview_media(
    return selected, selected_level


__all__ = ["select_preview_media"]
__all__ = ["resolve_mature_threshold", "select_preview_media", "VALID_MATURE_BLUR_LEVELS"]
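Aside: how the threshold and the selection loop interact, with an inline level map for illustration (the real values come from NSFW_LEVELS in the constants module):

NSFW_LEVELS = {"PG": 1, "PG13": 2, "R": 4, "X": 8, "XXX": 16}  # illustrative copy

def sketch_threshold(settings):
    raw = (settings or {}).get("mature_blur_level", "R")
    normalized = str(raw).strip().upper()
    if normalized not in ("PG13", "R", "X", "XXX"):
        normalized = "R"
    return NSFW_LEVELS[normalized]

images = [{"nsfwLevel": 8}, {"nsfwLevel": 2}]
threshold = sketch_threshold({"mature_blur_level": "x"})  # case-insensitive -> X -> 8
safe = next((img for img in images if img["nsfwLevel"] < threshold), None)
print(threshold, safe)  # 8 {'nsfwLevel': 2}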
py/utils/session_logging.py (new file, 136 lines)
@@ -0,0 +1,136 @@
from __future__ import annotations

import logging
import os
import threading
import uuid
from collections import deque
from dataclasses import dataclass
from datetime import datetime, timezone
from typing import Any


LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
_SESSION_HANDLER_NAME = "lora_manager_standalone_session_memory"
_FILE_HANDLER_NAME = "lora_manager_standalone_session_file"
_session_state: "StandaloneSessionLogState | None" = None
_session_lock = threading.Lock()


@dataclass
class StandaloneSessionLogState:
    started_at: str
    session_id: str
    log_file_path: str | None
    memory_handler: "StandaloneSessionMemoryHandler"


class StandaloneSessionMemoryHandler(logging.Handler):
    def __init__(self, capacity: int = 4000) -> None:
        super().__init__()
        self._entries: deque[str] = deque(maxlen=capacity)
        self._lock = threading.Lock()

    def emit(self, record: logging.LogRecord) -> None:
        try:
            rendered = self.format(record)
        except Exception:
            rendered = record.getMessage()

        with self._lock:
            self._entries.append(rendered)

    def render(self, max_lines: int | None = None) -> str:
        with self._lock:
            entries = list(self._entries)

        if max_lines is not None and max_lines > 0:
            entries = entries[-max_lines:]

        if not entries:
            return ""

        return "\n".join(entries) + "\n"


def _build_log_file_path(settings_file: str | None, started_at: datetime) -> str | None:
    if not settings_file:
        return None

    settings_dir = os.path.dirname(os.path.abspath(settings_file))
    log_dir = os.path.join(settings_dir, "logs")
    os.makedirs(log_dir, exist_ok=True)
    timestamp = started_at.strftime("%Y%m%dT%H%M%SZ")
    return os.path.join(log_dir, f"standalone-session-{timestamp}.log")


def setup_standalone_session_logging(settings_file: str | None) -> StandaloneSessionLogState:
    global _session_state

    with _session_lock:
        if _session_state is not None:
            return _session_state

        started_dt = datetime.now(timezone.utc)
        started_at = started_dt.replace(microsecond=0).isoformat()
        session_id = f"{started_dt.strftime('%Y%m%dT%H%M%SZ')}-{uuid.uuid4().hex[:8]}"
        formatter = logging.Formatter(LOG_FORMAT)
        root_logger = logging.getLogger()
        if root_logger.level > logging.INFO:
            root_logger.setLevel(logging.INFO)

        memory_handler = StandaloneSessionMemoryHandler()
        memory_handler.set_name(_SESSION_HANDLER_NAME)
        memory_handler.setFormatter(formatter)
        root_logger.addHandler(memory_handler)

        log_file_path = _build_log_file_path(settings_file, started_dt)
        if log_file_path:
            file_handler = logging.FileHandler(log_file_path, encoding="utf-8")
            file_handler.set_name(_FILE_HANDLER_NAME)
            file_handler.setFormatter(formatter)
            root_logger.addHandler(file_handler)

        _session_state = StandaloneSessionLogState(
            started_at=started_at,
            session_id=session_id,
            log_file_path=log_file_path,
            memory_handler=memory_handler,
        )

        logger = logging.getLogger("lora-manager-standalone")
        logger.info("LoRA Manager standalone startup time: %s", started_at)
        logger.info("LoRA Manager standalone session id: %s", session_id)
        if log_file_path:
            logger.info("LoRA Manager standalone session log path: %s", log_file_path)

        return _session_state


def get_standalone_session_log_snapshot(max_lines: int = 2000) -> dict[str, Any] | None:
    state = _session_state
    if state is None:
        return None

    return {
        "started_at": state.started_at,
        "session_id": state.session_id,
        "log_file_path": state.log_file_path,
        "in_memory_text": state.memory_handler.render(max_lines=max_lines),
    }


def reset_standalone_session_logging_for_tests() -> None:
    global _session_state

    with _session_lock:
        root_logger = logging.getLogger()
        handlers_to_remove = [
            handler
            for handler in root_logger.handlers
            if handler.get_name() in {_SESSION_HANDLER_NAME, _FILE_HANDLER_NAME}
        ]
        for handler in handlers_to_remove:
            root_logger.removeHandler(handler)
            handler.close()
        _session_state = None
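Aside: a minimal usage sketch for the new module, using only the functions it defines. Passing settings_file=None skips the on-disk log file while still capturing the in-memory buffer.

import logging

from py.utils.session_logging import (
    get_standalone_session_log_snapshot,
    setup_standalone_session_logging,
)

state = setup_standalone_session_logging(settings_file=None)
logging.getLogger("lora-manager-standalone").info("example entry")

snapshot = get_standalone_session_log_snapshot(max_lines=50)
print(snapshot["session_id"])
print(snapshot["in_memory_text"].splitlines()[-1])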
@@ -29,6 +29,18 @@ if not standalone_mode:

logger = logging.getLogger(__name__)

_DEFAULT_CHECKPOINT_EXTENSIONS = {
    ".ckpt",
    ".pt",
    ".pt2",
    ".bin",
    ".pth",
    ".safetensors",
    ".pkl",
    ".sft",
    ".gguf",
}

class UsageStats:
    """Track usage statistics for models and save to JSON"""

@@ -292,6 +304,151 @@ class UsageStats:
        if LORAS in metadata and isinstance(metadata[LORAS], dict):
            await self._process_loras(metadata[LORAS], today)

    def _increment_usage_counter(self, category: str, stat_key: str, today_date: str) -> None:
        """Increment usage counters for a resolved stats key."""
        if stat_key not in self.stats[category]:
            self.stats[category][stat_key] = {
                "total": 0,
                "history": {}
            }

        self.stats[category][stat_key]["total"] += 1

        if today_date not in self.stats[category][stat_key]["history"]:
            self.stats[category][stat_key]["history"][today_date] = 0
        self.stats[category][stat_key]["history"][today_date] += 1

    def _normalize_model_lookup_name(self, model_name: str) -> str:
        """Normalize a model reference to its base filename without extension."""
        return os.path.splitext(os.path.basename(model_name))[0]

    async def _find_cached_checkpoint_entry(self, checkpoint_scanner, model_name: str):
        """Best-effort lookup for a checkpoint cache entry by filename/model name."""
        get_cached_data = getattr(checkpoint_scanner, "get_cached_data", None)
        if not callable(get_cached_data):
            return None

        cache = await get_cached_data()
        raw_data = getattr(cache, "raw_data", None)
        if not isinstance(raw_data, list):
            return None

        normalized_name = self._normalize_model_lookup_name(model_name)
        for entry in raw_data:
            if not isinstance(entry, dict):
                continue

            for candidate_key in ("file_name", "model_name", "file_path"):
                candidate_value = entry.get(candidate_key)
                if not candidate_value or not isinstance(candidate_value, str):
                    continue
                if self._normalize_model_lookup_name(candidate_value) == normalized_name:
                    return entry

        return None

    async def _find_checkpoint_file_on_disk(self, checkpoint_scanner, model_name: str):
        """Search checkpoint roots directly for a matching file.

        This is used when usage tracking sees a checkpoint name before the cache has
        been refreshed. The lookup is intentionally exact: we only match the model
        basename and supported checkpoint extensions.
        """
        get_model_roots = getattr(checkpoint_scanner, "get_model_roots", None)
        if not callable(get_model_roots):
            return None

        roots = [root for root in get_model_roots() if root]
        if not roots:
            return None

        supported_extensions = getattr(
            checkpoint_scanner, "file_extensions", _DEFAULT_CHECKPOINT_EXTENSIONS
        )
        if not isinstance(supported_extensions, (set, frozenset, list, tuple)):
            supported_extensions = _DEFAULT_CHECKPOINT_EXTENSIONS

        normalized_name = self._normalize_model_lookup_name(model_name)
        matches: list[str] = []

        for root_path in roots:
            if not os.path.exists(root_path):
                continue

            for dirpath, _dirnames, filenames in os.walk(root_path):
                for filename in filenames:
                    extension = os.path.splitext(filename)[1].lower()
                    if extension not in supported_extensions:
                        continue

                    if os.path.splitext(filename)[0] != normalized_name:
                        continue

                    matches.append(os.path.join(dirpath, filename).replace(os.sep, "/"))

        if len(matches) > 1:
            logger.warning(
                "Multiple checkpoint files matched '%s'; skipping usage tracking: %s",
                normalized_name,
                ", ".join(matches),
            )
            return None

        return matches[0] if matches else None

    async def _resolve_checkpoint_hash(self, checkpoint_scanner, model_name: str):
        """Resolve a checkpoint hash, calculating pending hashes on demand when needed."""
        model_filename = self._normalize_model_lookup_name(model_name)
        model_hash = checkpoint_scanner.get_hash_by_filename(model_filename)
        if model_hash:
            return model_hash

        cached_entry = await self._find_cached_checkpoint_entry(checkpoint_scanner, model_name)
        if cached_entry:
            cached_hash = cached_entry.get("sha256")
            if cached_hash:
                return cached_hash

            hash_status = cached_entry.get("hash_status")
            if hash_status and hash_status != "pending":
                logger.warning(
                    "Checkpoint '%s' has hash_status=%s; skipping usage tracking",
                    model_filename,
                    hash_status,
                )
                return None

        file_path = cached_entry.get("file_path") if cached_entry else None
        if not file_path:
            file_path = await self._find_checkpoint_file_on_disk(
                checkpoint_scanner, model_name
            )

        if not file_path:
            logger.warning(
                f"No hash found for checkpoint '{model_filename}', skipping usage tracking"
            )
            return None

        calculate_hash = getattr(checkpoint_scanner, "calculate_hash_for_model", None)
        if not callable(calculate_hash):
            logger.warning("Checkpoint scanner not available for usage tracking")
            return None

        logger.info(
            "Calculating hash for checkpoint '%s' from %s",
            model_filename,
            file_path,
        )
        calculated_hash = await calculate_hash(file_path)
        if calculated_hash:
            return calculated_hash

        logger.warning(
            f"Failed to calculate hash for checkpoint '{model_filename}', skipping usage tracking"
        )
        return None

    async def _process_checkpoints(self, models_data, today_date):
        """Process checkpoint models from metadata"""
        try:
@@ -312,26 +469,11 @@ class UsageStats:
                if not model_name:
                    continue

                # Clean up filename (remove extension if present)
                model_filename = os.path.splitext(os.path.basename(model_name))[0]
                model_hash = await self._resolve_checkpoint_hash(checkpoint_scanner, model_name)
                if not model_hash:
                    continue

                # Get hash for this checkpoint
                model_hash = checkpoint_scanner.get_hash_by_filename(model_filename)
                if model_hash:
                    # Update stats for this checkpoint with date tracking
                    if model_hash not in self.stats["checkpoints"]:
                        self.stats["checkpoints"][model_hash] = {
                            "total": 0,
                            "history": {}
                        }

                    # Increment total count
                    self.stats["checkpoints"][model_hash]["total"] += 1

                    # Increment today's count
                    if today_date not in self.stats["checkpoints"][model_hash]["history"]:
                        self.stats["checkpoints"][model_hash]["history"][today_date] = 0
                    self.stats["checkpoints"][model_hash]["history"][today_date] += 1
                self._increment_usage_counter("checkpoints", model_hash, today_date)
        except Exception as e:
            logger.error(f"Error processing checkpoint usage: {e}", exc_info=True)

@@ -360,21 +502,11 @@ class UsageStats:

                # Get hash for this LoRA
                lora_hash = lora_scanner.get_hash_by_filename(lora_name)
                if lora_hash:
                    # Update stats for this LoRA with date tracking
                    if lora_hash not in self.stats["loras"]:
                        self.stats["loras"][lora_hash] = {
                            "total": 0,
                            "history": {}
                        }
                if not lora_hash:
                    logger.warning(f"No hash found for LoRA '{lora_name}', skipping usage tracking")
                    continue

                    # Increment total count
                    self.stats["loras"][lora_hash]["total"] += 1

                    # Increment today's count
                    if today_date not in self.stats["loras"][lora_hash]["history"]:
                        self.stats["loras"][lora_hash]["history"][today_date] = 0
                    self.stats["loras"][lora_hash]["history"][today_date] += 1
                self._increment_usage_counter("loras", lora_hash, today_date)
        except Exception as e:
            logger.error(f"Error processing LoRA usage: {e}", exc_info=True)
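Aside: the refactored counters all share the shape stats[category][hash] = {"total": n, "history": {date: n}}, which _increment_usage_counter now maintains in one place. A standalone sketch of that shape:

stats = {"checkpoints": {}, "loras": {}}

def increment(category, stat_key, today):
    entry = stats[category].setdefault(stat_key, {"total": 0, "history": {}})
    entry["total"] += 1
    entry["history"][today] = entry["history"].get(today, 0) + 1

increment("loras", "abc123", "2025-01-01")
increment("loras", "abc123", "2025-01-01")
print(stats["loras"]["abc123"])  # {'total': 2, 'history': {'2025-01-01': 2}}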
@@ -148,8 +148,8 @@ def get_checkpoint_info_absolute(checkpoint_name):
        # Format the stored path as ComfyUI-style name
        formatted_name = _format_model_name_for_comfyui(file_path, model_roots)

        # Match by formatted name
        if formatted_name == normalized_name or formatted_name == checkpoint_name:
        # Match by formatted name (normalize separators for robust comparison)
        if formatted_name.replace(os.sep, "/") == normalized_name or formatted_name == checkpoint_name:
            return file_path, item

    # Also try matching by basename only (for backward compatibility)
@@ -200,19 +200,22 @@ def _format_model_name_for_comfyui(file_path: str, model_roots: list) -> str:
    Returns:
        ComfyUI-style model name with relative path and extension
    """
    # Normalize path separators
    normalized_path = file_path.replace(os.sep, "/")

    # Find the matching root and get relative path
    for root in model_roots:
        normalized_root = root.replace(os.sep, "/")
        # Ensure root ends with / for proper matching
        if not normalized_root.endswith("/"):
            normalized_root += "/"
        try:
            # Normalize paths for comparison
            norm_file = os.path.normcase(os.path.abspath(file_path))
            norm_root = os.path.normcase(os.path.abspath(root))

            if normalized_path.startswith(normalized_root):
                rel_path = normalized_path[len(normalized_root) :]
                return rel_path
            # Add trailing separator for prefix check
            if not norm_root.endswith(os.sep):
                norm_root += os.sep

            if norm_file.startswith(norm_root):
                # Use os.path.relpath to get relative path with OS-native separator
                return os.path.relpath(file_path, root)
        except (ValueError, TypeError):
            continue

    # If no root matches, just return the basename with extension
    return os.path.basename(file_path)
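Aside: the switch to os.path.normcase/os.path.abspath plus os.path.relpath above makes the root prefix check robust to case and separator differences (notably on Windows). The same idea in isolation:

import os

def format_name(file_path, model_roots):
    for root in model_roots:
        try:
            norm_file = os.path.normcase(os.path.abspath(file_path))
            norm_root = os.path.normcase(os.path.abspath(root))
            if not norm_root.endswith(os.sep):
                norm_root += os.sep
            if norm_file.startswith(norm_root):
                return os.path.relpath(file_path, root)
        except (ValueError, TypeError):
            continue
    return os.path.basename(file_path)

print(format_name("/models/checkpoints/sdxl/base.safetensors", ["/models/checkpoints"]))
# -> "sdxl/base.safetensors" (separator is OS-native)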
@@ -1,7 +1,7 @@
[project]
name = "comfyui-lora-manager"
description = "Revolutionize your workflow with the ultimate LoRA companion for ComfyUI!"
version = "1.0.0"
version = "1.0.3"
license = {file = "LICENSE"}
dependencies = [
    "aiohttp",
@@ -14,7 +14,8 @@ dependencies = [
    "natsort",
    "GitPython",
    "aiosqlite",
    "platformdirs"
    "platformdirs",
    "pyyaml"
]

[project.urls]

@@ -1,5 +1,5 @@
[pytest]
addopts = -v --import-mode=importlib -m "not performance"
addopts = -v --import-mode=importlib -m "not performance" --ignore=__init__.py
testpaths = tests
python_files = test_*.py
python_classes = Test*

@@ -11,3 +11,4 @@ GitPython
aiosqlite
beautifulsoup4
platformdirs
pyyaml
@@ -113,6 +113,8 @@ import asyncio
import logging
from aiohttp import web

from py.utils.session_logging import setup_standalone_session_logging

# Increase allowable header size to align with in-ComfyUI configuration.
HEADER_SIZE_LIMIT = 16384

@@ -125,6 +127,8 @@ logger = logging.getLogger("lora-manager-standalone")
# Configure aiohttp access logger to be less verbose
logging.getLogger("aiohttp.access").setLevel(logging.WARNING)

setup_standalone_session_logging(ensure_settings_file(logger))


# Add specific suppression for connection reset errors
class ConnectionResetFilter(logging.Filter):
|
||||
|
||||
@@ -687,7 +687,7 @@
|
||||
padding: 12px 16px;
|
||||
background: oklch(var(--lora-warning) / 0.1);
|
||||
border: 1px solid var(--lora-warning);
|
||||
border-radius: var(--border-radius-sm) var(--border-radius-sm) 0 0;
|
||||
border-radius: var(--border-radius-sm);
|
||||
color: var(--text-color);
|
||||
}
|
||||
|
||||
|
||||
@@ -835,7 +835,8 @@
|
||||
}
|
||||
|
||||
[data-theme="dark"] .creator-info,
|
||||
[data-theme="dark"] .civitai-view {
|
||||
[data-theme="dark"] .civitai-view,
|
||||
[data-theme="dark"] .modal-send-btn {
|
||||
background: rgba(255, 255, 255, 0.03);
|
||||
border: 1px solid var(--lora-border);
|
||||
}
|
||||
@@ -875,7 +876,8 @@
|
||||
|
||||
/* Add hover effect for creator info */
|
||||
.creator-info:hover,
|
||||
.civitai-view:hover {
|
||||
.civitai-view:hover,
|
||||
.modal-send-btn:hover {
|
||||
background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.1);
|
||||
border-color: var(--lora-accent);
|
||||
transform: translateY(-1px);
|
||||
@@ -910,3 +912,42 @@
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
}
|
||||
|
||||
/* Send to ComfyUI Button */
|
||||
.modal-send-btn {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 6px;
|
||||
padding: 6px 12px;
|
||||
background: rgba(0, 0, 0, 0.03);
|
||||
border: 1px solid rgba(0, 0, 0, 0.1);
|
||||
border-radius: var(--border-radius-sm);
|
||||
color: var(--text-color);
|
||||
cursor: pointer;
|
||||
font-weight: 500;
|
||||
font-size: 0.9em;
|
||||
transition: all 0.2s;
|
||||
}
|
||||
|
||||
[data-theme="dark"] .modal-send-btn {
|
||||
background: rgba(255, 255, 255, 0.03);
|
||||
border: 1px solid var(--lora-border);
|
||||
}
|
||||
|
||||
.modal-send-btn:hover {
|
||||
background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.1);
|
||||
border-color: var(--lora-accent);
|
||||
transform: translateY(-1px);
|
||||
}
|
||||
|
||||
.modal-send-btn:active {
|
||||
transform: translateY(0);
|
||||
}
|
||||
|
||||
.modal-send-btn i {
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
.modal-send-btn span {
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
@@ -163,6 +163,18 @@
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.model-version-row.is-clickable .version-actions,
|
||||
.model-version-row.is-clickable .version-badges,
|
||||
.model-version-row.is-clickable .version-action,
|
||||
.model-version-row.is-clickable .version-civitai-link {
|
||||
cursor: default;
|
||||
}
|
||||
|
||||
.model-version-row.is-clickable .version-action,
|
||||
.model-version-row.is-clickable .version-civitai-link {
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.model-version-row.is-current {
|
||||
border-color: var(--lora-accent);
|
||||
box-shadow: 0 0 0 1px color-mix(in oklch, var(--lora-accent) 65%, transparent),
|
||||
@@ -217,6 +229,7 @@
|
||||
gap: 8px;
|
||||
font-weight: 600;
|
||||
font-size: 0.95rem;
|
||||
min-width: 0;
|
||||
}
|
||||
|
||||
.versions-tab-version-name {
|
||||
@@ -226,6 +239,27 @@
|
||||
max-width: 100%;
|
||||
}
|
||||
|
||||
.version-civitai-link {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
width: 24px;
|
||||
height: 24px;
|
||||
border-radius: 999px;
|
||||
color: var(--text-muted);
|
||||
text-decoration: none;
|
||||
flex: 0 0 auto;
|
||||
transition: color 0.2s ease, background-color 0.2s ease, transform 0.2s ease;
|
||||
}
|
||||
|
||||
.version-civitai-link:hover,
|
||||
.version-civitai-link:focus-visible {
|
||||
color: var(--lora-accent);
|
||||
background: color-mix(in oklch, var(--lora-accent) 12%, transparent);
|
||||
transform: translateY(-1px);
|
||||
outline: none;
|
||||
}
|
||||
|
||||
.version-badges {
|
||||
display: flex;
|
||||
flex-wrap: wrap;
|
||||
|
||||
@@ -151,7 +151,8 @@ body.modal-open {
|
||||
[data-theme="dark"] .changelog-section,
|
||||
[data-theme="dark"] .update-info,
|
||||
[data-theme="dark"] .info-item,
|
||||
[data-theme="dark"] .path-preview {
|
||||
[data-theme="dark"] .path-preview,
|
||||
[data-theme="dark"] #bulkDownloadMissingLorasModal .bulk-download-loras-preview {
|
||||
background: rgba(255, 255, 255, 0.03);
|
||||
border: 1px solid var(--lora-border);
|
||||
}
|
||||
@@ -310,6 +311,161 @@ button:disabled,
|
||||
color: var(--lora-error, #ef4444);
|
||||
}
|
||||
|
||||
.backup-status {
|
||||
background: rgba(0, 0, 0, 0.03);
|
||||
border: 1px solid rgba(0, 0, 0, 0.1);
|
||||
border-radius: var(--border-radius-sm);
|
||||
padding: var(--space-3);
|
||||
}
|
||||
|
||||
[data-theme="dark"] .backup-status {
|
||||
background: rgba(255, 255, 255, 0.03);
|
||||
border: 1px solid var(--lora-border);
|
||||
}
|
||||
|
||||
.backup-summary-grid {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(auto-fit, minmax(160px, 1fr));
|
||||
gap: var(--space-2);
|
||||
margin-bottom: var(--space-3);
|
||||
}
|
||||
|
||||
.backup-summary-card {
|
||||
background: rgba(255, 255, 255, 0.5);
|
||||
border: 1px solid rgba(0, 0, 0, 0.06);
|
||||
border-radius: var(--border-radius-sm);
|
||||
padding: var(--space-2);
|
||||
}
|
||||
|
||||
[data-theme="dark"] .backup-summary-card {
|
||||
background: rgba(255, 255, 255, 0.02);
|
||||
border-color: rgba(255, 255, 255, 0.05);
|
||||
}
|
||||
|
||||
.backup-summary-label {
|
||||
color: var(--text-color);
|
||||
font-size: 0.85rem;
|
||||
opacity: 0.7;
|
||||
margin-bottom: 6px;
|
||||
}
|
||||
|
||||
.backup-summary-value {
|
||||
color: var(--text-color);
|
||||
font-size: 1.1rem;
|
||||
font-weight: 600;
|
||||
line-height: 1.3;
|
||||
word-break: break-word;
|
||||
}
|
||||
|
||||
.backup-summary-value.status-enabled {
|
||||
color: var(--lora-success, #10b981);
|
||||
}
|
||||
|
||||
.backup-summary-value.status-disabled {
|
||||
color: var(--lora-error, #ef4444);
|
||||
}
|
||||
|
||||
.backup-status-list {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: var(--space-2);
|
||||
}
|
||||
|
||||
.backup-status-row {
|
||||
display: grid;
|
||||
grid-template-columns: minmax(140px, 180px) 1fr;
|
||||
gap: var(--space-2);
|
||||
align-items: start;
|
||||
}
|
||||
|
||||
.backup-status-label {
|
||||
color: var(--text-color);
|
||||
font-weight: 500;
|
||||
opacity: 0.8;
|
||||
}
|
||||
|
||||
.backup-status-content {
|
||||
min-width: 0;
|
||||
}
|
||||
|
||||
.backup-status-primary {
|
||||
color: var(--text-color);
|
||||
font-weight: 600;
|
||||
line-height: 1.4;
|
||||
}
|
||||
|
||||
.backup-status-secondary {
|
||||
color: var(--text-color);
|
||||
opacity: 0.72;
|
||||
font-size: 0.88rem;
|
||||
line-height: 1.4;
|
||||
word-break: break-word;
|
||||
margin-top: 2px;
|
||||
}
|
||||
|
||||
.backup-location-details {
|
||||
border: 1px solid rgba(0, 0, 0, 0.1);
|
||||
border-radius: var(--border-radius-sm);
|
||||
background: rgba(0, 0, 0, 0.02);
|
||||
}
|
||||
|
||||
[data-theme="dark"] .backup-location-details {
|
||||
border-color: var(--lora-border);
|
||||
background: rgba(255, 255, 255, 0.02);
|
||||
}
|
||||
|
||||
.backup-location-details summary {
|
||||
cursor: pointer;
|
||||
padding: var(--space-2) var(--space-3);
|
||||
color: var(--text-color);
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
.backup-location-panel {
|
||||
display: grid;
|
||||
grid-template-columns: minmax(0, 1fr) auto;
|
||||
gap: var(--space-2);
|
||||
align-items: center;
|
||||
width: 100%;
|
||||
max-width: 100%;
|
||||
box-sizing: border-box;
|
||||
padding: 0 var(--space-3) var(--space-3);
|
||||
}
|
||||
|
||||
.backup-location-panel .text-btn {
|
||||
justify-self: end;
|
||||
}
|
||||
|
||||
.backup-location-path {
|
||||
display: block;
|
||||
min-width: 0;
|
||||
max-width: 100%;
|
||||
padding: 6px 8px;
|
||||
border-radius: var(--border-radius-sm);
|
||||
background: rgba(0, 0, 0, 0.05);
|
||||
color: var(--text-color);
|
||||
overflow-wrap: anywhere;
|
||||
word-break: break-word;
|
||||
}
|
||||
|
||||
[data-theme="dark"] .backup-location-path {
|
||||
background: rgba(255, 255, 255, 0.05);
|
||||
}
|
||||
|
||||
@media (max-width: 768px) {
|
||||
.backup-status-row {
|
||||
grid-template-columns: 1fr;
|
||||
}
|
||||
|
||||
.backup-location-panel {
|
||||
grid-template-columns: 1fr;
|
||||
}
|
||||
|
||||
.backup-location-panel .text-btn {
|
||||
justify-self: start;
|
||||
}
|
||||
}
|
||||
|
||||
/* Add styles for delete preview image */
|
||||
.delete-preview {
|
||||
max-width: 150px;
|
||||
@@ -349,3 +505,87 @@ button:disabled,
|
||||
margin-top: var(--space-1);
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
/* Bulk Download Missing LoRAs Modal */
|
||||
#bulkDownloadMissingLorasModal .modal-body {
|
||||
padding: var(--space-3);
|
||||
}
|
||||
|
||||
#bulkDownloadMissingLorasModal .confirmation-message {
|
||||
color: var(--text-color);
|
||||
margin-bottom: var(--space-3);
|
||||
font-size: 1em;
|
||||
line-height: 1.5;
|
||||
}
|
||||
|
||||
#bulkDownloadMissingLorasModal .bulk-download-loras-preview {
|
||||
background: rgba(0, 0, 0, 0.03);
|
||||
border: 1px solid rgba(0, 0, 0, 0.1);
|
||||
border-radius: var(--border-radius-sm);
|
||||
padding: var(--space-3);
|
||||
margin-bottom: var(--space-3);
|
||||
}
|
||||
|
||||
#bulkDownloadMissingLorasModal .preview-title {
|
||||
font-weight: 600;
|
||||
margin-bottom: var(--space-2);
|
||||
color: var(--text-color);
|
||||
font-size: 0.95em;
|
||||
}
|
||||
|
||||
#bulkDownloadMissingLorasModal .bulk-download-loras-list {
|
||||
list-style: none;
|
||||
padding: 0;
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
#bulkDownloadMissingLorasModal .bulk-download-loras-list li {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
padding: var(--space-1) 0;
|
||||
border-bottom: 1px solid var(--border-color);
|
||||
font-size: 0.9em;
|
||||
}
|
||||
|
||||
#bulkDownloadMissingLorasModal .bulk-download-loras-list li:last-child {
|
||||
border-bottom: none;
|
||||
}
|
||||
|
||||
#bulkDownloadMissingLorasModal .bulk-download-loras-list li.more-items {
|
||||
font-style: italic;
|
||||
opacity: 0.7;
|
||||
text-align: center;
|
||||
justify-content: center;
|
||||
padding: var(--space-2) 0;
|
||||
}
|
||||
|
||||
#bulkDownloadMissingLorasModal .lora-name {
|
||||
font-weight: 500;
|
||||
color: var(--text-color);
|
||||
flex: 1;
|
||||
}
|
||||
|
||||
#bulkDownloadMissingLorasModal .lora-version {
|
||||
font-size: 0.85em;
|
||||
opacity: 0.7;
|
||||
margin-left: var(--space-1);
|
||||
color: var(--text-muted);
|
||||
}
|
||||
|
||||
#bulkDownloadMissingLorasModal .confirmation-note {
|
||||
display: flex;
|
||||
align-items: flex-start;
|
||||
gap: var(--space-2);
|
||||
padding: var(--space-2);
|
||||
background: rgba(59, 130, 246, 0.1);
|
||||
border-radius: var(--border-radius-sm);
|
||||
font-size: 0.9em;
|
||||
color: var(--text-color);
|
||||
}
|
||||
|
||||
#bulkDownloadMissingLorasModal .confirmation-note i {
|
||||
color: var(--lora-accent);
|
||||
margin-top: 2px;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
281
static/css/components/modal/doctor-modal.css
Normal file
281
static/css/components/modal/doctor-modal.css
Normal file
@@ -0,0 +1,281 @@
|
||||
.doctor-trigger {
|
||||
min-width: 120px;
|
||||
position: relative;
|
||||
border-color: color-mix(in srgb, var(--lora-accent) 24%, var(--border-color));
|
||||
}
|
||||
|
||||
.doctor-trigger i {
|
||||
color: var(--lora-accent);
|
||||
}
|
||||
|
||||
.doctor-status-badge {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
min-width: 18px;
|
||||
height: 18px;
|
||||
padding: 0 5px;
|
||||
border-radius: 999px;
|
||||
background: var(--lora-error);
|
||||
color: #fff;
|
||||
font-size: 11px;
|
||||
line-height: 1;
|
||||
font-weight: 700;
|
||||
}
|
||||
|
||||
.doctor-status-badge.hidden {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.doctor-modal {
|
||||
width: min(960px, 92vw);
|
||||
max-width: 960px;
|
||||
}
|
||||
|
||||
.doctor-shell {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: var(--space-3);
|
||||
padding-top: var(--space-2);
|
||||
}
|
||||
|
||||
.doctor-hero {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
gap: var(--space-3);
|
||||
align-items: flex-start;
|
||||
margin-top: var(--space-2);
|
||||
padding: var(--space-3);
|
||||
border-radius: var(--border-radius-sm);
|
||||
border: 1px solid var(--lora-border);
|
||||
background: rgba(0, 0, 0, 0.03);
|
||||
box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.03);
|
||||
}
|
||||
|
||||
.doctor-kicker {
|
||||
display: inline-block;
|
||||
font-size: 0.75rem;
|
||||
font-weight: 700;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.08em;
|
||||
color: var(--lora-accent);
|
||||
margin-bottom: 6px;
|
||||
}
|
||||
|
||||
.doctor-hero h2 {
|
||||
margin: 0 0 8px;
|
||||
}
|
||||
|
||||
.doctor-hero p {
|
||||
margin: 0;
|
||||
color: var(--text-color);
|
||||
opacity: 0.88;
|
||||
}
|
||||
|
||||
.doctor-summary-badge {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 8px;
|
||||
border-radius: 999px;
|
||||
padding: 8px 12px;
|
||||
font-weight: 700;
|
||||
white-space: nowrap;
|
||||
border: 1px solid transparent;
|
||||
}
|
||||
|
||||
.doctor-status-ok {
|
||||
background: color-mix(in oklch, var(--lora-success) 14%, transparent);
|
||||
border-color: color-mix(in oklch, var(--lora-success) 28%, transparent);
|
||||
color: color-mix(in oklch, var(--lora-success) 72%, var(--text-color));
|
||||
}
|
||||
|
||||
.doctor-status-warning {
|
||||
background: color-mix(in oklch, var(--lora-warning) 16%, transparent);
|
||||
border-color: color-mix(in oklch, var(--lora-warning) 30%, transparent);
|
||||
color: color-mix(in oklch, var(--lora-warning) 70%, var(--text-color));
|
||||
}
|
||||
|
||||
.doctor-status-error {
|
||||
background: color-mix(in oklch, var(--lora-error) 16%, transparent);
|
||||
border-color: color-mix(in oklch, var(--lora-error) 30%, transparent);
|
||||
color: color-mix(in oklch, var(--lora-error) 68%, var(--text-color));
|
||||
}
|
||||
|
||||
.doctor-loading-state {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 10px;
|
||||
padding: 12px 16px;
|
||||
min-height: 22px;
|
||||
border-radius: var(--border-radius-sm);
|
||||
background: var(--lora-surface);
|
||||
border: 1px solid var(--lora-border);
|
||||
visibility: hidden;
|
||||
opacity: 0;
|
||||
pointer-events: none;
|
||||
transition: opacity 0.18s ease, visibility 0.18s ease;
|
||||
}
|
||||
|
||||
.doctor-loading-state.visible {
|
||||
visibility: visible;
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
.doctor-issues-list {
|
||||
display: grid;
|
||||
gap: var(--space-2);
|
||||
}
|
||||
|
||||
.doctor-issue-card {
|
||||
border: 1px solid rgba(0, 0, 0, 0.1);
|
||||
background: rgba(0, 0, 0, 0.03);
|
||||
border-radius: var(--border-radius-sm);
|
||||
padding: var(--space-3);
|
||||
box-shadow: none;
|
||||
}
|
||||
|
||||
.doctor-issue-card[data-status="warning"] {
|
||||
border-color: color-mix(in oklch, var(--lora-warning) 32%, var(--lora-border));
|
||||
}
|
||||
|
||||
.doctor-issue-card[data-status="error"] {
|
||||
border-color: color-mix(in oklch, var(--lora-error) 28%, var(--lora-border));
|
||||
}
|
||||
|
||||
.doctor-issue-header {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
gap: var(--space-2);
|
||||
align-items: flex-start;
|
||||
}
|
||||
|
||||
.doctor-issue-header h3 {
|
||||
margin: 0 0 6px;
|
||||
font-size: 1rem;
|
||||
}
|
||||
|
||||
.doctor-issue-summary {
|
||||
margin: 0;
|
||||
color: var(--text-color);
|
||||
opacity: 0.92;
|
||||
}
|
||||
|
||||
.doctor-issue-tag {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 6px;
|
||||
border-radius: 999px;
|
||||
padding: 6px 10px;
|
||||
font-size: 0.8rem;
|
||||
font-weight: 700;
|
||||
}
|
||||
|
||||
.doctor-issue-card[data-status="ok"] .doctor-issue-tag {
|
||||
background: color-mix(in oklch, var(--lora-success) 14%, transparent);
|
||||
color: color-mix(in oklch, var(--lora-success) 72%, var(--text-color));
|
||||
}
|
||||
|
||||
.doctor-issue-card[data-status="warning"] .doctor-issue-tag {
|
||||
background: color-mix(in oklch, var(--lora-warning) 16%, transparent);
|
||||
color: color-mix(in oklch, var(--lora-warning) 70%, var(--text-color));
|
||||
}
|
||||
|
||||
.doctor-issue-card[data-status="error"] .doctor-issue-tag {
|
||||
background: color-mix(in oklch, var(--lora-error) 16%, transparent);
|
||||
color: color-mix(in oklch, var(--lora-error) 68%, var(--text-color));
|
||||
}
|
||||
|
||||
.doctor-issue-details {
|
||||
margin: 14px 0 0;
|
||||
padding-left: 18px;
|
||||
color: var(--text-color);
|
||||
}
|
||||
|
||||
.doctor-issue-details li + li {
|
||||
margin-top: 8px;
|
||||
}
|
||||
|
||||
.doctor-issue-actions {
|
||||
display: flex;
|
||||
flex-wrap: wrap;
|
||||
gap: 10px;
|
||||
margin-top: 16px;
|
||||
}
|
||||
|
||||
.doctor-inline-detail-grid {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(auto-fit, minmax(160px, 1fr));
|
||||
gap: 10px;
|
||||
margin-top: 14px;
|
||||
}
|
||||
|
||||
.doctor-inline-detail {
|
||||
padding: 10px 12px;
|
||||
border-radius: var(--border-radius-xs);
|
||||
background: var(--lora-surface);
|
||||
border: 1px solid var(--lora-border);
|
||||
}
|
||||
|
||||
.doctor-inline-detail strong {
|
||||
display: block;
|
||||
margin-bottom: 4px;
|
||||
font-size: 0.82rem;
|
||||
}
|
||||
|
||||
.doctor-footer {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
gap: var(--space-2);
|
||||
align-items: center;
|
||||
padding-top: var(--space-1);
|
||||
}
|
||||
|
||||
.doctor-footer-note {
|
||||
color: var(--text-color);
|
||||
opacity: 0.82;
|
||||
}
|
||||
|
||||
.doctor-footer-actions {
|
||||
display: flex;
|
||||
gap: 10px;
|
||||
}
|
||||
|
||||
[data-theme="dark"] .doctor-hero,
|
||||
[data-theme="dark"] .doctor-issue-card {
|
||||
background: rgba(255, 255, 255, 0.03);
|
||||
border-color: var(--lora-border);
|
||||
box-shadow: none;
|
||||
}
|
||||
|
||||
@media (max-width: 760px) {
|
||||
.doctor-trigger {
|
||||
min-width: auto;
|
||||
padding-inline: 10px;
|
||||
}
|
||||
|
||||
.doctor-trigger span:not(.doctor-status-badge) {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.doctor-hero,
|
||||
.doctor-footer {
|
||||
flex-direction: column;
|
||||
align-items: stretch;
|
||||
}
|
||||
|
||||
.doctor-hero {
|
||||
padding-right: var(--space-3);
|
||||
}
|
||||
|
||||
.doctor-summary-badge {
|
||||
align-self: flex-start;
|
||||
}
|
||||
|
||||
.doctor-footer-actions {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.doctor-footer-actions button {
|
||||
flex: 1;
|
||||
}
|
||||
}
|
||||
@@ -102,6 +102,13 @@
|
||||
overflow-y: auto;
|
||||
padding: var(--space-3);
|
||||
scroll-behavior: smooth;
|
||||
scrollbar-gutter: stable;
|
||||
}
|
||||
|
||||
@supports not (scrollbar-gutter: stable) {
|
||||
.settings-content {
|
||||
overflow-y: scroll;
|
||||
}
|
||||
}
|
||||
|
||||
.settings-content .settings-form {
|
||||
@@ -430,6 +437,88 @@
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
.base-model-skip-toggle {
|
||||
min-width: 220px;
|
||||
justify-content: space-between;
|
||||
gap: 10px;
|
||||
}
|
||||
|
||||
.base-model-skip-toggle-label {
|
||||
opacity: 0.75;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
.base-model-skip-panel {
|
||||
margin-top: var(--space-2);
|
||||
padding: 12px;
|
||||
border: 1px solid var(--border-color);
|
||||
border-radius: var(--border-radius-xs);
|
||||
background-color: var(--lora-surface);
|
||||
}
|
||||
|
||||
.base-model-skip-toolbar {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 10px;
|
||||
margin-bottom: 10px;
|
||||
}
|
||||
|
||||
.base-model-skip-search {
|
||||
flex: 1;
|
||||
min-width: 0;
|
||||
padding: 8px 10px;
|
||||
border-radius: var(--border-radius-xs);
|
||||
border: 1px solid var(--border-color);
|
||||
background-color: var(--settings-bg);
|
||||
color: var(--text-color);
|
||||
}
|
||||
|
||||
.base-model-skip-search:focus {
|
||||
border-color: var(--lora-accent);
|
||||
outline: none;
|
||||
box-shadow: 0 0 0 2px rgba(var(--lora-accent-rgb, 79, 70, 229), 0.1);
|
||||
}
|
||||
|
||||
.base-model-skip-list {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(auto-fit, minmax(180px, 1fr));
|
||||
gap: 8px;
|
||||
max-height: 220px;
|
||||
overflow-y: auto;
|
||||
}
|
||||
|
||||
.base-model-skip-option {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 8px;
|
||||
padding: 8px 10px;
|
||||
border: 1px solid var(--border-color);
|
||||
border-radius: var(--border-radius-xs);
|
||||
background-color: var(--settings-bg);
|
||||
cursor: pointer;
|
||||
transition: border-color 0.15s ease, background-color 0.15s ease;
|
||||
}
|
||||
|
||||
.base-model-skip-option:hover {
|
||||
border-color: var(--lora-accent);
|
||||
background-color: rgba(var(--lora-accent-rgb, 79, 70, 229), 0.05);
|
||||
}
|
||||
|
||||
.base-model-skip-option input {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
.base-model-skip-option span {
|
||||
font-size: 0.9em;
|
||||
line-height: 1.25;
|
||||
}
|
||||
|
||||
.base-model-skip-empty {
|
||||
padding: 8px 0 0;
|
||||
font-size: 0.9em;
|
||||
opacity: 0.75;
|
||||
}
|
||||
|
||||
.priority-tags-input:focus {
|
||||
border-color: var(--lora-accent);
|
||||
outline: none;
|
||||
|
||||
@@ -424,6 +424,7 @@
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
gap: 8px;
|
||||
}
|
||||
|
||||
.param-header label {
|
||||
@@ -431,7 +432,14 @@
|
||||
color: var(--text-color);
|
||||
}
|
||||
|
||||
.copy-btn {
|
||||
.param-actions {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 4px;
|
||||
}
|
||||
|
||||
.copy-btn,
|
||||
.edit-btn {
|
||||
background: none;
|
||||
border: none;
|
||||
color: var(--text-color);
|
||||
@@ -442,7 +450,8 @@
|
||||
transition: all 0.2s;
|
||||
}
|
||||
|
||||
.copy-btn:hover {
|
||||
.copy-btn:hover,
|
||||
.edit-btn:hover {
|
||||
opacity: 1;
|
||||
background: var(--lora-surface);
|
||||
}
|
||||
@@ -461,6 +470,48 @@
|
||||
word-break: break-word;
|
||||
}
|
||||
|
||||
.param-content.hide {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.param-content.is-placeholder {
|
||||
color: color-mix(in oklch, var(--text-color), transparent 35%);
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
.param-editor {
|
||||
display: none;
|
||||
flex-direction: column;
|
||||
gap: 10px;
|
||||
}
|
||||
|
||||
.param-editor.active {
|
||||
display: flex;
|
||||
}
|
||||
|
||||
.param-textarea {
|
||||
width: 100%;
|
||||
max-width: 100%;
|
||||
min-height: 140px;
|
||||
resize: vertical;
|
||||
background: var(--bg-color);
|
||||
border: 1px solid var(--lora-border);
|
||||
border-radius: var(--border-radius-xs);
|
||||
padding: 10px 12px;
|
||||
font-size: 0.9em;
|
||||
line-height: 1.5;
|
||||
color: var(--text-color);
|
||||
font-family: inherit;
|
||||
box-sizing: border-box;
|
||||
overflow-x: hidden;
|
||||
}
|
||||
|
||||
.param-editor-hint {
|
||||
font-size: 0.78em;
|
||||
line-height: 1.4;
|
||||
color: color-mix(in oklch, var(--text-color), transparent 35%);
|
||||
}
|
||||
|
||||
/* Other Parameters */
|
||||
.other-params {
|
||||
display: flex;
|
||||
@@ -565,6 +616,26 @@
|
||||
color: var(--lora-accent);
|
||||
}
|
||||
|
||||
.send-recipe-btn {
|
||||
background: none;
|
||||
border: none;
|
||||
color: var(--text-color);
|
||||
opacity: 0.7;
|
||||
cursor: pointer;
|
||||
padding: 4px 8px;
|
||||
border-radius: var(--border-radius-xs);
|
||||
transition: all 0.2s;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
}
|
||||
|
||||
.send-recipe-btn:hover {
|
||||
opacity: 1;
|
||||
background: var(--lora-surface);
|
||||
color: var(--lora-accent);
|
||||
}
|
||||
|
||||
#recipeLorasCount {
|
||||
font-size: 0.9em;
|
||||
color: var(--text-color);
|
||||
@@ -965,6 +1036,73 @@
|
||||
}
|
||||
}
|
||||
|
||||
@media (max-height: 860px) {
|
||||
#recipeModal .modal-content {
|
||||
padding-top: var(--space-2);
|
||||
padding-bottom: var(--space-2);
|
||||
}
|
||||
|
||||
.recipe-modal-header {
|
||||
padding-bottom: 6px;
|
||||
margin-bottom: 8px;
|
||||
}
|
||||
|
||||
.recipe-modal-header h2 {
|
||||
font-size: 1.25em;
|
||||
max-height: 2.5em;
|
||||
}
|
||||
|
||||
.recipe-tags-container {
|
||||
margin-bottom: 6px;
|
||||
}
|
||||
|
||||
.recipe-top-section {
|
||||
grid-template-columns: 1fr;
|
||||
gap: var(--space-1);
|
||||
margin-bottom: var(--space-1);
|
||||
}
|
||||
|
||||
.recipe-preview-container {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.recipe-gen-params {
|
||||
height: auto;
|
||||
max-height: 210px;
|
||||
}
|
||||
|
||||
.recipe-gen-params h3 {
|
||||
margin-bottom: var(--space-1);
|
||||
font-size: 1.05em;
|
||||
}
|
||||
|
||||
.gen-params-container {
|
||||
gap: var(--space-1);
|
||||
}
|
||||
|
||||
.param-content {
|
||||
max-height: 90px;
|
||||
padding: 10px;
|
||||
}
|
||||
|
||||
.param-textarea {
|
||||
min-height: 100px;
|
||||
}
|
||||
|
||||
.other-params {
|
||||
margin-top: 0;
|
||||
gap: 6px;
|
||||
}
|
||||
|
||||
.recipe-bottom-section {
|
||||
padding-top: var(--space-1);
|
||||
}
|
||||
|
||||
.recipe-section-header {
|
||||
margin-bottom: var(--space-1);
|
||||
}
|
||||
}
|
||||
|
||||
.badge-container {
|
||||
position: relative;
|
||||
display: flex;
|
||||
|
||||
@@ -21,6 +21,27 @@
|
||||
font-size: 0.9em;
|
||||
}
|
||||
|
||||
.downloaded-badge {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
background: color-mix(in oklch, var(--badge-update-bg, #4a90e2) 22%, transparent);
|
||||
color: var(--badge-update-bg, #4a90e2);
|
||||
border: 1px solid color-mix(in oklch, var(--badge-update-bg, #4a90e2) 50%, transparent);
|
||||
padding: 4px 8px;
|
||||
border-radius: var(--border-radius-xs);
|
||||
font-size: 0.8em;
|
||||
font-weight: 500;
|
||||
white-space: nowrap;
|
||||
flex-shrink: 0;
|
||||
transform: translateZ(0);
|
||||
will-change: transform;
|
||||
}
|
||||
|
||||
.downloaded-badge i {
|
||||
margin-right: 4px;
|
||||
font-size: 0.9em;
|
||||
}
|
||||
|
||||
/* Early Access Badge */
|
||||
.early-access-badge {
|
||||
display: inline-flex;
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
@import 'components/modal/delete-modal.css';
|
||||
@import 'components/modal/update-modal.css';
|
||||
@import 'components/modal/settings-modal.css';
|
||||
@import 'components/modal/doctor-modal.css';
|
||||
@import 'components/modal/help-modal.css';
|
||||
@import 'components/modal/relink-civitai-modal.css';
|
||||
@import 'components/modal/example-access-modal.css';
|
||||
|
||||
@@ -251,7 +251,7 @@ export class BaseModelApiClient {
|
||||
replaceModelPreview(filePath) {
|
||||
const input = document.createElement('input');
|
||||
input.type = 'file';
|
||||
input.accept = 'image/*,video/mp4';
|
||||
input.accept = 'image/*,image/webp,video/mp4';
|
||||
|
||||
input.onchange = async () => {
|
||||
if (!input.files || !input.files[0]) return;
|
||||
|
||||
164
static/js/api/civitaiBaseModelApi.js
Normal file
164
static/js/api/civitaiBaseModelApi.js
Normal file
@@ -0,0 +1,164 @@
|
||||
/**
|
||||
* API client for Civitai base model management
|
||||
* Handles fetching and refreshing base models from Civitai API
|
||||
*/
|
||||
|
||||
import { showToast } from '../utils/uiHelpers.js';
|
||||
|
||||
const BASE_MODEL_ENDPOINTS = {
|
||||
getModels: '/api/lm/base-models',
|
||||
refresh: '/api/lm/base-models/refresh',
|
||||
categories: '/api/lm/base-models/categories',
|
||||
cacheStatus: '/api/lm/base-models/cache-status',
|
||||
};
|
||||
|
||||
/**
|
||||
* Civitai Base Model API Client
|
||||
*/
|
||||
export class CivitaiBaseModelApi {
|
||||
constructor() {
|
||||
this.cache = null;
|
||||
        this.cacheTimestamp = null;
    }

    /**
     * Get base models (with caching)
     * @param {boolean} forceRefresh - Force refresh from API
     * @returns {Promise<Object>} Response with models, source, and counts
     */
    async getBaseModels(forceRefresh = false) {
        try {
            const url = new URL(BASE_MODEL_ENDPOINTS.getModels, window.location.origin);
            if (forceRefresh) {
                url.searchParams.append('refresh', 'true');
            }

            const response = await fetch(url);
            if (!response.ok) {
                throw new Error(`Failed to fetch base models: ${response.statusText}`);
            }

            const data = await response.json();

            if (data.success) {
                this.cache = data.data;
                this.cacheTimestamp = Date.now();
                return data.data;
            } else {
                throw new Error(data.error || 'Failed to fetch base models');
            }
        } catch (error) {
            console.error('Error fetching base models:', error);
            showToast('Failed to fetch base models', { message: error.message }, 'error');
            throw error;
        }
    }

    /**
     * Force refresh base models from Civitai API
     * @returns {Promise<Object>} Refreshed data
     */
    async refreshBaseModels() {
        try {
            const response = await fetch(BASE_MODEL_ENDPOINTS.refresh, {
                method: 'POST',
                headers: { 'Content-Type': 'application/json' }
            });

            if (!response.ok) {
                throw new Error(`Failed to refresh base models: ${response.statusText}`);
            }

            const data = await response.json();

            if (data.success) {
                this.cache = data.data;
                this.cacheTimestamp = Date.now();
                showToast('Base models refreshed successfully', {}, 'success');
                return data.data;
            } else {
                throw new Error(data.error || 'Failed to refresh base models');
            }
        } catch (error) {
            console.error('Error refreshing base models:', error);
            showToast('Failed to refresh base models', { message: error.message }, 'error');
            throw error;
        }
    }

    /**
     * Get base model categories
     * @returns {Promise<Object>} Categories with model lists
     */
    async getCategories() {
        try {
            const response = await fetch(BASE_MODEL_ENDPOINTS.categories);
            if (!response.ok) {
                throw new Error(`Failed to fetch categories: ${response.statusText}`);
            }

            const data = await response.json();

            if (data.success) {
                return data.data;
            } else {
                throw new Error(data.error || 'Failed to fetch categories');
            }
        } catch (error) {
            console.error('Error fetching categories:', error);
            throw error;
        }
    }

    /**
     * Get cache status
     * @returns {Promise<Object>} Cache status information
     */
    async getCacheStatus() {
        try {
            const response = await fetch(BASE_MODEL_ENDPOINTS.cacheStatus);
            if (!response.ok) {
                throw new Error(`Failed to fetch cache status: ${response.statusText}`);
            }

            const data = await response.json();

            if (data.success) {
                return data.data;
            } else {
                throw new Error(data.error || 'Failed to fetch cache status');
            }
        } catch (error) {
            console.error('Error fetching cache status:', error);
            throw error;
        }
    }

    /**
     * Get cached models (if available)
     * @returns {Object|null} Cached data or null
     */
    getCachedModels() {
        return this.cache;
    }

    /**
     * Check if cache is available
     * @returns {boolean}
     */
    hasCache() {
        return this.cache !== null;
    }

    /**
     * Get cache age in milliseconds
     * @returns {number|null} Age in ms or null if no cache
     */
    getCacheAge() {
        if (!this.cacheTimestamp) return null;
        return Date.now() - this.cacheTimestamp;
    }
}

// Export singleton instance
export const civitaiBaseModelApi = new CivitaiBaseModelApi();
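For orientation while reading the diff, here is a minimal usage sketch of the client above. It relies only on the methods shown (getBaseModels, hasCache, getCacheAge, getCachedModels); the populateBaseModelDropdown helper, the import path, and the assumed shape of data.models are illustrative, not part of the repository.

// Usage sketch (illustrative only): reuse the in-memory cache when it is
// fresh, otherwise fetch through getBaseModels().
import { civitaiBaseModelApi } from './civitaiBaseModelApi.js'; // path assumed

async function populateBaseModelDropdown(selectEl) {
    const maxAgeMs = 10 * 60 * 1000; // treat a cache younger than 10 minutes as fresh
    const cacheIsFresh = civitaiBaseModelApi.hasCache()
        && civitaiBaseModelApi.getCacheAge() < maxAgeMs;

    const data = cacheIsFresh
        ? civitaiBaseModelApi.getCachedModels()
        : await civitaiBaseModelApi.getBaseModels();

    // Assumes the response exposes an array of base model names under data.models.
    for (const name of data.models ?? []) {
        const option = document.createElement('option');
        option.value = name;
        option.textContent = name;
        selectEl.appendChild(option);
    }
}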
@@ -1,6 +1,7 @@
import { RecipeCard } from '../components/RecipeCard.js';
import { state, getCurrentPageState } from '../state/index.js';
import { showToast } from '../utils/uiHelpers.js';
import { captureScrollPosition, restoreScrollPosition } from '../utils/infiniteScroll.js';

const RECIPE_ENDPOINTS = {
    list: '/api/lm/recipes',
@@ -31,6 +32,20 @@ export function extractRecipeId(filePath) {
    return dotIndex > 0 ? basename.substring(0, dotIndex) : basename;
}

export async function fetchRecipeDetails(recipeId) {
    if (!recipeId) {
        throw new Error('Unable to determine recipe ID');
    }

    const encodedRecipeId = encodeURIComponent(recipeId);
    const response = await fetch(`${RECIPE_ENDPOINTS.detail}/${encodedRecipeId}`);
    if (!response.ok) {
        throw new Error(`Failed to load recipe: ${response.statusText}`);
    }

    return response.json();
}

/**
 * Fetch recipes with pagination for virtual scrolling
 * @param {number} page - Page number to fetch
@@ -61,7 +76,9 @@ export async function fetchRecipesPage(page = 1, pageSize = 100) {
        // If we have a specific recipe ID to load
        if (pageState.customFilter?.active && pageState.customFilter?.recipeId) {
            // Special case: load specific recipe
            const response = await fetch(`${RECIPE_ENDPOINTS.detail}/${pageState.customFilter.recipeId}`);
            const response = await fetch(
                `${RECIPE_ENDPOINTS.detail}/${encodeURIComponent(pageState.customFilter.recipeId)}`
            );

            if (!response.ok) {
                throw new Error(`Failed to load recipe: ${response.statusText}`);
@@ -83,6 +100,9 @@ export async function fetchRecipesPage(page = 1, pageSize = 100) {
        if (pageState.customFilter?.active && pageState.customFilter?.loraHash) {
            params.append('lora_hash', pageState.customFilter.loraHash);
            params.append('bypass_filters', 'true');
        } else if (pageState.customFilter?.active && pageState.customFilter?.checkpointHash) {
            params.append('checkpoint_hash', pageState.customFilter.checkpointHash);
            params.append('bypass_filters', 'true');
        } else {
            // Normal filtering logic

@@ -163,10 +183,12 @@ export async function resetAndReloadWithVirtualScroll(options = {}) {
    const {
        modelType = 'lora',
        updateFolders = false,
        fetchPageFunction
        fetchPageFunction,
        preserveScroll = false
    } = options;

    const pageState = getCurrentPageState();
    const scrollSnapshot = preserveScroll ? captureScrollPosition() : null;

    try {
        pageState.isLoading = true;
@@ -188,6 +210,10 @@ export async function resetAndReloadWithVirtualScroll(options = {}) {
        pageState.hasMore = result.hasMore;
        pageState.currentPage = 2; // Next page will be 2

        if (scrollSnapshot) {
            await restoreScrollPosition(scrollSnapshot);
        }

        return result;
    } catch (error) {
        console.error(`Error reloading ${modelType}s:`, error);
@@ -208,10 +234,12 @@ export async function loadMoreWithVirtualScroll(options = {}) {
        modelType = 'lora',
        resetPage = false,
        updateFolders = false,
        fetchPageFunction
        fetchPageFunction,
        preserveScroll = false
    } = options;

    const pageState = getCurrentPageState();
    const scrollSnapshot = preserveScroll ? captureScrollPosition() : null;

    try {
        // Start loading state
@@ -236,6 +264,10 @@ export async function loadMoreWithVirtualScroll(options = {}) {
        pageState.hasMore = result.hasMore;
        pageState.currentPage = 2; // Next page to load would be 2

        if (scrollSnapshot) {
            await restoreScrollPosition(scrollSnapshot);
        }

        return result;
    } catch (error) {
        console.error(`Error loading ${modelType}s:`, error);
@@ -251,11 +283,12 @@ export async function loadMoreWithVirtualScroll(options = {}) {
 * @param {boolean} updateFolders - Whether to update folder tags
 * @returns {Promise<Object>} The fetch result
 */
export async function resetAndReload(updateFolders = false) {
export async function resetAndReload(updateFolders = false, options = {}) {
    return resetAndReloadWithVirtualScroll({
        modelType: 'recipe',
        updateFolders,
        fetchPageFunction: fetchRecipesPage
        fetchPageFunction: fetchRecipesPage,
        preserveScroll: options.preserveScroll === true
    });
}

@@ -267,7 +300,7 @@ export async function syncChanges() {
        state.loadingManager.showSimpleLoading('Syncing changes...');

        // Simply reload the recipes without rebuilding cache
        await resetAndReload();
        await resetAndReload(false, { preserveScroll: true });

        showToast('toast.recipes.syncComplete', {}, 'success');
    } catch (error) {
@@ -295,7 +328,7 @@ export async function refreshRecipes() {
        }

        // After successful cache rebuild, reload the recipes
        await resetAndReload();
        await resetAndReload(false, { preserveScroll: true });

        showToast('toast.recipes.refreshComplete', {}, 'success');
    } catch (error) {
@@ -346,9 +379,10 @@ export function createRecipeCard(recipe) {
 * @param {Object} updates - The metadata updates to apply
 * @returns {Promise<Object>} The updated recipe data
 */
export async function updateRecipeMetadata(filePath, updates) {
export async function updateRecipeMetadata(filePath, updates, options = {}) {
    try {
        state.loadingManager.showSimpleLoading('Saving metadata...');
        const listFilePath = options.listFilePath || filePath;

        // Extract recipeId from filePath (basename without extension)
        const recipeId = extractRecipeId(filePath);
@@ -356,7 +390,7 @@ export async function updateRecipeMetadata(filePath, updates) {
            throw new Error('Unable to determine recipe ID');
        }

        const response = await fetch(`${RECIPE_ENDPOINTS.update}/${recipeId}/update`, {
        const response = await fetch(`${RECIPE_ENDPOINTS.update}/${encodeURIComponent(recipeId)}/update`, {
            method: 'PUT',
            headers: {
                'Content-Type': 'application/json',
@@ -371,7 +405,7 @@ export async function updateRecipeMetadata(filePath, updates) {
            throw new Error(data.error || 'Failed to update recipe');
        }

        state.virtualScroller.updateSingleItem(filePath, updates);
        state.virtualScroller.updateSingleItem(listFilePath, updates);

        return data;
    } catch (error) {
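A brief caller-side sketch of the options added in this hunk; the resetAndReload and updateRecipeMetadata signatures are taken from the diff above, while the renameRecipe wrapper, the import path, and the title field are illustrative only.

// Illustrative caller (not part of the diff): save an edit, then reload the
// recipe list without losing the user's scroll position.
import { updateRecipeMetadata, resetAndReload } from './recipeApi.js'; // path assumed

async function renameRecipe(filePath, newTitle) {
    // listFilePath tells the virtual scroller which list entry to patch; it
    // defaults to filePath when omitted, as in the code above.
    await updateRecipeMetadata(filePath, { title: newTitle }, { listFilePath: filePath });

    // preserveScroll captures the scroll position before the reset and
    // restores it after the first page has been re-fetched.
    await resetAndReload(false, { preserveScroll: true });
}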
@@ -2,6 +2,8 @@ import { BaseContextMenu } from './BaseContextMenu.js';
import { state } from '../../state/index.js';
import { bulkManager } from '../../managers/BulkManager.js';
import { updateElementText, translate } from '../../utils/i18nHelpers.js';
import { bulkMissingLoraDownloadManager } from '../../managers/BulkMissingLoraDownloadManager.js';
import { showToast } from '../../utils/uiHelpers.js';

export class BulkContextMenu extends BaseContextMenu {
    constructor() {
@@ -37,6 +39,7 @@ export class BulkContextMenu extends BaseContextMenu {
        const moveAllItem = this.menu.querySelector('[data-action="move-all"]');
        const autoOrganizeItem = this.menu.querySelector('[data-action="auto-organize"]');
        const deleteAllItem = this.menu.querySelector('[data-action="delete-all"]');
        const downloadMissingLorasItem = this.menu.querySelector('[data-action="download-missing-loras"]');

        if (sendToWorkflowAppendItem) {
            sendToWorkflowAppendItem.style.display = config.sendToWorkflow ? 'flex' : 'none';
@@ -71,6 +74,10 @@ export class BulkContextMenu extends BaseContextMenu {
        if (setContentRatingItem) {
            setContentRatingItem.style.display = config.setContentRating ? 'flex' : 'none';
        }
        if (downloadMissingLorasItem) {
            // Only show for recipes page
            downloadMissingLorasItem.style.display = currentModelType === 'recipes' ? 'flex' : 'none';
        }

        const skipMetadataRefreshItem = this.menu.querySelector('[data-action="skip-metadata-refresh"]');
        const resumeMetadataRefreshItem = this.menu.querySelector('[data-action="resume-metadata-refresh"]');
@@ -178,6 +185,9 @@ export class BulkContextMenu extends BaseContextMenu {
            case 'delete-all':
                bulkManager.showBulkDeleteModal();
                break;
            case 'download-missing-loras':
                this.handleDownloadMissingLoras();
                break;
            case 'clear':
                bulkManager.clearSelection();
                break;
@@ -185,4 +195,39 @@ export class BulkContextMenu extends BaseContextMenu {
                console.warn(`Unknown bulk action: ${action}`);
        }
    }

    /**
     * Handle downloading missing LoRAs for selected recipes
     */
    async handleDownloadMissingLoras() {
        if (state.selectedModels.size === 0) {
            return;
        }

        // Get selected recipes from the virtual scroller
        const selectedRecipes = [];
        state.selectedModels.forEach(filePath => {
            const card = document.querySelector(`.model-card[data-filepath="${CSS.escape(filePath)}"]`);
            if (card && card.recipeData) {
                selectedRecipes.push(card.recipeData);
            }
        });

        if (selectedRecipes.length === 0) {
            // Try to get recipes from virtual scroller state
            const items = state.virtualScroller?.items || [];
            items.forEach(recipe => {
                if (recipe.file_path && state.selectedModels.has(recipe.file_path)) {
                    selectedRecipes.push(recipe);
                }
            });
        }

        if (selectedRecipes.length === 0) {
            showToast('toast.recipes.noRecipesSelected', {}, 'warning');
            return;
        }

        await bulkMissingLoraDownloadManager.downloadMissingLoras(selectedRecipes);
    }
}
@@ -4,6 +4,8 @@ import { getModelApiClient, resetAndReload } from '../../api/modelApiFactory.js'
import { showDeleteModal, showExcludeModal } from '../../utils/modalUtils.js';
import { moveManager } from '../../managers/MoveManager.js';
import { i18n } from '../../i18n/index.js';
import { sendModelPathToWorkflow } from '../../utils/uiHelpers.js';
import { MODEL_TYPES } from '../../api/apiConfig.js';

export class CheckpointContextMenu extends BaseContextMenu {
    constructor() {
@@ -60,6 +62,10 @@ export class CheckpointContextMenu extends BaseContextMenu {
                    this.currentCard.querySelector('.fa-copy').click();
                }
                break;
            case 'sendworkflow':
                // Send checkpoint to workflow (always replace mode)
                this.sendCheckpointToWorkflow();
                break;
            case 'refresh-metadata':
                // Refresh metadata from CivitAI
                apiClient.refreshSingleModelMetadata(this.currentCard.dataset.filepath);
@@ -79,6 +85,52 @@ export class CheckpointContextMenu extends BaseContextMenu {
                break;
        }
    }

    async sendCheckpointToWorkflow() {
        const modelPath = this.currentCard.dataset.filepath;
        if (!modelPath) {
            return;
        }

        const subtype = (this.currentCard.dataset.sub_type || 'checkpoint').toLowerCase();
        const isDiffusionModel = subtype === 'diffusion_model';
        const widgetName = isDiffusionModel ? 'unet_name' : 'ckpt_name';
        const actionTypeText = i18n.t(
            isDiffusionModel ? 'uiHelpers.nodeSelector.diffusionModel' : 'uiHelpers.nodeSelector.checkpoint',
            {},
            isDiffusionModel ? 'Diffusion Model' : 'Checkpoint'
        );
        const successMessage = i18n.t(
            'uiHelpers.workflow.modelUpdated',
            {},
            'Model updated in workflow'
        );
        const failureMessage = i18n.t(
            'uiHelpers.workflow.modelFailed',
            {},
            'Failed to update model node'
        );
        const missingNodesMessage = i18n.t(
            'uiHelpers.workflow.noMatchingNodes',
            {},
            'No compatible nodes available in the current workflow'
        );
        const missingTargetMessage = i18n.t(
            'uiHelpers.workflow.noTargetNodeSelected',
            {},
            'No target node selected'
        );

        await sendModelPathToWorkflow(modelPath, {
            widgetName,
            collectionType: MODEL_TYPES.CHECKPOINT,
            actionTypeText,
            successMessage,
            failureMessage,
            missingNodesMessage,
            missingTargetMessage,
        });
    }
}

// Mix in shared methods
@@ -23,7 +23,7 @@ export class RecipeContextMenu extends BaseContextMenu {
    // Override resetAndReload for recipe context
    async resetAndReload() {
        const { resetAndReload } = await import('../../api/recipeApi.js');
        return resetAndReload();
        return resetAndReload(false, { preserveScroll: true });
    }

    showMenu(x, y, card) {
Some files were not shown because too many files have changed in this diff.