Mirror of https://github.com/willmiao/ComfyUI-Lora-Manager.git, synced 2026-03-22 05:32:12 -03:00
Compare commits
620 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 6d8408e626 | |
| | 0906271aa9 | |
| | 4c33c9d256 | |
| | fa9c78209f | |
| | 6678ec8a60 | |
| | 854e467c12 | |
| | e6b94c7b21 | |
| | 2c6f9d8602 | |
| | c74033b9c0 | |
| | d2b21d27bb | |
| | 215272469f | |
| | f7d05ab0f1 | |
| | 6f2ad2be77 | |
| | 66575c719a | |
| | 677a239d53 | |
| | 3b96bfe5af | |
| | 83be5cfa64 | |
| | 6b834c2362 | |
| | 7abfc49e08 | |
| | 65d5f50088 | |
| | 4f1f4ffe3d | |
| | b0c2027a1c | |
| | 33c83358b0 | |
| | 31223f0526 | |
| | 92daadb92c | |
| | fae2e274fd | |
| | 342a722991 | |
| | 65ec6aacb7 | |
| | 9387470c69 | |
| | 31f6edf8f0 | |
| | 487b062175 | |
| | d8e13de096 | |
| | e8a30088ef | |
| | bf7b07ba74 | |
| | 28fe3e7b7a | |
| | c0eff2bb5e | |
| | 848c1741fe | |
| | 1370b8e8c1 | |
| | 82a068e610 | |
| | 32f42bafaa | |
| | 4081b7f022 | |
| | a5808193a6 | |
| | 854ca322c1 | |
| | c1d9b5137a | |
| | f33d5745b3 | |
| | d89c2ca128 | |
| | 835584cc85 | |
| | b2ffbe3a68 | |
| | defcc79e6c | |
| | c06d9f84f0 | |
| | fe57a8e156 | |
| | b77105795a | |
| | e2df5fcf27 | |
| | 836a64e728 | |
| | 08ba0c9f42 | |
| | 6fcc6a5299 | |
| | 6dd58248c6 | |
| | 2786801b71 | |
| | ea29cbeb7a | |
| | 3cf9121a8c | |
| | 381bd3938a | |
| | e4ce384023 | |
| | 12d1857b13 | |
| | 0d9003dea4 | |
| | 1a3751acfa | |
| | c5a3af2399 | |
| | ea8a64fafc | |
| | 981e367bf1 | |
| | a3d6e62035 | |
| | 7f205cdcc8 | |
| | e587189880 | |
| | 206c1bd69f | |
| | a7d9255c2c | |
| | 08265a85ec | |
| | 1ed5630464 | |
| | c784615f11 | |
| | 26d51b1190 | |
| | d83fad6abc | |
| | 692796db46 | |
| | f15c6f33f9 | |
| | dda9eb4d7c | |
| | 6f3aeb61e7 | |
| | d6145e633f | |
| | 07014d98ce | |
| | e8ccdabe6c | |
| | cf9fd2d5c2 | |
| | bf9aa9356b | |
| | 68d00ce289 | |
| | 5288021e4f | |
| | 4d38add291 | |
| | 804808da4a | |
| | 298a95432d | |
| | a834fc4b30 | |
| | 2c6c9542dd | |
| | a9a7f4c8ec | |
| | ea9370443d | |
| | c2e00b240e | |
| | a2b81ea099 | |
| | ee609e8eac | |
| | e04ef671e9 | |
| | 0184dfd7eb | |
| | eccfa0ca54 | |
| | 6d3feb4bef | |
| | 29d2b5ee4b | |
| | c82fabb67f | |
| | fcfc868e57 | |
| | 67b403f8ca | |
| | de06c6b2f6 | |
| | fa444dfb8a | |
| | 124002a472 | |
| | 0c883433c1 | |
| | bcf3b2cf55 | |
| | 357c4e9c08 | |
| | 9edfc68e91 | |
| | 8c06cb3e80 | |
| | 144fa0a6d4 | |
| | 25d5a1541e | |
| | a579d36389 | |
| | d766dac341 | |
| | b15ef1bbc6 | |
| | 3e52e00597 | |
| | f749dd0d52 | |
| | 48a8a42108 | |
| | db7f57a5a4 | |
| | 556381b983 | |
| | 158d7d5898 | |
| | 18844da95d | |
| | 7e0df4d718 | |
| | 0dbb76e8c8 | |
| | f73b3422a6 | |
| | bd95e802ec | |
| | 5de16a78c5 | |
| | 6f8e09fcde | |
| | f54d480f03 | |
| | e68b213fb3 | |
| | 132334d500 | |
| | a6f04c6d7e | |
| | 854e8bf356 | |
| | 6ff883d2d3 | |
| | 849b97afba | |
| | 1bd2635864 | |
| | 79ab0f7b6c | |
| | 79011bd257 | |
| | c692713ffb | |
| | df9b554ce1 | |
| | 277a8e4682 | |
| | acb52dba09 | |
| | 8f10765254 | |
| | 0653f59473 | |
| | 7a4b5a4667 | |
| | 49c4a4068b | |
| | 40ad590046 | |
| | 30374ae3e6 | |
| | ab22d16bad | |
| | 971cd56a4a | |
| | d7cb546c5f | |
| | 9d8b7344cd | |
| | 2d4f6ae7ce | |
| | d9126807b0 | |
| | cad5fb3fba | |
| | afe23ad6b7 | |
| | fc4327087b | |
| | 71762d788f | |
| | 6472e00fb0 | |
| | 4043846767 | |
| | d3b2bc962c | |
| | 54f7b64821 | |
| | 82a2a6e669 | |
| | 6376d60af5 | |
| | b1e2e3831f | |
| | 5de1c8aa82 | |
| | 63dc5c2bdb | |
| | 7f2d1670a0 | |
| | 53c8c337fc | |
| | 5b4ec1b2a2 | |
| | 64dd2ed141 | |
| | eb57e04e95 | |
| | ae905c8630 | |
| | c157e794f0 | |
| | ed9bae6f6a | |
| | 9fe1ce19ad | |
| | 6148236cbd | |
| | 2471eb518a | |
| | 8931b41c76 | |
| | 7f523f167d | |
| | 446b6d6158 | |
| | 2ee057e19b | |
| | afc810f21f | |
| | 357052a903 | |
| | 39d6d8d04a | |
| | 888896c0c0 | |
| | ceee482ecc | |
| | d0ed1213d8 | |
| | f6ef428008 | |
| | e726c4f442 | |
| | 402318e586 | |
| | b198cc2a6e | |
| | c3dd4da11b | |
| | ba2e42b06e | |
| | fa0902dc74 | |
| | 8fcb6083dc | |
| | 1ef88140e3 | |
| | aa34c4c84c | |
| | 32d12bb334 | |
| | 1b2a02cb1a | |
| | 2ff11a16c4 | |
| | 441af82dbd | |
| | e09c09af6f | |
| | 3721fe226f | |
| | 8ace0e11cf | |
| | 5e249b0b59 | |
| | 4889955ecf | |
| | d840fd53da | |
| | a61819cdb3 | |
| | e986fbb5fb | |
| | 8f4d575ec8 | |
| | 605a06317b | |
| | a7304ccf47 | |
| | 374e2bd4b9 | |
| | 09a3246ddb | |
| | a615603866 | |
| | 1ca05808e1 | |
| | 5febc2a805 | |
| | 3c047bee58 | |
| | 022c6c157a | |
| | fa587d5678 | |
| | afa5a42f5a | |
| | 71df8ba3e2 | |
| | 8764998e8c | |
| | 2cb4f3aac8 | |
| | 1ccaf33aac | |
| | cb0a8e0413 | |
| | 8674168df4 | |
| | 2221653801 | |
| | 78bcdcef5d | |
| | 672fbe2ac0 | |
| | 56a5970b44 | |
| | a66cef7cfe | |
| | c0b1c2e099 | |
| | 9e553bb87b | |
| | f966514bc7 | |
| | dc0a49f96d | |
| | 65c783c024 | |
| | 6395836fbb | |
| | a7207084ef | |
| | 27ef1f1e71 | |
| | 68fdb14cd6 | |
| | c2af282a85 | |
| | 92d48335cb | |
| | 78cac2edc2 | |
| | 26d105c439 | |
| | 7fec107b98 | |
| | eb01ad3af9 | |
| | e0d9880b32 | |
| | e81e96f0ab | |
| | 06d5bd259c | |
| | 14238b8d62 | |
| | 3b51886927 | |
| | a295ff2e06 | |
| | 18cdaabf5e | |
| | 787e37b7c6 | |
| | 4e5c8b2dd0 | |
| | d8ddacde38 | |
| | bb1e42f0d3 | |
| | 923669c495 | |
| | 7a4139544c | |
| | 4d6ea0236b | |
| | e872a06f22 | |
| | 647bda2160 | |
| | c1e93d23f3 | |
| | c96550cc68 | |
| | b1015ecdc5 | |
| | f1b928a037 | |
| | 16c312c90b | |
| | 110ffd0118 | |
| | 35ad872419 | |
| | 9b943cf2b8 | |
| | 9d1b357e64 | |
| | 9fc2fb4d17 | |
| | 641fa8a3d9 | |
| | add9269706 | |
| | 1a01c4a344 | |
| | b4e7feed06 | |
| | 4b96c650eb | |
| | 107aef3785 | |
| | b49807824f | |
| | e5ef2ef8b5 | |
| | 88779ed56c | |
| | 8b59fb6adc | |
| | 7945647b0b | |
| | 2d39b84806 | |
| | e151a19fcf | |
| | 99d2ba26b9 | |
| | 396924f4cc | |
| | 7545312229 | |
| | 26f9779fbf | |
| | 0bd62eef3a | |
| | e06d15f508 | |
| | aa1ee96bc9 | |
| | 355c73512d | |
| | 0daf9d92ff | |
| | 37de26ce25 | |
| | 0eaef7e7a0 | |
| | 8063cee3cd | |
| | cbb25b4ac0 | |
| | c62206a157 | |
| | 09832141d0 | |
| | bf8e121a10 | |
| | 68568073ec | |
| | ec36524c35 | |
| | 67acd9fd2c | |
| | f7be5c8d25 | |
| | ceacac75e0 | |
| | bae66f94e8 | |
| | ddf132bd78 | |
| | afb012029f | |
| | 651e14c8c3 | |
| | e7c626eb5f | |
| | a0b0d40a19 | |
| | 42e3ab9e27 | |
| | 6e5f333364 | |
| | f33a9abe60 | |
| | 7f1bbdd615 | |
| | d3bf8eaceb | |
| | b9c9d602de | |
| | b25fbd6e24 | |
| | 6052608a4e | |
| | a073b82751 | |
| | 8250acdfb5 | |
| | 8e1f73a34e | |
| | 50704bc882 | |
| | 35d34e3513 | |
| | ea834f3de6 | |
| | 11aedde72f | |
| | 488654abc8 | |
| | da1be0dc65 | |
| | d0c728a339 | |
| | 66c66c4d9b | |
| | 4882721387 | |
| | 06a8850c0c | |
| | 370aa06c67 | |
| | c9fa0564e7 | |
| | 2ba7a0ceba | |
| | 276aedfbb9 | |
| | c193c75674 | |
| | a562ba3746 | |
| | 2fedd572ff | |
| | db0b49c427 | |
| | 03a6f8111c | |
| | 925ad7b3e0 | |
| | bf793d5b8b | |
| | 64a906ca5e | |
| | 99b36442bb | |
| | 3c5164d510 | |
| | ec4b5a4d45 | |
| | 78e1901779 | |
| | cb539314de | |
| | c7627fe0de | |
| | 84bfad7ce5 | |
| | 3e06938b05 | |
| | 4f712fec14 | |
| | c5c9659c76 | |
| | d6e175c1f1 | |
| | 88088e1071 | |
| | 958ddbca86 | |
| | 6670fd28f4 | |
| | 1e59c31de3 | |
| | c966dbbbbc | |
| | af8f5ba04e | |
| | b741ed0b3b | |
| | 01ba3c14f8 | |
| | d13b1a83ad | |
| | 303477db70 | |
| | 311e89e9e7 | |
| | 8546cfe714 | |
| | e6f4d84b9a | |
| | ce7e422169 | |
| | e5aec80984 | |
| | 6d97817390 | |
| | d516f22159 | |
| | e918c18ca2 | |
| | 5dd8d905fa | |
| | 1121d1ee6c | |
| | 4793f096af | |
| | 7b5b4ce082 | |
| | fa08c9c3e4 | |
| | d0d5eb956a | |
| | 969f949330 | |
| | 9169bbd04d | |
| | 99463ad01c | |
| | f1d6b0feda | |
| | e33da50278 | |
| | 4034eb3221 | |
| | 75a95f0109 | |
| | 92fdc16fe6 | |
| | 23fa2995c8 | |
| | 59aefdff77 | |
| | e92ab9e3cc | |
| | e3bf1f763c | |
| | 1c6e9d0b69 | |
| | bfd4eb3e11 | |
| | c9f902a8af | |
| | 0b67510ec9 | |
| | b5cd320e8b | |
| | deb25b4987 | |
| | 4612da264a | |
| | 59b67e1e10 | |
| | 5fad936b27 | |
| | e376a45dea | |
| | fd593bb61d | |
| | 71b97d5974 | |
| | 2b405ae164 | |
| | 2fe4736b69 | |
| | 184f8ca6cf | |
| | 1ff2019dde | |
| | a3d8261686 | |
| | 7d0600976e | |
| | e1e6e4f3dc | |
| | fba2853773 | |
| | 48df7e1078 | |
| | 235dcd5fa6 | |
| | 2027db7411 | |
| | 611dd33c75 | |
| | ec1c92a714 | |
| | 6ac78156ac | |
| | e94b74e92d | |
| | 2bbec47f63 | |
| | b5ddf4c953 | |
| | 44be75aeef | |
| | 2c03759b5d | |
| | 2e3da03723 | |
| | 6e96fbcda7 | |
| | d1fd5b7f27 | |
| | 9dbcc105e7 | |
| | 5cd5a82ddc | |
| | 88c1892dc9 | |
| | 3c1b181675 | |
| | 6777dc16ca | |
| | 3833647dfe | |
| | b6c47f0cce | |
| | d308c7ac60 | |
| | 947c757aa5 | |
| | 5ee5bd7d36 | |
| | d9c4ae92cd | |
| | e1efff19f0 | |
| | 61f723a1f5 | |
| | b32756932b | |
| | cb5e64d26b | |
| | f36febf10a | |
| | 26d9a9caa6 | |
| | cb876cf77e | |
| | 4789711910 | |
| | 4064980505 | |
| | f9b8f2d22c | |
| | 6a95aadc53 | |
| | f9f08f082d | |
| | 0817901bef | |
| | ac22172e53 | |
| | fd87fbf31e | |
| | 554be0908f | |
| | eaec4e5f13 | |
| | 0e7ba27a7d | |
| | c551f5c23b | |
| | 5159657ae5 | |
| | d35db7df72 | |
| | 2b5399c559 | |
| | 9e61bbbd8e | |
| | 7ce5857cd5 | |
| | 38fbae99fd | |
| | b0a9d44b0c | |
| | b4e22cd375 | |
| | 9bc92736a7 | |
| | 111b34d05c | |
| | 07d9599a2f | |
| | d8194f211d | |
| | 51a6374c33 | |
| | aa6c6035b6 | |
| | 44b4a7ffbb | |
| | e5bb018d22 | |
| | 79b8a6536e | |
| | 3de31cd06a | |
| | c579b54d40 | |
| | 0a52575e8b | |
| | 23c9a98f66 | |
| | 796fc33b5b | |
| | dc4c11ddd2 | |
| | d389e4d5d4 | |
| | 8cb78ad931 | |
| | 85f987d15c | |
| | b12079e0f6 | |
| | dcf5c6167a | |
| | b395d3f487 | |
| | 37662cad10 | |
| | aa1673063d | |
| | f51f49eb60 | |
| | 54c9bac961 | |
| | e70fd73bdd | |
| | 9bb9e7b64d | |
| | f64c03543a | |
| | 51374de1a1 | |
| | afcc12f263 | |
| | 88c5482366 | |
| | bbf7295c32 | |
| | ca5e23e68c | |
| | eadb1487ae | |
| | 1faa70fc77 | |
| | 30d7c007de | |
| | f54f6a4402 | |
| | 7b41cdec65 | |
| | fb6a652a57 | |
| | ea34d753c1 | |
| | 2bc46e708e | |
| | 96e3b5b7b3 | |
| | fafbafa5e1 | |
| | be8605d8c6 | |
| | 061660d47a | |
| | 2ed6dbb344 | |
| | 4766b45746 | |
| | 0734252e98 | |
| | 91b4827c1d | |
| | df6d56ce66 | |
| | f0203c96ab | |
| | bccabe40c0 | |
| | c2f599b4ff | |
| | 5fd069d70d | |
| | 32d34d1748 | |
| | 18eb605605 | |
| | 4fdc88e9e1 | |
| | 4c69d8d3a8 | |
| | d4b2dd0ec1 | |
| | 181f78421b | |
| | 8ed38527d0 | |
| | c4c926070d | |
| | ed87411e0d | |
| | 4ec2a448ab | |
| | 73d01da94e | |
| | df8e02157a | |
| | 6e513ed32a | |
| | 325ef6327d | |
| | 46700e5ad0 | |
| | d1e21fa345 | |
| | cede387783 | |
| | b206427d50 | |
| | 47d96e2037 | |
| | e51f7cc1a7 | |
| | 40381d4b11 | |
| | 76fc9e5a3d | |
| | 9822f2c614 | |
| | 8854334ab5 | |
| | 53080844d2 | |
| | 76fd722e33 | |
| | fa27513f76 | |
| | 72c6f91130 | |
| | 5918f35b8b | |
| | 0b11e6e6d0 | |
| | a043b487bd | |
| | 3982489e67 | |
| | 5f3c515323 | |
| | 6e1297d734 | |
| | 8f3cbdd257 | |
| | 2fc06ae64e | |
| | 515aa1d2bd | |
| | ff7a36394a | |
| | 5261ab249a | |
| | c3192351da | |
| | ce30d067a6 | |
| | e84a8a72c5 | |
| | 10a4fe04d1 | |
| | d5ce6441e3 | |
| | a8d21fb1d6 | |
| | 9277d8d8f8 | |
| | 0618541527 | |
| | 1db49a4dd4 | |
| | 3df96034a1 | |
| | e991dc061d | |
| | 56670066c7 | |
| | 31d27ff3fa | |
| | 297ff0dd25 | |
| | b0a5b48fb2 | |
| | ac244e6ad9 | |
| | 7393e92b21 | |
| | 86810d9f03 | |
| | 18aa8d11ad | |
| | fafec56f09 | |
| | 129ca9da81 | |
| | cbfb9ac87c | |
| | 42309edef4 | |
| | 559e57ca46 | |
| | 311bf1f157 | |
| | 131c3cc324 | |
| | 152ec0da0d | |
| | ee04df40c3 | |
| | 252e90a633 | |
| | 048d486fa6 | |
| | 8fdfb68741 | |
| | 64c9e4aeca | |
| | 08b90e8767 | |
| | 0206613f9e | |
| | ae0629628e | |
| | 785b2e7287 | |
| | 43e3d0552e | |
| | 801aa2e876 | |
| | bddc7a438d | |
| | b8c78a68e7 | |
| | 49219f4447 | |
| | 59b1abb719 | |
| | 3e2cfb552b | |
| | 779be1b8d0 | |
| | faf74de238 | |
| | 50a51c2e79 | |
| | d31e641496 | |
| | f2d36f5be9 | |
| | 0b55f61fac | |
| | 4156dcbafd | |
| | 36e6ac2362 | |
| | 9613199152 | |
| | 14328d7496 | |
| | 6af12d1acc | |
| | 9b44e49879 | |
| | afee18f146 | |
.github/FUNDING.yml (vendored, new file, 5 additions)
@@ -0,0 +1,5 @@
+# These are supported funding model platforms
+
+patreon: PixelPawsAI
+ko_fi: pixelpawsai
+custom: ['paypal.me/pixelpawsai']
.gitignore (vendored, 2 changes)
@@ -1,5 +1,7 @@
__pycache__/
settings.json
path_mappings.yaml
output/*
py/run_test.py
.vscode/
cache/
LICENSE (687 changes)
@@ -1,21 +1,674 @@
-MIT License
-
-Copyright (c) 2023 Will Miao
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+GNU GENERAL PUBLIC LICENSE
+Version 3, 29 June 2007
+
+Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+Everyone is permitted to copy and distribute verbatim copies
+of this license document, but changing it is not allowed.
+
+Preamble
+
+The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+The precise terms and conditions for copying, distribution and
+modification follow.
+
+TERMS AND CONDITIONS
+
+0. Definitions.
+
+"This License" refers to version 3 of the GNU General Public License.
+
+"Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+"The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+1. Source Code.
+
+The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+The Corresponding Source for a work in source code form is that
+same work.
+
+2. Basic Permissions.
+
+All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+4. Conveying Verbatim Copies.
+
+You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+5. Conveying Modified Source Versions.
+
+You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+a) The work must carry prominent notices stating that you modified
+it, and giving a relevant date.
+
+b) The work must carry prominent notices stating that it is
+released under this License and any conditions added under section
+7. This requirement modifies the requirement in section 4 to
+"keep intact all notices".
+
+c) You must license the entire work, as a whole, under this
+License to anyone who comes into possession of a copy. This
+License will therefore apply, along with any applicable section 7
+additional terms, to the whole of the work, and all its parts,
+regardless of how they are packaged. This License gives no
+permission to license the work in any other way, but it does not
+invalidate such permission if you have separately received it.
+
+d) If the work has interactive user interfaces, each must display
+Appropriate Legal Notices; however, if the Program has interactive
+interfaces that do not display Appropriate Legal Notices, your
+work need not make them do so.
+
+A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+6. Conveying Non-Source Forms.
+
+You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+a) Convey the object code in, or embodied in, a physical product
+(including a physical distribution medium), accompanied by the
+Corresponding Source fixed on a durable physical medium
+customarily used for software interchange.
+
+b) Convey the object code in, or embodied in, a physical product
+(including a physical distribution medium), accompanied by a
+written offer, valid for at least three years and valid for as
+long as you offer spare parts or customer support for that product
+model, to give anyone who possesses the object code either (1) a
+copy of the Corresponding Source for all the software in the
+product that is covered by this License, on a durable physical
+medium customarily used for software interchange, for a price no
+more than your reasonable cost of physically performing this
+conveying of source, or (2) access to copy the
+Corresponding Source from a network server at no charge.
+
+c) Convey individual copies of the object code with a copy of the
+written offer to provide the Corresponding Source. This
+alternative is allowed only occasionally and noncommercially, and
+only if you received the object code with such an offer, in accord
+with subsection 6b.
+
+d) Convey the object code by offering access from a designated
+place (gratis or for a charge), and offer equivalent access to the
+Corresponding Source in the same way through the same place at no
+further charge. You need not require recipients to copy the
+Corresponding Source along with the object code. If the place to
+copy the object code is a network server, the Corresponding Source
+may be on a different server (operated by you or a third party)
+that supports equivalent copying facilities, provided you maintain
+clear directions next to the object code saying where to find the
+Corresponding Source. Regardless of what server hosts the
+Corresponding Source, you remain obligated to ensure that it is
+available for as long as needed to satisfy these requirements.
+
+e) Convey the object code using peer-to-peer transmission, provided
+you inform other peers where the object code and Corresponding
+Source of the work are being offered to the general public at no
+charge under subsection 6d.
+
+A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+"Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+7. Additional Terms.
+
+"Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+a) Disclaiming warranty or limiting liability differently from the
+terms of sections 15 and 16 of this License; or
+
+b) Requiring preservation of specified reasonable legal notices or
+author attributions in that material or in the Appropriate Legal
+Notices displayed by works containing it; or
+
+c) Prohibiting misrepresentation of the origin of that material, or
+requiring that modified versions of such material be marked in
+reasonable ways as different from the original version; or
+
+d) Limiting the use for publicity purposes of names of licensors or
+authors of the material; or
+
+e) Declining to grant rights under trademark law for use of some
+trade names, trademarks, or service marks; or
+
+f) Requiring indemnification of licensors and authors of that
+material by anyone who conveys the material (or modified versions of
+it) with contractual assumptions of liability to the recipient, for
+any liability that these contractual assumptions directly impose on
+those licensors and authors.
+
+All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+8. Termination.
+
+You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+9. Acceptance Not Required for Having Copies.
+
+You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+10. Automatic Licensing of Downstream Recipients.
+
+Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+11. Patents.
+
+A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+12. No Surrender of Others' Freedom.
+
+If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+13. Use with the GNU Affero General Public License.
+
+Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+14. Revised Versions of this License.
+
+The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+15. Disclaimer of Warranty.
+
+THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+16. Limitation of Liability.
+
+IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+17. Interpretation of Sections 15 and 16.
+
+If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+END OF TERMS AND CONDITIONS
+
+How to Apply These Terms to Your New Programs
+
+If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ComfyUI Lora Manager - A ComfyUI custom node for managing models
+Copyright (C) 2025 Will Miao
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ComfyUI Lora Manager Copyright (C) 2025 Will Miao
+This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+This is free software, and you are welcome to redistribute it
+under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
README.md (218 changes)
@@ -6,48 +6,96 @@
[](https://github.com/willmiao/ComfyUI-Lora-Manager/releases)
[](https://github.com/willmiao/ComfyUI-Lora-Manager/releases)

A comprehensive toolset that streamlines organizing, downloading, and applying LoRA models in ComfyUI. With powerful features like recipe management and one-click workflow integration, working with LoRAs becomes faster, smoother, and significantly easier. Access the interface at: `http://localhost:8188/loras`
A comprehensive toolset that streamlines organizing, downloading, and applying LoRA models in ComfyUI. With powerful features like recipe management, checkpoint organization, and one-click workflow integration, working with models becomes faster, smoother, and significantly easier. Access the interface at: `http://localhost:8188/loras`

## 📺 Tutorial: One-Click LoRA Integration
Watch this quick tutorial to learn how to use the new one-click LoRA integration feature:

[](https://youtu.be/qS95OjX3e70)
[](https://youtu.be/noN7f_ER7yo)
[](https://youtu.be/hvKw31YpE-U)

## 🌐 Browser Extension
Enhance your Civitai browsing experience with our companion browser extension! See which models you already have, download new ones with a single click, and manage your downloads efficiently.

<div>
<a href="https://chromewebstore.google.com/detail/lm-civitai-extension/capigligggeijgmocnaflanlbghnamgm?utm_source=item-share-cb" style="display: inline-block; background-color: #4285F4; color: white; padding: 8px 16px; text-decoration: none; border-radius: 4px; font-weight: bold; margin: 10px 0;">
<img src="https://www.google.com/chrome/static/images/chrome-logo.svg" width="20" style="vertical-align: middle; margin-right: 8px;"> Get Extension from Chrome Web Store
</a>
</div>

<div id="firefox-install" class="install-ok"><a href="https://github.com/willmiao/lm-civitai-extension-firefox/releases/latest/download/extension.xpi">📦 Install Firefox Extension (reviewed and verified by Mozilla)</a></div>

📚 [Learn More: Complete Tutorial](https://github.com/willmiao/ComfyUI-Lora-Manager/wiki/LoRA-Manager-Civitai-Extension-(Chrome-Extension))

---

## Release Notes

### v0.8.3
* **Enhanced Workflow Parser** - Rebuilt workflow analysis engine with improved support for ComfyUI core nodes and easier extensibility
* **Improved Recipe System** - Refined the experimental Save Recipe functionality with better workflow integration
* **New Save Image Node** - Added experimental node with metadata support for perfect CivitAI compatibility
  * Supports dynamic filename prefixes with variables [1](https://github.com/nkchocoai/ComfyUI-SaveImageWithMetaData?tab=readme-ov-file#filename_prefix); see the illustrative example after this section
* **Default LoRA Root Setting** - Added configuration option for setting your preferred LoRA directory
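
To illustrate the dynamic prefix idea from the Save Image node note above: the linked ComfyUI-SaveImageWithMetaData documentation describes substitution variables for `filename_prefix` (treat the exact tokens here as an assumption borrowed from that project, not a confirmed LoRA Manager feature). A prefix along the lines of `%date:yyyy-MM-dd%/%seed%_sample` would then expand at save time into a dated subfolder plus the generation seed.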

### v0.8.26
* **Creator Search Option**
  - Added ability to search models by creator name, making it easier to find models from specific authors.
* **Enhanced Node Usability**
  - Improved user experience for Lora Loader, Lora Stacker, and WanVideo Lora Select nodes by fixing the maximum height of the text input area. Users can now freely and conveniently adjust the LoRA region within these nodes.
* **Compatibility Fixes**
  - Resolved compatibility issues with ComfyUI and certain custom nodes, including ComfyUI-Custom-Scripts, ensuring smoother integration and operation.

### v0.8.2
* **Faster Initialization for Forge Users** - Improved first-run efficiency by utilizing existing `.json` and `.civitai.info` files from Forge’s CivitAI helper extension, making migration smoother.
* **LoRA Filename Editing** - Added support for renaming LoRA files directly within LoRA Manager.
* **Recipe Editing** - Users can now edit recipe names and tags.
* **Retain Deleted LoRAs in Recipes** - Deleted LoRAs will remain listed in recipes, allowing future functionality to reconnect them once re-obtained.
* **Download Missing LoRAs from Recipes** - Easily fetch missing LoRAs associated with a recipe.

### v0.8.25
* **LoRA List Reordering**
  - Drag & Drop: Easily rearrange LoRA entries using the drag handle.
  - Keyboard Shortcuts:
    - Arrow keys: Navigate between LoRAs
    - Ctrl/Cmd + Arrow: Move selected LoRA up/down
    - Ctrl/Cmd + Home/End: Move selected LoRA to top/bottom
    - Delete/Backspace: Remove selected LoRA
  - Context Menu: Right-click for quick actions like Move Up, Move Down, Move to Top, Move to Bottom.
* **Bulk Operations for Checkpoints & Embeddings**
  - Bulk Mode: Select multiple checkpoints or embeddings for batch actions.
  - Bulk Refresh: Update Civitai metadata for selected models.
  - Bulk Delete: Remove multiple models at once.
  - Bulk Move (Embeddings): Move selected embeddings to a different folder.
* **New Setting: Auto Download Example Images**
  - Automatically fetch example images for models missing previews (requires download location to be set). Enabled by default.
* **General Improvements**
  - Various user experience enhancements and stability fixes.

### v0.8.1
* **Base Model Correction** - Added support for modifying base model associations to fix incorrect metadata for non-CivitAI LoRAs
* **LoRA Loader Flexibility** - Made CLIP input optional for model-only workflows like Hunyuan video generation
* **Expanded Recipe Support** - Added compatibility with 3 additional recipe metadata formats
* **Enhanced Showcase Images** - Generation parameters now displayed alongside LoRA preview images
* **UI Improvements & Bug Fixes** - Various interface refinements and stability enhancements

### v0.8.22
* **Embeddings Management** - Added Embeddings page for comprehensive embedding model management.
* **Advanced Sorting Options** - Introduced flexible sorting controls, allowing sorting by name, added date, or file size in both ascending and descending order.
* **Custom Download Path Templates & Base Model Mapping** - Implemented UI settings for configuring download path templates and base model path mappings, allowing customized model organization and storage location when downloading models via LM Civitai Extension; see the hypothetical sketch after this list.
* **LM Civitai Extension Enhancements** - Improved concurrent download performance and stability, with new support for canceling active downloads directly from the extension interface.
* **Update Feature** - Added update functionality, allowing users to update LoRA Manager to the latest release version directly from the LoRA Manager UI.
* **Bulk Operations: Refresh All** - Added bulk refresh functionality, allowing users to update Civitai metadata across multiple LoRAs.
|
||||
### v0.8.0
|
||||
* **Introduced LoRA Recipes** - Create, import, save, and share your favorite LoRA combinations
|
||||
* **Recipe Management System** - Easily browse, search, and organize your LoRA recipes
|
||||
* **Workflow Integration** - Save recipes directly from your workflow with generation parameters preserved
|
||||
* **Simplified Workflow Application** - Quickly apply saved recipes to new projects
|
||||
* **Enhanced UI & UX** - Improved interface design and user experience
|
||||
* **Bug Fixes & Stability** - Resolved various issues and enhanced overall performance
|
||||
### v0.8.20
|
||||
* **LM Civitai Extension** - Released [browser extension through Chrome Web Store](https://chromewebstore.google.com/detail/lm-civitai-extension/capigligggeijgmocnaflanlbghnamgm?utm_source=item-share-cb) that works seamlessly with LoRA Manager to enhance Civitai browsing experience, showing which models are already in your local library, enabling one-click downloads, and providing queue and parallel download support
|
||||
* **Enhanced Lora Loader** - Added support for nunchaku, improving convenience when working with ComfyUI-nunchaku workflows, plus new template workflows for quick onboarding
|
||||
* **WanVideo Integration** - Introduced WanVideo Lora Select (LoraManager) node compatible with ComfyUI-WanVideoWrapper for streamlined lora usage in video workflows, including a template workflow to help you get started quickly
|
||||
|
||||
### v0.8.19
|
||||
* **Analytics Dashboard** - Added new Statistics page providing comprehensive visual analysis of model collection and usage patterns for better library insights
|
||||
* **Target Node Selection** - Enhanced workflow integration with intelligent target choosing when sending LoRAs/recipes to workflows with multiple loader/stacker nodes; a visual selector now appears showing node color, type, ID, and title for precise targeting
|
||||
* **Enhanced NSFW Controls** - Added support for setting NSFW levels on recipes with automatic content blurring based on user preferences
|
||||
* **Customizable Card Display** - New display settings allowing users to choose whether card information and action buttons are always visible or only revealed on hover
|
||||
* **Expanded Compatibility** - Added support for efficiency-nodes-comfyui in Save Recipe and Save Image nodes, plus fixed compatibility with ComfyUI_Custom_Nodes_AlekPet
|
||||
|
||||
### v0.8.18
|
||||
* **Custom Example Images** - Added ability to import your own example images for LoRAs and checkpoints with automatic metadata extraction from embedded information
|
||||
* **Enhanced Example Management** - New action buttons to set specific examples as previews or delete custom examples
|
||||
* **Improved Duplicate Detection** - Enhanced "Find Duplicates" with hash verification feature to eliminate false positives when identifying duplicate models
|
||||
* **Tag Management** - Added tag editing functionality allowing users to customize and manage model tags
|
||||
* **Advanced Selection Controls** - Implemented Ctrl+A shortcut for quickly selecting all filtered LoRAs, automatically entering bulk mode when needed
|
||||
* **Note**: Cache file functionality temporarily disabled pending rework
|
||||
|
||||
### v0.8.17
|
||||
* **Duplicate Model Detection** - Added "Find Duplicates" functionality for LoRAs and checkpoints using model file hash detection, enabling convenient viewing and batch deletion of duplicate models
|
||||
* **Enhanced URL Recipe Imports** - Optimized import recipe via URL functionality using CivitAI API calls instead of web scraping, now supporting all rated images (including NSFW) for recipe imports
|
||||
* **Improved TriggerWord Control** - Enhanced TriggerWord Toggle node with new default_active switch to set the initial state (active/inactive) when trigger words are added
|
||||
* **Centralized Example Management** - Added "Migrate Existing Example Images" feature to consolidate downloaded example images from model folders into central storage with customizable naming patterns
|
||||
* **Intelligent Word Suggestions** - Implemented smart trigger word suggestions by reading class tokens and tag frequency from safetensors files, displaying recommendations when editing trigger words
|
||||
* **Model Version Management** - Added "Re-link to CivitAI" context menu option for connecting models to different CivitAI versions when needed
|
||||
|
||||
[View Update History](./update_logs.md)
|
||||
|
||||
- 🚀 **High Performance**
  - Fast model loading and browsing
  - Smooth scrolling through large collections
  - Real-time updates when files change

- 📂 **Advanced Organization**
  - Quick search with fuzzy matching
  - Folder-based categorization
  - Move LoRAs between folders
  - Sort by name or date

- 🌐 **Rich Model Integration**
  - Direct download from CivitAI
  - Trigger words at a glance
  - One-click workflow integration with preset values

- 🔄 **Checkpoint Management**
  - Scan and organize checkpoint models
  - Filter and search your collection
  - View and edit metadata
  - Clean up and manage disk space

- 🧩 **LoRA Recipes**
  - Save and share favorite LoRA combinations
  - Preserve generation parameters for future reference
  - Context menu for quick actions
  - Custom notes and usage tips
  - Multi-folder support
  - Visual progress indicators during initialization

---

## Installation

### Option 1: **ComfyUI Manager** (Recommended for ComfyUI users)

1. Open **ComfyUI**.
2. Go to **Manager > Custom Node Manager**.
3. Search for `lora-manager`.
4. Click **Install**.

### Option 2: **Portable Standalone Edition** (No ComfyUI required)

1. Download the [Portable Package](https://github.com/willmiao/ComfyUI-Lora-Manager/releases/download/v0.8.15/lora_manager_portable.7z)
2. Copy the provided `settings.json.example` file to create a new file named `settings.json` in the `comfyui-lora-manager` folder
3. Edit `settings.json` to include your correct model folder paths and CivitAI API key (see the sketch below)
4. Run `run.bat`
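A minimal, illustrative `settings.json` sketch is shown below. The `folder_paths` and `default_lora_root` keys match what LoRA Manager itself writes (see the `py/config.py` listing later on this page); the `civitai_api_key` key name is an assumption — check your `settings.json.example` for the exact field — and all paths are placeholders:

```json
{
  "civitai_api_key": "your-civitai-api-key",
  "folder_paths": {
    "loras": ["D:/models/loras"],
    "checkpoints": ["D:/models/checkpoints"],
    "unet": ["D:/models/unet"],
    "embeddings": ["D:/models/embeddings"]
  },
  "default_lora_root": "D:/models/loras"
}
```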

### Option 3: **Manual Installation**

```bash
git clone https://github.com/willmiao/ComfyUI-Lora-Manager.git
cd ComfyUI-Lora-Manager
pip install -r requirements.txt
```

## Usage

- Paste into the Lora Loader node's text input
- The node will automatically apply preset strength and trigger words

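For reference, the pasted text follows the widely used `<lora:name:strength>` convention. The name below is a placeholder — copy the exact string from the manager UI rather than typing it by hand:

```
<lora:example_lora_v1:0.8>
```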
### Filename Format Patterns for Save Image Node

The Save Image Node supports dynamic filename generation using pattern codes. You can customize how your images are named using the following format patterns:

#### Available Pattern Codes

- `%seed%` - Inserts the generation seed number
- `%width%` - Inserts the image width
- `%height%` - Inserts the image height
- `%pprompt:N%` - Inserts the positive prompt (limited to N characters)
- `%nprompt:N%` - Inserts the negative prompt (limited to N characters)
- `%model:N%` - Inserts the model/checkpoint name (limited to N characters)
- `%date%` - Inserts current date/time as "yyyyMMddhhmmss"
- `%date:FORMAT%` - Inserts date using custom format with:
  - `yyyy` - 4-digit year
  - `yy` - 2-digit year
  - `MM` - 2-digit month
  - `dd` - 2-digit day
  - `hh` - 2-digit hour
  - `mm` - 2-digit minute
  - `ss` - 2-digit second

#### Examples

- `image_%seed%` → `image_1234567890`
- `gen_%width%x%height%` → `gen_512x768`
- `%model:10%_%seed%` → `dreamshape_1234567890`
- `%date:yyyy-MM-dd%` → `2025-04-28`
- `%pprompt:20%_%seed%` → `beautiful landscape_1234567890`
- `%model%_%date:yyMMdd%_%seed%` → `dreamshaper_v8_250428_1234567890`

You can combine multiple patterns to create detailed, organized filenames for your generated images.

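To make the substitution rules concrete, here is a minimal Python sketch of how such pattern expansion could work. This is not the node's actual implementation, and `expand_filename_pattern` is a name invented for this example:

```python
import re
from datetime import datetime

def expand_filename_pattern(pattern, seed, width, height, pprompt="", nprompt="", model=""):
    """Hypothetical helper mirroring the documented pattern codes."""
    def date_repl(match):
        fmt = match.group(1) or "yyyyMMddhhmmss"
        # Translate the documented tokens into strftime directives
        for token, strf in [("yyyy", "%Y"), ("yy", "%y"), ("MM", "%m"),
                            ("dd", "%d"), ("hh", "%H"), ("mm", "%M"), ("ss", "%S")]:
            fmt = fmt.replace(token, strf)
        return datetime.now().strftime(fmt)

    out = re.sub(r"%date(?::([^%]*))?%", date_repl, pattern)
    out = out.replace("%seed%", str(seed))
    out = out.replace("%width%", str(width)).replace("%height%", str(height))
    for code, value in (("pprompt", pprompt), ("nprompt", nprompt), ("model", model)):
        # %code:N% truncates the value to N characters; bare %code% inserts it whole
        out = re.sub(rf"%{code}:(\d+)%", lambda m, v=value: v[:int(m.group(1))], out)
        out = out.replace(f"%{code}%", value)
    return out

print(expand_filename_pattern("%model:10%_%seed%", 1234567890, 512, 768, model="dreamshaper_v8"))
# -> dreamshape_1234567890
```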
### Standalone Mode

You can now run LoRA Manager independently from ComfyUI:

1. **For ComfyUI users**:
   - Launch ComfyUI with LoRA Manager at least once to initialize the necessary path information in the `settings.json` file.
   - Make sure dependencies are installed: `pip install -r requirements.txt`
   - From your ComfyUI root directory, run:
     ```bash
     python custom_nodes\comfyui-lora-manager\standalone.py
     ```
   - Access the interface at: `http://localhost:8188/loras`
   - You can specify a different host or port with arguments:
     ```bash
     python custom_nodes\comfyui-lora-manager\standalone.py --host 127.0.0.1 --port 9000
     ```

2. **For non-ComfyUI users**:
   - Copy the provided `settings.json.example` file to create a new file named `settings.json`
   - Edit `settings.json` to include your correct model folder paths and CivitAI API key
   - Install required dependencies: `pip install -r requirements.txt`
   - Run standalone mode:
     ```bash
     python standalone.py
     ```
   - Access the interface through your browser at: `http://localhost:8188/loras`

This standalone mode provides a lightweight option for managing your model and recipe collection without needing to run the full ComfyUI environment, making it useful even for users who primarily use other stable diffusion interfaces.

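In either mode, a quick way to confirm the server is up (assuming the default host and port) is to request the interface URL from another terminal:

```bash
curl -I http://localhost:8188/loras
```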
---

## Contributing

Thank you for your interest in contributing to ComfyUI LoRA Manager! As this project is currently in its early stages and undergoing rapid development and refactoring, we are temporarily not accepting pull requests.

However, your feedback and ideas are extremely valuable to us:

- Please feel free to open issues for any bugs you encounter
- Submit feature requests through GitHub issues
- Share your suggestions for improvements

We appreciate your understanding and look forward to potentially accepting code contributions once the project architecture stabilizes.

---

## Credits

This project has been inspired by and benefited from other excellent ComfyUI extensions:

- [ComfyUI-SaveImageWithMetaData](https://github.com/nkchocoai/ComfyUI-SaveImageWithMetaData) - For the image metadata functionality
- [rgthree-comfy](https://github.com/rgthree/rgthree-comfy) - For the lora loader functionality

---

## ☕ Support

If you find this project helpful, consider supporting its development:

[](https://ko-fi.com/pixelpawsai)

[](https://patreon.com/PixelPawsAI)

WeChat: [Click to view QR code](https://raw.githubusercontent.com/willmiao/ComfyUI-Lora-Manager/main/static/images/wechat-qr.webp)

## 💬 Community

Join our Discord community for support, discussions, and updates:

---

### __init__.py (11 lines changed)

```python
# ... (unchanged leading lines omitted in this capture)
from .py.nodes.lora_loader import LoraManagerLoader
from .py.nodes.trigger_word_toggle import TriggerWordToggle
from .py.nodes.lora_stacker import LoraStacker
from .py.nodes.save_image import SaveImage
from .py.nodes.debug_metadata import DebugMetadata
from .py.nodes.wanvideo_lora_select import WanVideoLoraSelect
# Import metadata collector to install hooks on startup
from .py.metadata_collector import init as init_metadata_collector

NODE_CLASS_MAPPINGS = {
    LoraManagerLoader.NAME: LoraManagerLoader,
    TriggerWordToggle.NAME: TriggerWordToggle,
    LoraStacker.NAME: LoraStacker,
    SaveImage.NAME: SaveImage,
    DebugMetadata.NAME: DebugMetadata,
    WanVideoLoraSelect.NAME: WanVideoLoraSelect
}

WEB_DIRECTORY = "./web/comfyui"

# Initialize metadata collector
init_metadata_collector()

# Register routes on import
LoraManager.add_routes()

__all__ = ['NODE_CLASS_MAPPINGS', 'WEB_DIRECTORY']
```

### New example workflows

New example workflow files were added (the previews are binary and the JSON diffs are too long to display):

- `example_workflows/Flux Example.jpg` (669 KiB) with `Flux Example.json`
- `example_workflows/Illustrious Pony Example.jpg` (669 KiB) with `Illustrious Pony Example.json`
- `example_workflows/nunchaku-flux.1-dev.jpg` (68 KiB) with `nunchaku-flux.1-dev.json`

### py/config.py (222 lines changed)

```python
# ... (unchanged leading lines omitted in this capture)
import platform
import folder_paths  # type: ignore
from typing import List
import logging
import sys
import json

# Check if running in standalone mode
standalone_mode = 'nodes' not in sys.modules

logger = logging.getLogger(__name__)

# ... (unchanged lines omitted)

class Config:
    def __init__(self):
        self.templates_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'templates')
        self.static_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'static')
        # Path mapping dictionary, target to link mapping
        self._path_mappings = {}
        # Static route mapping dictionary, target to route mapping
        self._route_mappings = {}
        self.loras_roots = self._init_lora_paths()
        self.temp_directory = folder_paths.get_temp_directory()
        self.checkpoints_roots = None
        self.unet_roots = None
        self.embeddings_roots = None
        self.base_models_roots = self._init_checkpoint_paths()
        self.embeddings_roots = self._init_embedding_paths()
        # Scan symbolic links during initialization
        self._scan_symbolic_links()

        if not standalone_mode:
            # Save the paths to settings.json when running in ComfyUI mode
            self.save_folder_paths_to_settings()

    def save_folder_paths_to_settings(self):
        """Save folder paths to settings.json for standalone mode to use later"""
        try:
            # Load existing settings
            settings_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'settings.json')
            settings = {}
            if os.path.exists(settings_path):
                with open(settings_path, 'r', encoding='utf-8') as f:
                    settings = json.load(f)

            # Update settings with paths
            settings['folder_paths'] = {
                'loras': self.loras_roots,
                'checkpoints': self.checkpoints_roots,
                'unet': self.unet_roots,
                'embeddings': self.embeddings_roots,
            }

            # Add default roots if there's only one item and the key doesn't exist
            if len(self.loras_roots) == 1 and "default_lora_root" not in settings:
                settings["default_lora_root"] = self.loras_roots[0]

            if self.checkpoints_roots and len(self.checkpoints_roots) == 1 and "default_checkpoint_root" not in settings:
                settings["default_checkpoint_root"] = self.checkpoints_roots[0]

            if self.embeddings_roots and len(self.embeddings_roots) == 1 and "default_embedding_root" not in settings:
                settings["default_embedding_root"] = self.embeddings_roots[0]

            # Save settings
            with open(settings_path, 'w', encoding='utf-8') as f:
                json.dump(settings, f, indent=2)

            logger.info("Saved folder paths to settings.json")
        except Exception as e:
            logger.warning(f"Failed to save folder paths: {e}")

    def _is_link(self, path: str) -> bool:
        try:
            # ... (unchanged lines omitted in this capture)
            return False

    def _scan_symbolic_links(self):
        """Scan all symbolic links in LoRA, Checkpoint, and Embedding root directories"""
        for root in self.loras_roots:
            self._scan_directory_links(root)

        for root in self.base_models_roots:
            self._scan_directory_links(root)

        for root in self.embeddings_roots:
            self._scan_directory_links(root)

    def _scan_directory_links(self, root: str):
        """Recursively scan symbolic links in a directory"""
        try:
            with os.scandir(root) as it:
                for entry in it:
                    # ... (unchanged lines omitted in this capture)
        except Exception as e:
            logger.error(f"Error scanning links in {root}: {e}")

    def add_path_mapping(self, link_path: str, target_path: str):
        """Add a symbolic link path mapping
        target_path: actual target path
        link_path: symbolic link path
        """
        normalized_link = os.path.normpath(link_path).replace(os.sep, '/')
        normalized_target = os.path.normpath(target_path).replace(os.sep, '/')
        # Keep the original mapping: target path -> link path
        self._path_mappings[normalized_target] = normalized_link
        logger.info(f"Added path mapping: {normalized_target} -> {normalized_link}")

    def add_route_mapping(self, path: str, route: str):
        """Add a static route mapping"""
        normalized_path = os.path.normpath(path).replace(os.sep, '/')
        self._route_mappings[normalized_path] = route
        # logger.info(f"Added route mapping: {normalized_path} -> {route}")

    def map_path_to_link(self, path: str) -> str:
        """Map a target path back to its symbolic link path"""
        normalized_path = os.path.normpath(path).replace(os.sep, '/')
        # Check if the path is contained in any mapped target path
        for target_path, link_path in self._path_mappings.items():
            if normalized_path.startswith(target_path):
                # If the path starts with the target path, replace it with the link path
                return normalized_path.replace(target_path, link_path, 1)
        return path

    def map_link_to_path(self, link_path: str) -> str:
        """Map a symbolic link path back to the actual path"""
        normalized_link = os.path.normpath(link_path).replace(os.sep, '/')
        # Mappings are stored as target -> link, so compare against the link side here.
        # (The captured diff compared against the target side, which would duplicate
        # map_path_to_link; this follows the documented intent instead.)
        for target_path, mapped_link in self._path_mappings.items():
            if normalized_link.startswith(mapped_link):
                # If the path starts with the link path, replace it with the actual path
                return normalized_link.replace(mapped_link, target_path, 1)
        return link_path

    def _init_lora_paths(self) -> List[str]:
        """Initialize and validate LoRA paths from ComfyUI settings"""
        try:
            raw_paths = folder_paths.get_folder_paths("loras")

            # Normalize and resolve symlinks, storing a mapping from resolved -> original
            path_map = {}
            for path in raw_paths:
                if os.path.exists(path):
                    real_path = os.path.normpath(os.path.realpath(path)).replace(os.sep, '/')
                    path_map[real_path] = path_map.get(real_path, path.replace(os.sep, "/"))  # preserve first seen

            # Now sort and use only the deduplicated paths
            unique_paths = sorted(path_map.values(), key=lambda p: p.lower())
            logger.info("Found LoRA roots:" + ("\n - " + "\n - ".join(unique_paths) if unique_paths else "[]"))

            if not unique_paths:
                logger.warning("No valid loras folders found in ComfyUI configuration")
                return []

            # Initialize path mappings
            for original_path in unique_paths:
                real_path = os.path.normpath(os.path.realpath(original_path)).replace(os.sep, '/')
                if real_path != original_path:
                    self.add_path_mapping(original_path, real_path)

            return unique_paths
        except Exception as e:
            logger.warning(f"Error initializing LoRA paths: {e}")
            return []

    def _init_checkpoint_paths(self) -> List[str]:
        """Initialize and validate checkpoint paths from ComfyUI settings"""
        try:
            # Get checkpoint paths from folder_paths
            raw_checkpoint_paths = folder_paths.get_folder_paths("checkpoints")
            raw_unet_paths = folder_paths.get_folder_paths("unet")

            # Normalize and resolve symlinks for checkpoints, storing resolved -> original
            checkpoint_map = {}
            for path in raw_checkpoint_paths:
                if os.path.exists(path):
                    real_path = os.path.normpath(os.path.realpath(path)).replace(os.sep, '/')
                    checkpoint_map[real_path] = checkpoint_map.get(real_path, path.replace(os.sep, "/"))  # preserve first seen

            # Normalize and resolve symlinks for unet, storing resolved -> original
            unet_map = {}
            for path in raw_unet_paths:
                if os.path.exists(path):
                    real_path = os.path.normpath(os.path.realpath(path)).replace(os.sep, '/')
                    unet_map[real_path] = unet_map.get(real_path, path.replace(os.sep, "/"))  # preserve first seen

            # Merge both maps and deduplicate by real path
            merged_map = {}
            for real_path, orig_path in {**checkpoint_map, **unet_map}.items():
                if real_path not in merged_map:
                    merged_map[real_path] = orig_path

            # Now sort and use only the deduplicated paths
            unique_paths = sorted(merged_map.values(), key=lambda p: p.lower())

            # Split back into checkpoint and unet roots for class properties
            self.checkpoints_roots = [p for p in unique_paths if p in checkpoint_map.values()]
            self.unet_roots = [p for p in unique_paths if p in unet_map.values()]

            all_paths = unique_paths

            logger.info("Found checkpoint roots:" + ("\n - " + "\n - ".join(all_paths) if all_paths else "[]"))

            if not all_paths:
                logger.warning("No valid checkpoint folders found in ComfyUI configuration")
                return []

            # Initialize path mappings
            for original_path in all_paths:
                real_path = os.path.normpath(os.path.realpath(original_path)).replace(os.sep, '/')
                if real_path != original_path:
                    self.add_path_mapping(original_path, real_path)

            return all_paths
        except Exception as e:
            logger.warning(f"Error initializing checkpoint paths: {e}")
            return []

    def _init_embedding_paths(self) -> List[str]:
        """Initialize and validate embedding paths from ComfyUI settings"""
        try:
            raw_paths = folder_paths.get_folder_paths("embeddings")

            # Normalize and resolve symlinks, storing a mapping from resolved -> original
            path_map = {}
            for path in raw_paths:
                if os.path.exists(path):
                    real_path = os.path.normpath(os.path.realpath(path)).replace(os.sep, '/')
                    path_map[real_path] = path_map.get(real_path, path.replace(os.sep, "/"))  # preserve first seen

            # Now sort and use only the deduplicated paths
            unique_paths = sorted(path_map.values(), key=lambda p: p.lower())
            logger.info("Found embedding roots:" + ("\n - " + "\n - ".join(unique_paths) if unique_paths else "[]"))

            if not unique_paths:
                logger.warning("No valid embeddings folders found in ComfyUI configuration")
                return []

            for original_path in unique_paths:
                real_path = os.path.normpath(os.path.realpath(original_path)).replace(os.sep, '/')
                if real_path != original_path:
                    self.add_path_mapping(original_path, real_path)

            return unique_paths
        except Exception as e:
            logger.warning(f"Error initializing embedding paths: {e}")
            return []

    def get_preview_static_url(self, preview_path: str) -> str:
        """Convert local preview path to static URL"""
        # ... (remainder unchanged)
```
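Based on `save_folder_paths_to_settings` above, the generated `settings.json` ends up shaped like the following sketch (the paths are illustrative placeholders):

```json
{
  "folder_paths": {
    "loras": ["C:/ComfyUI/models/loras"],
    "checkpoints": ["C:/ComfyUI/models/checkpoints"],
    "unet": ["C:/ComfyUI/models/unet"],
    "embeddings": ["C:/ComfyUI/models/embeddings"]
  },
  "default_lora_root": "C:/ComfyUI/models/loras",
  "default_checkpoint_root": "C:/ComfyUI/models/checkpoints",
  "default_embedding_root": "C:/ComfyUI/models/embeddings"
}
```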

### LoRA Manager entry point (file name not shown in this capture)

```python
import asyncio
import sys
import os
import logging
from pathlib import Path
from server import PromptServer  # type: ignore

from .config import config
from .services.model_service_factory import ModelServiceFactory, register_default_model_types
from .routes.recipe_routes import RecipeRoutes
from .routes.stats_routes import StatsRoutes
from .routes.update_routes import UpdateRoutes
from .routes.misc_routes import MiscRoutes
from .routes.example_images_routes import ExampleImagesRoutes
from .services.service_registry import ServiceRegistry
from .services.settings_manager import settings
from .utils.example_images_migration import ExampleImagesMigration
from .services.websocket_manager import ws_manager

logger = logging.getLogger(__name__)

# Check if we're in standalone mode
STANDALONE_MODE = 'nodes' not in sys.modules

class LoraManager:
    """Main entry point for LoRA Manager plugin"""

    @classmethod
    def add_routes(cls):
        """Initialize and register all routes using the new refactored architecture"""
        app = PromptServer.instance.app

        # Configure aiohttp access logger to be less verbose
        logging.getLogger('aiohttp.access').setLevel(logging.WARNING)

        # Add specific suppression for connection reset errors
        class ConnectionResetFilter(logging.Filter):
            def filter(self, record):
                # Filter out connection reset errors that are not critical
                if "ConnectionResetError" in str(record.getMessage()):
                    return False
                if "_call_connection_lost" in str(record.getMessage()):
                    return False
                if "WinError 10054" in str(record.getMessage()):
                    return False
                return True

        # Apply the filter to the asyncio logger
        asyncio_logger = logging.getLogger("asyncio")
        asyncio_logger.addFilter(ConnectionResetFilter())

        added_targets = set()  # Track already added target paths

        # Add static route for example images if the path exists in settings
        example_images_path = settings.get('example_images_path')
        logger.info(f"Example images path: {example_images_path}")
        if example_images_path and os.path.exists(example_images_path):
            app.router.add_static('/example_images_static', example_images_path)
            logger.info(f"Added static route for example images: /example_images_static -> {example_images_path}")

        # Add static routes for each lora root
        # (the first lines of this loop are elided in the capture; they are
        # reconstructed here from the identical checkpoint/embedding loops below)
        for idx, root in enumerate(config.loras_roots, start=1):
            preview_path = f'/loras_static/root{idx}/preview'

            real_root = root
            if root in config._path_mappings.values():
                for target, link in config._path_mappings.items():
                    if link == root:
                        real_root = target
                        break
            # Add static route for the original path
            app.router.add_static(preview_path, real_root)
            logger.info(f"Added static route {preview_path} -> {real_root}")

            # Record route mapping
            config.add_route_mapping(real_root, preview_path)
            added_targets.add(real_root)

        # Add static routes for each checkpoint root
        for idx, root in enumerate(config.base_models_roots, start=1):
            preview_path = f'/checkpoints_static/root{idx}/preview'

            real_root = root
            if root in config._path_mappings.values():
                for target, link in config._path_mappings.items():
                    if link == root:
                        real_root = target
                        break
            # Add static route for the original path
            app.router.add_static(preview_path, real_root)
            logger.info(f"Added static route {preview_path} -> {real_root}")

            # Record route mapping
            config.add_route_mapping(real_root, preview_path)
            added_targets.add(real_root)

        # Add static routes for each embedding root
        for idx, root in enumerate(config.embeddings_roots, start=1):
            preview_path = f'/embeddings_static/root{idx}/preview'

            real_root = root
            if root in config._path_mappings.values():
                for target, link in config._path_mappings.items():
                    if link == root:
                        real_root = target
                        break
            # Add static route for the original path
            app.router.add_static(preview_path, real_root)
            logger.info(f"Added static route {preview_path} -> {real_root}")

            # Record route mapping
            config.add_route_mapping(real_root, preview_path)
            added_targets.add(real_root)

        # Add static routes for symlink target paths
        link_idx = {
            'lora': 1,
            'checkpoint': 1,
            'embedding': 1
        }

        for target_path, link_path in config._path_mappings.items():
            if target_path not in added_targets:
                # Determine whether this is a checkpoint, lora, or embedding link based on path
                is_checkpoint = any(cp_root in link_path for cp_root in config.base_models_roots)
                is_checkpoint = is_checkpoint or any(cp_root in target_path for cp_root in config.base_models_roots)
                is_embedding = any(emb_root in link_path for emb_root in config.embeddings_roots)
                is_embedding = is_embedding or any(emb_root in target_path for emb_root in config.embeddings_roots)

                if is_checkpoint:
                    route_path = f'/checkpoints_static/link_{link_idx["checkpoint"]}/preview'
                    link_idx["checkpoint"] += 1
                elif is_embedding:
                    route_path = f'/embeddings_static/link_{link_idx["embedding"]}/preview'
                    link_idx["embedding"] += 1
                else:
                    route_path = f'/loras_static/link_{link_idx["lora"]}/preview'
                    link_idx["lora"] += 1

                try:
                    app.router.add_static(route_path, Path(target_path).resolve(strict=False))
                    logger.info(f"Added static route for link target {route_path} -> {target_path}")
                    config.add_route_mapping(target_path, route_path)
                    added_targets.add(target_path)
                except Exception as e:
                    logger.warning(f"Failed to add static route on initialization for {target_path}: {e}")
                    continue

        # Add static route for plugin assets
        app.router.add_static('/loras_static', config.static_path)

        # Register default model types with the factory
        register_default_model_types()

        # Set up all model routes using the factory
        ModelServiceFactory.setup_all_routes(app)

        # Set up non-model-specific routes
        stats_routes = StatsRoutes()
        stats_routes.setup_routes(app)
        RecipeRoutes.setup_routes(app)
        UpdateRoutes.setup_routes(app)
        MiscRoutes.setup_routes(app)
        ExampleImagesRoutes.setup_routes(app)

        # Set up WebSocket routes that are shared across all model types
        app.router.add_get('/ws/fetch-progress', ws_manager.handle_connection)
        app.router.add_get('/ws/download-progress', ws_manager.handle_download_connection)
        app.router.add_get('/ws/init-progress', ws_manager.handle_init_connection)

        # Schedule service initialization
        app.on_startup.append(lambda app: cls._initialize_services())

        # Add cleanup
        app.on_shutdown.append(cls._cleanup)

        logger.info(f"LoRA Manager: Set up routes for {len(ModelServiceFactory.get_registered_types())} model types: {', '.join(ModelServiceFactory.get_registered_types())}")

    @classmethod
    async def _initialize_services(cls):
        """Initialize all services using the ServiceRegistry"""
        try:
            # Initialize CivitaiClient first to ensure it's ready for other services
            await ServiceRegistry.get_civitai_client()

            # Register DownloadManager with the ServiceRegistry
            await ServiceRegistry.get_download_manager()

            # Initialize WebSocket manager
            await ServiceRegistry.get_websocket_manager()

            # Initialize scanners in the background
            lora_scanner = await ServiceRegistry.get_lora_scanner()
            checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
            embedding_scanner = await ServiceRegistry.get_embedding_scanner()

            # Initialize recipe scanner if needed
            recipe_scanner = await ServiceRegistry.get_recipe_scanner()

            # Create low-priority initialization tasks
            asyncio.create_task(lora_scanner.initialize_in_background(), name='lora_cache_init')
            asyncio.create_task(checkpoint_scanner.initialize_in_background(), name='checkpoint_cache_init')
            asyncio.create_task(embedding_scanner.initialize_in_background(), name='embedding_cache_init')
            asyncio.create_task(recipe_scanner.initialize_in_background(), name='recipe_cache_init')

            await ExampleImagesMigration.check_and_run_migrations()

            logger.info("LoRA Manager: All services initialized and background tasks scheduled")

        except Exception as e:
            logger.error(f"LoRA Manager: Error initializing services: {e}", exc_info=True)

    @classmethod
    async def _cleanup(cls, app):
        """Cleanup resources using ServiceRegistry"""
        try:
            logger.info("LoRA Manager: Cleaning up services")

            # Close CivitaiClient gracefully
            civitai_client = await ServiceRegistry.get_service("civitai_client")
            if civitai_client:
                await civitai_client.close()
                logger.info("Closed CivitaiClient connection")

        except Exception as e:
            logger.error(f"Error during cleanup: {e}", exc_info=True)
```

### py/metadata_collector/__init__.py (new file, 32 lines)

```python
import os
import importlib
import sys

# Check if running in standalone mode
standalone_mode = 'nodes' not in sys.modules

if not standalone_mode:
    from .metadata_hook import MetadataHook
    from .metadata_registry import MetadataRegistry

    def init():
        # Install hooks to collect metadata during execution
        MetadataHook.install()

        # Initialize registry
        registry = MetadataRegistry()

        print("ComfyUI Metadata Collector initialized")

    def get_metadata(prompt_id=None):
        """Helper function to get metadata from the registry"""
        registry = MetadataRegistry()
        return registry.get_metadata(prompt_id)
else:
    # Standalone mode - provide dummy implementations
    def init():
        print("ComfyUI Metadata Collector disabled in standalone mode")

    def get_metadata(prompt_id=None):
        """Dummy implementation for standalone mode"""
        return {}
```

### py/metadata_collector/constants.py (new file, 13 lines)

```python
"""Constants used by the metadata collector"""

# Metadata categories
MODELS = "models"
PROMPTS = "prompts"
SAMPLING = "sampling"
LORAS = "loras"
SIZE = "size"
IMAGES = "images"
IS_SAMPLER = "is_sampler"  # New constant to mark sampler nodes

# Complete list of categories to track
METADATA_CATEGORIES = [MODELS, PROMPTS, SAMPLING, LORAS, SIZE, IMAGES]
```

### py/metadata_collector/metadata_hook.py (new file, 204 lines)

```python
import sys
import inspect
from .metadata_registry import MetadataRegistry

class MetadataHook:
    """Install hooks for metadata collection"""

    @staticmethod
    def install():
        """Install hooks to collect metadata during execution"""
        try:
            # Import ComfyUI's execution module
            execution = None
            try:
                # Try direct import first
                import execution  # type: ignore
            except ImportError:
                # Try to locate it from system modules
                for module_name in sys.modules:
                    if module_name.endswith('.execution'):
                        execution = sys.modules[module_name]
                        break

            # If we can't find the execution module, we can't install hooks
            if execution is None:
                print("Could not locate ComfyUI execution module, metadata collection disabled")
                return

            # Detect whether we're using the new async version of ComfyUI
            is_async = False
            map_node_func_name = '_map_node_over_list'

            if hasattr(execution, '_async_map_node_over_list'):
                is_async = inspect.iscoroutinefunction(execution._async_map_node_over_list)
                map_node_func_name = '_async_map_node_over_list'
            elif hasattr(execution, '_map_node_over_list'):
                is_async = inspect.iscoroutinefunction(execution._map_node_over_list)

            if is_async:
                print("Detected async ComfyUI execution, installing async metadata hooks")
                MetadataHook._install_async_hooks(execution, map_node_func_name)
            else:
                print("Detected sync ComfyUI execution, installing sync metadata hooks")
                MetadataHook._install_sync_hooks(execution)

            print("Metadata collection hooks installed for runtime values")

        except Exception as e:
            print(f"Error installing metadata hooks: {str(e)}")

    @staticmethod
    def _install_sync_hooks(execution):
        """Install hooks for the synchronous execution model"""
        # Store the original _map_node_over_list function
        original_map_node_over_list = execution._map_node_over_list

        # Define the wrapped _map_node_over_list function
        def map_node_over_list_with_metadata(obj, input_data_all, func, allow_interrupt=False, execution_block_cb=None, pre_execute_cb=None):
            # Only collect metadata when calling the main function of nodes
            if func == obj.FUNCTION and hasattr(obj, '__class__'):
                try:
                    # Get the current prompt_id from the registry
                    registry = MetadataRegistry()
                    prompt_id = registry.current_prompt_id

                    if prompt_id is not None:
                        # Get node class type
                        class_type = obj.__class__.__name__

                        # Unique ID might be available through the obj if it has a unique_id field
                        node_id = getattr(obj, 'unique_id', None)
                        if node_id is None and pre_execute_cb:
                            # Try to extract node_id through reflection on GraphBuilder.set_default_prefix
                            frame = inspect.currentframe()
                            while frame:
                                if 'unique_id' in frame.f_locals:
                                    node_id = frame.f_locals['unique_id']
                                    break
                                frame = frame.f_back

                        # Record inputs before execution
                        if node_id is not None:
                            registry.record_node_execution(node_id, class_type, input_data_all, None)
                except Exception as e:
                    print(f"Error collecting metadata (pre-execution): {str(e)}")

            # Execute the original function
            results = original_map_node_over_list(obj, input_data_all, func, allow_interrupt, execution_block_cb, pre_execute_cb)

            # After execution, collect outputs for relevant nodes
            if func == obj.FUNCTION and hasattr(obj, '__class__'):
                try:
                    # Get the current prompt_id from the registry
                    registry = MetadataRegistry()
                    prompt_id = registry.current_prompt_id

                    if prompt_id is not None:
                        # Get node class type
                        class_type = obj.__class__.__name__

                        # Unique ID might be available through the obj if it has a unique_id field
                        node_id = getattr(obj, 'unique_id', None)
                        if node_id is None and pre_execute_cb:
                            # Try to extract node_id through reflection
                            frame = inspect.currentframe()
                            while frame:
                                if 'unique_id' in frame.f_locals:
                                    node_id = frame.f_locals['unique_id']
                                    break
                                frame = frame.f_back

                        # Record outputs after execution
                        if node_id is not None:
                            registry.update_node_execution(node_id, class_type, results)
                except Exception as e:
                    print(f"Error collecting metadata (post-execution): {str(e)}")

            return results

        # Also hook the execute function to track the current prompt_id
        original_execute = execution.execute

        def execute_with_prompt_tracking(*args, **kwargs):
            if len(args) >= 7:  # Check if we have enough arguments
                server, prompt, caches, node_id, extra_data, executed, prompt_id = args[:7]
                registry = MetadataRegistry()

                # Start collection if this is a new prompt
                if not registry.current_prompt_id or registry.current_prompt_id != prompt_id:
                    registry.start_collection(prompt_id)

                # Store the dynprompt reference for node lookups
                if hasattr(prompt, 'original_prompt'):
                    registry.set_current_prompt(prompt)

            # Execute the original function
            return original_execute(*args, **kwargs)

        # Replace the functions
        execution._map_node_over_list = map_node_over_list_with_metadata
        execution.execute = execute_with_prompt_tracking

    @staticmethod
    def _install_async_hooks(execution, map_node_func_name='_async_map_node_over_list'):
        """Install hooks for the asynchronous execution model"""
        # Store the original _async_map_node_over_list function
        original_map_node_over_list = getattr(execution, map_node_func_name)

        # Wrapped async function, compatible with both stable and nightly
        async def async_map_node_over_list_with_metadata(prompt_id, unique_id, obj, input_data_all, func, allow_interrupt=False, execution_block_cb=None, pre_execute_cb=None, *args, **kwargs):
            hidden_inputs = kwargs.get('hidden_inputs', None)
            # Only collect metadata when calling the main function of nodes
            if func == obj.FUNCTION and hasattr(obj, '__class__'):
                try:
                    registry = MetadataRegistry()
                    if prompt_id is not None:
                        class_type = obj.__class__.__name__
                        node_id = unique_id
                        if node_id is not None:
                            registry.record_node_execution(node_id, class_type, input_data_all, None)
                except Exception as e:
                    print(f"Error collecting metadata (pre-execution): {str(e)}")

            # Call the original function with all args/kwargs
            results = await original_map_node_over_list(
                prompt_id, unique_id, obj, input_data_all, func,
                allow_interrupt, execution_block_cb, pre_execute_cb, *args, **kwargs
            )

            if func == obj.FUNCTION and hasattr(obj, '__class__'):
                try:
                    registry = MetadataRegistry()
                    if prompt_id is not None:
                        class_type = obj.__class__.__name__
                        node_id = unique_id
                        if node_id is not None:
                            registry.update_node_execution(node_id, class_type, results)
                except Exception as e:
                    print(f"Error collecting metadata (post-execution): {str(e)}")

            return results

        # Also hook the execute function to track the current prompt_id
        original_execute = execution.execute

        async def async_execute_with_prompt_tracking(*args, **kwargs):
            if len(args) >= 7:  # Check if we have enough arguments
                server, prompt, caches, node_id, extra_data, executed, prompt_id = args[:7]
                registry = MetadataRegistry()

                # Start collection if this is a new prompt
                if not registry.current_prompt_id or registry.current_prompt_id != prompt_id:
                    registry.start_collection(prompt_id)

                # Store the dynprompt reference for node lookups
                if hasattr(prompt, 'original_prompt'):
                    registry.set_current_prompt(prompt)

            # Execute the original function
            return await original_execute(*args, **kwargs)

        # Replace the functions with async versions
        setattr(execution, map_node_func_name, async_map_node_over_list_with_metadata)
        execution.execute = async_execute_with_prompt_tracking
```
456
py/metadata_collector/metadata_processor.py
Normal file
456
py/metadata_collector/metadata_processor.py
Normal file
@@ -0,0 +1,456 @@
import json
import sys
from .constants import IMAGES

# Check if running in standalone mode
standalone_mode = 'nodes' not in sys.modules

from .constants import MODELS, PROMPTS, SAMPLING, LORAS, SIZE, IS_SAMPLER

class MetadataProcessor:
    """Process and format collected metadata"""

    @staticmethod
    def find_primary_sampler(metadata, downstream_id=None):
        """
        Find the primary KSampler node that executed before the given downstream node

        Parameters:
        - metadata: The workflow metadata
        - downstream_id: Optional ID of a downstream node to help identify the specific primary sampler
        """
        if downstream_id is None:
            if IMAGES in metadata and "first_decode" in metadata[IMAGES]:
                downstream_id = metadata[IMAGES]["first_decode"]["node_id"]

        # If we have a downstream_id and execution_order, use it to narrow down potential samplers
        if downstream_id and "execution_order" in metadata:
            execution_order = metadata["execution_order"]

            # Find the index of the downstream node in the execution order
            if downstream_id in execution_order:
                downstream_index = execution_order.index(downstream_id)

                # Extract all sampler nodes that executed before the downstream node
                candidate_samplers = {}
                for i in range(downstream_index):
                    node_id = execution_order[i]
                    # Use IS_SAMPLER flag to identify true sampler nodes
                    if node_id in metadata.get(SAMPLING, {}) and metadata[SAMPLING][node_id].get(IS_SAMPLER, False):
                        candidate_samplers[node_id] = metadata[SAMPLING][node_id]

                # If we found candidate samplers, apply primary sampler logic to these candidates only
                if candidate_samplers:
                    # Collect potential primary samplers based on different criteria
                    custom_advanced_samplers = []
                    advanced_add_noise_samplers = []
                    high_denoise_samplers = []
                    max_denoise = -1
                    high_denoise_id = None

                    # First, check for SamplerCustomAdvanced among candidates
                    prompt = metadata.get("current_prompt")
                    if prompt and prompt.original_prompt:
                        for node_id in candidate_samplers:
                            node_info = prompt.original_prompt.get(node_id, {})
                            if node_info.get("class_type") == "SamplerCustomAdvanced":
                                custom_advanced_samplers.append(node_id)

                    # Next, check for KSamplerAdvanced with add_noise="enable" among candidates
                    for node_id, sampler_info in candidate_samplers.items():
                        parameters = sampler_info.get("parameters", {})
                        add_noise = parameters.get("add_noise")
                        if add_noise == "enable":
                            advanced_add_noise_samplers.append(node_id)

                    # Find the sampler with highest denoise value among candidates
                    for node_id, sampler_info in candidate_samplers.items():
                        parameters = sampler_info.get("parameters", {})
                        denoise = parameters.get("denoise")
                        if denoise is not None and denoise > max_denoise:
                            max_denoise = denoise
                            high_denoise_id = node_id

                    if high_denoise_id:
                        high_denoise_samplers.append(high_denoise_id)

                    # Combine all potential primary samplers
                    potential_samplers = custom_advanced_samplers + advanced_add_noise_samplers + high_denoise_samplers

                    # Find the most recent potential primary sampler (closest to downstream node)
                    for i in range(downstream_index - 1, -1, -1):
                        node_id = execution_order[i]
                        if node_id in potential_samplers:
                            return node_id, candidate_samplers[node_id]

                # If no potential sampler found from our criteria, return the most recent sampler
                if candidate_samplers:
                    for i in range(downstream_index - 1, -1, -1):
                        node_id = execution_order[i]
                        if node_id in candidate_samplers:
                            return node_id, candidate_samplers[node_id]

        # If no downstream_id provided or no suitable sampler found, fall back to original logic
        primary_sampler = None
        primary_sampler_id = None
        max_denoise = -1

        # First, check for SamplerCustomAdvanced
        prompt = metadata.get("current_prompt")
        if prompt and prompt.original_prompt:
            for node_id, node_info in prompt.original_prompt.items():
                if node_info.get("class_type") == "SamplerCustomAdvanced":
                    # Check if the node is in SAMPLING and has IS_SAMPLER flag
                    if node_id in metadata.get(SAMPLING, {}) and metadata[SAMPLING][node_id].get(IS_SAMPLER, False):
                        return node_id, metadata[SAMPLING][node_id]

        # Next, check for KSamplerAdvanced with add_noise="enable" using IS_SAMPLER flag
        for node_id, sampler_info in metadata.get(SAMPLING, {}).items():
            # Skip if not marked as a sampler
            if not sampler_info.get(IS_SAMPLER, False):
                continue

            parameters = sampler_info.get("parameters", {})
            add_noise = parameters.get("add_noise")
            if add_noise == "enable":
                primary_sampler = sampler_info
                primary_sampler_id = node_id
                break

        # If no specialized sampler found, find the sampler with highest denoise value
        if primary_sampler is None:
            for node_id, sampler_info in metadata.get(SAMPLING, {}).items():
                # Skip if not marked as a sampler
                if not sampler_info.get(IS_SAMPLER, False):
                    continue

                parameters = sampler_info.get("parameters", {})
                denoise = parameters.get("denoise")
                if denoise is not None and denoise > max_denoise:
                    max_denoise = denoise
                    primary_sampler = sampler_info
                    primary_sampler_id = node_id

        return primary_sampler_id, primary_sampler

    @staticmethod
    def trace_node_input(prompt, node_id, input_name, target_class=None, max_depth=10):
        """
        Trace an input connection from a node to find the source node

        Parameters:
        - prompt: The prompt object containing node connections
        - node_id: ID of the starting node
        - input_name: Name of the input to trace
        - target_class: Optional class name to search for (e.g., "CLIPTextEncode")
        - max_depth: Maximum depth to follow the node chain to prevent infinite loops

        Returns:
        - node_id of the found node, or None if not found
        """
        if not prompt or not prompt.original_prompt or node_id not in prompt.original_prompt:
            return None

        # For depth tracking
        current_depth = 0

        current_node_id = node_id
        current_input = input_name

        # If we're just tracing to origin (no target_class), keep track of the last valid node
        last_valid_node = None

        while current_depth < max_depth:
            if current_node_id not in prompt.original_prompt:
                return last_valid_node if not target_class else None

            node_inputs = prompt.original_prompt[current_node_id].get("inputs", {})
            if current_input not in node_inputs:
                # We've reached a node without the specified input - this is our origin node
                # if we're not looking for a specific target_class
                return current_node_id if not target_class else None

            input_value = node_inputs[current_input]
            # Input connections are formatted as [node_id, output_index]
            if isinstance(input_value, list) and len(input_value) >= 2:
                found_node_id = input_value[0]  # Connected node_id

                # If we're looking for a specific node class
                if target_class and prompt.original_prompt[found_node_id].get("class_type") == target_class:
                    return found_node_id

                # If we're not looking for a specific class, update the last valid node
                if not target_class:
                    last_valid_node = found_node_id

                # Continue tracing through intermediate nodes
                current_node_id = found_node_id
                # For most conditioning nodes, the input we want to follow is named "conditioning"
                if "conditioning" in prompt.original_prompt[current_node_id].get("inputs", {}):
                    current_input = "conditioning"
                else:
                    # If there's no "conditioning" input, return the current node
                    # if we're not looking for a specific target_class
                    return found_node_id if not target_class else None
            else:
                # We've reached a node with no further connections
                return last_valid_node if not target_class else None

            current_depth += 1

        # If we've reached max depth without finding target_class
        return last_valid_node if not target_class else None

    @staticmethod
    def find_primary_checkpoint(metadata):
        """Find the primary checkpoint model in the workflow"""
        if not metadata.get(MODELS):
            return None

        # In most workflows, there's only one checkpoint, so we can just take the first one
        for node_id, model_info in metadata.get(MODELS, {}).items():
            if model_info.get("type") == "checkpoint":
                return model_info.get("name")

        return None

    @staticmethod
    def match_conditioning_to_prompts(metadata, sampler_id):
        """
        Match conditioning objects from a sampler to prompts in metadata

        Parameters:
        - metadata: The workflow metadata
        - sampler_id: ID of the sampler node to match

        Returns:
        - Dictionary with 'prompt' and 'negative_prompt' if found
        """
        result = {
            "prompt": "",
            "negative_prompt": ""
        }

        # Check if we have stored conditioning objects for this sampler
        if sampler_id in metadata.get(PROMPTS, {}) and (
                "pos_conditioning" in metadata[PROMPTS][sampler_id] or
                "neg_conditioning" in metadata[PROMPTS][sampler_id]):

            pos_conditioning = metadata[PROMPTS][sampler_id].get("pos_conditioning")
            neg_conditioning = metadata[PROMPTS][sampler_id].get("neg_conditioning")

            # Helper function to recursively find prompt text for a conditioning object
            def find_prompt_text_for_conditioning(conditioning_obj, is_positive=True):
                if conditioning_obj is None:
                    return ""

                # Try to match conditioning objects with those stored by extractors
                for prompt_node_id, prompt_data in metadata[PROMPTS].items():
                    # For nodes with single conditioning output
                    if "conditioning" in prompt_data:
                        if id(prompt_data["conditioning"]) == id(conditioning_obj):
                            return prompt_data.get("text", "")

                    # For nodes with separate pos_conditioning and neg_conditioning outputs (like TSC_EfficientLoader)
                    if is_positive and "positive_encoded" in prompt_data:
                        if id(prompt_data["positive_encoded"]) == id(conditioning_obj):
                            if "positive_text" in prompt_data:
                                return prompt_data["positive_text"]
                            else:
                                orig_conditioning = prompt_data.get("orig_pos_cond", None)
                                if orig_conditioning is not None:
                                    # Recursively find the prompt text for the original conditioning
                                    return find_prompt_text_for_conditioning(orig_conditioning, is_positive=True)

                    if not is_positive and "negative_encoded" in prompt_data:
                        if id(prompt_data["negative_encoded"]) == id(conditioning_obj):
                            if "negative_text" in prompt_data:
                                return prompt_data["negative_text"]
                            else:
                                orig_conditioning = prompt_data.get("orig_neg_cond", None)
                                if orig_conditioning is not None:
                                    # Recursively find the prompt text for the original conditioning
                                    return find_prompt_text_for_conditioning(orig_conditioning, is_positive=False)

                return ""

            # Find prompt texts using the helper function
            result["prompt"] = find_prompt_text_for_conditioning(pos_conditioning, is_positive=True)
            result["negative_prompt"] = find_prompt_text_for_conditioning(neg_conditioning, is_positive=False)

        return result

    @staticmethod
    def extract_generation_params(metadata, id=None):
        """
        Extract generation parameters from metadata using node relationships

        Parameters:
        - metadata: The workflow metadata
        - id: Optional ID of a downstream node to help identify the specific primary sampler
        """
        params = {
            "prompt": "",
            "negative_prompt": "",
            "seed": None,
            "steps": None,
            "cfg_scale": None,
            "guidance": None,  # Add guidance parameter
            "sampler": None,
            "scheduler": None,
            "checkpoint": None,
            "loras": "",
            "size": None,
            "clip_skip": None
        }

        # Get the prompt object for node relationship tracing
        prompt = metadata.get("current_prompt")

        # Find the primary KSampler node
        primary_sampler_id, primary_sampler = MetadataProcessor.find_primary_sampler(metadata, id)

        # Directly get checkpoint from metadata instead of tracing
        checkpoint = MetadataProcessor.find_primary_checkpoint(metadata)
        if checkpoint:
            params["checkpoint"] = checkpoint

        # Check if guidance parameter exists in any sampling node
        for node_id, sampler_info in metadata.get(SAMPLING, {}).items():
            parameters = sampler_info.get("parameters", {})
            if "guidance" in parameters and parameters["guidance"] is not None:
                params["guidance"] = parameters["guidance"]
                break

        if primary_sampler:
            # Extract sampling parameters
            sampling_params = primary_sampler.get("parameters", {})
            # Handle both seed and noise_seed
            params["seed"] = sampling_params.get("seed") if sampling_params.get("seed") is not None else sampling_params.get("noise_seed")
            params["steps"] = sampling_params.get("steps")
            params["cfg_scale"] = sampling_params.get("cfg")
            params["sampler"] = sampling_params.get("sampler_name")
            params["scheduler"] = sampling_params.get("scheduler")

            if prompt and primary_sampler_id:
                # Check if this is a SamplerCustomAdvanced node
                is_custom_advanced = False
                if prompt.original_prompt and primary_sampler_id in prompt.original_prompt:
                    is_custom_advanced = prompt.original_prompt[primary_sampler_id].get("class_type") == "SamplerCustomAdvanced"

                if is_custom_advanced:
                    # For SamplerCustomAdvanced, trace specific inputs

                    # 1. Trace sigmas input to find BasicScheduler
                    scheduler_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "sigmas", "BasicScheduler", max_depth=5)
                    if scheduler_node_id and scheduler_node_id in metadata.get(SAMPLING, {}):
                        scheduler_params = metadata[SAMPLING][scheduler_node_id].get("parameters", {})
                        params["steps"] = scheduler_params.get("steps")
                        params["scheduler"] = scheduler_params.get("scheduler")

                    # 2. Trace sampler input to find KSamplerSelect
                    sampler_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "sampler", "KSamplerSelect", max_depth=5)
                    if sampler_node_id and sampler_node_id in metadata.get(SAMPLING, {}):
                        sampler_params = metadata[SAMPLING][sampler_node_id].get("parameters", {})
                        params["sampler"] = sampler_params.get("sampler_name")

                    # 3. Trace guider input for CFGGuider and CLIPTextEncode
                    guider_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "guider", max_depth=5)
                    if guider_node_id and guider_node_id in prompt.original_prompt:
                        # Check if the guider node is a CFGGuider
                        if prompt.original_prompt[guider_node_id].get("class_type") == "CFGGuider":
                            # Extract cfg value from the CFGGuider
                            if guider_node_id in metadata.get(SAMPLING, {}):
                                cfg_params = metadata[SAMPLING][guider_node_id].get("parameters", {})
                                params["cfg_scale"] = cfg_params.get("cfg")

                            # Find CLIPTextEncode for positive prompt
                            positive_node_id = MetadataProcessor.trace_node_input(prompt, guider_node_id, "positive", "CLIPTextEncode", max_depth=10)
                            if positive_node_id and positive_node_id in metadata.get(PROMPTS, {}):
                                params["prompt"] = metadata[PROMPTS][positive_node_id].get("text", "")

                            # Find CLIPTextEncode for negative prompt
                            negative_node_id = MetadataProcessor.trace_node_input(prompt, guider_node_id, "negative", "CLIPTextEncode", max_depth=10)
                            if negative_node_id and negative_node_id in metadata.get(PROMPTS, {}):
                                params["negative_prompt"] = metadata[PROMPTS][negative_node_id].get("text", "")
                        else:
                            positive_node_id = MetadataProcessor.trace_node_input(prompt, guider_node_id, "conditioning", max_depth=10)
                            if positive_node_id and positive_node_id in metadata.get(PROMPTS, {}):
                                params["prompt"] = metadata[PROMPTS][positive_node_id].get("text", "")

                else:
                    # For standard samplers, match conditioning objects to prompts
                    prompt_results = MetadataProcessor.match_conditioning_to_prompts(metadata, primary_sampler_id)
                    params["prompt"] = prompt_results["prompt"]
                    params["negative_prompt"] = prompt_results["negative_prompt"]

                    # If prompts were still not found, fall back to tracing connections
                    if not params["prompt"]:
                        # Original tracing for standard samplers
                        # Trace positive prompt - look specifically for CLIPTextEncode
                        positive_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "positive", max_depth=10)
                        if positive_node_id and positive_node_id in metadata.get(PROMPTS, {}):
                            params["prompt"] = metadata[PROMPTS][positive_node_id].get("text", "")
                        else:
                            # If CLIPTextEncode is not found, try to find CLIPTextEncodeFlux
                            positive_flux_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "positive", "CLIPTextEncodeFlux", max_depth=10)
                            if positive_flux_node_id and positive_flux_node_id in metadata.get(PROMPTS, {}):
                                params["prompt"] = metadata[PROMPTS][positive_flux_node_id].get("text", "")

                        # Trace negative prompt - look specifically for CLIPTextEncode
                        negative_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "negative", max_depth=10)
                        if negative_node_id and negative_node_id in metadata.get(PROMPTS, {}):
                            params["negative_prompt"] = metadata[PROMPTS][negative_node_id].get("text", "")

            # Size extraction is same for all sampler types
            # Check if the sampler itself has size information (from latent_image)
            if primary_sampler_id in metadata.get(SIZE, {}):
                width = metadata[SIZE][primary_sampler_id].get("width")
                height = metadata[SIZE][primary_sampler_id].get("height")
                if width and height:
                    params["size"] = f"{width}x{height}"

        # Extract LoRAs using the standardized format
        lora_parts = []
        for node_id, lora_info in metadata.get(LORAS, {}).items():
            # Access the lora_list from the standardized format
            lora_list = lora_info.get("lora_list", [])
            for lora in lora_list:
                name = lora.get("name", "unknown")
                strength = lora.get("strength", 1.0)
                lora_parts.append(f"<lora:{name}:{strength}>")

        params["loras"] = " ".join(lora_parts)

        # Set default clip_skip value
        params["clip_skip"] = "1"  # Common default

        return params

    @staticmethod
    def to_dict(metadata, id=None):
        """
        Convert extracted metadata to the ComfyUI output.json format

        Parameters:
        - metadata: The workflow metadata
        - id: Optional ID of a downstream node to help identify the specific primary sampler
        """
        if standalone_mode:
            # Return empty dictionary in standalone mode
            return {}

        params = MetadataProcessor.extract_generation_params(metadata, id)

        # Convert all values to strings to match output.json format
        for key in params:
            if params[key] is not None:
                params[key] = str(params[key])

        return params

    @staticmethod
    def to_json(metadata, id=None):
        """Convert metadata to JSON string"""
        params = MetadataProcessor.to_dict(metadata, id)
        return json.dumps(params, indent=4)
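Not part of the diff: a minimal usage sketch for the processor above, run outside ComfyUI. It assumes the repository root is on sys.path, that py/ is importable as a package, and that the constants module exposes the names used below; the node ids and parameter values are invented for illustration. It calls extract_generation_params directly because to_dict deliberately short-circuits to an empty dict in standalone mode.

from py.metadata_collector.metadata_processor import MetadataProcessor
from py.metadata_collector.constants import MODELS, SAMPLING, IS_SAMPLER

# Hand-built metadata imitating what the registry collects for one KSampler
# ("3") and one checkpoint loader ("4"); both node ids are hypothetical.
metadata = {
    SAMPLING: {
        "3": {
            "parameters": {"seed": 42, "steps": 20, "cfg": 7.0,
                           "sampler_name": "euler", "scheduler": "normal",
                           "denoise": 1.0},
            "node_id": "3",
            IS_SAMPLER: True,
        },
    },
    MODELS: {
        "4": {"name": "sd_xl_base_1.0.safetensors", "type": "checkpoint",
              "node_id": "4"},
    },
}

params = MetadataProcessor.extract_generation_params(metadata)
print(params["seed"], params["checkpoint"])  # 42 sd_xl_base_1.0.safetensors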
275  py/metadata_collector/metadata_registry.py  Normal file
@@ -0,0 +1,275 @@
import time
from nodes import NODE_CLASS_MAPPINGS
from .node_extractors import NODE_EXTRACTORS, GenericNodeExtractor
from .constants import METADATA_CATEGORIES, IMAGES

class MetadataRegistry:
    """A singleton registry to store and retrieve workflow metadata"""
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._reset()
        return cls._instance

    def _reset(self):
        self.current_prompt_id = None
        self.current_prompt = None
        self.metadata = {}
        self.prompt_metadata = {}
        self.executed_nodes = set()

        # Node-level cache for metadata
        self.node_cache = {}

        # Limit the number of stored prompts
        self.max_prompt_history = 3

        # Categories we want to track and retrieve from cache
        self.metadata_categories = METADATA_CATEGORIES

    def _clean_old_prompts(self):
        """Clean up old prompt metadata, keeping only recent ones"""
        if len(self.prompt_metadata) <= self.max_prompt_history:
            return

        # Sort all prompt_ids by timestamp
        sorted_prompts = sorted(
            self.prompt_metadata.keys(),
            key=lambda pid: self.prompt_metadata[pid].get("timestamp", 0)
        )

        # Remove oldest records
        prompts_to_remove = sorted_prompts[:len(sorted_prompts) - self.max_prompt_history]
        for pid in prompts_to_remove:
            del self.prompt_metadata[pid]

    def start_collection(self, prompt_id):
        """Begin metadata collection for a new prompt"""
        self.current_prompt_id = prompt_id
        self.executed_nodes = set()
        self.prompt_metadata[prompt_id] = {
            category: {} for category in METADATA_CATEGORIES
        }
        # Add additional metadata fields
        self.prompt_metadata[prompt_id].update({
            "execution_order": [],
            "current_prompt": None,  # Will store the prompt object
            "timestamp": time.time()
        })

        # Clean up old prompt data
        self._clean_old_prompts()

    def set_current_prompt(self, prompt):
        """Set the current prompt object reference"""
        self.current_prompt = prompt
        if self.current_prompt_id and self.current_prompt_id in self.prompt_metadata:
            # Store the prompt in the metadata for later relationship tracing
            self.prompt_metadata[self.current_prompt_id]["current_prompt"] = prompt

    def get_metadata(self, prompt_id=None):
        """Get collected metadata for a prompt"""
        key = prompt_id if prompt_id is not None else self.current_prompt_id
        if key not in self.prompt_metadata:
            return {}

        metadata = self.prompt_metadata[key]

        # If we have a current prompt object, check for non-executed nodes
        prompt_obj = metadata.get("current_prompt")
        if prompt_obj and hasattr(prompt_obj, "original_prompt"):
            original_prompt = prompt_obj.original_prompt

            # Fill in missing metadata from cache for nodes that weren't executed
            self._fill_missing_metadata(key, original_prompt)

        return self.prompt_metadata.get(key, {})

    def _fill_missing_metadata(self, prompt_id, original_prompt):
        """Fill missing metadata from cache for non-executed nodes"""
        if not original_prompt:
            return

        executed_nodes = self.executed_nodes
        metadata = self.prompt_metadata[prompt_id]

        # Iterate through nodes in the original prompt
        for node_id, node_data in original_prompt.items():
            # Skip if already executed in this run
            if node_id in executed_nodes:
                continue

            # Get the node type from the prompt (this is the key in NODE_CLASS_MAPPINGS)
            prompt_class_type = node_data.get("class_type")
            if not prompt_class_type:
                continue

            # Convert to actual class name (which is what we use in our cache)
            class_type = prompt_class_type
            if prompt_class_type in NODE_CLASS_MAPPINGS:
                class_obj = NODE_CLASS_MAPPINGS[prompt_class_type]
                class_type = class_obj.__name__

            # Create cache key using the actual class name
            cache_key = f"{node_id}:{class_type}"

            # Check if this node type is relevant for metadata collection
            if class_type in NODE_EXTRACTORS:
                # Check if we have cached metadata for this node
                if cache_key in self.node_cache:
                    cached_data = self.node_cache[cache_key]

                    # Apply cached metadata to the current metadata
                    for category in self.metadata_categories:
                        if category in cached_data and node_id in cached_data[category]:
                            if node_id not in metadata[category]:
                                metadata[category][node_id] = cached_data[category][node_id]

    def record_node_execution(self, node_id, class_type, inputs, outputs):
        """Record information about a node's execution"""
        if not self.current_prompt_id:
            return

        # Add to execution order and mark as executed
        if node_id not in self.executed_nodes:
            self.executed_nodes.add(node_id)
            self.prompt_metadata[self.current_prompt_id]["execution_order"].append(node_id)

        # Process inputs to simplify working with them
        processed_inputs = {}
        for input_name, input_values in inputs.items():
            if isinstance(input_values, list) and len(input_values) > 0:
                # For single values, just use the first one (most common case)
                processed_inputs[input_name] = input_values[0]
            else:
                processed_inputs[input_name] = input_values

        # Extract node-specific metadata
        extractor = NODE_EXTRACTORS.get(class_type, GenericNodeExtractor)
        extractor.extract(
            node_id,
            processed_inputs,
            outputs,
            self.prompt_metadata[self.current_prompt_id]
        )

        # Cache this node's metadata
        self._cache_node_metadata(node_id, class_type)

    def update_node_execution(self, node_id, class_type, outputs):
        """Update node metadata with output information"""
        if not self.current_prompt_id:
            return

        # Process outputs to make them more usable
        processed_outputs = outputs

        # Use the same extractor to update with outputs
        extractor = NODE_EXTRACTORS.get(class_type, GenericNodeExtractor)
        if hasattr(extractor, 'update'):
            extractor.update(
                node_id,
                processed_outputs,
                self.prompt_metadata[self.current_prompt_id]
            )

        # Update the cached metadata for this node
        self._cache_node_metadata(node_id, class_type)

    def _cache_node_metadata(self, node_id, class_type):
        """Cache the metadata for a specific node"""
        if not self.current_prompt_id or not node_id or not class_type:
            return

        # Create a cache key combining node_id and class_type
        cache_key = f"{node_id}:{class_type}"

        # Create a shallow copy of the node's metadata
        node_metadata = {}
        current_metadata = self.prompt_metadata[self.current_prompt_id]

        for category in self.metadata_categories:
            if category in current_metadata and node_id in current_metadata[category]:
                if category not in node_metadata:
                    node_metadata[category] = {}
                node_metadata[category][node_id] = current_metadata[category][node_id]

        # Save to cache if we have any metadata for this node
        if any(node_metadata.values()):
            self.node_cache[cache_key] = node_metadata

    def clear_unused_cache(self):
        """Clean up node_cache entries that are no longer in use"""
        # Collect all node_ids currently in prompt_metadata
        active_node_ids = set()
        for prompt_data in self.prompt_metadata.values():
            for category in self.metadata_categories:
                if category in prompt_data:
                    active_node_ids.update(prompt_data[category].keys())

        # Find cache keys that are no longer needed
        keys_to_remove = []
        for cache_key in self.node_cache:
            node_id = cache_key.split(':')[0]
            if node_id not in active_node_ids:
                keys_to_remove.append(cache_key)

        # Remove cache entries that are no longer needed
        for key in keys_to_remove:
            del self.node_cache[key]

    def clear_metadata(self, prompt_id=None):
        """Clear metadata for a specific prompt or reset all data"""
        if prompt_id is not None:
            if prompt_id in self.prompt_metadata:
                del self.prompt_metadata[prompt_id]
                # Clean up cache after removing prompt
                self.clear_unused_cache()
        else:
            # Reset all data
            self._reset()

    def get_first_decoded_image(self, prompt_id=None):
        """Get the first decoded image result"""
        key = prompt_id if prompt_id is not None else self.current_prompt_id
        if key not in self.prompt_metadata:
            return None

        metadata = self.prompt_metadata[key]
        if IMAGES in metadata and "first_decode" in metadata[IMAGES]:
            image_data = metadata[IMAGES]["first_decode"]["image"]

            # If it's an image batch or tuple, handle various formats
            if isinstance(image_data, (list, tuple)) and len(image_data) > 0:
                # Return first element of list/tuple
                return image_data[0]

            # If it's a tensor, return as is for processing in the route handler
            return image_data

        # If no image is found in the current metadata, try to find it in the cache
        # This handles the case where VAEDecode was cached by ComfyUI and not executed
        prompt_obj = metadata.get("current_prompt")
        if prompt_obj and hasattr(prompt_obj, "original_prompt"):
            original_prompt = prompt_obj.original_prompt
            for node_id, node_data in original_prompt.items():
                class_type = node_data.get("class_type")
                if class_type and class_type in NODE_CLASS_MAPPINGS:
                    class_obj = NODE_CLASS_MAPPINGS[class_type]
                    class_name = class_obj.__name__
                    # Check if this is a VAEDecode node
                    if class_name == "VAEDecode":
                        # Try to find this node in the cache
                        cache_key = f"{node_id}:{class_name}"
                        if cache_key in self.node_cache:
                            cached_data = self.node_cache[cache_key]
                            if IMAGES in cached_data and node_id in cached_data[IMAGES]:
                                image_data = cached_data[IMAGES][node_id]["image"]
                                # Handle different image formats
                                if isinstance(image_data, (list, tuple)) and len(image_data) > 0:
                                    return image_data[0]
                                return image_data

        return None
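Not part of the diff: a sketch of how the execution hooks are expected to drive the registry. This only runs inside ComfyUI, since the module imports NODE_CLASS_MAPPINGS from ComfyUI's nodes package; the prompt id and node values are invented, and it assumes SAMPLING is among METADATA_CATEGORIES.

from py.metadata_collector.metadata_registry import MetadataRegistry

registry = MetadataRegistry()            # singleton: repeated calls share state
registry.start_collection("prompt-001")  # fresh per-prompt category buckets
registry.record_node_execution(
    node_id="3",
    class_type="KSampler",
    inputs={"seed": [42], "steps": [20], "cfg": [7.0]},  # list-wrapped, as ComfyUI passes them
    outputs=None,
)
assert MetadataRegistry() is registry    # __new__ returns the cached instance
print(registry.get_metadata("prompt-001")["execution_order"])  # ['3']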
678  py/metadata_collector/node_extractors.py  Normal file
@@ -0,0 +1,678 @@
import json
import os

from .constants import MODELS, PROMPTS, SAMPLING, LORAS, SIZE, IMAGES, IS_SAMPLER


class NodeMetadataExtractor:
    """Base class for node-specific metadata extraction"""

    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        """Extract metadata from node inputs/outputs"""
        pass

    @staticmethod
    def update(node_id, outputs, metadata):
        """Update metadata with node outputs after execution"""
        pass

class GenericNodeExtractor(NodeMetadataExtractor):
    """Default extractor for nodes without specific handling"""
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        pass

class CheckpointLoaderExtractor(NodeMetadataExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs or "ckpt_name" not in inputs:
            return

        model_name = inputs.get("ckpt_name")
        if model_name:
            metadata[MODELS][node_id] = {
                "name": model_name,
                "type": "checkpoint",
                "node_id": node_id
            }

class TSCCheckpointLoaderExtractor(NodeMetadataExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs or "ckpt_name" not in inputs:
            return

        model_name = inputs.get("ckpt_name")
        if model_name:
            metadata[MODELS][node_id] = {
                "name": model_name,
                "type": "checkpoint",
                "node_id": node_id
            }

        # For loader nodes that have a lora_stack input, like Efficient Loader from Efficient Nodes
        active_loras = []

        # Process lora_stack if available
        if "lora_stack" in inputs:
            lora_stack = inputs.get("lora_stack", [])
            for lora_path, model_strength, clip_strength in lora_stack:
                # Extract lora name from path (following the format in lora_loader.py)
                lora_name = os.path.splitext(os.path.basename(lora_path))[0]
                active_loras.append({
                    "name": lora_name,
                    "strength": model_strength
                })

        if active_loras:
            metadata[LORAS][node_id] = {
                "lora_list": active_loras,
                "node_id": node_id
            }

        # Extract positive and negative prompt text if available
        positive_text = inputs.get("positive", "")
        negative_text = inputs.get("negative", "")

        if positive_text or negative_text:
            if node_id not in metadata[PROMPTS]:
                metadata[PROMPTS][node_id] = {"node_id": node_id}

            # Store both positive and negative text
            metadata[PROMPTS][node_id]["positive_text"] = positive_text
            metadata[PROMPTS][node_id]["negative_text"] = negative_text

    @staticmethod
    def update(node_id, outputs, metadata):
        # Handle conditioning outputs from TSC_EfficientLoader
        # outputs is a list with [(model, positive_encoded, negative_encoded, {"samples":latent}, vae, clip, dependencies,)]
        if outputs and isinstance(outputs, list) and len(outputs) > 0:
            first_output = outputs[0]
            if isinstance(first_output, tuple) and len(first_output) >= 3:
                positive_conditioning = first_output[1]
                negative_conditioning = first_output[2]

                # Save both conditioning objects in metadata
                if node_id not in metadata[PROMPTS]:
                    metadata[PROMPTS][node_id] = {"node_id": node_id}

                metadata[PROMPTS][node_id]["positive_encoded"] = positive_conditioning
                metadata[PROMPTS][node_id]["negative_encoded"] = negative_conditioning

class CLIPTextEncodeExtractor(NodeMetadataExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs or "text" not in inputs:
            return

        text = inputs.get("text", "")
        metadata[PROMPTS][node_id] = {
            "text": text,
            "node_id": node_id
        }

    @staticmethod
    def update(node_id, outputs, metadata):
        if outputs and isinstance(outputs, list) and len(outputs) > 0:
            if isinstance(outputs[0], tuple) and len(outputs[0]) > 0:
                conditioning = outputs[0][0]
                metadata[PROMPTS][node_id]["conditioning"] = conditioning

# Base Sampler Extractor to reduce code redundancy
class BaseSamplerExtractor(NodeMetadataExtractor):
    """Base extractor for sampler nodes with common functionality"""
    @staticmethod
    def extract_sampling_params(node_id, inputs, metadata, param_keys):
        """Extract sampling parameters from inputs"""
        sampling_params = {}
        for key in param_keys:
            if key in inputs:
                sampling_params[key] = inputs[key]

        metadata[SAMPLING][node_id] = {
            "parameters": sampling_params,
            "node_id": node_id,
            IS_SAMPLER: True  # Add sampler flag
        }

    @staticmethod
    def extract_conditioning(node_id, inputs, metadata):
        """Extract conditioning objects from inputs"""
        # Store the conditioning objects directly in metadata for later matching
        pos_conditioning = inputs.get("positive", None)
        neg_conditioning = inputs.get("negative", None)

        # Save conditioning objects in metadata for later matching
        if pos_conditioning is not None or neg_conditioning is not None:
            if node_id not in metadata[PROMPTS]:
                metadata[PROMPTS][node_id] = {"node_id": node_id}

            metadata[PROMPTS][node_id]["pos_conditioning"] = pos_conditioning
            metadata[PROMPTS][node_id]["neg_conditioning"] = neg_conditioning

    @staticmethod
    def extract_latent_dimensions(node_id, inputs, metadata):
        """Extract dimensions from latent image"""
        # Extract latent image dimensions if available
        if "latent_image" in inputs and inputs["latent_image"] is not None:
            latent = inputs["latent_image"]
            if isinstance(latent, dict) and "samples" in latent:
                # Extract dimensions from latent tensor
                samples = latent["samples"]
                if hasattr(samples, "shape") and len(samples.shape) >= 3:
                    # Correct shape interpretation: [batch_size, channels, height/8, width/8]
                    # Multiply by 8 to get actual pixel dimensions
                    height = int(samples.shape[2] * 8)
                    width = int(samples.shape[3] * 8)

                    if SIZE not in metadata:
                        metadata[SIZE] = {}

                    metadata[SIZE][node_id] = {
                        "width": width,
                        "height": height,
                        "node_id": node_id
                    }

class SamplerExtractor(BaseSamplerExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs:
            return

        # Extract common sampling parameters
        BaseSamplerExtractor.extract_sampling_params(
            node_id, inputs, metadata,
            ["seed", "steps", "cfg", "sampler_name", "scheduler", "denoise"]
        )

        # Extract conditioning objects
        BaseSamplerExtractor.extract_conditioning(node_id, inputs, metadata)

        # Extract latent dimensions
        BaseSamplerExtractor.extract_latent_dimensions(node_id, inputs, metadata)

class KSamplerAdvancedExtractor(BaseSamplerExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs:
            return

        # Extract common sampling parameters
        BaseSamplerExtractor.extract_sampling_params(
            node_id, inputs, metadata,
            ["noise_seed", "steps", "cfg", "sampler_name", "scheduler", "add_noise"]
        )

        # Extract conditioning objects
        BaseSamplerExtractor.extract_conditioning(node_id, inputs, metadata)

        # Extract latent dimensions
        BaseSamplerExtractor.extract_latent_dimensions(node_id, inputs, metadata)

class KSamplerBasicPipeExtractor(BaseSamplerExtractor):
    """Extractor for KSamplerBasicPipe and KSampler_inspire_pipe nodes"""
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs:
            return

        # Extract common sampling parameters
        BaseSamplerExtractor.extract_sampling_params(
            node_id, inputs, metadata,
            ["seed", "steps", "cfg", "sampler_name", "scheduler", "denoise"]
        )

        # Extract conditioning objects from basic_pipe
        if "basic_pipe" in inputs and inputs["basic_pipe"] is not None:
            basic_pipe = inputs["basic_pipe"]
            # Typically, basic_pipe structure is (model, clip, vae, positive, negative)
            if isinstance(basic_pipe, tuple) and len(basic_pipe) >= 5:
                pos_conditioning = basic_pipe[3]  # positive is at index 3
                neg_conditioning = basic_pipe[4]  # negative is at index 4

                # Save conditioning objects in metadata
                if node_id not in metadata[PROMPTS]:
                    metadata[PROMPTS][node_id] = {"node_id": node_id}

                metadata[PROMPTS][node_id]["pos_conditioning"] = pos_conditioning
                metadata[PROMPTS][node_id]["neg_conditioning"] = neg_conditioning

        # Extract latent dimensions
        BaseSamplerExtractor.extract_latent_dimensions(node_id, inputs, metadata)

class KSamplerAdvancedBasicPipeExtractor(BaseSamplerExtractor):
    """Extractor for KSamplerAdvancedBasicPipe nodes"""
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs:
            return

        # Extract common sampling parameters
        BaseSamplerExtractor.extract_sampling_params(
            node_id, inputs, metadata,
            ["noise_seed", "steps", "cfg", "sampler_name", "scheduler", "add_noise"]
        )

        # Extract conditioning objects from basic_pipe
        if "basic_pipe" in inputs and inputs["basic_pipe"] is not None:
            basic_pipe = inputs["basic_pipe"]
            # Typically, basic_pipe structure is (model, clip, vae, positive, negative)
            if isinstance(basic_pipe, tuple) and len(basic_pipe) >= 5:
                pos_conditioning = basic_pipe[3]  # positive is at index 3
                neg_conditioning = basic_pipe[4]  # negative is at index 4

                # Save conditioning objects in metadata
                if node_id not in metadata[PROMPTS]:
                    metadata[PROMPTS][node_id] = {"node_id": node_id}

                metadata[PROMPTS][node_id]["pos_conditioning"] = pos_conditioning
                metadata[PROMPTS][node_id]["neg_conditioning"] = neg_conditioning

        # Extract latent dimensions
        BaseSamplerExtractor.extract_latent_dimensions(node_id, inputs, metadata)

class TSCSamplerBaseExtractor(NodeMetadataExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        # Store vae_decode setting for later use in update
        if inputs and "vae_decode" in inputs:
            if SAMPLING not in metadata:
                metadata[SAMPLING] = {}

            if node_id not in metadata[SAMPLING]:
                metadata[SAMPLING][node_id] = {"parameters": {}, "node_id": node_id}

            # Store the vae_decode setting
            metadata[SAMPLING][node_id]["vae_decode"] = inputs["vae_decode"]

    @staticmethod
    def update(node_id, outputs, metadata):
        # Check if vae_decode was set to "true"
        should_save_image = True
        if SAMPLING in metadata and node_id in metadata[SAMPLING]:
            vae_decode = metadata[SAMPLING][node_id].get("vae_decode")
            if vae_decode is not None:
                should_save_image = (vae_decode == "true")

        # Skip image saving if vae_decode isn't "true"
        if not should_save_image:
            return

        # Ensure IMAGES category exists
        if IMAGES not in metadata:
            metadata[IMAGES] = {}

        # Extract output_images from the TSC sampler format
        # outputs = [{"ui": {"images": preview_images}, "result": result}]
        # where result = (original_model, original_positive, original_negative, latent_list, optional_vae, output_images,)
        if outputs and isinstance(outputs, list) and len(outputs) > 0:
            # Get the first item in the list
            output_item = outputs[0]
            if isinstance(output_item, dict) and "result" in output_item:
                result = output_item["result"]
                if isinstance(result, tuple) and len(result) >= 6:
                    # The output_images is the last element in the result tuple
                    output_images = (result[5],)

                    # Save image data under node ID index to be captured by caching mechanism
                    metadata[IMAGES][node_id] = {
                        "node_id": node_id,
                        "image": output_images
                    }

                    # Only set first_decode if it hasn't been recorded yet
                    if "first_decode" not in metadata[IMAGES]:
                        metadata[IMAGES]["first_decode"] = metadata[IMAGES][node_id]

class TSCKSamplerExtractor(SamplerExtractor, TSCSamplerBaseExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        # Call parent extract methods
        SamplerExtractor.extract(node_id, inputs, outputs, metadata)
        TSCSamplerBaseExtractor.extract(node_id, inputs, outputs, metadata)

    # Update method is inherited from TSCSamplerBaseExtractor


class TSCKSamplerAdvancedExtractor(KSamplerAdvancedExtractor, TSCSamplerBaseExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        # Call parent extract methods
        KSamplerAdvancedExtractor.extract(node_id, inputs, outputs, metadata)
        TSCSamplerBaseExtractor.extract(node_id, inputs, outputs, metadata)

    # Update method is inherited from TSCSamplerBaseExtractor

class LoraLoaderExtractor(NodeMetadataExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs or "lora_name" not in inputs:
            return

        lora_name = inputs.get("lora_name")
        # Extract base filename without extension from path
        lora_name = os.path.splitext(os.path.basename(lora_name))[0]
        strength_model = round(float(inputs.get("strength_model", 1.0)), 2)

        # Use the standardized format with lora_list
        metadata[LORAS][node_id] = {
            "lora_list": [
                {
                    "name": lora_name,
                    "strength": strength_model
                }
            ],
            "node_id": node_id
        }

class ImageSizeExtractor(NodeMetadataExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs:
            return

        width = inputs.get("width", 512)
        height = inputs.get("height", 512)

        if SIZE not in metadata:
            metadata[SIZE] = {}

        metadata[SIZE][node_id] = {
            "width": width,
            "height": height,
            "node_id": node_id
        }

class LoraLoaderManagerExtractor(NodeMetadataExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs:
            return

        active_loras = []

        # Process lora_stack if available
        if "lora_stack" in inputs:
            lora_stack = inputs.get("lora_stack", [])
            for lora_path, model_strength, clip_strength in lora_stack:
                # Extract lora name from path (following the format in lora_loader.py)
                lora_name = os.path.splitext(os.path.basename(lora_path))[0]
                active_loras.append({
                    "name": lora_name,
                    "strength": model_strength
                })

        # Process loras from inputs
        if "loras" in inputs:
            loras_data = inputs.get("loras", [])

            # Handle new format: {'loras': {'__value__': [...]}}
            if isinstance(loras_data, dict) and '__value__' in loras_data:
                loras_list = loras_data['__value__']
            # Handle old format: {'loras': [...]}
            elif isinstance(loras_data, list):
                loras_list = loras_data
            else:
                loras_list = []

            # Filter for active loras
            for lora in loras_list:
                if isinstance(lora, dict) and lora.get("active", True) and not lora.get("_isDummy", False):
                    active_loras.append({
                        "name": lora.get("name", ""),
                        "strength": float(lora.get("strength", 1.0))
                    })

        if active_loras:
            metadata[LORAS][node_id] = {
                "lora_list": active_loras,
                "node_id": node_id
            }

class FluxGuidanceExtractor(NodeMetadataExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs or "guidance" not in inputs:
            return

        guidance_value = inputs.get("guidance")

        # Store the guidance value in the SAMPLING category
        if node_id not in metadata[SAMPLING]:
            metadata[SAMPLING][node_id] = {"parameters": {}, "node_id": node_id}

        metadata[SAMPLING][node_id]["parameters"]["guidance"] = guidance_value

class UNETLoaderExtractor(NodeMetadataExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs or "unet_name" not in inputs:
            return

        model_name = inputs.get("unet_name")
        if model_name:
            metadata[MODELS][node_id] = {
                "name": model_name,
                "type": "checkpoint",
                "node_id": node_id
            }

class VAEDecodeExtractor(NodeMetadataExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        pass

    @staticmethod
    def update(node_id, outputs, metadata):
        # Ensure IMAGES category exists
        if IMAGES not in metadata:
            metadata[IMAGES] = {}

        # Save image data under node ID index to be captured by caching mechanism
        metadata[IMAGES][node_id] = {
            "node_id": node_id,
            "image": outputs
        }

        # Only set first_decode if it hasn't been recorded yet
        if "first_decode" not in metadata[IMAGES]:
            metadata[IMAGES]["first_decode"] = metadata[IMAGES][node_id]

class KSamplerSelectExtractor(NodeMetadataExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs or "sampler_name" not in inputs:
            return

        sampling_params = {}
        if "sampler_name" in inputs:
            sampling_params["sampler_name"] = inputs["sampler_name"]

        metadata[SAMPLING][node_id] = {
            "parameters": sampling_params,
            "node_id": node_id,
            IS_SAMPLER: False  # Mark as non-primary sampler
        }

class BasicSchedulerExtractor(NodeMetadataExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs:
            return

        sampling_params = {}
        for key in ["scheduler", "steps", "denoise"]:
            if key in inputs:
                sampling_params[key] = inputs[key]

        metadata[SAMPLING][node_id] = {
            "parameters": sampling_params,
            "node_id": node_id,
            IS_SAMPLER: False  # Mark as non-primary sampler
        }

class SamplerCustomAdvancedExtractor(BaseSamplerExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs:
            return

        sampling_params = {}

        # Handle noise.seed as seed
        if "noise" in inputs and inputs["noise"] is not None and hasattr(inputs["noise"], "seed"):
            noise = inputs["noise"]
            sampling_params["seed"] = noise.seed

        metadata[SAMPLING][node_id] = {
            "parameters": sampling_params,
            "node_id": node_id,
            IS_SAMPLER: True  # Add sampler flag
        }

        # Extract latent dimensions
        BaseSamplerExtractor.extract_latent_dimensions(node_id, inputs, metadata)

class CLIPTextEncodeFluxExtractor(NodeMetadataExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs or "clip_l" not in inputs or "t5xxl" not in inputs:
            return

        clip_l_text = inputs.get("clip_l", "")
        t5xxl_text = inputs.get("t5xxl", "")

        # If both are empty, use empty string
        if not clip_l_text and not t5xxl_text:
            combined_text = ""
        # If one is empty, use the non-empty one
        elif not clip_l_text:
            combined_text = t5xxl_text
        elif not t5xxl_text:
            combined_text = clip_l_text
        # If both have content, use JSON format
        else:
            combined_text = json.dumps({
                "T5": t5xxl_text,
                "CLIP-L": clip_l_text
            })

        metadata[PROMPTS][node_id] = {
            "text": combined_text,
            "node_id": node_id
        }

        # Extract guidance value if available
        if "guidance" in inputs:
            guidance_value = inputs.get("guidance")

            # Store the guidance value in the SAMPLING category
            if SAMPLING not in metadata:
                metadata[SAMPLING] = {}

            if node_id not in metadata[SAMPLING]:
                metadata[SAMPLING][node_id] = {"parameters": {}, "node_id": node_id}

            metadata[SAMPLING][node_id]["parameters"]["guidance"] = guidance_value

    @staticmethod
    def update(node_id, outputs, metadata):
        if outputs and isinstance(outputs, list) and len(outputs) > 0:
            if isinstance(outputs[0], tuple) and len(outputs[0]) > 0:
                conditioning = outputs[0][0]
                metadata[PROMPTS][node_id]["conditioning"] = conditioning

class CFGGuiderExtractor(NodeMetadataExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs or "cfg" not in inputs:
            return

        cfg_value = inputs.get("cfg")

        # Store the cfg value in the SAMPLING category
        if SAMPLING not in metadata:
            metadata[SAMPLING] = {}

        if node_id not in metadata[SAMPLING]:
            metadata[SAMPLING][node_id] = {"parameters": {}, "node_id": node_id}

        metadata[SAMPLING][node_id]["parameters"]["cfg"] = cfg_value

class CR_ApplyControlNetStackExtractor(NodeMetadataExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs:
            return

        # Save the original conditioning inputs
        base_positive = inputs.get("base_positive")
        base_negative = inputs.get("base_negative")

        if base_positive is not None or base_negative is not None:
            if node_id not in metadata[PROMPTS]:
                metadata[PROMPTS][node_id] = {"node_id": node_id}

            metadata[PROMPTS][node_id]["orig_pos_cond"] = base_positive
            metadata[PROMPTS][node_id]["orig_neg_cond"] = base_negative

    @staticmethod
    def update(node_id, outputs, metadata):
        # Extract transformed conditionings from outputs
        # outputs structure: [(base_positive, base_negative, show_help, )]
        if outputs and isinstance(outputs, list) and len(outputs) > 0:
            first_output = outputs[0]
            if isinstance(first_output, tuple) and len(first_output) >= 2:
                transformed_positive = first_output[0]
                transformed_negative = first_output[1]

                # Save transformed conditioning objects in metadata
                if node_id not in metadata[PROMPTS]:
                    metadata[PROMPTS][node_id] = {"node_id": node_id}

                metadata[PROMPTS][node_id]["positive_encoded"] = transformed_positive
                metadata[PROMPTS][node_id]["negative_encoded"] = transformed_negative

# Registry of node-specific extractors
# Keys are node class names
NODE_EXTRACTORS = {
    # Sampling
    "KSampler": SamplerExtractor,
    "KSamplerAdvanced": KSamplerAdvancedExtractor,
    "SamplerCustomAdvanced": SamplerCustomAdvancedExtractor,
    "TSC_KSampler": TSCKSamplerExtractor,  # Efficient Nodes
    "TSC_KSamplerAdvanced": TSCKSamplerAdvancedExtractor,  # Efficient Nodes
    "KSamplerBasicPipe": KSamplerBasicPipeExtractor,  # comfyui-impact-pack
    "KSamplerAdvancedBasicPipe": KSamplerAdvancedBasicPipeExtractor,  # comfyui-impact-pack
    "KSampler_inspire_pipe": KSamplerBasicPipeExtractor,  # comfyui-inspire-pack
    "KSamplerAdvanced_inspire_pipe": KSamplerAdvancedBasicPipeExtractor,  # comfyui-inspire-pack
    # Sampling Selectors
    "KSamplerSelect": KSamplerSelectExtractor,  # Add KSamplerSelect
    "BasicScheduler": BasicSchedulerExtractor,  # Add BasicScheduler
    # Loaders
    "CheckpointLoaderSimple": CheckpointLoaderExtractor,
    "comfyLoader": CheckpointLoaderExtractor,  # easy comfyLoader
    "TSC_EfficientLoader": TSCCheckpointLoaderExtractor,  # Efficient Nodes
    "UNETLoader": UNETLoaderExtractor,  # Updated to use dedicated extractor
    "UnetLoaderGGUF": UNETLoaderExtractor,  # Updated to use dedicated extractor
    "LoraLoader": LoraLoaderExtractor,
    "LoraManagerLoader": LoraLoaderManagerExtractor,
    # Conditioning
    "CLIPTextEncode": CLIPTextEncodeExtractor,
    "CLIPTextEncodeFlux": CLIPTextEncodeFluxExtractor,  # Add CLIPTextEncodeFlux
    "WAS_Text_to_Conditioning": CLIPTextEncodeExtractor,
    "AdvancedCLIPTextEncode": CLIPTextEncodeExtractor,  # From https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb
    "smZ_CLIPTextEncode": CLIPTextEncodeExtractor,  # From https://github.com/shiimizu/ComfyUI_smZNodes
    "CR_ApplyControlNetStack": CR_ApplyControlNetStackExtractor,  # Add CR_ApplyControlNetStack
    # Latent
    "EmptyLatentImage": ImageSizeExtractor,
    # Flux
    "FluxGuidance": FluxGuidanceExtractor,  # Add FluxGuidance
    "CFGGuider": CFGGuiderExtractor,  # Add CFGGuider
    # Image
    "VAEDecode": VAEDecodeExtractor,  # Added VAEDecode extractor
    # Add other nodes as needed
}
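Not part of the diff: a sketch of the dispatch pattern the registry uses with this table. Unknown class names fall back to GenericNodeExtractor, whose extract() is a no-op, so unrecognized nodes are skipped silently. The node id and inputs below are invented, and the import paths assume py/ is importable as a package.

from py.metadata_collector.node_extractors import NODE_EXTRACTORS, GenericNodeExtractor
from py.metadata_collector.constants import (MODELS, PROMPTS, SAMPLING,
                                             LORAS, SIZE, IMAGES, IS_SAMPLER)

# Each extractor writes into per-category buckets keyed by node id.
metadata = {c: {} for c in (MODELS, PROMPTS, SAMPLING, LORAS, SIZE, IMAGES)}

extractor = NODE_EXTRACTORS.get("KSampler", GenericNodeExtractor)
extractor.extract(
    "3",
    {"seed": 42, "steps": 20, "cfg": 7.0, "sampler_name": "euler",
     "scheduler": "normal", "denoise": 1.0},
    None,
    metadata,
)
print(metadata[SAMPLING]["3"][IS_SAMPLER])  # True: candidate for primary sampler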
45  py/nodes/debug_metadata.py  Normal file
@@ -0,0 +1,45 @@
import logging
from server import PromptServer  # type: ignore
from ..metadata_collector.metadata_processor import MetadataProcessor

logger = logging.getLogger(__name__)

class DebugMetadata:
    NAME = "Debug Metadata (LoraManager)"
    CATEGORY = "Lora Manager/utils"
    DESCRIPTION = "Debug node to verify metadata_processor functionality"
    OUTPUT_NODE = True

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "images": ("IMAGE",),
            },
            "hidden": {
                "id": "UNIQUE_ID",
            },
        }

    RETURN_TYPES = ()
    FUNCTION = "process_metadata"

    def process_metadata(self, images, id):
        try:
            # Get the current execution context's metadata
            from ..metadata_collector import get_metadata
            metadata = get_metadata()

            # Use the MetadataProcessor to convert it to a JSON string
            metadata_json = MetadataProcessor.to_json(metadata, id)

            # Send metadata to the frontend for display
            PromptServer.instance.send_sync("metadata_update", {
                "id": id,
                "metadata": metadata_json
            })

        except Exception as e:
            logger.error(f"Error processing metadata: {e}")

        return ()
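Not part of the diff: ComfyUI discovers custom nodes through a NODE_CLASS_MAPPINGS dict exported by the extension. A registration along these lines is assumed to live elsewhere in the package; the exact site is not shown in this diff.

from .debug_metadata import DebugMetadata

# Hypothetical registration, following the usual ComfyUI convention.
NODE_CLASS_MAPPINGS = {"DebugMetadata": DebugMetadata}
NODE_DISPLAY_NAME_MAPPINGS = {"DebugMetadata": DebugMetadata.NAME}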
@@ -1,18 +1,15 @@
|
||||
import logging
|
||||
from nodes import LoraLoader
|
||||
from comfy.comfy_types import IO # type: ignore
|
||||
from ..services.lora_scanner import LoraScanner
|
||||
from ..config import config
|
||||
import asyncio
|
||||
import os
|
||||
from .utils import FlexibleOptionalInputType, any_type
|
||||
from ..utils.utils import get_lora_info
|
||||
from .utils import FlexibleOptionalInputType, any_type, extract_lora_name, get_loras_list, nunchaku_load_lora
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class LoraManagerLoader:
|
||||
NAME = "Lora Loader (LoraManager)"
|
||||
CATEGORY = "Lora Manager/loaders"
|
||||
|
||||
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
return {
|
||||
@@ -32,48 +29,6 @@ class LoraManagerLoader:
|
||||
    RETURN_TYPES = ("MODEL", "CLIP", IO.STRING, IO.STRING)
    RETURN_NAMES = ("MODEL", "CLIP", "trigger_words", "loaded_loras")
    FUNCTION = "load_loras"

    async def get_lora_info(self, lora_name):
        """Get the lora path and trigger words from cache"""
        scanner = await LoraScanner.get_instance()
        cache = await scanner.get_cached_data()

        for item in cache.raw_data:
            if item.get('file_name') == lora_name:
                file_path = item.get('file_path')
                if file_path:
                    for root in config.loras_roots:
                        root = root.replace(os.sep, '/')
                        if file_path.startswith(root):
                            relative_path = os.path.relpath(file_path, root).replace(os.sep, '/')
                            # Get trigger words from civitai metadata
                            civitai = item.get('civitai', {})
                            trigger_words = civitai.get('trainedWords', []) if civitai else []
                            return relative_path, trigger_words
        return lora_name, [] # Fallback if not found

    def extract_lora_name(self, lora_path):
        """Extract the lora name from a lora path (e.g., 'IL\\aorunIllstrious.safetensors' -> 'aorunIllstrious')"""
        # Get the basename without extension
        basename = os.path.basename(lora_path)
        return os.path.splitext(basename)[0]

    def _get_loras_list(self, kwargs):
        """Helper to extract loras list from either old or new kwargs format"""
        if 'loras' not in kwargs:
            return []

        loras_data = kwargs['loras']
        # Handle new format: {'loras': {'__value__': [...]}}
        if isinstance(loras_data, dict) and '__value__' in loras_data:
            return loras_data['__value__']
        # Handle old format: {'loras': [...]}
        elif isinstance(loras_data, list):
            return loras_data
        # Unexpected format
        else:
            logger.warning(f"Unexpected loras format: {type(loras_data)}")
            return []

    def load_loras(self, model, text, **kwargs):
        """Loads multiple LoRAs based on the kwargs input and lora_stack."""
@@ -82,34 +37,71 @@

        clip = kwargs.get('clip', None)
        lora_stack = kwargs.get('lora_stack', None)

        # Check if model is a Nunchaku Flux model - simplified approach
        is_nunchaku_model = False

        try:
            model_wrapper = model.model.diffusion_model
            # Check if model is a Nunchaku Flux model using only class name
            if model_wrapper.__class__.__name__ == "ComfyFluxWrapper":
                is_nunchaku_model = True
                logger.info("Detected Nunchaku Flux model")
        except (AttributeError, TypeError):
            # Not a model with the expected structure
            pass

        # First process lora_stack if available
        if lora_stack:
            for lora_path, model_strength, clip_strength in lora_stack:
                # Apply the LoRA using the provided path and strengths
                model, clip = LoraLoader().load_lora(model, clip, lora_path, model_strength, clip_strength)
                # Apply the LoRA using the appropriate loader
                if is_nunchaku_model:
                    # Use our custom function for Flux models
                    model = nunchaku_load_lora(model, lora_path, model_strength)
                    # clip remains unchanged for Nunchaku models
                else:
                    # Use default loader for standard models
                    model, clip = LoraLoader().load_lora(model, clip, lora_path, model_strength, clip_strength)

                # Extract lora name for trigger words lookup
                lora_name = self.extract_lora_name(lora_path)
                _, trigger_words = asyncio.run(self.get_lora_info(lora_name))
                lora_name = extract_lora_name(lora_path)
                _, trigger_words = get_lora_info(lora_name)

                all_trigger_words.extend(trigger_words)
                loaded_loras.append(f"{lora_name}: {model_strength}")
                # Add clip strength to output if different from model strength (except for Nunchaku models)
                if not is_nunchaku_model and abs(model_strength - clip_strength) > 0.001:
                    loaded_loras.append(f"{lora_name}: {model_strength},{clip_strength}")
                else:
                    loaded_loras.append(f"{lora_name}: {model_strength}")

        # Then process loras from kwargs with support for both old and new formats
        loras_list = self._get_loras_list(kwargs)
        loras_list = get_loras_list(kwargs)
        for lora in loras_list:
            if not lora.get('active', False):
                continue

            lora_name = lora['name']
            strength = float(lora['strength'])
            model_strength = float(lora['strength'])
            # Get clip strength - use model strength as default if not specified
            clip_strength = float(lora.get('clipStrength', model_strength))

            # Get lora path and trigger words
            lora_path, trigger_words = asyncio.run(self.get_lora_info(lora_name))
            lora_path, trigger_words = get_lora_info(lora_name)

            # Apply the LoRA using the resolved path
            model, clip = LoraLoader().load_lora(model, clip, lora_path, strength, strength)
            loaded_loras.append(f"{lora_name}: {strength}")
            # Apply the LoRA using the appropriate loader
            if is_nunchaku_model:
                # For Nunchaku models, use our custom function
                model = nunchaku_load_lora(model, lora_path, model_strength)
                # clip remains unchanged
            else:
                # Use default loader for standard models
                model, clip = LoraLoader().load_lora(model, clip, lora_path, model_strength, clip_strength)

            # Include clip strength in output if different from model strength and not a Nunchaku model
            if not is_nunchaku_model and abs(model_strength - clip_strength) > 0.001:
                loaded_loras.append(f"{lora_name}: {model_strength},{clip_strength}")
            else:
                loaded_loras.append(f"{lora_name}: {model_strength}")

            # Add trigger words to collection
            all_trigger_words.extend(trigger_words)
@@ -117,8 +109,23 @@ class LoraManagerLoader:
        # use ',, ' to separate trigger words for group mode
        trigger_words_text = ",, ".join(all_trigger_words) if all_trigger_words else ""

        # Format loaded_loras as <lora:lora_name:strength> separated by spaces
        formatted_loras = " ".join([f"<lora:{name.split(':')[0].strip()}:{str(strength).strip()}>"
                                    for name, strength in [item.split(':') for item in loaded_loras]])
        # Format loaded_loras with support for both formats
        formatted_loras = []
        for item in loaded_loras:
            parts = item.split(":")
            lora_name = parts[0].strip()
            strength_parts = parts[1].strip().split(",")

            if len(strength_parts) > 1:
                # Different model and clip strengths
                model_str = strength_parts[0].strip()
                clip_str = strength_parts[1].strip()
                formatted_loras.append(f"<lora:{lora_name}:{model_str}:{clip_str}>")
            else:
                # Same strength for both
                model_str = strength_parts[0].strip()
                formatted_loras.append(f"<lora:{lora_name}:{model_str}>")

        formatted_loras_text = " ".join(formatted_loras)

        return (model, clip, trigger_words_text, formatted_loras)
        return (model, clip, trigger_words_text, formatted_loras_text)
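For reference, the two output shapes the formatter above can emit, with invented names and strengths:

# Illustrative only: equal strengths collapse to one value, unequal keep both.
# "styleA: 0.8"      ->  "<lora:styleA:0.8>"
# "styleB: 1.0,0.5"  ->  "<lora:styleB:1.0:0.5>"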
@@ -1,9 +1,8 @@
from comfy.comfy_types import IO # type: ignore
from ..services.lora_scanner import LoraScanner
from ..config import config
import asyncio
import os
from .utils import FlexibleOptionalInputType, any_type
from ..utils.utils import get_lora_info
from .utils import FlexibleOptionalInputType, any_type, extract_lora_name, get_loras_list

import logging

logger = logging.getLogger(__name__)
@@ -29,48 +28,6 @@ class LoraStacker:
    RETURN_TYPES = ("LORA_STACK", IO.STRING, IO.STRING)
    RETURN_NAMES = ("LORA_STACK", "trigger_words", "active_loras")
    FUNCTION = "stack_loras"

    async def get_lora_info(self, lora_name):
        """Get the lora path and trigger words from cache"""
        scanner = await LoraScanner.get_instance()
        cache = await scanner.get_cached_data()

        for item in cache.raw_data:
            if item.get('file_name') == lora_name:
                file_path = item.get('file_path')
                if file_path:
                    for root in config.loras_roots:
                        root = root.replace(os.sep, '/')
                        if file_path.startswith(root):
                            relative_path = os.path.relpath(file_path, root).replace(os.sep, '/')
                            # Get trigger words from civitai metadata
                            civitai = item.get('civitai', {})
                            trigger_words = civitai.get('trainedWords', []) if civitai else []
                            return relative_path, trigger_words
        return lora_name, [] # Fallback if not found

    def extract_lora_name(self, lora_path):
        """Extract the lora name from a lora path (e.g., 'IL\\aorunIllstrious.safetensors' -> 'aorunIllstrious')"""
        # Get the basename without extension
        basename = os.path.basename(lora_path)
        return os.path.splitext(basename)[0]

    def _get_loras_list(self, kwargs):
        """Helper to extract loras list from either old or new kwargs format"""
        if 'loras' not in kwargs:
            return []

        loras_data = kwargs['loras']
        # Handle new format: {'loras': {'__value__': [...]}}
        if isinstance(loras_data, dict) and '__value__' in loras_data:
            return loras_data['__value__']
        # Handle old format: {'loras': [...]}
        elif isinstance(loras_data, list):
            return loras_data
        # Unexpected format
        else:
            logger.warning(f"Unexpected loras format: {type(loras_data)}")
            return []

    def stack_loras(self, text, **kwargs):
        """Stacks multiple LoRAs based on the kwargs input without loading them."""
@@ -80,39 +37,49 @@

        # Process existing lora_stack if available
        lora_stack = kwargs.get('lora_stack', None)
        if lora_stack:
        if (lora_stack):
            stack.extend(lora_stack)
            # Get trigger words from existing stack entries
            for lora_path, _, _ in lora_stack:
                lora_name = self.extract_lora_name(lora_path)
                _, trigger_words = asyncio.run(self.get_lora_info(lora_name))
                lora_name = extract_lora_name(lora_path)
                _, trigger_words = get_lora_info(lora_name)
                all_trigger_words.extend(trigger_words)

        # Process loras from kwargs with support for both old and new formats
        loras_list = self._get_loras_list(kwargs)
        loras_list = get_loras_list(kwargs)
        for lora in loras_list:
            if not lora.get('active', False):
                continue

            lora_name = lora['name']
            model_strength = float(lora['strength'])
            clip_strength = model_strength # Using same strength for both as in the original loader
            # Get clip strength - use model strength as default if not specified
            clip_strength = float(lora.get('clipStrength', model_strength))

            # Get lora path and trigger words
            lora_path, trigger_words = asyncio.run(self.get_lora_info(lora_name))
            lora_path, trigger_words = get_lora_info(lora_name)

            # Add to stack without loading
            # replace '/' with os.sep to avoid different OS path format
            stack.append((lora_path.replace('/', os.sep), model_strength, clip_strength))
            active_loras.append((lora_name, model_strength))
            active_loras.append((lora_name, model_strength, clip_strength))

            # Add trigger words to collection
            all_trigger_words.extend(trigger_words)

        # use ',, ' to separate trigger words for group mode
        trigger_words_text = ",, ".join(all_trigger_words) if all_trigger_words else ""
        # Format active_loras as <lora:lora_name:strength> separated by spaces
        active_loras_text = " ".join([f"<lora:{name}:{str(strength).strip()}>"
                                      for name, strength in active_loras])

        # Format active_loras with support for both formats
        formatted_loras = []
        for name, model_strength, clip_strength in active_loras:
            if abs(model_strength - clip_strength) > 0.001:
                # Different model and clip strengths
                formatted_loras.append(f"<lora:{name}:{str(model_strength).strip()}:{str(clip_strength).strip()}>")
            else:
                # Same strength for both
                formatted_loras.append(f"<lora:{name}:{str(model_strength).strip()}>")

        active_loras_text = " ".join(formatted_loras)

        return (stack, trigger_words_text, active_loras_text)
@@ -1,14 +1,13 @@
import json
import os
import asyncio
import re
import numpy as np
import folder_paths # type: ignore
from ..services.lora_scanner import LoraScanner
from ..workflow.parser import WorkflowParser
from ..services.service_registry import ServiceRegistry
from ..metadata_collector.metadata_processor import MetadataProcessor
from ..metadata_collector import get_metadata
from PIL import Image, PngImagePlugin
import piexif
from io import BytesIO

class SaveImage:
    NAME = "Save Image (LoraManager)"
@@ -30,16 +29,36 @@ class SaveImage:
        return {
            "required": {
                "images": ("IMAGE",),
                "filename_prefix": ("STRING", {"default": "ComfyUI"}),
                "file_format": (["png", "jpeg", "webp"],),
                "filename_prefix": ("STRING", {
                    "default": "ComfyUI",
                    "tooltip": "Base filename for saved images. Supports format patterns like %seed%, %width%, %height%, %model%, etc."
                }),
                "file_format": (["png", "jpeg", "webp"], {
                    "tooltip": "Image format to save as. PNG preserves quality, JPEG is smaller, WebP balances size and quality."
                }),
            },
            "optional": {
                "lossless_webp": ("BOOLEAN", {"default": True}),
                "quality": ("INT", {"default": 100, "min": 1, "max": 100}),
                "embed_workflow": ("BOOLEAN", {"default": False}),
                "add_counter_to_filename": ("BOOLEAN", {"default": True}),
                "lossless_webp": ("BOOLEAN", {
                    "default": False,
                    "tooltip": "When enabled, saves WebP images with lossless compression. Results in larger files but no quality loss."
                }),
                "quality": ("INT", {
                    "default": 100,
                    "min": 1,
                    "max": 100,
                    "tooltip": "Compression quality for JPEG and lossy WebP formats (1-100). Higher values mean better quality but larger files."
                }),
                "embed_workflow": ("BOOLEAN", {
                    "default": False,
                    "tooltip": "Embeds the complete workflow data into the image metadata. Only works with PNG and WebP formats."
                }),
                "add_counter_to_filename": ("BOOLEAN", {
                    "default": True,
                    "tooltip": "Adds an incremental counter to filenames to prevent overwriting previous images."
                }),
            },
            "hidden": {
                "id": "UNIQUE_ID",
                "prompt": "PROMPT",
                "extra_pnginfo": "EXTRA_PNGINFO",
            },
@@ -50,27 +69,51 @@ class SaveImage:
    FUNCTION = "process_image"
    OUTPUT_NODE = True

    async def get_lora_hash(self, lora_name):
    def get_lora_hash(self, lora_name):
        """Get the lora hash from cache"""
        scanner = await LoraScanner.get_instance()
        cache = await scanner.get_cached_data()
        scanner = ServiceRegistry.get_service_sync("lora_scanner")

        for item in cache.raw_data:
            if item.get('file_name') == lora_name:
                return item.get('sha256')
        # Use the new direct filename lookup method
        hash_value = scanner.get_hash_by_filename(lora_name)
        if hash_value:
            return hash_value

        return None

    async def format_metadata(self, parsed_workflow):
    def get_checkpoint_hash(self, checkpoint_path):
        """Get the checkpoint hash from cache"""
        scanner = ServiceRegistry.get_service_sync("checkpoint_scanner")

        if not checkpoint_path:
            return None

        # Extract basename without extension
        checkpoint_name = os.path.basename(checkpoint_path)
        checkpoint_name = os.path.splitext(checkpoint_name)[0]

        # Try direct filename lookup first
        hash_value = scanner.get_hash_by_filename(checkpoint_name)
        if hash_value:
            return hash_value

        return None

    def format_metadata(self, metadata_dict):
        """Format metadata in the requested format similar to userComment example"""
        if not parsed_workflow:
        if not metadata_dict:
            return ""

        # Helper function to only add parameter if value is not None
        def add_param_if_not_none(param_list, label, value):
            if value is not None:
                param_list.append(f"{label}: {value}")

        # Extract the prompt and negative prompt
        prompt = parsed_workflow.get('prompt', '')
        negative_prompt = parsed_workflow.get('negative_prompt', '')
        prompt = metadata_dict.get('prompt', '')
        negative_prompt = metadata_dict.get('negative_prompt', '')

        # Extract loras from the prompt if present
        loras_text = parsed_workflow.get('loras', '')
        loras_text = metadata_dict.get('loras', '')
        lora_hashes = {}

        # If loras are found, add them on a new line after the prompt
@@ -82,7 +125,7 @@ class SaveImage:

        # Get hash for each lora
        for lora_name, strength in lora_matches:
            hash_value = await self.get_lora_hash(lora_name)
            hash_value = self.get_lora_hash(lora_name)
            if hash_value:
                lora_hashes[lora_name] = hash_value
            else:
@@ -99,11 +142,15 @@ class SaveImage:
        params = []

        # Add standard parameters in the correct order
        if 'steps' in parsed_workflow:
            params.append(f"Steps: {parsed_workflow.get('steps')}")
        if 'steps' in metadata_dict:
            add_param_if_not_none(params, "Steps", metadata_dict.get('steps'))

        if 'sampler' in parsed_workflow:
            sampler = parsed_workflow.get('sampler')
        # Combine sampler and scheduler information
        sampler_name = None
        scheduler_name = None

        if 'sampler' in metadata_dict:
            sampler = metadata_dict.get('sampler')
            # Convert ComfyUI sampler names to user-friendly names
            sampler_mapping = {
                'euler': 'Euler',
@@ -123,10 +170,9 @@ class SaveImage:
                'ddim': 'DDIM'
            }
            sampler_name = sampler_mapping.get(sampler, sampler)
            params.append(f"Sampler: {sampler_name}")

        if 'scheduler' in parsed_workflow:
            scheduler = parsed_workflow.get('scheduler')
        if 'scheduler' in metadata_dict:
            scheduler = metadata_dict.get('scheduler')
            scheduler_mapping = {
                'normal': 'Simple',
                'karras': 'Karras',
@@ -135,35 +181,54 @@ class SaveImage:
                'sgm_quadratic': 'SGM Quadratic'
            }
            scheduler_name = scheduler_mapping.get(scheduler, scheduler)
            params.append(f"Schedule type: {scheduler_name}")

        # CFG scale (cfg in parsed_workflow)
        if 'cfg_scale' in parsed_workflow:
            params.append(f"CFG scale: {parsed_workflow.get('cfg_scale')}")
        elif 'cfg' in parsed_workflow:
            params.append(f"CFG scale: {parsed_workflow.get('cfg')}")
        # Add combined sampler and scheduler information
        if sampler_name:
            if scheduler_name:
                params.append(f"Sampler: {sampler_name} {scheduler_name}")
            else:
                params.append(f"Sampler: {sampler_name}")

        # CFG scale (Use guidance if available, otherwise fall back to cfg_scale or cfg)
        if 'guidance' in metadata_dict:
            add_param_if_not_none(params, "CFG scale", metadata_dict.get('guidance'))
        elif 'cfg_scale' in metadata_dict:
            add_param_if_not_none(params, "CFG scale", metadata_dict.get('cfg_scale'))
        elif 'cfg' in metadata_dict:
            add_param_if_not_none(params, "CFG scale", metadata_dict.get('cfg'))

        # Seed
        if 'seed' in parsed_workflow:
            params.append(f"Seed: {parsed_workflow.get('seed')}")
        if 'seed' in metadata_dict:
            add_param_if_not_none(params, "Seed", metadata_dict.get('seed'))

        # Size
        if 'size' in parsed_workflow:
            params.append(f"Size: {parsed_workflow.get('size')}")
        if 'size' in metadata_dict:
            add_param_if_not_none(params, "Size", metadata_dict.get('size'))

        # Model info
        if 'checkpoint' in parsed_workflow:
            # Extract basename without path
            checkpoint = os.path.basename(parsed_workflow.get('checkpoint', ''))
            # Remove extension if present
            checkpoint = os.path.splitext(checkpoint)[0]
            params.append(f"Model: {checkpoint}")
        if 'checkpoint' in metadata_dict:
            # Ensure checkpoint is a string before processing
            checkpoint = metadata_dict.get('checkpoint')
            if checkpoint is not None:
                # Get model hash
                model_hash = self.get_checkpoint_hash(checkpoint)

                # Extract basename without path
                checkpoint_name = os.path.basename(checkpoint)
                # Remove extension if present
                checkpoint_name = os.path.splitext(checkpoint_name)[0]

                # Add model hash if available
                if model_hash:
                    params.append(f"Model hash: {model_hash[:10]}, Model: {checkpoint_name}")
                else:
                    params.append(f"Model: {checkpoint_name}")

        # Add LoRA hashes if available
        if lora_hashes:
            lora_hash_parts = []
            for lora_name, hash_value in lora_hashes.items():
                lora_hash_parts.append(f"{lora_name}: {hash_value}")
                lora_hash_parts.append(f"{lora_name}: {hash_value[:10]}")

            if lora_hash_parts:
                params.append(f"Lora hashes: \"{', '.join(lora_hash_parts)}\"")
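Put together, format_metadata emits an Automatic1111-style parameter line. An illustrative example with invented values:

# Illustrative output only; hashes, names, and numbers are made up.
# "Steps: 20, Sampler: Euler Karras, CFG scale: 7.0, Seed: 42, Size: 832x1216,
#  Model hash: ab12cd34ef, Model: myCheckpoint, Lora hashes: \"styleA: 0123456789\""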
@@ -176,9 +241,9 @@ class SaveImage:

    # credit to nkchocoai
    # Add format_filename method to handle pattern substitution
    def format_filename(self, filename, parsed_workflow):
    def format_filename(self, filename, metadata_dict):
        """Format filename with metadata values"""
        if not parsed_workflow:
        if not metadata_dict:
            return filename

        result = re.findall(self.pattern_format, filename)
@@ -186,30 +251,30 @@
            parts = segment.replace("%", "").split(":")
            key = parts[0]

            if key == "seed" and 'seed' in parsed_workflow:
                filename = filename.replace(segment, str(parsed_workflow.get('seed', '')))
            elif key == "width" and 'size' in parsed_workflow:
                size = parsed_workflow.get('size', 'x')
            if key == "seed" and 'seed' in metadata_dict:
                filename = filename.replace(segment, str(metadata_dict.get('seed', '')))
            elif key == "width" and 'size' in metadata_dict:
                size = metadata_dict.get('size', 'x')
                w = size.split('x')[0] if isinstance(size, str) else size[0]
                filename = filename.replace(segment, str(w))
            elif key == "height" and 'size' in parsed_workflow:
                size = parsed_workflow.get('size', 'x')
            elif key == "height" and 'size' in metadata_dict:
                size = metadata_dict.get('size', 'x')
                h = size.split('x')[1] if isinstance(size, str) else size[1]
                filename = filename.replace(segment, str(h))
            elif key == "pprompt" and 'prompt' in parsed_workflow:
                prompt = parsed_workflow.get('prompt', '').replace("\n", " ")
            elif key == "pprompt" and 'prompt' in metadata_dict:
                prompt = metadata_dict.get('prompt', '').replace("\n", " ")
                if len(parts) >= 2:
                    length = int(parts[1])
                    prompt = prompt[:length]
                filename = filename.replace(segment, prompt.strip())
            elif key == "nprompt" and 'negative_prompt' in parsed_workflow:
                prompt = parsed_workflow.get('negative_prompt', '').replace("\n", " ")
            elif key == "nprompt" and 'negative_prompt' in metadata_dict:
                prompt = metadata_dict.get('negative_prompt', '').replace("\n", " ")
                if len(parts) >= 2:
                    length = int(parts[1])
                    prompt = prompt[:length]
                filename = filename.replace(segment, prompt.strip())
            elif key == "model" and 'checkpoint' in parsed_workflow:
                model = parsed_workflow.get('checkpoint', '')
            elif key == "model" and 'checkpoint' in metadata_dict:
                model = metadata_dict.get('checkpoint', '')
                model = os.path.splitext(os.path.basename(model))[0]
                if len(parts) >= 2:
                    length = int(parts[1])
@@ -219,12 +284,13 @@ class SaveImage:
                from datetime import datetime
                now = datetime.now()
                date_table = {
                    "yyyy": str(now.year),
                    "MM": str(now.month).zfill(2),
                    "dd": str(now.day).zfill(2),
                    "hh": str(now.hour).zfill(2),
                    "mm": str(now.minute).zfill(2),
                    "ss": str(now.second).zfill(2),
                    "yyyy": f"{now.year:04d}",
                    "yy": f"{now.year % 100:02d}",
                    "MM": f"{now.month:02d}",
                    "dd": f"{now.day:02d}",
                    "hh": f"{now.hour:02d}",
                    "mm": f"{now.minute:02d}",
                    "ss": f"{now.second:02d}",
                }
                if len(parts) >= 2:
                    date_format = parts[1]
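A quick illustration of the %-pattern substitution performed above; expansions and values are invented, and the optional ":n" suffix truncates to n characters:

# Illustrative expansions only.
# "ComfyUI_%seed%_%date:yyyyMMdd%"  ->  "ComfyUI_123456_20250131"
# "%model:10%_%width%x%height%"     ->  "myCheckpoi_832x1216"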
@@ -239,58 +305,58 @@ class SaveImage:

        return filename

    def save_images(self, images, filename_prefix, file_format, prompt=None, extra_pnginfo=None,
    def save_images(self, images, filename_prefix, file_format, id, prompt=None, extra_pnginfo=None,
                    lossless_webp=True, quality=100, embed_workflow=False, add_counter_to_filename=True):
        """Save images with metadata"""
        results = []

        # Parse the workflow using the WorkflowParser
        parser = WorkflowParser()
        if prompt:
            parsed_workflow = parser.parse_workflow(prompt)
        else:
            parsed_workflow = {}

        # Get metadata using the metadata collector
        raw_metadata = get_metadata()
        metadata_dict = MetadataProcessor.to_dict(raw_metadata, id)

        # Get or create metadata asynchronously
        metadata = asyncio.run(self.format_metadata(parsed_workflow))
        metadata = self.format_metadata(metadata_dict)

        # Process filename_prefix with pattern substitution
        filename_prefix = self.format_filename(filename_prefix, parsed_workflow)
        filename_prefix = self.format_filename(filename_prefix, metadata_dict)

        # Process each image
        # Get initial save path info once for the batch
        full_output_folder, filename, counter, subfolder, processed_prefix = folder_paths.get_save_image_path(
            filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]
        )

        # Create directory if it doesn't exist
        if not os.path.exists(full_output_folder):
            os.makedirs(full_output_folder, exist_ok=True)

        # Process each image with incrementing counter
        for i, image in enumerate(images):
            # Convert the tensor image to numpy array
            img = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(img, 0, 255).astype(np.uint8))

            # Create directory if filename_prefix contains path separators
            output_path = os.path.join(self.output_dir, filename_prefix)
            if not os.path.exists(os.path.dirname(output_path)):
                os.makedirs(os.path.dirname(output_path), exist_ok=True)

            # Use folder_paths.get_save_image_path for better counter handling
            full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(
                filename_prefix, self.output_dir, img.width, img.height
            )

            # Generate filename with counter if needed
            base_filename = filename
            if add_counter_to_filename:
                filename += f"_{counter:05}"
                # Use counter + i to ensure unique filenames for all images in batch
                current_counter = counter + i
                base_filename += f"_{current_counter:05}_"

            # Set file extension and prepare saving parameters
            if file_format == "png":
                file = filename + ".png"
                file = base_filename + ".png"
                file_extension = ".png"
                save_kwargs = {"optimize": True, "compress_level": self.compress_level}
                # Remove "optimize": True to match built-in node behavior
                save_kwargs = {"compress_level": self.compress_level}
                pnginfo = PngImagePlugin.PngInfo()
            elif file_format == "jpeg":
                file = filename + ".jpg"
                file = base_filename + ".jpg"
                file_extension = ".jpg"
                save_kwargs = {"quality": quality, "optimize": True}
            elif file_format == "webp":
                file = filename + ".webp"
                file = base_filename + ".webp"
                file_extension = ".webp"
                save_kwargs = {"quality": quality, "lossless": lossless_webp}
                # Add optimization param to control performance
                save_kwargs = {"quality": quality, "lossless": lossless_webp, "method": 0}

            # Full save path
            file_path = os.path.join(full_output_folder, file)
@@ -316,14 +382,23 @@ class SaveImage:
                        print(f"Error adding EXIF data: {e}")
                img.save(file_path, format="JPEG", **save_kwargs)
            elif file_format == "webp":
                # For WebP, also use piexif for metadata
                if metadata:
                    try:
                        exif_dict = {'Exif': {piexif.ExifIFD.UserComment: b'UNICODE\0' + metadata.encode('utf-16be')}}
                        exif_bytes = piexif.dump(exif_dict)
                        save_kwargs["exif"] = exif_bytes
                    except Exception as e:
                        print(f"Error adding EXIF data: {e}")
                try:
                    # For WebP, use piexif for metadata
                    exif_dict = {}

                    if metadata:
                        exif_dict['Exif'] = {piexif.ExifIFD.UserComment: b'UNICODE\0' + metadata.encode('utf-16be')}

                    # Add workflow if needed
                    if embed_workflow and extra_pnginfo is not None:
                        workflow_json = json.dumps(extra_pnginfo["workflow"])
                        exif_dict['0th'] = {piexif.ImageIFD.ImageDescription: "Workflow:" + workflow_json}

                    exif_bytes = piexif.dump(exif_dict)
                    save_kwargs["exif"] = exif_bytes
                except Exception as e:
                    print(f"Error adding EXIF data: {e}")

                img.save(file_path, format="WEBP", **save_kwargs)

            results.append({
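For verification, the embedded UserComment can be read back with piexif. A minimal sketch; the file path is invented and a piexif version with WebP support is assumed:

# Sketch: decode the UNICODE-prefixed UserComment written above.
import piexif

exif = piexif.load("output/ComfyUI_00001_.webp")  # hypothetical path
raw = exif.get("Exif", {}).get(piexif.ExifIFD.UserComment, b"")
if raw.startswith(b"UNICODE\0"):
    print(raw[len(b"UNICODE\0"):].decode("utf-16be"))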
@@ -337,20 +412,24 @@ class SaveImage:

        return results

    def process_image(self, images, filename_prefix="ComfyUI", file_format="png", prompt=None, extra_pnginfo=None,
    def process_image(self, images, id, filename_prefix="ComfyUI", file_format="png", prompt=None, extra_pnginfo=None,
                      lossless_webp=True, quality=100, embed_workflow=False, add_counter_to_filename=True):
        """Process and save image with metadata"""
        # Make sure the output directory exists
        os.makedirs(self.output_dir, exist_ok=True)

        # Convert single image to list for consistent processing
        images = [images[0]] if len(images.shape) == 3 else [img for img in images]
        # Ensure images is always a list of images
        if len(images.shape) == 3: # Single image (height, width, channels)
            images = [images]
        else: # Multiple images (batch, height, width, channels)
            images = [img for img in images]

        # Save all images
        results = self.save_images(
            images,
            filename_prefix,
            file_format,
            id,
            prompt,
            extra_pnginfo,
            lossless_webp,
@@ -16,11 +16,18 @@ class TriggerWordToggle:
    def INPUT_TYPES(cls):
        return {
            "required": {
                "group_mode": ("BOOLEAN", {"default": True}),
                "group_mode": ("BOOLEAN", {
                    "default": True,
                    "tooltip": "When enabled, treats each group of trigger words as a single toggleable unit."
                }),
                "default_active": ("BOOLEAN", {
                    "default": True,
                    "tooltip": "Sets the default initial state (active or inactive) when trigger words are added."
                }),
            },
            "optional": FlexibleOptionalInputType(any_type),
            "hidden": {
                "id": "UNIQUE_ID", # automatically replaced by ComfyUI with a unique ID
                "id": "UNIQUE_ID",
            },
        }
@@ -41,17 +48,11 @@ class TriggerWordToggle:
        else:
            return data

    def process_trigger_words(self, id, group_mode, **kwargs):
    def process_trigger_words(self, id, group_mode, default_active, **kwargs):
        # Handle both old and new formats for trigger_words
        trigger_words_data = self._get_toggle_data(kwargs, 'trigger_words')
        trigger_words_data = self._get_toggle_data(kwargs, 'orinalMessage')
        trigger_words = trigger_words_data if isinstance(trigger_words_data, str) else ""

        # Send trigger words to frontend
        PromptServer.instance.send_sync("trigger_word_update", {
            "id": id,
            "message": trigger_words
        })

        filtered_triggers = trigger_words

        # Get toggle data with support for both formats
@@ -30,4 +30,101 @@ class FlexibleOptionalInputType(dict):
        return True


any_type = AnyType("*")
any_type = AnyType("*")

# Common methods extracted from lora_loader.py and lora_stacker.py
import os
import logging
import copy
import folder_paths

logger = logging.getLogger(__name__)

def extract_lora_name(lora_path):
    """Extract the lora name from a lora path (e.g., 'IL\\aorunIllstrious.safetensors' -> 'aorunIllstrious')"""
    # Get the basename without extension
    basename = os.path.basename(lora_path)
    return os.path.splitext(basename)[0]

def get_loras_list(kwargs):
    """Helper to extract loras list from either old or new kwargs format"""
    if 'loras' not in kwargs:
        return []

    loras_data = kwargs['loras']
    # Handle new format: {'loras': {'__value__': [...]}}
    if isinstance(loras_data, dict) and '__value__' in loras_data:
        return loras_data['__value__']
    # Handle old format: {'loras': [...]}
    elif isinstance(loras_data, list):
        return loras_data
    # Unexpected format
    else:
        logger.warning(f"Unexpected loras format: {type(loras_data)}")
        return []

def load_state_dict_in_safetensors(path, device="cpu", filter_prefix=""):
    """Simplified version of load_state_dict_in_safetensors that just loads from a local path"""
    import safetensors.torch

    state_dict = {}
    with safetensors.torch.safe_open(path, framework="pt", device=device) as f:
        for k in f.keys():
            if filter_prefix and not k.startswith(filter_prefix):
                continue
            state_dict[k.removeprefix(filter_prefix)] = f.get_tensor(k)
    return state_dict

def to_diffusers(input_lora):
    """Simplified version of to_diffusers for Flux LoRA conversion"""
    import torch
    from diffusers.utils.state_dict_utils import convert_unet_state_dict_to_peft
    from diffusers.loaders import FluxLoraLoaderMixin

    if isinstance(input_lora, str):
        tensors = load_state_dict_in_safetensors(input_lora, device="cpu")
    else:
        tensors = {k: v for k, v in input_lora.items()}

    # Convert FP8 tensors to BF16
    for k, v in tensors.items():
        if v.dtype not in [torch.float64, torch.float32, torch.bfloat16, torch.float16]:
            tensors[k] = v.to(torch.bfloat16)

    new_tensors = FluxLoraLoaderMixin.lora_state_dict(tensors)
    new_tensors = convert_unet_state_dict_to_peft(new_tensors)

    return new_tensors

def nunchaku_load_lora(model, lora_name, lora_strength):
    """Load a Flux LoRA for Nunchaku model"""
    model_wrapper = model.model.diffusion_model
    transformer = model_wrapper.model

    # Save the transformer temporarily
    model_wrapper.model = None
    ret_model = copy.deepcopy(model) # copy everything except the model
    ret_model_wrapper = ret_model.model.diffusion_model

    # Restore the model and set it for the copy
    model_wrapper.model = transformer
    ret_model_wrapper.model = transformer

    # Get full path to the LoRA file
    lora_path = folder_paths.get_full_path("loras", lora_name)
    ret_model_wrapper.loras.append((lora_path, lora_strength))

    # Convert the LoRA to diffusers format
    sd = to_diffusers(lora_path)

    # Handle embedding adjustment if needed
    if "transformer.x_embedder.lora_A.weight" in sd:
        new_in_channels = sd["transformer.x_embedder.lora_A.weight"].shape[1]
        assert new_in_channels % 4 == 0
        new_in_channels = new_in_channels // 4

        old_in_channels = ret_model.model.model_config.unet_config["in_channels"]
        if old_in_channels < new_in_channels:
            ret_model.model.model_config.unet_config["in_channels"] = new_in_channels

    return ret_model
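A small sanity check of the two widget payload shapes get_loras_list accepts; names and strengths are invented:

# Both shapes resolve to the same list of lora dicts.
new_style = {"loras": {"__value__": [{"name": "styleA", "strength": 0.8, "active": True}]}}
old_style = {"loras": [{"name": "styleA", "strength": 0.8, "active": True}]}
assert get_loras_list(new_style) == get_loras_list(old_style)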
92
py/nodes/wanvideo_lora_select.py
Normal file
@@ -0,0 +1,92 @@
from comfy.comfy_types import IO # type: ignore
import folder_paths # type: ignore
from ..utils.utils import get_lora_info
from .utils import FlexibleOptionalInputType, any_type, get_loras_list
import logging

logger = logging.getLogger(__name__)

class WanVideoLoraSelect:
    NAME = "WanVideo Lora Select (LoraManager)"
    CATEGORY = "Lora Manager/stackers"

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "low_mem_load": ("BOOLEAN", {"default": False, "tooltip": "Load the LORA model with less VRAM usage, slower loading"}),
                "text": (IO.STRING, {
                    "multiline": True,
                    "dynamicPrompts": True,
                    "tooltip": "Format: <lora:lora_name:strength> separated by spaces or punctuation",
                    "placeholder": "LoRA syntax input: <lora:name:strength>"
                }),
            },
            "optional": FlexibleOptionalInputType(any_type),
        }

    RETURN_TYPES = ("WANVIDLORA", IO.STRING, IO.STRING)
    RETURN_NAMES = ("lora", "trigger_words", "active_loras")
    FUNCTION = "process_loras"

    def process_loras(self, text, low_mem_load=False, **kwargs):
        loras_list = []
        all_trigger_words = []
        active_loras = []

        # Process existing prev_lora if available
        prev_lora = kwargs.get('prev_lora', None)
        if prev_lora is not None:
            loras_list.extend(prev_lora)

        # Get blocks if available
        blocks = kwargs.get('blocks', {})
        selected_blocks = blocks.get("selected_blocks", {})
        layer_filter = blocks.get("layer_filter", "")

        # Process loras from kwargs with support for both old and new formats
        loras_from_widget = get_loras_list(kwargs)
        for lora in loras_from_widget:
            if not lora.get('active', False):
                continue

            lora_name = lora['name']
            model_strength = float(lora['strength'])
            clip_strength = float(lora.get('clipStrength', model_strength))

            # Get lora path and trigger words
            lora_path, trigger_words = get_lora_info(lora_name)

            # Create lora item for WanVideo format
            lora_item = {
                "path": folder_paths.get_full_path("loras", lora_path),
                "strength": model_strength,
                "name": lora_path.split(".")[0],
                "blocks": selected_blocks,
                "layer_filter": layer_filter,
                "low_mem_load": low_mem_load,
            }

            # Add to list and collect active loras
            loras_list.append(lora_item)
            active_loras.append((lora_name, model_strength, clip_strength))

            # Add trigger words to collection
            all_trigger_words.extend(trigger_words)

        # Format trigger_words for output
        trigger_words_text = ",, ".join(all_trigger_words) if all_trigger_words else ""

        # Format active_loras for output
        formatted_loras = []
        for name, model_strength, clip_strength in active_loras:
            if abs(model_strength - clip_strength) > 0.001:
                # Different model and clip strengths
                formatted_loras.append(f"<lora:{name}:{str(model_strength).strip()}:{str(clip_strength).strip()}>")
            else:
                # Same strength for both
                formatted_loras.append(f"<lora:{name}:{str(model_strength).strip()}>")

        active_loras_text = " ".join(formatted_loras)

        return (loras_list, trigger_words_text, active_loras_text)
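Each entry handed to WanVideo is a plain dict; an example of the shape assembled by process_loras above, with invented values:

# Illustrative lora_item; the resolved path and strength are made up.
lora_item = {
    "path": "/models/loras/styleA.safetensors",  # hypothetical resolved path
    "strength": 0.8,
    "name": "styleA",
    "blocks": {},
    "layer_filter": "",
    "low_mem_load": False,
}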
24
py/recipes/__init__.py
Normal file
@@ -0,0 +1,24 @@
"""Recipe metadata parser package for ComfyUI-Lora-Manager."""
|
||||
|
||||
from .base import RecipeMetadataParser
|
||||
from .factory import RecipeParserFactory
|
||||
from .constants import GEN_PARAM_KEYS, VALID_LORA_TYPES
|
||||
from .parsers import (
|
||||
RecipeFormatParser,
|
||||
ComfyMetadataParser,
|
||||
MetaFormatParser,
|
||||
AutomaticMetadataParser,
|
||||
CivitaiApiMetadataParser
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
'RecipeMetadataParser',
|
||||
'RecipeParserFactory',
|
||||
'GEN_PARAM_KEYS',
|
||||
'VALID_LORA_TYPES',
|
||||
'RecipeFormatParser',
|
||||
'ComfyMetadataParser',
|
||||
'MetaFormatParser',
|
||||
'AutomaticMetadataParser',
|
||||
'CivitaiApiMetadataParser'
|
||||
]
|
||||
184
py/recipes/base.py
Normal file
@@ -0,0 +1,184 @@
"""Base classes for recipe parsers."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
from typing import Dict, List, Any, Optional, Tuple
|
||||
from abc import ABC, abstractmethod
|
||||
from ..config import config
|
||||
from ..utils.constants import VALID_LORA_TYPES
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class RecipeMetadataParser(ABC):
|
||||
"""Interface for parsing recipe metadata from image user comments"""
|
||||
|
||||
METADATA_MARKER = None
|
||||
|
||||
@abstractmethod
|
||||
def is_metadata_matching(self, user_comment: str) -> bool:
|
||||
"""Check if the user comment matches the metadata format"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
|
||||
"""
|
||||
Parse metadata from user comment and return structured recipe data
|
||||
|
||||
Args:
|
||||
user_comment: The EXIF UserComment string from the image
|
||||
recipe_scanner: Optional recipe scanner instance for local LoRA lookup
|
||||
civitai_client: Optional Civitai client for fetching model information
|
||||
|
||||
Returns:
|
||||
Dict containing parsed recipe data with standardized format
|
||||
"""
|
||||
pass
|
||||
|
||||
async def populate_lora_from_civitai(self, lora_entry: Dict[str, Any], civitai_info_tuple: Tuple[Dict[str, Any], Optional[str]],
|
||||
recipe_scanner=None, base_model_counts=None, hash_value=None) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Populate a lora entry with information from Civitai API response
|
||||
|
||||
Args:
|
||||
lora_entry: The lora entry to populate
|
||||
civitai_info_tuple: The response tuple from Civitai API (data, error_msg)
|
||||
recipe_scanner: Optional recipe scanner for local file lookup
|
||||
base_model_counts: Optional dict to track base model counts
|
||||
hash_value: Optional hash value to use if not available in civitai_info
|
||||
|
||||
Returns:
|
||||
The populated lora_entry dict if type is valid, None otherwise
|
||||
"""
|
||||
try:
|
||||
# Unpack the tuple to get the actual data
|
||||
civitai_info, error_msg = civitai_info_tuple if isinstance(civitai_info_tuple, tuple) else (civitai_info_tuple, None)
|
||||
|
||||
if not civitai_info or civitai_info.get("error") == "Model not found":
|
||||
# Model not found or deleted
|
||||
lora_entry['isDeleted'] = True
|
||||
lora_entry['thumbnailUrl'] = '/loras_static/images/no-preview.png'
|
||||
return lora_entry
|
||||
|
||||
# Get model type and validate
|
||||
model_type = civitai_info.get('model', {}).get('type', '').lower()
|
||||
lora_entry['type'] = model_type
|
||||
if model_type not in VALID_LORA_TYPES:
|
||||
logger.debug(f"Skipping non-LoRA model type: {model_type}")
|
||||
return None
|
||||
|
||||
# Check if this is an early access lora
|
||||
if civitai_info.get('earlyAccessEndsAt'):
|
||||
# Convert earlyAccessEndsAt to a human-readable date
|
||||
early_access_date = civitai_info.get('earlyAccessEndsAt', '')
|
||||
lora_entry['isEarlyAccess'] = True
|
||||
lora_entry['earlyAccessEndsAt'] = early_access_date
|
||||
|
||||
# Update model name if available
|
||||
if 'model' in civitai_info and 'name' in civitai_info['model']:
|
||||
lora_entry['name'] = civitai_info['model']['name']
|
||||
|
||||
lora_entry['id'] = civitai_info.get('id')
|
||||
lora_entry['modelId'] = civitai_info.get('modelId')
|
||||
|
||||
# Update version if available
|
||||
if 'name' in civitai_info:
|
||||
lora_entry['version'] = civitai_info.get('name', '')
|
||||
|
||||
# Get thumbnail URL from first image
|
||||
if 'images' in civitai_info and civitai_info['images']:
|
||||
lora_entry['thumbnailUrl'] = civitai_info['images'][0].get('url', '')
|
||||
|
||||
# Get base model
|
||||
current_base_model = civitai_info.get('baseModel', '')
|
||||
lora_entry['baseModel'] = current_base_model
|
||||
|
||||
# Update base model counts if tracking them
|
||||
if base_model_counts is not None and current_base_model:
|
||||
base_model_counts[current_base_model] = base_model_counts.get(current_base_model, 0) + 1
|
||||
|
||||
# Get download URL
|
||||
lora_entry['downloadUrl'] = civitai_info.get('downloadUrl', '')
|
||||
|
||||
# Process file information if available
|
||||
if 'files' in civitai_info:
|
||||
# Find the primary model file (type="Model" and primary=true) in the files list
|
||||
model_file = next((file for file in civitai_info.get('files', [])
|
||||
if file.get('type') == 'Model' and file.get('primary') == True), None)
|
||||
|
||||
if model_file:
|
||||
# Get size
|
||||
lora_entry['size'] = model_file.get('sizeKB', 0) * 1024
|
||||
|
||||
# Get SHA256 hash
|
||||
sha256 = model_file.get('hashes', {}).get('SHA256', hash_value)
|
||||
if sha256:
|
||||
lora_entry['hash'] = sha256.lower()
|
||||
|
||||
# Check if exists locally
|
||||
if recipe_scanner and lora_entry['hash']:
|
||||
lora_scanner = recipe_scanner._lora_scanner
|
||||
exists_locally = lora_scanner.has_hash(lora_entry['hash'])
|
||||
if exists_locally:
|
||||
try:
|
||||
local_path = lora_scanner.get_path_by_hash(lora_entry['hash'])
|
||||
lora_entry['existsLocally'] = True
|
||||
lora_entry['localPath'] = local_path
|
||||
lora_entry['file_name'] = os.path.splitext(os.path.basename(local_path))[0]
|
||||
|
||||
# Get thumbnail from local preview if available
|
||||
lora_cache = await lora_scanner.get_cached_data()
|
||||
lora_item = next((item for item in lora_cache.raw_data
|
||||
if item['sha256'].lower() == lora_entry['hash'].lower()), None)
|
||||
if lora_item and 'preview_url' in lora_item:
|
||||
lora_entry['thumbnailUrl'] = config.get_preview_static_url(lora_item['preview_url'])
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting local lora path: {e}")
|
||||
else:
|
||||
# For missing LoRAs, get file_name from model_file.name
|
||||
file_name = model_file.get('name', '')
|
||||
lora_entry['file_name'] = os.path.splitext(file_name)[0] if file_name else ''
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error populating lora from Civitai info: {e}")
|
||||
|
||||
return lora_entry
|
||||
|
||||
async def populate_checkpoint_from_civitai(self, checkpoint: Dict[str, Any], civitai_info: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""
|
||||
Populate checkpoint information from Civitai API response
|
||||
|
||||
Args:
|
||||
checkpoint: The checkpoint entry to populate
|
||||
civitai_info: The response from Civitai API
|
||||
|
||||
Returns:
|
||||
The populated checkpoint dict
|
||||
"""
|
||||
try:
|
||||
if civitai_info and civitai_info.get("error") != "Model not found":
|
||||
# Update model name if available
|
||||
if 'model' in civitai_info and 'name' in civitai_info['model']:
|
||||
checkpoint['name'] = civitai_info['model']['name']
|
||||
|
||||
# Update version if available
|
||||
if 'name' in civitai_info:
|
||||
checkpoint['version'] = civitai_info.get('name', '')
|
||||
|
||||
# Get thumbnail URL from first image
|
||||
if 'images' in civitai_info and civitai_info['images']:
|
||||
checkpoint['thumbnailUrl'] = civitai_info['images'][0].get('url', '')
|
||||
|
||||
# Get base model
|
||||
checkpoint['baseModel'] = civitai_info.get('baseModel', '')
|
||||
|
||||
# Get download URL
|
||||
checkpoint['downloadUrl'] = civitai_info.get('downloadUrl', '')
|
||||
else:
|
||||
# Model not found or deleted
|
||||
checkpoint['isDeleted'] = True
|
||||
except Exception as e:
|
||||
logger.error(f"Error populating checkpoint from Civitai info: {e}")
|
||||
|
||||
return checkpoint
|
||||
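For orientation, a trimmed sketch of the Civitai model-version payload these helpers consume; field names follow the public Civitai API as used above, all values invented:

# Minimal illustrative payload for populate_lora_from_civitai.
civitai_info = {
    "id": 1375651,
    "modelId": 1221007,
    "name": "v1.0",
    "model": {"name": "Example Style", "type": "LORA"},
    "baseModel": "SDXL 1.0",
    "images": [{"url": "https://example.invalid/preview.jpg"}],
    "downloadUrl": "https://example.invalid/download",
    "files": [{"type": "Model", "primary": True, "sizeKB": 223456,
               "hashes": {"SHA256": "ABC123..."}, "name": "example_style.safetensors"}],
}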
16
py/recipes/constants.py
Normal file
@@ -0,0 +1,16 @@
"""Constants used across recipe parsers."""
|
||||
|
||||
# Import VALID_LORA_TYPES from utils.constants
|
||||
from ..utils.constants import VALID_LORA_TYPES
|
||||
|
||||
# Constants for generation parameters
|
||||
GEN_PARAM_KEYS = [
|
||||
'prompt',
|
||||
'negative_prompt',
|
||||
'steps',
|
||||
'sampler',
|
||||
'cfg_scale',
|
||||
'seed',
|
||||
'size',
|
||||
'clip_skip',
|
||||
]
|
||||
64
py/recipes/factory.py
Normal file
@@ -0,0 +1,64 @@
"""Factory for creating recipe metadata parsers."""
|
||||
|
||||
import logging
|
||||
from .parsers import (
|
||||
RecipeFormatParser,
|
||||
ComfyMetadataParser,
|
||||
MetaFormatParser,
|
||||
AutomaticMetadataParser,
|
||||
CivitaiApiMetadataParser
|
||||
)
|
||||
from .base import RecipeMetadataParser
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class RecipeParserFactory:
|
||||
"""Factory for creating recipe metadata parsers"""
|
||||
|
||||
@staticmethod
|
||||
def create_parser(metadata) -> RecipeMetadataParser:
|
||||
"""
|
||||
Create appropriate parser based on the metadata content
|
||||
|
||||
Args:
|
||||
metadata: The metadata from the image (dict or str)
|
||||
|
||||
Returns:
|
||||
Appropriate RecipeMetadataParser implementation
|
||||
"""
|
||||
# First, try CivitaiApiMetadataParser for dict input
|
||||
if isinstance(metadata, dict):
|
||||
try:
|
||||
if CivitaiApiMetadataParser().is_metadata_matching(metadata):
|
||||
return CivitaiApiMetadataParser()
|
||||
except Exception as e:
|
||||
logger.debug(f"CivitaiApiMetadataParser check failed: {e}")
|
||||
pass
|
||||
|
||||
# Convert dict to string for other parsers that expect string input
|
||||
try:
|
||||
import json
|
||||
metadata_str = json.dumps(metadata)
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to convert dict to JSON string: {e}")
|
||||
return None
|
||||
else:
|
||||
metadata_str = metadata
|
||||
|
||||
# Try ComfyMetadataParser which requires valid JSON
|
||||
try:
|
||||
if ComfyMetadataParser().is_metadata_matching(metadata_str):
|
||||
return ComfyMetadataParser()
|
||||
except Exception:
|
||||
# If JSON parsing fails, move on to other parsers
|
||||
pass
|
||||
|
||||
# Check other parsers that expect string input
|
||||
if RecipeFormatParser().is_metadata_matching(metadata_str):
|
||||
return RecipeFormatParser()
|
||||
elif AutomaticMetadataParser().is_metadata_matching(metadata_str):
|
||||
return AutomaticMetadataParser()
|
||||
elif MetaFormatParser().is_metadata_matching(metadata_str):
|
||||
return MetaFormatParser()
|
||||
else:
|
||||
return None
|
||||
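A hedged usage sketch of the factory; the sample string is invented, and parse_metadata must run inside an async context:

# Illustrative only: pick a parser, then parse inside an event loop.
sample = "a castle\nNegative prompt: blurry\nSteps: 30, Sampler: Euler a, Seed: 7"
parser = RecipeParserFactory.create_parser(sample)
if parser is not None:
    # recipe = await parser.parse_metadata(sample)  # in async code
    print(type(parser).__name__)  # expected: AutomaticMetadataParser for a plain A1111 string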
15
py/recipes/parsers/__init__.py
Normal file
@@ -0,0 +1,15 @@
"""Recipe parsers package."""
|
||||
|
||||
from .recipe_format import RecipeFormatParser
|
||||
from .comfy import ComfyMetadataParser
|
||||
from .meta_format import MetaFormatParser
|
||||
from .automatic import AutomaticMetadataParser
|
||||
from .civitai_image import CivitaiApiMetadataParser
|
||||
|
||||
__all__ = [
|
||||
'RecipeFormatParser',
|
||||
'ComfyMetadataParser',
|
||||
'MetaFormatParser',
|
||||
'AutomaticMetadataParser',
|
||||
'CivitaiApiMetadataParser',
|
||||
]
|
||||
321
py/recipes/parsers/automatic.py
Normal file
@@ -0,0 +1,321 @@
"""Parser for Automatic1111 metadata format."""
|
||||
|
||||
import re
|
||||
import json
|
||||
import logging
|
||||
from typing import Dict, Any
|
||||
from ..base import RecipeMetadataParser
|
||||
from ..constants import GEN_PARAM_KEYS
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class AutomaticMetadataParser(RecipeMetadataParser):
|
||||
"""Parser for Automatic1111 metadata format"""
|
||||
|
||||
METADATA_MARKER = r"Steps: \d+"
|
||||
|
||||
# Regular expressions for extracting specific metadata
|
||||
HASHES_REGEX = r', Hashes:\s*({[^}]+})'
|
||||
LORA_HASHES_REGEX = r', Lora hashes:\s*"([^"]+)"'
|
||||
CIVITAI_RESOURCES_REGEX = r', Civitai resources:\s*(\[\{.*?\}\])'
|
||||
CIVITAI_METADATA_REGEX = r', Civitai metadata:\s*(\{.*?\})'
|
||||
EXTRANETS_REGEX = r'<(lora|hypernet):([^:]+):(-?[0-9.]+)>'
|
||||
MODEL_HASH_PATTERN = r'Model hash: ([a-zA-Z0-9]+)'
|
||||
VAE_HASH_PATTERN = r'VAE hash: ([a-zA-Z0-9]+)'
|
||||
|
||||
def is_metadata_matching(self, user_comment: str) -> bool:
|
||||
"""Check if the user comment matches the Automatic1111 format"""
|
||||
return re.search(self.METADATA_MARKER, user_comment) is not None
|
||||
|
||||
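A tiny check of the marker against an invented A1111-style comment:

# The "Steps: <n>" marker is what identifies Automatic1111 output.
import re
sample = "a castle\nNegative prompt: blurry\nSteps: 20, Sampler: Euler a, Seed: 42"
assert re.search(r"Steps: \d+", sample) is not None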
    async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
        """Parse metadata from Automatic1111 format"""
        try:
            # Split on Negative prompt if it exists
            if "Negative prompt:" in user_comment:
                parts = user_comment.split('Negative prompt:', 1)
                prompt = parts[0].strip()
                negative_and_params = parts[1] if len(parts) > 1 else ""
            else:
                # No negative prompt section
                param_start = re.search(self.METADATA_MARKER, user_comment)
                if param_start:
                    prompt = user_comment[:param_start.start()].strip()
                    negative_and_params = user_comment[param_start.start():]
                else:
                    prompt = user_comment.strip()
                    negative_and_params = ""

            # Initialize metadata
            metadata = {
                "prompt": prompt,
                "loras": []
            }

            # Extract negative prompt and parameters
            if negative_and_params:
                # If we split on "Negative prompt:", check for params section
                if "Negative prompt:" in user_comment:
                    param_start = re.search(r'Steps: ', negative_and_params)
                    if param_start:
                        neg_prompt = negative_and_params[:param_start.start()].strip()
                        metadata["negative_prompt"] = neg_prompt
                        params_section = negative_and_params[param_start.start():]
                    else:
                        metadata["negative_prompt"] = negative_and_params.strip()
                        params_section = ""
                else:
                    # No negative prompt, entire section is params
                    params_section = negative_and_params

                # Extract generation parameters
                if params_section:
                    # Extract Civitai resources
                    civitai_resources_match = re.search(self.CIVITAI_RESOURCES_REGEX, params_section)
                    if civitai_resources_match:
                        try:
                            civitai_resources = json.loads(civitai_resources_match.group(1))
                            metadata["civitai_resources"] = civitai_resources
                            params_section = params_section.replace(civitai_resources_match.group(0), '')
                        except json.JSONDecodeError:
                            logger.error("Error parsing Civitai resources JSON")

                    # Extract Hashes
                    hashes_match = re.search(self.HASHES_REGEX, params_section)
                    if hashes_match:
                        try:
                            hashes = json.loads(hashes_match.group(1))
                            # Process hash keys
                            processed_hashes = {}
                            for key, value in hashes.items():
                                # Convert Model: or LORA: prefix to lowercase if present
                                if ':' in key:
                                    prefix, name = key.split(':', 1)
                                    prefix = prefix.lower()
                                else:
                                    prefix = ''
                                    name = key

                                # Clean up the name part
                                if '/' in name:
                                    name = name.split('/')[-1] # Get last part after /
                                if '.safetensors' in name:
                                    name = name.split('.safetensors')[0] # Remove .safetensors

                                # Reconstruct the key
                                new_key = f"{prefix}:{name}" if prefix else name
                                processed_hashes[new_key] = value

                            metadata["hashes"] = processed_hashes
                            # Remove hashes from params section to not interfere with other parsing
                            params_section = params_section.replace(hashes_match.group(0), '')
                        except json.JSONDecodeError:
                            logger.error("Error parsing hashes JSON")

                    # Extract Lora hashes in alternative format
                    lora_hashes_match = re.search(self.LORA_HASHES_REGEX, params_section)
                    if not hashes_match and lora_hashes_match:
                        try:
                            lora_hashes_str = lora_hashes_match.group(1)
                            lora_hash_entries = lora_hashes_str.split(', ')

                            # Initialize hashes dict if it doesn't exist
                            if "hashes" not in metadata:
                                metadata["hashes"] = {}

                            # Parse each lora hash entry (format: "name: hash")
                            for entry in lora_hash_entries:
                                if ': ' in entry:
                                    lora_name, lora_hash = entry.split(': ', 1)
                                    # Add as lora type in the same format as regular hashes
                                    metadata["hashes"][f"lora:{lora_name}"] = lora_hash.strip()

                            # Remove lora hashes from params section
                            params_section = params_section.replace(lora_hashes_match.group(0), '')
                        except Exception as e:
                            logger.error(f"Error parsing Lora hashes: {e}")

                    # Extract basic parameters
                    param_pattern = r'([A-Za-z\s]+): ([^,]+)'
                    params = re.findall(param_pattern, params_section)
                    gen_params = {}

                    for key, value in params:
                        clean_key = key.strip().lower().replace(' ', '_')

                        # Skip if not in recognized gen param keys
                        if clean_key not in GEN_PARAM_KEYS:
                            continue

                        # Convert numeric values
                        if clean_key in ['steps', 'seed']:
                            try:
                                gen_params[clean_key] = int(value.strip())
                            except ValueError:
                                gen_params[clean_key] = value.strip()
                        elif clean_key in ['cfg_scale']:
                            try:
                                gen_params[clean_key] = float(value.strip())
                            except ValueError:
                                gen_params[clean_key] = value.strip()
                        else:
                            gen_params[clean_key] = value.strip()

                    # Extract size if available and add to gen_params if a recognized key
                    size_match = re.search(r'Size: (\d+)x(\d+)', params_section)
                    if size_match and 'size' in GEN_PARAM_KEYS:
                        width, height = size_match.groups()
                        gen_params['size'] = f"{width}x{height}"

                    # Add prompt and negative_prompt to gen_params if they're in GEN_PARAM_KEYS
                    if 'prompt' in GEN_PARAM_KEYS and 'prompt' in metadata:
                        gen_params['prompt'] = metadata['prompt']
                    if 'negative_prompt' in GEN_PARAM_KEYS and 'negative_prompt' in metadata:
                        gen_params['negative_prompt'] = metadata['negative_prompt']

                    metadata["gen_params"] = gen_params

            # Extract LoRA information
            loras = []
            base_model_counts = {}

            # First use Civitai resources if available (more reliable source)
            if metadata.get("civitai_resources"):
                for resource in metadata.get("civitai_resources", []):
                    # --- Added: Parse 'air' field if present ---
                    air = resource.get("air")
                    if air:
                        # Format: urn:air:sdxl:lora:civitai:1221007@1375651
                        # Or: urn:air:sdxl:checkpoint:civitai:623891@2019115
                        air_pattern = r"urn:air:[^:]+:(?P<type>[^:]+):civitai:(?P<modelId>\d+)@(?P<modelVersionId>\d+)"
                        air_match = re.match(air_pattern, air)
                        if air_match:
                            air_type = air_match.group("type")
                            air_modelId = int(air_match.group("modelId"))
                            air_modelVersionId = int(air_match.group("modelVersionId"))
                            # checkpoint/lycoris/lora/hypernet
                            resource["type"] = air_type
                            resource["modelId"] = air_modelId
                            resource["modelVersionId"] = air_modelVersionId
                    # --- End added ---

                    if resource.get("type") in ["lora", "lycoris", "hypernet"] and resource.get("modelVersionId"):
                        # Initialize lora entry
                        lora_entry = {
                            'id': resource.get("modelVersionId", 0),
                            'modelId': resource.get("modelId", 0),
                            'name': resource.get("modelName", "Unknown LoRA"),
                            'version': resource.get("modelVersionName", resource.get("versionName", "")),
                            'type': resource.get("type", "lora"),
                            'weight': round(float(resource.get("weight", 1.0)), 2),
                            'existsLocally': False,
                            'thumbnailUrl': '/loras_static/images/no-preview.png',
                            'baseModel': '',
                            'size': 0,
                            'downloadUrl': '',
|
||||
'isDeleted': False
|
||||
}
|
||||
|
||||
# Get additional info from Civitai
|
||||
if civitai_client:
|
||||
try:
|
||||
civitai_info = await civitai_client.get_model_version_info(resource.get("modelVersionId"))
|
||||
populated_entry = await self.populate_lora_from_civitai(
|
||||
lora_entry,
|
||||
civitai_info,
|
||||
recipe_scanner,
|
||||
base_model_counts
|
||||
)
|
||||
if populated_entry is None:
|
||||
continue # Skip invalid LoRA types
|
||||
lora_entry = populated_entry
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching Civitai info for LoRA {lora_entry['name']}: {e}")
|
||||
|
||||
loras.append(lora_entry)
|
||||
|
||||
            # If no LoRAs were found in the Civitai resources, fall back to metadata["hashes"]
            if not loras:
                # Extract lora weights from extranet tags in the prompt (for later use)
                lora_weights = {}
                lora_matches = re.findall(self.EXTRANETS_REGEX, prompt)
                for lora_type, lora_name, lora_weight in lora_matches:
                    key = f"{lora_type}:{lora_name}"
                    lora_weights[key] = round(float(lora_weight), 2)

                # Use hashes from metadata as the primary source
                if metadata.get("hashes"):
                    for hash_key, lora_hash in metadata.get("hashes", {}).items():
                        # Only process lora or hypernet types
                        if not hash_key.startswith(("lora:", "hypernet:")):
                            continue

                        lora_type, lora_name = hash_key.split(':', 1)

                        # Get the weight from extranet tags if available, else default to 1.0
                        weight = lora_weights.get(hash_key, 1.0)

                        # Initialize lora entry
                        lora_entry = {
                            'name': lora_name,
                            'type': lora_type,  # 'lora' or 'hypernet'
                            'weight': weight,
                            'hash': lora_hash,
                            'existsLocally': False,
                            'localPath': None,
                            'file_name': lora_name,
                            'thumbnailUrl': '/loras_static/images/no-preview.png',
                            'baseModel': '',
                            'size': 0,
                            'downloadUrl': '',
                            'isDeleted': False
                        }

                        # Try to get info from Civitai
                        if civitai_client:
                            try:
                                if lora_hash:
                                    # If we have a hash, use it for the lookup
                                    civitai_info = await civitai_client.get_model_by_hash(lora_hash)
                                else:
                                    civitai_info = None

                                populated_entry = await self.populate_lora_from_civitai(
                                    lora_entry,
                                    civitai_info,
                                    recipe_scanner,
                                    base_model_counts,
                                    lora_hash
                                )
                                if populated_entry is None:
                                    continue  # Skip invalid LoRA types
                                lora_entry = populated_entry
                            except Exception as e:
                                logger.error(f"Error fetching Civitai info for LoRA {lora_name}: {e}")

                        loras.append(lora_entry)

            # Try to get the base model from resources or make an educated guess
            base_model = None
            if base_model_counts:
                # Use the most common base model from the loras
                base_model = max(base_model_counts.items(), key=lambda x: x[1])[0]

            # Prepare the final result structure;
            # make sure gen_params only contains recognized keys
            filtered_gen_params = {}
            for key in GEN_PARAM_KEYS:
                if key in metadata.get("gen_params", {}):
                    filtered_gen_params[key] = metadata["gen_params"][key]

            result = {
                'base_model': base_model,
                'loras': loras,
                'gen_params': filtered_gen_params,
                'from_automatic_metadata': True
            }

            return result

        except Exception as e:
            logger.error(f"Error parsing Automatic1111 metadata: {e}", exc_info=True)
            return {"error": str(e), "loras": []}
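A quick way to sanity-check the Automatic1111 parser above is to drive it offline with a hand-written UserComment string. This is a minimal sketch, not part of the diff: the class name AutomaticMetadataParser, its import path, and all sample values are assumptions; with no civitai_client passed, each LoRA entry keeps defaults such as existsLocally=False.

import asyncio
# from py.recipes.parsers.automatic import AutomaticMetadataParser  # assumed module path

user_comment = (
    "a scenic mountain lake, masterpiece <lora:landscape_v2:0.8>\n"
    "Negative prompt: blurry, low quality\n"
    "Steps: 30, Sampler: DPM++ 2M, CFG scale: 7, Seed: 1234, Size: 1024x768, "
    'Lora hashes: "landscape_v2: 08f7133a58"'
)

async def main():
    parser = AutomaticMetadataParser()  # assumed class name from this PR
    recipe = await parser.parse_metadata(user_comment)  # no recipe_scanner/civitai_client
    print(recipe["gen_params"], [lora["name"] for lora in recipe["loras"]])

asyncio.run(main())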
342  py/recipes/parsers/civitai_image.py  Normal file
@@ -0,0 +1,342 @@
"""Parser for Civitai image metadata format."""

import json
import logging
from typing import Dict, Any, Union
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS

logger = logging.getLogger(__name__)


class CivitaiApiMetadataParser(RecipeMetadataParser):
    """Parser for Civitai image metadata format"""

    def is_metadata_matching(self, metadata) -> bool:
        """Check if the metadata matches the Civitai image metadata format

        Args:
            metadata: The metadata from the image (dict)

        Returns:
            bool: True if this parser can handle the metadata
        """
        if not metadata or not isinstance(metadata, dict):
            return False

        # Check for key markers specific to Civitai image metadata
        return any([
            "resources" in metadata,
            "civitaiResources" in metadata,
            "additionalResources" in metadata
        ])

    async def parse_metadata(self, metadata, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
        """Parse metadata from the Civitai image format

        Args:
            metadata: The metadata from the image (dict)
            recipe_scanner: Optional recipe scanner service
            civitai_client: Optional Civitai API client

        Returns:
            Dict containing parsed recipe data
        """
        try:
            # Initialize result structure
            result = {
                'base_model': None,
                'loras': [],
                'gen_params': {},
                'from_civitai_image': True
            }

            # Track already added LoRAs to prevent duplicates
            added_loras = {}  # key: model_version_id or hash, value: index in result["loras"]

            # Extract prompt and negative prompt
            if "prompt" in metadata:
                result["gen_params"]["prompt"] = metadata["prompt"]

            if "negativePrompt" in metadata:
                result["gen_params"]["negative_prompt"] = metadata["negativePrompt"]

            # Extract other generation parameters
            param_mapping = {
                "steps": "steps",
                "sampler": "sampler",
                "cfgScale": "cfg_scale",
                "seed": "seed",
                "Size": "size",
                "clipSkip": "clip_skip",
            }

            for civitai_key, our_key in param_mapping.items():
                if civitai_key in metadata and our_key in GEN_PARAM_KEYS:
                    result["gen_params"][our_key] = metadata[civitai_key]

            # Extract base model information - directly if available
            if "baseModel" in metadata:
                result["base_model"] = metadata["baseModel"]
            elif "Model hash" in metadata and civitai_client:
                model_hash = metadata["Model hash"]
                model_info = await civitai_client.get_model_by_hash(model_hash)
                if model_info:
                    result["base_model"] = model_info.get("baseModel", "")
            elif "Model" in metadata and isinstance(metadata.get("resources"), list):
                # Try to find the base model in resources
                for resource in metadata.get("resources", []):
                    if resource.get("type") == "model" and resource.get("name") == metadata.get("Model"):
                        # This is likely the checkpoint model
                        if civitai_client and resource.get("hash"):
                            model_info = await civitai_client.get_model_by_hash(resource.get("hash"))
                            if model_info:
                                result["base_model"] = model_info.get("baseModel", "")

            base_model_counts = {}

            # Process the standard resources array
            if "resources" in metadata and isinstance(metadata["resources"], list):
                for resource in metadata["resources"]:
                    # Resources without a type field are treated as potential LoRAs
                    if resource.get("type", "lora") == "lora":
                        lora_hash = resource.get("hash", "")

                        # Skip if we've already added this LoRA by hash
                        if lora_hash and lora_hash in added_loras:
                            continue

                        lora_entry = {
                            'name': resource.get("name", "Unknown LoRA"),
                            'type': "lora",
                            'weight': float(resource.get("weight", 1.0)),
                            'hash': lora_hash,
                            'existsLocally': False,
                            'localPath': None,
                            'file_name': resource.get("name", "Unknown"),
                            'thumbnailUrl': '/loras_static/images/no-preview.png',
                            'baseModel': '',
                            'size': 0,
                            'downloadUrl': '',
                            'isDeleted': False
                        }

                        # Try to get info from Civitai if a hash is available
                        if lora_entry['hash'] and civitai_client:
                            try:
                                civitai_info = await civitai_client.get_model_by_hash(lora_hash)

                                populated_entry = await self.populate_lora_from_civitai(
                                    lora_entry,
                                    civitai_info,
                                    recipe_scanner,
                                    base_model_counts,
                                    lora_hash
                                )

                                if populated_entry is None:
                                    continue  # Skip invalid LoRA types

                                lora_entry = populated_entry

                                # If we have a version ID from Civitai, track it for deduplication
                                if 'id' in lora_entry and lora_entry['id']:
                                    added_loras[str(lora_entry['id'])] = len(result["loras"])
                            except Exception as e:
                                logger.error(f"Error fetching Civitai info for LoRA hash {lora_entry['hash']}: {e}")

                        # Track by hash if we have it
                        if lora_hash:
                            added_loras[lora_hash] = len(result["loras"])

                        result["loras"].append(lora_entry)

            # Process the civitaiResources array
            if "civitaiResources" in metadata and isinstance(metadata["civitaiResources"], list):
                for resource in metadata["civitaiResources"]:
                    # Get a unique identifier for deduplication
                    version_id = str(resource.get("modelVersionId", ""))

                    # Skip if we've already added this LoRA
                    if version_id and version_id in added_loras:
                        continue

                    # Initialize lora entry
                    lora_entry = {
                        'id': resource.get("modelVersionId", 0),
                        'modelId': resource.get("modelId", 0),
                        'name': resource.get("modelName", "Unknown LoRA"),
                        'version': resource.get("modelVersionName", ""),
                        'type': resource.get("type", "lora"),
                        'weight': round(float(resource.get("weight", 1.0)), 2),
                        'existsLocally': False,
                        'thumbnailUrl': '/loras_static/images/no-preview.png',
                        'baseModel': '',
                        'size': 0,
                        'downloadUrl': '',
                        'isDeleted': False
                    }

                    # Try to get info from Civitai if a modelVersionId is available
                    if version_id and civitai_client:
                        try:
                            # Use get_model_version_info instead of get_model_version
                            civitai_info, error = await civitai_client.get_model_version_info(version_id)

                            if error:
                                logger.warning(f"Error getting model version info: {error}")
                                continue

                            populated_entry = await self.populate_lora_from_civitai(
                                lora_entry,
                                civitai_info,
                                recipe_scanner,
                                base_model_counts
                            )

                            if populated_entry is None:
                                continue  # Skip invalid LoRA types

                            lora_entry = populated_entry
                        except Exception as e:
                            logger.error(f"Error fetching Civitai info for model version {version_id}: {e}")

                    # Track this LoRA in our deduplication dict
                    if version_id:
                        added_loras[version_id] = len(result["loras"])

                    result["loras"].append(lora_entry)

            # Process the additionalResources array
            if "additionalResources" in metadata and isinstance(metadata["additionalResources"], list):
                for resource in metadata["additionalResources"]:
                    # Skip resources explicitly typed as something other than LoRA or LyCORIS;
                    # untyped resources are still treated as potential LoRAs below
                    if "type" in resource and resource.get("type") not in ["lora", "lycoris"]:
                        continue

                    lora_type = resource.get("type", "lora")
                    name = resource.get("name", "")

                    # Extract the ID from the URN format if available
                    version_id = None
                    if name and "civitai:" in name:
                        parts = name.split("@")
                        if len(parts) > 1:
                            version_id = parts[1]

                    # Skip if we've already added this LoRA
                    if version_id in added_loras:
                        continue

                    lora_entry = {
                        'name': name,
                        'type': lora_type,
                        'weight': float(resource.get("strength", 1.0)),
                        'hash': "",
                        'existsLocally': False,
                        'localPath': None,
                        'file_name': name,
                        'thumbnailUrl': '/loras_static/images/no-preview.png',
                        'baseModel': '',
                        'size': 0,
                        'downloadUrl': '',
                        'isDeleted': False
                    }

                    # If we have a version ID and a Civitai client, try to get more info
                    if version_id and civitai_client:
                        try:
                            # Use get_model_version_info with the version ID
                            civitai_info, error = await civitai_client.get_model_version_info(version_id)

                            if error:
                                logger.warning(f"Error getting model version info: {error}")
                            else:
                                populated_entry = await self.populate_lora_from_civitai(
                                    lora_entry,
                                    civitai_info,
                                    recipe_scanner,
                                    base_model_counts
                                )

                                if populated_entry is None:
                                    continue  # Skip invalid LoRA types

                                lora_entry = populated_entry

                            # Track this LoRA for deduplication
                            if version_id:
                                added_loras[version_id] = len(result["loras"])
                        except Exception as e:
                            logger.error(f"Error fetching Civitai info for model ID {version_id}: {e}")

                    result["loras"].append(lora_entry)

            # Check for LoRA info in the format "Lora_0 Model hash", "Lora_0 Model name", etc.
            lora_index = 0
            while f"Lora_{lora_index} Model hash" in metadata and f"Lora_{lora_index} Model name" in metadata:
                lora_hash = metadata[f"Lora_{lora_index} Model hash"]
                lora_name = metadata[f"Lora_{lora_index} Model name"]
                lora_strength_model = float(metadata.get(f"Lora_{lora_index} Strength model", 1.0))

                # Skip if we've already added this LoRA by hash
                if lora_hash and lora_hash in added_loras:
                    lora_index += 1
                    continue

                lora_entry = {
                    'name': lora_name,
                    'type': "lora",
                    'weight': lora_strength_model,
                    'hash': lora_hash,
                    'existsLocally': False,
                    'localPath': None,
                    'file_name': lora_name,
                    'thumbnailUrl': '/loras_static/images/no-preview.png',
                    'baseModel': '',
                    'size': 0,
                    'downloadUrl': '',
                    'isDeleted': False
                }

                # Try to get info from Civitai if a hash is available
                if lora_entry['hash'] and civitai_client:
                    try:
                        civitai_info = await civitai_client.get_model_by_hash(lora_hash)

                        populated_entry = await self.populate_lora_from_civitai(
                            lora_entry,
                            civitai_info,
                            recipe_scanner,
                            base_model_counts,
                            lora_hash
                        )

                        if populated_entry is None:
                            lora_index += 1
                            continue  # Skip invalid LoRA types

                        lora_entry = populated_entry

                        # If we have a version ID from Civitai, track it for deduplication
                        if 'id' in lora_entry and lora_entry['id']:
                            added_loras[str(lora_entry['id'])] = len(result["loras"])
                    except Exception as e:
                        logger.error(f"Error fetching Civitai info for LoRA hash {lora_entry['hash']}: {e}")

                # Track by hash if we have it
                if lora_hash:
                    added_loras[lora_hash] = len(result["loras"])

                result["loras"].append(lora_entry)

                lora_index += 1

            # If the base model wasn't found earlier, use the most common one from the LoRAs
            if not result["base_model"] and base_model_counts:
                result["base_model"] = max(base_model_counts.items(), key=lambda x: x[1])[0]

            return result

        except Exception as e:
            logger.error(f"Error parsing Civitai image metadata: {e}", exc_info=True)
            return {"error": str(e), "loras": []}
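For orientation, here is a minimal sketch of the kind of dict CivitaiApiMetadataParser accepts. The field names mirror the code above; the concrete values and IDs are invented.

sample = {
    "prompt": "portrait photo, 85mm",
    "negativePrompt": "lowres",
    "steps": 30,
    "cfgScale": 7.0,
    "seed": 42,
    "Size": "832x1216",
    "baseModel": "SDXL 1.0",
    "civitaiResources": [
        # One LoRA reference; IDs and name are invented
        {"type": "lora", "modelVersionId": 1375651, "modelId": 1221007,
         "modelName": "Example LoRA", "weight": 0.8}
    ],
}

parser = CivitaiApiMetadataParser()
assert parser.is_metadata_matching(sample)  # matched via the "civitaiResources" key
# awaiting parser.parse_metadata(sample, civitai_client=...) would then resolve the LoRA online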
216  py/recipes/parsers/comfy.py  Normal file
@@ -0,0 +1,216 @@
"""Parser for ComfyUI metadata format."""

import re
import json
import logging
from typing import Dict, Any
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS

logger = logging.getLogger(__name__)


class ComfyMetadataParser(RecipeMetadataParser):
    """Parser for Civitai ComfyUI metadata JSON format"""

    METADATA_MARKER = r"class_type"

    def is_metadata_matching(self, user_comment: str) -> bool:
        """Check if the user comment matches the ComfyUI metadata format"""
        try:
            data = json.loads(user_comment)
            # Check if it contains class_type nodes typical of a ComfyUI workflow
            return isinstance(data, dict) and any(isinstance(v, dict) and 'class_type' in v for v in data.values())
        except (json.JSONDecodeError, TypeError):
            return False

    async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
        """Parse metadata from the Civitai ComfyUI metadata format"""
        try:
            data = json.loads(user_comment)
            loras = []

            # Find all LoraLoader nodes
            lora_nodes = {k: v for k, v in data.items() if isinstance(v, dict) and v.get('class_type') == 'LoraLoader'}

            if not lora_nodes:
                return {"error": "No LoRA information found in this ComfyUI workflow", "loras": []}

            # Process each LoraLoader node
            for node_id, node in lora_nodes.items():
                if 'inputs' not in node or 'lora_name' not in node['inputs']:
                    continue

                lora_name = node['inputs'].get('lora_name', '')

                # Parse the URN to extract the model ID and version ID
                # Format: "urn:air:sdxl:lora:civitai:1107767@1253442"
                lora_id_match = re.search(r'civitai:(\d+)@(\d+)', lora_name)
                if not lora_id_match:
                    continue

                model_id = lora_id_match.group(1)
                model_version_id = lora_id_match.group(2)

                # Get strength from node inputs
                weight = node['inputs'].get('strength_model', 1.0)

                # Initialize lora entry with default values
                lora_entry = {
                    'id': model_version_id,
                    'modelId': model_id,
                    'name': f"Lora {model_id}",  # Default name
                    'version': '',
                    'type': 'lora',
                    'weight': weight,
                    'existsLocally': False,
                    'localPath': None,
                    'file_name': '',
                    'hash': '',
                    'thumbnailUrl': '/loras_static/images/no-preview.png',
                    'baseModel': '',
                    'size': 0,
                    'downloadUrl': '',
                    'isDeleted': False
                }

                # Get additional info from Civitai if a client is available
                if civitai_client:
                    try:
                        civitai_info_tuple = await civitai_client.get_model_version_info(model_version_id)
                        # Populate the lora entry with Civitai info
                        populated_entry = await self.populate_lora_from_civitai(
                            lora_entry,
                            civitai_info_tuple,
                            recipe_scanner
                        )
                        if populated_entry is None:
                            continue  # Skip invalid LoRA types
                        lora_entry = populated_entry
                    except Exception as e:
                        logger.error(f"Error fetching Civitai info for LoRA: {e}")

                loras.append(lora_entry)

            # Find checkpoint info
            checkpoint_nodes = {k: v for k, v in data.items() if isinstance(v, dict) and v.get('class_type') == 'CheckpointLoaderSimple'}
            checkpoint = None
            checkpoint_id = None
            checkpoint_version_id = None

            if checkpoint_nodes:
                # Get the first checkpoint node
                checkpoint_node = next(iter(checkpoint_nodes.values()))
                if 'inputs' in checkpoint_node and 'ckpt_name' in checkpoint_node['inputs']:
                    checkpoint_name = checkpoint_node['inputs']['ckpt_name']
                    # Parse the checkpoint URN
                    checkpoint_match = re.search(r'civitai:(\d+)@(\d+)', checkpoint_name)
                    if checkpoint_match:
                        checkpoint_id = checkpoint_match.group(1)
                        checkpoint_version_id = checkpoint_match.group(2)
                        checkpoint = {
                            'id': checkpoint_version_id,
                            'modelId': checkpoint_id,
                            'name': f"Checkpoint {checkpoint_id}",
                            'version': '',
                            'type': 'checkpoint'
                        }

                        # Get additional checkpoint info from Civitai
                        if civitai_client:
                            try:
                                civitai_info_tuple = await civitai_client.get_model_version_info(checkpoint_version_id)
                                civitai_info, _ = civitai_info_tuple if isinstance(civitai_info_tuple, tuple) else (civitai_info_tuple, None)
                                # Populate the checkpoint with Civitai info
                                checkpoint = await self.populate_checkpoint_from_civitai(checkpoint, civitai_info)
                            except Exception as e:
                                logger.error(f"Error fetching Civitai info for checkpoint: {e}")

            # Extract generation parameters
            gen_params = {}

            # First try to get them from extraMetadata
            if 'extraMetadata' in data:
                try:
                    # extraMetadata is a JSON string that needs to be parsed
                    extra_metadata = json.loads(data['extraMetadata'])

                    # Map fields from extraMetadata to our standard format
                    mapping = {
                        'prompt': 'prompt',
                        'negativePrompt': 'negative_prompt',
                        'steps': 'steps',
                        'sampler': 'sampler',
                        'cfgScale': 'cfg_scale',
                        'seed': 'seed'
                    }

                    for src_key, dest_key in mapping.items():
                        if src_key in extra_metadata:
                            gen_params[dest_key] = extra_metadata[src_key]

                    # If size info is available, format it as "<width>x<height>"
                    if 'width' in extra_metadata and 'height' in extra_metadata:
                        gen_params['size'] = f"{extra_metadata['width']}x{extra_metadata['height']}"

                except Exception as e:
                    logger.error(f"Error parsing extraMetadata: {e}")

            # If extraMetadata doesn't have all the info, try to get it from nodes
            if not gen_params or len(gen_params) < 3:  # At minimum we want prompt, negative_prompt, and steps
                # Find the positive prompt node
                positive_nodes = {k: v for k, v in data.items() if isinstance(v, dict) and
                                  v.get('class_type', '').endswith('CLIPTextEncode') and
                                  v.get('_meta', {}).get('title') == 'Positive'}

                if positive_nodes:
                    positive_node = next(iter(positive_nodes.values()))
                    if 'inputs' in positive_node and 'text' in positive_node['inputs']:
                        gen_params['prompt'] = positive_node['inputs']['text']

                # Find the negative prompt node
                negative_nodes = {k: v for k, v in data.items() if isinstance(v, dict) and
                                  v.get('class_type', '').endswith('CLIPTextEncode') and
                                  v.get('_meta', {}).get('title') == 'Negative'}

                if negative_nodes:
                    negative_node = next(iter(negative_nodes.values()))
                    if 'inputs' in negative_node and 'text' in negative_node['inputs']:
                        gen_params['negative_prompt'] = negative_node['inputs']['text']

                # Find a KSampler node for the other parameters
                ksampler_nodes = {k: v for k, v in data.items() if isinstance(v, dict) and v.get('class_type') == 'KSampler'}

                if ksampler_nodes:
                    ksampler_node = next(iter(ksampler_nodes.values()))
                    if 'inputs' in ksampler_node:
                        inputs = ksampler_node['inputs']
                        if 'sampler_name' in inputs:
                            gen_params['sampler'] = inputs['sampler_name']
                        if 'steps' in inputs:
                            gen_params['steps'] = inputs['steps']
                        if 'cfg' in inputs:
                            gen_params['cfg_scale'] = inputs['cfg']
                        if 'seed' in inputs:
                            gen_params['seed'] = inputs['seed']

            # Determine the base model from the loras info
            base_model = None
            if loras:
                # Use the most common base model among the loras
                base_models = [lora['baseModel'] for lora in loras if lora.get('baseModel')]
                if base_models:
                    from collections import Counter
                    base_model_counts = Counter(base_models)
                    base_model = base_model_counts.most_common(1)[0][0]

            return {
                'base_model': base_model,
                'loras': loras,
                'checkpoint': checkpoint,
                'gen_params': gen_params,
                'from_comfy_metadata': True
            }

        except Exception as e:
            logger.error(f"Error parsing ComfyUI metadata: {e}", exc_info=True)
            return {"error": str(e), "loras": []}
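The URN parsing is the crux of the ComfyUI parser above; the same regex can be exercised standalone. The URN value below is the example already given in the code comment.

import re

lora_name = "urn:air:sdxl:lora:civitai:1107767@1253442"
match = re.search(r'civitai:(\d+)@(\d+)', lora_name)
if match:
    model_id, model_version_id = match.group(1), match.group(2)
    print(model_id, model_version_id)  # -> 1107767 1253442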
174  py/recipes/parsers/meta_format.py  Normal file
@@ -0,0 +1,174 @@
"""Parser for meta format (Lora_N Model hash) metadata."""

import re
import logging
from typing import Dict, Any
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS

logger = logging.getLogger(__name__)


class MetaFormatParser(RecipeMetadataParser):
    """Parser for images with meta format metadata (Lora_N Model hash format)"""

    METADATA_MARKER = r'Lora_\d+ Model hash:'

    def is_metadata_matching(self, user_comment: str) -> bool:
        """Check if the user comment matches the metadata format"""
        return re.search(self.METADATA_MARKER, user_comment, re.IGNORECASE | re.DOTALL) is not None

    async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
        """Parse metadata from images with meta format metadata"""
        try:
            # Extract prompt and negative prompt
            parts = user_comment.split('Negative prompt:', 1)
            prompt = parts[0].strip()

            # Initialize metadata
            metadata = {"prompt": prompt, "loras": []}

            # Extract negative prompt and parameters if available
            if len(parts) > 1:
                negative_and_params = parts[1]

                # Extract negative prompt - everything until the first parameter (usually "Steps:")
                param_start = re.search(r'([A-Za-z]+): ', negative_and_params)
                if param_start:
                    neg_prompt = negative_and_params[:param_start.start()].strip()
                    metadata["negative_prompt"] = neg_prompt
                    params_section = negative_and_params[param_start.start():]
                else:
                    params_section = negative_and_params

                # Extract key-value parameters (Steps, Sampler, Seed, etc.)
                param_pattern = r'([A-Za-z_0-9 ]+): ([^,]+)'
                params = re.findall(param_pattern, params_section)
                for key, value in params:
                    clean_key = key.strip().lower().replace(' ', '_')
                    metadata[clean_key] = value.strip()

            # Extract LoRA information
            # Pattern to match lora entries: Lora_0 Model name: ArtVador I.safetensors, Lora_0 Model hash: 08f7133a58, etc.
            lora_pattern = r'Lora_(\d+) Model name: ([^,]+), Lora_\1 Model hash: ([^,]+), Lora_\1 Strength model: ([^,]+), Lora_\1 Strength clip: ([^,]+)'
            lora_matches = re.findall(lora_pattern, user_comment)

            # If the regular pattern doesn't match, try a more flexible approach
            if not lora_matches:
                # First find all Lora indices
                lora_indices = set(re.findall(r'Lora_(\d+)', user_comment))

                # For each index, extract the information
                for idx in lora_indices:
                    lora_info = {}

                    # Extract model name
                    name_match = re.search(f'Lora_{idx} Model name: ([^,]+)', user_comment)
                    if name_match:
                        lora_info['name'] = name_match.group(1).strip()

                    # Extract model hash
                    hash_match = re.search(f'Lora_{idx} Model hash: ([^,]+)', user_comment)
                    if hash_match:
                        lora_info['hash'] = hash_match.group(1).strip()

                    # Extract strength model
                    strength_model_match = re.search(f'Lora_{idx} Strength model: ([^,]+)', user_comment)
                    if strength_model_match:
                        lora_info['strength_model'] = float(strength_model_match.group(1).strip())

                    # Extract strength clip
                    strength_clip_match = re.search(f'Lora_{idx} Strength clip: ([^,]+)', user_comment)
                    if strength_clip_match:
                        lora_info['strength_clip'] = float(strength_clip_match.group(1).strip())

                    # Only add if we have at least a name and a hash
                    if 'name' in lora_info and 'hash' in lora_info:
                        lora_matches.append((idx, lora_info['name'], lora_info['hash'],
                                             str(lora_info.get('strength_model', 1.0)),
                                             str(lora_info.get('strength_clip', 1.0))))

            # Process LoRAs
            base_model_counts = {}
            loras = []

            for match in lora_matches:
                if len(match) == 5:  # Regular pattern match
                    idx, name, hash_value, strength_model, strength_clip = match
                else:  # Flexible approach match
                    continue  # Should not happen now

                # Clean up the values
                name = name.strip()
                if name.endswith('.safetensors'):
                    name = name[:-12]  # Remove the .safetensors extension

                hash_value = hash_value.strip()
                weight = float(strength_model)  # Use the model strength as the weight

                # Initialize lora entry with default values
                lora_entry = {
                    'name': name,
                    'type': 'lora',
                    'weight': weight,
                    'existsLocally': False,
                    'localPath': None,
                    'file_name': name,
                    'hash': hash_value,
                    'thumbnailUrl': '/loras_static/images/no-preview.png',
                    'baseModel': '',
                    'size': 0,
                    'downloadUrl': '',
                    'isDeleted': False
                }

                # Get info from Civitai by hash if available
                if civitai_client and hash_value:
                    try:
                        civitai_info = await civitai_client.get_model_by_hash(hash_value)
                        # Populate the lora entry with Civitai info
                        populated_entry = await self.populate_lora_from_civitai(
                            lora_entry,
                            civitai_info,
                            recipe_scanner,
                            base_model_counts,
                            hash_value
                        )
                        if populated_entry is None:
                            continue  # Skip invalid LoRA types
                        lora_entry = populated_entry
                    except Exception as e:
                        logger.error(f"Error fetching Civitai info for LoRA hash {hash_value}: {e}")

                loras.append(lora_entry)

            # Extract model information
            model = None
            if 'model' in metadata:
                model = metadata['model']

            # Set base_model to the most common one from civitai_info
            base_model = None
            if base_model_counts:
                base_model = max(base_model_counts.items(), key=lambda x: x[1])[0]

            # Extract generation parameters for the recipe metadata
            gen_params = {}
            for key in GEN_PARAM_KEYS:
                if key in metadata:
                    gen_params[key] = metadata.get(key, '')

            # Try to extract size information if available
            if 'width' in metadata and 'height' in metadata:
                gen_params['size'] = f"{metadata['width']}x{metadata['height']}"

            return {
                'base_model': base_model,
                'loras': loras,
                'gen_params': gen_params,
                'raw_metadata': metadata,
                'from_meta_format': True
            }

        except Exception as e:
            logger.error(f"Error parsing meta format metadata: {e}", exc_info=True)
            return {"error": str(e), "loras": []}
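For reference, the one-line UserComment shape MetaFormatParser targets looks roughly like the following sketch; the values are invented, and the Lora_0 name and hash echo the example already given in the code comment.

example = (
    "cinematic still of a castle\n"
    "Negative prompt: blurry\n"
    "Steps: 28, Sampler: Euler a, Seed: 7, "
    "Lora_0 Model name: ArtVador I.safetensors, Lora_0 Model hash: 08f7133a58, "
    "Lora_0 Strength model: 0.85, Lora_0 Strength clip: 0.85"
)

parser = MetaFormatParser()
assert parser.is_metadata_matching(example)  # matches the r'Lora_\d+ Model hash:' marker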
114  py/recipes/parsers/recipe_format.py  Normal file
@@ -0,0 +1,114 @@
"""Parser for dedicated recipe metadata format."""

import re
import json
import logging
from typing import Dict, Any
from ...config import config
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS

logger = logging.getLogger(__name__)


class RecipeFormatParser(RecipeMetadataParser):
    """Parser for images with dedicated recipe metadata format"""

    # Regular expression pattern for extracting recipe metadata
    METADATA_MARKER = r'Recipe metadata: (\{.*\})'

    def is_metadata_matching(self, user_comment: str) -> bool:
        """Check if the user comment matches the metadata format"""
        return re.search(self.METADATA_MARKER, user_comment, re.IGNORECASE | re.DOTALL) is not None

    async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
        """Parse metadata from images with dedicated recipe metadata format"""
        try:
            # Extract recipe metadata from the user comment
            try:
                # Look for the recipe metadata section
                recipe_match = re.search(self.METADATA_MARKER, user_comment, re.IGNORECASE | re.DOTALL)
                if not recipe_match:
                    recipe_metadata = None
                else:
                    recipe_json = recipe_match.group(1)
                    recipe_metadata = json.loads(recipe_json)
            except Exception as e:
                logger.error(f"Error extracting recipe metadata: {e}")
                recipe_metadata = None
            if not recipe_metadata:
                return {"error": "No recipe metadata found", "loras": []}

            # Process the recipe metadata
            loras = []
            for lora in recipe_metadata.get('loras', []):
                # Convert the recipe lora format to the frontend format
                lora_entry = {
                    'id': int(lora.get('modelVersionId', 0)),
                    'name': lora.get('modelName', ''),
                    'version': lora.get('modelVersionName', ''),
                    'type': 'lora',
                    'weight': lora.get('strength', 1.0),
                    'file_name': lora.get('file_name', ''),
                    'hash': lora.get('hash', '')
                }

                # Check if this LoRA exists locally by SHA256 hash
                if lora.get('hash') and recipe_scanner:
                    lora_scanner = recipe_scanner._lora_scanner
                    exists_locally = lora_scanner.has_hash(lora['hash'])
                    if exists_locally:
                        lora_cache = await lora_scanner.get_cached_data()
                        lora_item = next((item for item in lora_cache.raw_data if item['sha256'].lower() == lora['hash'].lower()), None)
                        if lora_item:
                            lora_entry['existsLocally'] = True
                            lora_entry['localPath'] = lora_item['file_path']
                            lora_entry['file_name'] = lora_item['file_name']
                            lora_entry['size'] = lora_item['size']
                            lora_entry['thumbnailUrl'] = config.get_preview_static_url(lora_item['preview_url'])

                    else:
                        lora_entry['existsLocally'] = False
                        lora_entry['localPath'] = None

                        # Try to get additional info from Civitai if we have a model version ID
                        if lora.get('modelVersionId') and civitai_client:
                            try:
                                civitai_info_tuple = await civitai_client.get_model_version_info(lora['modelVersionId'])
                                # Populate the lora entry with Civitai info
                                populated_entry = await self.populate_lora_from_civitai(
                                    lora_entry,
                                    civitai_info_tuple,
                                    recipe_scanner,
                                    None,  # No need to track base model counts
                                    lora['hash']
                                )
                                if populated_entry is None:
                                    continue  # Skip invalid LoRA types
                                lora_entry = populated_entry
                            except Exception as e:
                                logger.error(f"Error fetching Civitai info for LoRA: {e}")
                                lora_entry['thumbnailUrl'] = '/loras_static/images/no-preview.png'

                loras.append(lora_entry)

            logger.info(f"Found {len(loras)} loras in recipe metadata")

            # Filter gen_params to only include recognized keys
            filtered_gen_params = {}
            if 'gen_params' in recipe_metadata:
                for key, value in recipe_metadata['gen_params'].items():
                    if key in GEN_PARAM_KEYS:
                        filtered_gen_params[key] = value

            return {
                'base_model': recipe_metadata.get('base_model', ''),
                'loras': loras,
                'gen_params': filtered_gen_params,
                'tags': recipe_metadata.get('tags', []),
                'title': recipe_metadata.get('title', ''),
                'from_recipe_metadata': True
            }

        except Exception as e:
            logger.error(f"Error parsing recipe format metadata: {e}", exc_info=True)
            return {"error": str(e), "loras": []}
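A sketch of the embedded "Recipe metadata: {...}" payload RecipeFormatParser extracts; the keys mirror the code above, while the title, IDs, hash, and file name are invented.

import json

recipe_payload = {
    "title": "Example recipe",
    "base_model": "SDXL 1.0",
    "tags": ["portrait"],
    "gen_params": {"steps": 30, "sampler": "DPM++ 2M"},
    "loras": [
        {"modelVersionId": 1375651, "modelName": "Example LoRA",
         "modelVersionName": "v1.0", "strength": 0.8,
         "hash": "08f7133a58", "file_name": "example_lora"}
    ],
}

user_comment = "Recipe metadata: " + json.dumps(recipe_payload)
parser = RecipeFormatParser()
assert parser.is_metadata_matching(user_comment)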
File diff suppressed because it is too large
699  py/routes/base_model_routes.py  Normal file
@@ -0,0 +1,699 @@
from abc import ABC, abstractmethod
import asyncio
import json
import logging
from aiohttp import web
from typing import Dict

import jinja2

from ..utils.routes_common import ModelRouteUtils
from ..services.websocket_manager import ws_manager
from ..services.settings_manager import settings
from ..config import config

logger = logging.getLogger(__name__)

class BaseModelRoutes(ABC):
    """Base route controller for all model types"""

    def __init__(self, service):
        """Initialize the route controller

        Args:
            service: Model service instance (LoraService, CheckpointService, etc.)
        """
        self.service = service
        self.model_type = service.model_type
        self.template_env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(config.templates_path),
            autoescape=True
        )

    def setup_routes(self, app: web.Application, prefix: str):
        """Setup common routes for the model type

        Args:
            app: aiohttp application
            prefix: URL prefix (e.g., 'loras', 'checkpoints')
        """
        # Common model management routes
        app.router.add_get(f'/api/{prefix}/list', self.get_models)
        app.router.add_post(f'/api/{prefix}/delete', self.delete_model)
        app.router.add_post(f'/api/{prefix}/exclude', self.exclude_model)
        app.router.add_post(f'/api/{prefix}/fetch-civitai', self.fetch_civitai)
        app.router.add_post(f'/api/{prefix}/relink-civitai', self.relink_civitai)
        app.router.add_post(f'/api/{prefix}/replace-preview', self.replace_preview)
        app.router.add_post(f'/api/{prefix}/save-metadata', self.save_metadata)
        app.router.add_post(f'/api/{prefix}/rename', self.rename_model)
        app.router.add_post(f'/api/{prefix}/bulk-delete', self.bulk_delete_models)
        app.router.add_post(f'/api/{prefix}/verify-duplicates', self.verify_duplicates)
        app.router.add_post(f'/api/{prefix}/move_model', self.move_model)
        app.router.add_post(f'/api/{prefix}/move_models_bulk', self.move_models_bulk)

        # Common query routes
        app.router.add_get(f'/api/{prefix}/top-tags', self.get_top_tags)
        app.router.add_get(f'/api/{prefix}/base-models', self.get_base_models)
        app.router.add_get(f'/api/{prefix}/scan', self.scan_models)
        app.router.add_get(f'/api/{prefix}/roots', self.get_model_roots)
        app.router.add_get(f'/api/{prefix}/folders', self.get_folders)
        app.router.add_get(f'/api/{prefix}/find-duplicates', self.find_duplicate_models)
        app.router.add_get(f'/api/{prefix}/find-filename-conflicts', self.find_filename_conflicts)

        # Common download management
        app.router.add_post(f'/api/download-model', self.download_model)
        app.router.add_get(f'/api/download-model-get', self.download_model_get)
        app.router.add_get(f'/api/cancel-download-get', self.cancel_download_get)
        app.router.add_get(f'/api/download-progress/{{download_id}}', self.get_download_progress)

        # CivitAI integration routes
        app.router.add_post(f'/api/{prefix}/fetch-all-civitai', self.fetch_all_civitai)
        # app.router.add_get(f'/api/civitai/versions/{{model_id}}', self.get_civitai_versions)

        # Add the generic page route
        app.router.add_get(f'/{prefix}', self.handle_models_page)

        # Setup model-specific routes
        self.setup_specific_routes(app, prefix)

    @abstractmethod
    def setup_specific_routes(self, app: web.Application, prefix: str):
        """Setup model-specific routes - to be implemented by subclasses"""
        pass

    async def handle_models_page(self, request: web.Request) -> web.Response:
        """
        Generic handler for model pages (e.g., /loras, /checkpoints).
        Subclasses should set self.template_env and template_name.
        """
        try:
            # Check if the scanner is initializing
            is_initializing = (
                self.service.scanner._cache is None or
                (hasattr(self.service.scanner, 'is_initializing') and callable(self.service.scanner.is_initializing) and self.service.scanner.is_initializing()) or
                (hasattr(self.service.scanner, '_is_initializing') and self.service.scanner._is_initializing)
            )

            template_name = getattr(self, "template_name", None)
            if not self.template_env or not template_name:
                return web.Response(text="Template environment or template name not set", status=500)

            if is_initializing:
                rendered = self.template_env.get_template(template_name).render(
                    folders=[],
                    is_initializing=True,
                    settings=settings,
                    request=request
                )
            else:
                try:
                    cache = await self.service.scanner.get_cached_data(force_refresh=False)
                    rendered = self.template_env.get_template(template_name).render(
                        folders=getattr(cache, "folders", []),
                        is_initializing=False,
                        settings=settings,
                        request=request
                    )
                except Exception as cache_error:
                    logger.error(f"Error loading cache data: {cache_error}")
                    rendered = self.template_env.get_template(template_name).render(
                        folders=[],
                        is_initializing=True,
                        settings=settings,
                        request=request
                    )
            return web.Response(
                text=rendered,
                content_type='text/html'
            )
        except Exception as e:
            logger.error(f"Error handling models page: {e}", exc_info=True)
            return web.Response(
                text="Error loading models page",
                status=500
            )

    async def get_models(self, request: web.Request) -> web.Response:
        """Get paginated model data"""
        try:
            # Parse common query parameters
            params = self._parse_common_params(request)

            # Get data from the service
            result = await self.service.get_paginated_data(**params)

            # Format response items
            formatted_result = {
                'items': [await self.service.format_response(item) for item in result['items']],
                'total': result['total'],
                'page': result['page'],
                'page_size': result['page_size'],
                'total_pages': result['total_pages']
            }

            return web.json_response(formatted_result)

        except Exception as e:
            logger.error(f"Error in get_{self.model_type}s: {e}", exc_info=True)
            return web.json_response({"error": str(e)}, status=500)

    def _parse_common_params(self, request: web.Request) -> Dict:
        """Parse common query parameters"""
        # Parse basic pagination and sorting
        page = int(request.query.get('page', '1'))
        page_size = min(int(request.query.get('page_size', '20')), 100)
        sort_by = request.query.get('sort_by', 'name')
        folder = request.query.get('folder', None)
        search = request.query.get('search', None)
        fuzzy_search = request.query.get('fuzzy_search', 'false').lower() == 'true'

        # Parse filter arrays
        base_models = request.query.getall('base_model', [])
        tags = request.query.getall('tag', [])
        favorites_only = request.query.get('favorites_only', 'false').lower() == 'true'

        # Parse search options
        search_options = {
            'filename': request.query.get('search_filename', 'true').lower() == 'true',
            'modelname': request.query.get('search_modelname', 'true').lower() == 'true',
            'tags': request.query.get('search_tags', 'false').lower() == 'true',
            'creator': request.query.get('search_creator', 'false').lower() == 'true',
            'recursive': request.query.get('recursive', 'false').lower() == 'true',
        }

        # Parse hash filters if provided
        hash_filters = {}
        if 'hash' in request.query:
            hash_filters['single_hash'] = request.query['hash']
        elif 'hashes' in request.query:
            try:
                hash_list = json.loads(request.query['hashes'])
                if isinstance(hash_list, list):
                    hash_filters['multiple_hashes'] = hash_list
            except (json.JSONDecodeError, TypeError):
                pass

        return {
            'page': page,
            'page_size': page_size,
            'sort_by': sort_by,
            'folder': folder,
            'search': search,
            'fuzzy_search': fuzzy_search,
            'base_models': base_models,
            'tags': tags,
            'search_options': search_options,
            'hash_filters': hash_filters,
            'favorites_only': favorites_only,
            # Add model-specific parameters
            **self._parse_specific_params(request)
        }

    def _parse_specific_params(self, request: web.Request) -> Dict:
        """Parse model-specific parameters - to be overridden by subclasses"""
        return {}
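
    # To make the contract concrete, a minimal sketch of a subclass plugging into the
    # two overridable hooks above. NOTE: this sketch is illustrative and not part of
    # this diff; the class name, template file, route, and query parameter are invented.
    #
    #   class ExampleModelRoutes(BaseModelRoutes):
    #       template_name = 'example_models.html'  # assumed template file
    #
    #       def setup_specific_routes(self, app: web.Application, prefix: str):
    #           app.router.add_get(f'/api/{prefix}/example-extra', self.get_example_extra)
    #
    #       def _parse_specific_params(self, request: web.Request) -> Dict:
    #           # Merged into the dict returned by _parse_common_params above
    #           return {'example_flag': request.query.get('example_flag', 'false').lower() == 'true'}
    #
    #       async def get_example_extra(self, request: web.Request) -> web.Response:
    #           return web.json_response({'success': True})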
|
||||
# Common route handlers
|
||||
async def delete_model(self, request: web.Request) -> web.Response:
|
||||
"""Handle model deletion request"""
|
||||
return await ModelRouteUtils.handle_delete_model(request, self.service.scanner)
|
||||
|
||||
async def exclude_model(self, request: web.Request) -> web.Response:
|
||||
"""Handle model exclusion request"""
|
||||
return await ModelRouteUtils.handle_exclude_model(request, self.service.scanner)
|
||||
|
||||
async def fetch_civitai(self, request: web.Request) -> web.Response:
|
||||
"""Handle CivitAI metadata fetch request"""
|
||||
response = await ModelRouteUtils.handle_fetch_civitai(request, self.service.scanner)
|
||||
|
||||
# If successful, format the metadata before returning
|
||||
if response.status == 200:
|
||||
data = json.loads(response.body.decode('utf-8'))
|
||||
if data.get("success") and data.get("metadata"):
|
||||
formatted_metadata = await self.service.format_response(data["metadata"])
|
||||
return web.json_response({
|
||||
"success": True,
|
||||
"metadata": formatted_metadata
|
||||
})
|
||||
|
||||
return response
|
||||
|
||||
async def relink_civitai(self, request: web.Request) -> web.Response:
|
||||
"""Handle CivitAI metadata re-linking request"""
|
||||
return await ModelRouteUtils.handle_relink_civitai(request, self.service.scanner)
|
||||
|
||||
async def replace_preview(self, request: web.Request) -> web.Response:
|
||||
"""Handle preview image replacement"""
|
||||
return await ModelRouteUtils.handle_replace_preview(request, self.service.scanner)
|
||||
|
||||
async def save_metadata(self, request: web.Request) -> web.Response:
|
||||
"""Handle saving metadata updates"""
|
||||
return await ModelRouteUtils.handle_save_metadata(request, self.service.scanner)
|
||||
|
||||
async def rename_model(self, request: web.Request) -> web.Response:
|
||||
"""Handle renaming a model file and its associated files"""
|
||||
return await ModelRouteUtils.handle_rename_model(request, self.service.scanner)
|
||||
|
||||
async def bulk_delete_models(self, request: web.Request) -> web.Response:
|
||||
"""Handle bulk deletion of models"""
|
||||
return await ModelRouteUtils.handle_bulk_delete_models(request, self.service.scanner)
|
||||
|
||||
async def verify_duplicates(self, request: web.Request) -> web.Response:
|
||||
"""Handle verification of duplicate model hashes"""
|
||||
return await ModelRouteUtils.handle_verify_duplicates(request, self.service.scanner)
|
||||
|
||||
async def get_top_tags(self, request: web.Request) -> web.Response:
|
||||
"""Handle request for top tags sorted by frequency"""
|
||||
try:
|
||||
limit = int(request.query.get('limit', '20'))
|
||||
if limit < 1 or limit > 100:
|
||||
limit = 20
|
||||
|
||||
top_tags = await self.service.get_top_tags(limit)
|
||||
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'tags': top_tags
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting top tags: {str(e)}", exc_info=True)
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': 'Internal server error'
|
||||
}, status=500)
|
||||
|
||||
async def get_base_models(self, request: web.Request) -> web.Response:
|
||||
"""Get base models used in models"""
|
||||
try:
|
||||
limit = int(request.query.get('limit', '20'))
|
||||
if limit < 1 or limit > 100:
|
||||
limit = 20
|
||||
|
||||
base_models = await self.service.get_base_models(limit)
|
||||
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'base_models': base_models
|
||||
})
|
||||
except Exception as e:
|
||||
logger.error(f"Error retrieving base models: {e}")
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}, status=500)
|
||||
|
||||
async def scan_models(self, request: web.Request) -> web.Response:
|
||||
"""Force a rescan of model files"""
|
||||
try:
|
||||
full_rebuild = request.query.get('full_rebuild', 'false').lower() == 'true'
|
||||
|
||||
await self.service.scan_models(force_refresh=True, rebuild_cache=full_rebuild)
|
||||
return web.json_response({
|
||||
"status": "success",
|
||||
"message": f"{self.model_type.capitalize()} scan completed"
|
||||
})
|
||||
except Exception as e:
|
||||
logger.error(f"Error in scan_{self.model_type}s: {e}", exc_info=True)
|
||||
return web.json_response({"error": str(e)}, status=500)
|
||||
|
||||
async def get_model_roots(self, request: web.Request) -> web.Response:
|
||||
"""Return the model root directories"""
|
||||
try:
|
||||
roots = self.service.get_model_roots()
|
||||
return web.json_response({
|
||||
"success": True,
|
||||
"roots": roots
|
||||
})
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting {self.model_type} roots: {e}", exc_info=True)
|
||||
return web.json_response({
|
||||
"success": False,
|
||||
"error": str(e)
|
||||
}, status=500)
|
||||
|
||||
async def get_folders(self, request: web.Request) -> web.Response:
|
||||
"""Get all folders in the cache"""
|
||||
try:
|
||||
cache = await self.service.scanner.get_cached_data()
|
||||
            return web.json_response({
                'folders': cache.folders
            })
        except Exception as e:
            logger.error(f"Error getting folders: {e}")
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    async def find_duplicate_models(self, request: web.Request) -> web.Response:
        """Find models with duplicate SHA256 hashes"""
        try:
            # Get duplicate hashes from service
            duplicates = self.service.find_duplicate_hashes()

            # Format the response
            result = []
            cache = await self.service.scanner.get_cached_data()

            for sha256, paths in duplicates.items():
                group = {
                    "hash": sha256,
                    "models": []
                }
                # Find matching models for each path
                for path in paths:
                    model = next((m for m in cache.raw_data if m['file_path'] == path), None)
                    if model:
                        group["models"].append(await self.service.format_response(model))

                # Add the primary model too
                primary_path = self.service.get_path_by_hash(sha256)
                if primary_path and primary_path not in paths:
                    primary_model = next((m for m in cache.raw_data if m['file_path'] == primary_path), None)
                    if primary_model:
                        group["models"].insert(0, await self.service.format_response(primary_model))

                if len(group["models"]) > 1:  # Only include if we found multiple models
                    result.append(group)

            return web.json_response({
                "success": True,
                "duplicates": result,
                "count": len(result)
            })
        except Exception as e:
            logger.error(f"Error finding duplicate {self.model_type}s: {e}", exc_info=True)
            return web.json_response({
                "success": False,
                "error": str(e)
            }, status=500)

    async def find_filename_conflicts(self, request: web.Request) -> web.Response:
        """Find models with conflicting filenames"""
        try:
            # Get duplicate filenames from service
            duplicates = self.service.find_duplicate_filenames()

            # Format the response
            result = []
            cache = await self.service.scanner.get_cached_data()

            for filename, paths in duplicates.items():
                group = {
                    "filename": filename,
                    "models": []
                }
                # Find matching models for each path
                for path in paths:
                    model = next((m for m in cache.raw_data if m['file_path'] == path), None)
                    if model:
                        group["models"].append(await self.service.format_response(model))

                # Find the model from the main index too
                hash_val = self.service.scanner.get_hash_by_filename(filename)
                if hash_val:
                    main_path = self.service.get_path_by_hash(hash_val)
                    if main_path and main_path not in paths:
                        main_model = next((m for m in cache.raw_data if m['file_path'] == main_path), None)
                        if main_model:
                            group["models"].insert(0, await self.service.format_response(main_model))

                if group["models"]:
                    result.append(group)

            return web.json_response({
                "success": True,
                "conflicts": result,
                "count": len(result)
            })
        except Exception as e:
            logger.error(f"Error finding filename conflicts for {self.model_type}s: {e}", exc_info=True)
            return web.json_response({
                "success": False,
                "error": str(e)
            }, status=500)

    # Download management methods
    async def download_model(self, request: web.Request) -> web.Response:
        """Handle model download request"""
        return await ModelRouteUtils.handle_download_model(request)

    async def download_model_get(self, request: web.Request) -> web.Response:
        """Handle model download request via GET method"""
        try:
            # Extract query parameters
            model_id = request.query.get('model_id')
            if not model_id:
                return web.Response(
                    status=400,
                    text="Missing required parameter: Please provide 'model_id'"
                )

            # Get optional parameters
            model_version_id = request.query.get('model_version_id')
            download_id = request.query.get('download_id')
            use_default_paths = request.query.get('use_default_paths', 'false').lower() == 'true'

            # Create a data dictionary that mimics what would be received from a POST request
            data = {
                'model_id': model_id
            }

            # Add optional parameters only if they are provided
            if model_version_id:
                data['model_version_id'] = model_version_id

            if download_id:
                data['download_id'] = download_id

            data['use_default_paths'] = use_default_paths

            # Create a mock request object whose json() returns an already-resolved future
            future = asyncio.get_event_loop().create_future()
            future.set_result(data)

            mock_request = type('MockRequest', (), {
                'json': lambda self=None: future
            })()

            # Call the existing download handler
            return await ModelRouteUtils.handle_download_model(mock_request)

        except Exception as e:
            error_message = str(e)
            logger.error(f"Error downloading model via GET: {error_message}", exc_info=True)
            return web.Response(status=500, text=error_message)
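
# Illustrative client sketch (not part of the original file). The URL below
# assumes the handler is mounted under a 'loras' prefix as 'download-model-get';
# the real path depends on how BaseModelRoutes registers it, so treat the path
# as a hypothetical example.
import asyncio
import aiohttp

async def download_via_get(base_url: str, model_id: str) -> str:
    params = {'model_id': model_id, 'use_default_paths': 'true'}
    async with aiohttp.ClientSession() as session:
        async with session.get(f"{base_url}/api/loras/download-model-get", params=params) as resp:
            return await resp.text()

# asyncio.run(download_via_get("http://127.0.0.1:8188", "12345"))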

    async def cancel_download_get(self, request: web.Request) -> web.Response:
        """Handle GET request for cancelling a download by download_id"""
        try:
            download_id = request.query.get('download_id')
            if not download_id:
                return web.json_response({
                    'success': False,
                    'error': 'Download ID is required'
                }, status=400)

            # Create a mock request with match_info for compatibility
            mock_request = type('MockRequest', (), {
                'match_info': {'download_id': download_id}
            })()
            return await ModelRouteUtils.handle_cancel_download(mock_request)
        except Exception as e:
            logger.error(f"Error cancelling download via GET: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    async def get_download_progress(self, request: web.Request) -> web.Response:
        """Handle request for download progress by download_id"""
        try:
            # Get download_id from URL path
            download_id = request.match_info.get('download_id')
            if not download_id:
                return web.json_response({
                    'success': False,
                    'error': 'Download ID is required'
                }, status=400)

            progress_data = ws_manager.get_download_progress(download_id)

            if progress_data is None:
                return web.json_response({
                    'success': False,
                    'error': 'Download ID not found'
                }, status=404)

            return web.json_response({
                'success': True,
                'progress': progress_data.get('progress', 0)
            })
        except Exception as e:
            logger.error(f"Error getting download progress: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)
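
# Illustrative polling sketch (not part of the original file). The route path
# is assumed to be /api/loras/download-progress/{download_id}; verify against
# the actual route registration.
import asyncio
import aiohttp

async def poll_progress(base_url: str, download_id: str, interval: float = 1.0) -> None:
    async with aiohttp.ClientSession() as session:
        while True:
            url = f"{base_url}/api/loras/download-progress/{download_id}"
            async with session.get(url) as resp:
                data = await resp.json()
            if not data.get('success'):
                print('error:', data.get('error'))
                return
            progress = data.get('progress', 0)
            print(f'progress: {progress}%')
            if progress >= 100:
                return
            await asyncio.sleep(interval)

# asyncio.run(poll_progress("http://127.0.0.1:8188", "some-download-id"))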

    async def fetch_all_civitai(self, request: web.Request) -> web.Response:
        """Fetch CivitAI metadata for all models in the background"""
        try:
            cache = await self.service.scanner.get_cached_data()
            total = len(cache.raw_data)
            processed = 0
            success = 0
            needs_resort = False

            # Prepare models to process
            to_process = [
                model for model in cache.raw_data
                if model.get('sha256') and (not model.get('civitai') or 'id' not in model.get('civitai')) and model.get('from_civitai', True)
            ]
            total_to_process = len(to_process)

            # Send initial progress
            await ws_manager.broadcast({
                'status': 'started',
                'total': total_to_process,
                'processed': 0,
                'success': 0
            })

            # Process each model
            for model in to_process:
                try:
                    original_name = model.get('model_name')
                    if await ModelRouteUtils.fetch_and_update_model(
                        sha256=model['sha256'],
                        file_path=model['file_path'],
                        model_data=model,
                        update_cache_func=self.service.scanner.update_single_model_cache
                    ):
                        success += 1
                        if original_name != model.get('model_name'):
                            needs_resort = True

                    processed += 1

                    # Send progress update
                    await ws_manager.broadcast({
                        'status': 'processing',
                        'total': total_to_process,
                        'processed': processed,
                        'success': success,
                        'current_name': model.get('model_name', 'Unknown')
                    })

                except Exception as e:
                    logger.error(f"Error fetching CivitAI data for {model['file_path']}: {e}")

            if needs_resort:
                await cache.resort()

            # Send completion message
            await ws_manager.broadcast({
                'status': 'completed',
                'total': total_to_process,
                'processed': processed,
                'success': success
            })

            return web.json_response({
                "success": True,
                "message": f"Successfully updated {success} of {processed} processed {self.model_type}s (total: {total})"
            })

        except Exception as e:
            # Send error message
            await ws_manager.broadcast({
                'status': 'error',
                'error': str(e)
            })
            logger.error(f"Error in fetch_all_civitai for {self.model_type}s: {e}")
            return web.Response(text=str(e), status=500)

    async def get_civitai_versions(self, request: web.Request) -> web.Response:
        """Get available versions for a Civitai model with local availability info"""
        # This will be implemented by subclasses as they need CivitAI client access
        return web.json_response({
            "error": "Not implemented in base class"
        }, status=501)

    # Common model move handlers
    async def move_model(self, request: web.Request) -> web.Response:
        """Handle model move request"""
        try:
            data = await request.json()
            file_path = data.get('file_path')
            target_path = data.get('target_path')
            if not file_path or not target_path:
                return web.Response(text='File path and target path are required', status=400)
            import os
            source_dir = os.path.dirname(file_path)
            if os.path.normpath(source_dir) == os.path.normpath(target_path):
                logger.info(f"Source and target directories are the same: {source_dir}")
                return web.json_response({'success': True, 'message': 'Source and target directories are the same'})
            file_name = os.path.basename(file_path)
            target_file_path = os.path.join(target_path, file_name).replace(os.sep, '/')
            if os.path.exists(target_file_path):
                return web.json_response({
                    'success': False,
                    'error': f"Target file already exists: {target_file_path}"
                }, status=409)
            success = await self.service.scanner.move_model(file_path, target_path)
            if success:
                return web.json_response({'success': True, 'new_file_path': target_file_path})
            else:
                return web.Response(text='Failed to move model', status=500)
        except Exception as e:
            logger.error(f"Error moving model: {e}", exc_info=True)
            return web.Response(text=str(e), status=500)

    async def move_models_bulk(self, request: web.Request) -> web.Response:
        """Handle bulk model move request"""
        try:
            data = await request.json()
            file_paths = data.get('file_paths', [])
            target_path = data.get('target_path')
            if not file_paths or not target_path:
                return web.Response(text='File paths and target path are required', status=400)
            results = []
            import os
            for file_path in file_paths:
                source_dir = os.path.dirname(file_path)
                if os.path.normpath(source_dir) == os.path.normpath(target_path):
                    results.append({
                        "path": file_path,
                        "success": True,
                        "message": "Source and target directories are the same"
                    })
                    continue
                file_name = os.path.basename(file_path)
                target_file_path = os.path.join(target_path, file_name).replace(os.sep, '/')
                if os.path.exists(target_file_path):
                    results.append({
                        "path": file_path,
                        "success": False,
                        "message": f"Target file already exists: {target_file_path}"
                    })
                    continue
                success = await self.service.scanner.move_model(file_path, target_path)
                results.append({
                    "path": file_path,
                    "success": success,
                    "message": "Success" if success else "Failed to move model"
                })
            success_count = sum(1 for r in results if r["success"])
            failure_count = len(results) - success_count
            return web.json_response({
                'success': True,
                'message': f'Moved {success_count} of {len(file_paths)} models',
                'results': results,
                'success_count': success_count,
                'failure_count': failure_count
            })
        except Exception as e:
            logger.error(f"Error moving models in bulk: {e}", exc_info=True)
            return web.Response(text=str(e), status=500)
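
# Illustrative request sketch (not part of the original file). The endpoint
# path and the sample file paths are assumptions; the payload shape matches
# the handler above (a file_paths list plus a single target_path).
import asyncio
import aiohttp

async def move_bulk(base_url: str) -> None:
    payload = {
        'file_paths': ['/models/loras/a.safetensors', '/models/loras/b.safetensors'],
        'target_path': '/models/loras/archive',
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{base_url}/api/loras/move_models_bulk", json=payload) as resp:
            result = await resp.json()
    for item in result.get('results', []):
        print(item['path'], '->', item['message'])

# asyncio.run(move_bulk("http://127.0.0.1:8188"))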

140  py/routes/checkpoint_routes.py  Normal file
@@ -0,0 +1,140 @@
import logging
from aiohttp import web

from .base_model_routes import BaseModelRoutes
from ..services.checkpoint_service import CheckpointService
from ..services.service_registry import ServiceRegistry
from ..config import config

logger = logging.getLogger(__name__)


class CheckpointRoutes(BaseModelRoutes):
    """Checkpoint-specific route controller"""

    def __init__(self):
        """Initialize Checkpoint routes with Checkpoint service"""
        # Service will be initialized later via setup_routes
        self.service = None
        self.civitai_client = None
        self.template_name = "checkpoints.html"

    async def initialize_services(self):
        """Initialize services from ServiceRegistry"""
        checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
        self.service = CheckpointService(checkpoint_scanner)
        self.civitai_client = await ServiceRegistry.get_civitai_client()

        # Initialize parent with the service
        super().__init__(self.service)

    def setup_routes(self, app: web.Application):
        """Setup Checkpoint routes"""
        # Schedule service initialization on app startup
        app.on_startup.append(lambda _: self.initialize_services())

        # Setup common routes with 'checkpoints' prefix (includes page route)
        super().setup_routes(app, 'checkpoints')

    def setup_specific_routes(self, app: web.Application, prefix: str):
        """Setup Checkpoint-specific routes"""
        # Checkpoint-specific CivitAI integration
        app.router.add_get(f'/api/{prefix}/civitai/versions/{{model_id}}', self.get_civitai_versions_checkpoint)

        # Checkpoint info by name
        app.router.add_get(f'/api/{prefix}/info/{{name}}', self.get_checkpoint_info)

        # Checkpoint roots and Unet roots
        app.router.add_get(f'/api/{prefix}/checkpoints_roots', self.get_checkpoints_roots)
        app.router.add_get(f'/api/{prefix}/unet_roots', self.get_unet_roots)

    async def get_checkpoint_info(self, request: web.Request) -> web.Response:
        """Get detailed information for a specific checkpoint by name"""
        try:
            name = request.match_info.get('name', '')
            checkpoint_info = await self.service.get_model_info_by_name(name)

            if checkpoint_info:
                return web.json_response(checkpoint_info)
            else:
                return web.json_response({"error": "Checkpoint not found"}, status=404)

        except Exception as e:
            logger.error(f"Error in get_checkpoint_info: {e}", exc_info=True)
            return web.json_response({"error": str(e)}, status=500)

    async def get_civitai_versions_checkpoint(self, request: web.Request) -> web.Response:
        """Get available versions for a Civitai checkpoint model with local availability info"""
        try:
            model_id = request.match_info['model_id']
            response = await self.civitai_client.get_model_versions(model_id)
            if not response or not response.get('modelVersions'):
                return web.Response(status=404, text="Model not found")

            versions = response.get('modelVersions', [])
            model_type = response.get('type', '')

            # Check model type - should be Checkpoint
            if model_type.lower() != 'checkpoint':
                return web.json_response({
                    'error': f"Model type mismatch. Expected Checkpoint, got {model_type}"
                }, status=400)

            # Check local availability for each version
            for version in versions:
                # Find the primary model file (type="Model" and primary=true) in the files list
                model_file = next((file for file in version.get('files', [])
                                   if file.get('type') == 'Model' and file.get('primary') == True), None)

                # If no primary file found, try to find any model file
                if not model_file:
                    model_file = next((file for file in version.get('files', [])
                                       if file.get('type') == 'Model'), None)

                if model_file:
                    sha256 = model_file.get('hashes', {}).get('SHA256')
                    if sha256:
                        # Set existsLocally and localPath at the version level
                        version['existsLocally'] = self.service.has_hash(sha256)
                        if version['existsLocally']:
                            version['localPath'] = self.service.get_path_by_hash(sha256)

                    # Also set the model file size at the version level for easier access
                    version['modelSizeKB'] = model_file.get('sizeKB')
                else:
                    # No model file found in this version
                    version['existsLocally'] = False

            return web.json_response(versions)
        except Exception as e:
            logger.error(f"Error fetching checkpoint model versions: {e}")
            return web.Response(status=500, text=str(e))

    async def get_checkpoints_roots(self, request: web.Request) -> web.Response:
        """Return the list of checkpoint roots from config"""
        try:
            roots = config.checkpoints_roots
            return web.json_response({
                "success": True,
                "roots": roots
            })
        except Exception as e:
            logger.error(f"Error getting checkpoint roots: {e}", exc_info=True)
            return web.json_response({
                "success": False,
                "error": str(e)
            }, status=500)

    async def get_unet_roots(self, request: web.Request) -> web.Response:
        """Return the list of unet roots from config"""
        try:
            roots = config.unet_roots
            return web.json_response({
                "success": True,
                "roots": roots
            })
        except Exception as e:
            logger.error(f"Error getting unet roots: {e}", exc_info=True)
            return web.json_response({
                "success": False,
                "error": str(e)
            }, status=500)
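
# Illustrative wiring sketch (not part of the original file). It shows how the
# route controller above plugs into an aiohttp application; in ComfyUI the
# host application provides the app instance, so this standalone setup is an
# assumption.
from aiohttp import web

app = web.Application()
checkpoint_routes = CheckpointRoutes()
checkpoint_routes.setup_routes(app)  # registers the page route and /api/checkpoints/... endpoints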

@@ -1,44 +0,0 @@
import os
from aiohttp import web
import jinja2
import logging
from ..config import config
from ..services.settings_manager import settings

logger = logging.getLogger(__name__)
logging.getLogger('asyncio').setLevel(logging.CRITICAL)


class CheckpointsRoutes:
    """Route handlers for Checkpoints management endpoints"""

    def __init__(self):
        self.template_env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(config.templates_path),
            autoescape=True
        )

    async def handle_checkpoints_page(self, request: web.Request) -> web.Response:
        """Handle GET /checkpoints request"""
        try:
            template = self.template_env.get_template('checkpoints.html')
            rendered = template.render(
                is_initializing=False,
                settings=settings,
                request=request
            )

            return web.Response(
                text=rendered,
                content_type='text/html'
            )

        except Exception as e:
            logger.error(f"Error handling checkpoints request: {e}", exc_info=True)
            return web.Response(
                text="Error loading checkpoints page",
                status=500
            )

    def setup_routes(self, app: web.Application):
        """Register routes with the application"""
        app.router.add_get('/checkpoints', self.handle_checkpoints_page)

105  py/routes/embedding_routes.py  Normal file
@@ -0,0 +1,105 @@
import logging
from aiohttp import web

from .base_model_routes import BaseModelRoutes
from ..services.embedding_service import EmbeddingService
from ..services.service_registry import ServiceRegistry

logger = logging.getLogger(__name__)


class EmbeddingRoutes(BaseModelRoutes):
    """Embedding-specific route controller"""

    def __init__(self):
        """Initialize Embedding routes with Embedding service"""
        # Service will be initialized later via setup_routes
        self.service = None
        self.civitai_client = None
        self.template_name = "embeddings.html"

    async def initialize_services(self):
        """Initialize services from ServiceRegistry"""
        embedding_scanner = await ServiceRegistry.get_embedding_scanner()
        self.service = EmbeddingService(embedding_scanner)
        self.civitai_client = await ServiceRegistry.get_civitai_client()

        # Initialize parent with the service
        super().__init__(self.service)

    def setup_routes(self, app: web.Application):
        """Setup Embedding routes"""
        # Schedule service initialization on app startup
        app.on_startup.append(lambda _: self.initialize_services())

        # Setup common routes with 'embeddings' prefix (includes page route)
        super().setup_routes(app, 'embeddings')

    def setup_specific_routes(self, app: web.Application, prefix: str):
        """Setup Embedding-specific routes"""
        # Embedding-specific CivitAI integration
        app.router.add_get(f'/api/{prefix}/civitai/versions/{{model_id}}', self.get_civitai_versions_embedding)

        # Embedding info by name
        app.router.add_get(f'/api/{prefix}/info/{{name}}', self.get_embedding_info)

    async def get_embedding_info(self, request: web.Request) -> web.Response:
        """Get detailed information for a specific embedding by name"""
        try:
            name = request.match_info.get('name', '')
            embedding_info = await self.service.get_model_info_by_name(name)

            if embedding_info:
                return web.json_response(embedding_info)
            else:
                return web.json_response({"error": "Embedding not found"}, status=404)

        except Exception as e:
            logger.error(f"Error in get_embedding_info: {e}", exc_info=True)
            return web.json_response({"error": str(e)}, status=500)

    async def get_civitai_versions_embedding(self, request: web.Request) -> web.Response:
        """Get available versions for a Civitai embedding model with local availability info"""
        try:
            model_id = request.match_info['model_id']
            response = await self.civitai_client.get_model_versions(model_id)
            if not response or not response.get('modelVersions'):
                return web.Response(status=404, text="Model not found")

            versions = response.get('modelVersions', [])
            model_type = response.get('type', '')

            # Check model type - should be TextualInversion (Embedding)
            if model_type.lower() not in ['textualinversion', 'embedding']:
                return web.json_response({
                    'error': f"Model type mismatch. Expected TextualInversion/Embedding, got {model_type}"
                }, status=400)

            # Check local availability for each version
            for version in versions:
                # Find the primary model file (type="Model" and primary=true) in the files list
                model_file = next((file for file in version.get('files', [])
                                   if file.get('type') == 'Model' and file.get('primary') == True), None)

                # If no primary file found, try to find any model file
                if not model_file:
                    model_file = next((file for file in version.get('files', [])
                                       if file.get('type') == 'Model'), None)

                if model_file:
                    sha256 = model_file.get('hashes', {}).get('SHA256')
                    if sha256:
                        # Set existsLocally and localPath at the version level
                        version['existsLocally'] = self.service.has_hash(sha256)
                        if version['existsLocally']:
                            version['localPath'] = self.service.get_path_by_hash(sha256)

                    # Also set the model file size at the version level for easier access
                    version['modelSizeKB'] = model_file.get('sizeKB')
                else:
                    # No model file found in this version
                    version['existsLocally'] = False

            return web.json_response(versions)
        except Exception as e:
            logger.error(f"Error fetching embedding model versions: {e}")
            return web.Response(status=500, text=str(e))

67  py/routes/example_images_routes.py  Normal file
@@ -0,0 +1,67 @@
import logging
from ..utils.example_images_download_manager import DownloadManager
from ..utils.example_images_processor import ExampleImagesProcessor
from ..utils.example_images_file_manager import ExampleImagesFileManager

logger = logging.getLogger(__name__)


class ExampleImagesRoutes:
    """Routes for example images related functionality"""

    @staticmethod
    def setup_routes(app):
        """Register example images routes"""
        app.router.add_post('/api/download-example-images', ExampleImagesRoutes.download_example_images)
        app.router.add_post('/api/import-example-images', ExampleImagesRoutes.import_example_images)
        app.router.add_get('/api/example-images-status', ExampleImagesRoutes.get_example_images_status)
        app.router.add_post('/api/pause-example-images', ExampleImagesRoutes.pause_example_images)
        app.router.add_post('/api/resume-example-images', ExampleImagesRoutes.resume_example_images)
        app.router.add_post('/api/open-example-images-folder', ExampleImagesRoutes.open_example_images_folder)
        app.router.add_get('/api/example-image-files', ExampleImagesRoutes.get_example_image_files)
        app.router.add_get('/api/has-example-images', ExampleImagesRoutes.has_example_images)
        app.router.add_post('/api/delete-example-image', ExampleImagesRoutes.delete_example_image)

    @staticmethod
    async def download_example_images(request):
        """Download example images for models from Civitai"""
        return await DownloadManager.start_download(request)

    @staticmethod
    async def get_example_images_status(request):
        """Get the current status of example images download"""
        return await DownloadManager.get_status(request)

    @staticmethod
    async def pause_example_images(request):
        """Pause the example images download"""
        return await DownloadManager.pause_download(request)

    @staticmethod
    async def resume_example_images(request):
        """Resume the example images download"""
        return await DownloadManager.resume_download(request)

    @staticmethod
    async def open_example_images_folder(request):
        """Open the example images folder for a specific model"""
        return await ExampleImagesFileManager.open_folder(request)

    @staticmethod
    async def get_example_image_files(request):
        """Get list of example image files for a specific model"""
        return await ExampleImagesFileManager.get_files(request)

    @staticmethod
    async def import_example_images(request):
        """Import local example images for a model"""
        return await ExampleImagesProcessor.import_images(request)

    @staticmethod
    async def has_example_images(request):
        """Check if example images folder exists and is not empty for a model"""
        return await ExampleImagesFileManager.has_images(request)

    @staticmethod
    async def delete_example_image(request):
        """Delete a custom example image for a model"""
        return await ExampleImagesProcessor.delete_custom_image(request)
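
# Illustrative client sketch (not part of the original file). The route path
# /api/example-images-status is taken verbatim from setup_routes above; the
# shape of the returned JSON is up to DownloadManager and is not assumed here.
import asyncio
import aiohttp

async def check_example_images_status(base_url: str) -> None:
    async with aiohttp.ClientSession() as session:
        async with session.get(f"{base_url}/api/example-images-status") as resp:
            print(await resp.json())

# asyncio.run(check_example_images_status("http://127.0.0.1:8188"))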

py/routes/lora_routes.py
@@ -1,178 +1,380 @@
import os
from aiohttp import web
import jinja2
from typing import Dict, List
import asyncio
import logging
from ..services.lora_scanner import LoraScanner
from ..services.recipe_scanner import RecipeScanner
from ..config import config
from ..services.settings_manager import settings  # Add this import
from aiohttp import web
from typing import Dict
from server import PromptServer  # type: ignore

from .base_model_routes import BaseModelRoutes
from ..services.lora_service import LoraService
from ..services.service_registry import ServiceRegistry
from ..utils.routes_common import ModelRouteUtils
from ..utils.utils import get_lora_info

logger = logging.getLogger(__name__)
logging.getLogger('asyncio').setLevel(logging.CRITICAL)

class LoraRoutes:
    """Route handlers for LoRA management endpoints"""
class LoraRoutes(BaseModelRoutes):
    """LoRA-specific route controller"""

    def __init__(self):
        self.scanner = LoraScanner()
        self.recipe_scanner = RecipeScanner(self.scanner)
        self.template_env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(config.templates_path),
            autoescape=True
        )

    def format_lora_data(self, lora: Dict) -> Dict:
        """Format LoRA data for template rendering"""
        return {
            "model_name": lora["model_name"],
            "file_name": lora["file_name"],
            "preview_url": config.get_preview_static_url(lora["preview_url"]),
            "preview_nsfw_level": lora.get("preview_nsfw_level", 0),
            "base_model": lora["base_model"],
            "folder": lora["folder"],
            "sha256": lora["sha256"],
            "file_path": lora["file_path"].replace(os.sep, "/"),
            "size": lora["size"],
            "tags": lora["tags"],
            "modelDescription": lora["modelDescription"],
            "usage_tips": lora["usage_tips"],
            "notes": lora["notes"],
            "modified": lora["modified"],
            "from_civitai": lora.get("from_civitai", True),
            "civitai": self._filter_civitai_data(lora.get("civitai", {}))
        }

    def _filter_civitai_data(self, data: Dict) -> Dict:
        """Filter relevant fields from CivitAI data"""
        if not data:
            return {}

        fields = [
            "id", "modelId", "name", "createdAt", "updatedAt",
            "publishedAt", "trainedWords", "baseModel", "description",
            "model", "images"
        ]
        return {k: data[k] for k in fields if k in data}

    async def handle_loras_page(self, request: web.Request) -> web.Response:
        """Handle GET /loras request"""
        try:
            # Check cache initialization status with stricter conditions
            is_initializing = (
                self.scanner._cache is None or
                (self.scanner._initialization_task is not None and
                 not self.scanner._initialization_task.done()) or
                (self.scanner._cache is not None and len(self.scanner._cache.raw_data) == 0 and
                 self.scanner._initialization_task is not None)
            )

            if is_initializing:
                # While initializing, return a page that only shows a loading hint
                template = self.template_env.get_template('loras.html')
                rendered = template.render(
                    folders=[],  # empty folder list
                    is_initializing=True,  # new flag
                    settings=settings,  # Pass settings to template
                    request=request  # Pass the request object to the template
                )

                logger.info("Loras page is initializing, returning loading page")
            else:
                # Normal flow - but do not wait for a cache refresh
                try:
                    cache = await self.scanner.get_cached_data(force_refresh=False)
                    template = self.template_env.get_template('loras.html')
                    rendered = template.render(
                        folders=cache.folders,
                        is_initializing=False,
                        settings=settings,  # Pass settings to template
                        request=request  # Pass the request object to the template
                    )
                    logger.info(f"Loras page loaded successfully with {len(cache.raw_data)} items")
                except Exception as cache_error:
                    logger.error(f"Error loading cache data: {cache_error}")
                    # If fetching the cache fails, also show the initialization page
                    template = self.template_env.get_template('loras.html')
                    rendered = template.render(
                        folders=[],
                        is_initializing=True,
                        settings=settings,
                        request=request
                    )
                    logger.info("Cache error, returning initialization page")

            return web.Response(
                text=rendered,
                content_type='text/html'
            )

        except Exception as e:
            logger.error(f"Error handling loras request: {e}", exc_info=True)
            return web.Response(
                text="Error loading loras page",
                status=500
            )

    async def handle_recipes_page(self, request: web.Request) -> web.Response:
        """Handle GET /loras/recipes request"""
        try:
            # Check cache initialization status
            is_initializing = (
                self.recipe_scanner._cache is None and
                (self.recipe_scanner._initialization_task is not None and
                 not self.recipe_scanner._initialization_task.done())
            )

            if is_initializing:
                # If initializing, return a loading page
                template = self.template_env.get_template('recipes.html')
                rendered = template.render(
                    is_initializing=True,
                    settings=settings,
                    request=request  # Pass the request object to the template
                )
            else:
                # return empty recipes
                recipes_data = []

                template = self.template_env.get_template('recipes.html')
                rendered = template.render(
                    recipes=recipes_data,
                    is_initializing=False,
                    settings=settings,
                    request=request  # Pass the request object to the template
                )

            return web.Response(
                text=rendered,
                content_type='text/html'
            )

        except Exception as e:
            logger.error(f"Error handling recipes request: {e}", exc_info=True)
            return web.Response(
                text="Error loading recipes page",
                status=500
            )

    def _format_recipe_file_url(self, file_path: str) -> str:
        """Format file path for recipe image as a URL - same as in recipe_routes"""
        try:
            # Return the file URL directly for the first lora root's preview
            recipes_dir = os.path.join(config.loras_roots[0], "recipes").replace(os.sep, '/')
            if file_path.replace(os.sep, '/').startswith(recipes_dir):
                relative_path = os.path.relpath(file_path, config.loras_roots[0]).replace(os.sep, '/')
                return f"/loras_static/root1/preview/{relative_path}"

            # If not in recipes dir, try to create a valid URL from the file path
            file_name = os.path.basename(file_path)
            return f"/loras_static/root1/preview/recipes/{file_name}"
        except Exception as e:
            logger.error(f"Error formatting recipe file URL: {e}", exc_info=True)
            return '/loras_static/images/no-preview.png'  # Return default image on error

        """Initialize LoRA routes with LoRA service"""
        # Service will be initialized later via setup_routes
        self.service = None
        self.civitai_client = None
        self.template_name = "loras.html"

    async def initialize_services(self):
        """Initialize services from ServiceRegistry"""
        lora_scanner = await ServiceRegistry.get_lora_scanner()
        self.service = LoraService(lora_scanner)
        self.civitai_client = await ServiceRegistry.get_civitai_client()

        # Initialize parent with the service
        super().__init__(self.service)

    def setup_routes(self, app: web.Application):
        """Register routes with the application"""
        app.router.add_get('/loras', self.handle_loras_page)
        app.router.add_get('/loras/recipes', self.handle_recipes_page)
        """Setup LoRA routes"""
        # Schedule service initialization on app startup
        app.on_startup.append(lambda _: self.initialize_services())

        # Setup common routes with 'loras' prefix (includes page route)
        super().setup_routes(app, 'loras')

    def setup_specific_routes(self, app: web.Application, prefix: str):
        """Setup LoRA-specific routes"""
        # LoRA-specific query routes
        app.router.add_get(f'/api/{prefix}/letter-counts', self.get_letter_counts)
        app.router.add_get(f'/api/{prefix}/get-notes', self.get_lora_notes)
        app.router.add_get(f'/api/{prefix}/get-trigger-words', self.get_lora_trigger_words)
        app.router.add_get(f'/api/{prefix}/preview-url', self.get_lora_preview_url)
        app.router.add_get(f'/api/{prefix}/civitai-url', self.get_lora_civitai_url)
        app.router.add_get(f'/api/{prefix}/model-description', self.get_lora_model_description)

        # CivitAI integration with LoRA-specific validation
        app.router.add_get(f'/api/{prefix}/civitai/versions/{{model_id}}', self.get_civitai_versions_lora)
        app.router.add_get(f'/api/{prefix}/civitai/model/version/{{modelVersionId}}', self.get_civitai_model_by_version)
        app.router.add_get(f'/api/{prefix}/civitai/model/hash/{{hash}}', self.get_civitai_model_by_hash)

        # ComfyUI integration
        app.router.add_post(f'/api/{prefix}/get_trigger_words', self.get_trigger_words)

    def _parse_specific_params(self, request: web.Request) -> Dict:
        """Parse LoRA-specific parameters"""
        params = {}

        # LoRA-specific parameters
        if 'first_letter' in request.query:
            params['first_letter'] = request.query.get('first_letter')

        # Handle fuzzy search parameter name variation
        if request.query.get('fuzzy') == 'true':
            params['fuzzy_search'] = True

        # Handle additional filter parameters for LoRAs
        if 'lora_hash' in request.query:
            if not params.get('hash_filters'):
                params['hash_filters'] = {}
            params['hash_filters']['single_hash'] = request.query['lora_hash'].lower()
        elif 'lora_hashes' in request.query:
            if not params.get('hash_filters'):
                params['hash_filters'] = {}
            params['hash_filters']['multiple_hashes'] = [h.lower() for h in request.query['lora_hashes'].split(',')]

        return params
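
    # Illustrative mapping (not part of the original file): a request such as
    #   GET /api/loras?first_letter=A&fuzzy=true&lora_hashes=ABC123,DEF456
    # would be parsed by the method above into
    #   {'first_letter': 'A', 'fuzzy_search': True,
    #    'hash_filters': {'multiple_hashes': ['abc123', 'def456']}}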

    # LoRA-specific route handlers
    async def get_letter_counts(self, request: web.Request) -> web.Response:
        """Get count of LoRAs for each letter of the alphabet"""
        try:
            letter_counts = await self.service.get_letter_counts()
            return web.json_response({
                'success': True,
                'letter_counts': letter_counts
            })
        except Exception as e:
            logger.error(f"Error getting letter counts: {e}")
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    async def get_lora_notes(self, request: web.Request) -> web.Response:
        """Get notes for a specific LoRA file"""
        try:
            lora_name = request.query.get('name')
            if not lora_name:
                return web.Response(text='Lora file name is required', status=400)

            notes = await self.service.get_lora_notes(lora_name)
            if notes is not None:
                return web.json_response({
                    'success': True,
                    'notes': notes
                })
            else:
                return web.json_response({
                    'success': False,
                    'error': 'LoRA not found in cache'
                }, status=404)

        except Exception as e:
            logger.error(f"Error getting lora notes: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    async def get_lora_trigger_words(self, request: web.Request) -> web.Response:
        """Get trigger words for a specific LoRA file"""
        try:
            lora_name = request.query.get('name')
            if not lora_name:
                return web.Response(text='Lora file name is required', status=400)

            trigger_words = await self.service.get_lora_trigger_words(lora_name)
            return web.json_response({
                'success': True,
                'trigger_words': trigger_words
            })

        except Exception as e:
            logger.error(f"Error getting lora trigger words: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    async def get_lora_preview_url(self, request: web.Request) -> web.Response:
        """Get the static preview URL for a LoRA file"""
        try:
            lora_name = request.query.get('name')
            if not lora_name:
                return web.Response(text='Lora file name is required', status=400)

            preview_url = await self.service.get_lora_preview_url(lora_name)
            if preview_url:
                return web.json_response({
                    'success': True,
                    'preview_url': preview_url
                })
            else:
                return web.json_response({
                    'success': False,
                    'error': 'No preview URL found for the specified lora'
                }, status=404)

        except Exception as e:
            logger.error(f"Error getting lora preview URL: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    async def get_lora_civitai_url(self, request: web.Request) -> web.Response:
        """Get the Civitai URL for a LoRA file"""
        try:
            lora_name = request.query.get('name')
            if not lora_name:
                return web.Response(text='Lora file name is required', status=400)

            result = await self.service.get_lora_civitai_url(lora_name)
            if result['civitai_url']:
                return web.json_response({
                    'success': True,
                    **result
                })
            else:
                return web.json_response({
                    'success': False,
                    'error': 'No Civitai data found for the specified lora'
                }, status=404)

        except Exception as e:
            logger.error(f"Error getting lora Civitai URL: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    # CivitAI integration methods
    async def get_civitai_versions_lora(self, request: web.Request) -> web.Response:
        """Get available versions for a Civitai LoRA model with local availability info"""
        try:
            model_id = request.match_info['model_id']
            response = await self.civitai_client.get_model_versions(model_id)
            if not response or not response.get('modelVersions'):
                return web.Response(status=404, text="Model not found")

            versions = response.get('modelVersions', [])
            model_type = response.get('type', '')

            # Check model type - should be LORA, LoCon, or DORA
            from ..utils.constants import VALID_LORA_TYPES
            if model_type.lower() not in VALID_LORA_TYPES:
                return web.json_response({
                    'error': f"Model type mismatch. Expected LORA or LoCon, got {model_type}"
                }, status=400)

            # Check local availability for each version
            for version in versions:
                # Find the model file (type="Model") in the files list
                model_file = next((file for file in version.get('files', [])
                                   if file.get('type') == 'Model'), None)

                if model_file:
                    sha256 = model_file.get('hashes', {}).get('SHA256')
                    if sha256:
                        # Set existsLocally and localPath at the version level
                        version['existsLocally'] = self.service.has_hash(sha256)
                        if version['existsLocally']:
                            version['localPath'] = self.service.get_path_by_hash(sha256)

                    # Also set the model file size at the version level for easier access
                    version['modelSizeKB'] = model_file.get('sizeKB')
                else:
                    # No model file found in this version
                    version['existsLocally'] = False

            return web.json_response(versions)
        except Exception as e:
            logger.error(f"Error fetching LoRA model versions: {e}")
            return web.Response(status=500, text=str(e))

    async def get_civitai_model_by_version(self, request: web.Request) -> web.Response:
        """Get CivitAI model details by model version ID"""
        try:
            model_version_id = request.match_info.get('modelVersionId')

            # Get model details from Civitai API
            model, error_msg = await self.civitai_client.get_model_version_info(model_version_id)

            if not model:
                # Log warning for failed model retrieval
                logger.warning(f"Failed to fetch model version {model_version_id}: {error_msg}")

                # Determine status code based on error message
                status_code = 404 if error_msg and "not found" in error_msg.lower() else 500

                return web.json_response({
                    "success": False,
                    "error": error_msg or "Failed to fetch model information"
                }, status=status_code)

            return web.json_response(model)
        except Exception as e:
            logger.error(f"Error fetching model details: {e}")
            return web.json_response({
                "success": False,
                "error": str(e)
            }, status=500)

    async def get_civitai_model_by_hash(self, request: web.Request) -> web.Response:
        """Get CivitAI model details by hash"""
        try:
            hash = request.match_info.get('hash')
            model = await self.civitai_client.get_model_by_hash(hash)
            return web.json_response(model)
        except Exception as e:
            logger.error(f"Error fetching model details by hash: {e}")
            return web.json_response({
                "success": False,
                "error": str(e)
            }, status=500)

    async def get_lora_model_description(self, request: web.Request) -> web.Response:
        """Get model description for a Lora model"""
        try:
            # Get parameters
            model_id = request.query.get('model_id')
            file_path = request.query.get('file_path')

            if not model_id:
                return web.json_response({
                    'success': False,
                    'error': 'Model ID is required'
                }, status=400)

            # Check if we already have the description stored in metadata
            description = None
            tags = []
            creator = {}
            if file_path:
                import os
                from ..utils.metadata_manager import MetadataManager
                metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
                metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
                description = metadata.get('modelDescription')
                tags = metadata.get('tags', [])
                creator = metadata.get('creator', {})

            # If description is not in metadata, fetch from CivitAI
            if not description:
                logger.info(f"Fetching model metadata for model ID: {model_id}")
                model_metadata, _ = await self.civitai_client.get_model_metadata(model_id)

                if model_metadata:
                    description = model_metadata.get('description')
                    tags = model_metadata.get('tags', [])
                    creator = model_metadata.get('creator', {})

                # Save the metadata to file if we have a file path and got metadata
                if file_path:
                    try:
                        metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
                        metadata = await ModelRouteUtils.load_local_metadata(metadata_path)

                        metadata['modelDescription'] = description
                        metadata['tags'] = tags
                        # Ensure the civitai dict exists
                        if 'civitai' not in metadata:
                            metadata['civitai'] = {}
                        # Store creator in the civitai nested structure
                        metadata['civitai']['creator'] = creator

                        await MetadataManager.save_metadata(file_path, metadata, True)
                    except Exception as e:
                        logger.error(f"Error saving model metadata: {e}")

            return web.json_response({
                'success': True,
                'description': description or "<p>No model description available.</p>",
                'tags': tags,
                'creator': creator
            })

        except Exception as e:
            logger.error(f"Error getting model metadata: {e}")
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    async def get_trigger_words(self, request: web.Request) -> web.Response:
        """Get trigger words for specified LoRA models"""
        try:
            json_data = await request.json()
            lora_names = json_data.get("lora_names", [])
            node_ids = json_data.get("node_ids", [])

            all_trigger_words = []
            for lora_name in lora_names:
                _, trigger_words = get_lora_info(lora_name)
                all_trigger_words.extend(trigger_words)

            # Format the trigger words
            trigger_words_text = ",, ".join(all_trigger_words) if all_trigger_words else ""

            # Send update to all connected trigger word toggle nodes
            for node_id in node_ids:
                PromptServer.instance.send_sync("trigger_word_update", {
                    "id": node_id,
                    "message": trigger_words_text
                })

            return web.json_response({"success": True})

        except Exception as e:
            logger.error(f"Error getting trigger words: {e}")
            return web.json_response({
                "success": False,
                "error": str(e)
            }, status=500)

710  py/routes/misc_routes.py  Normal file
@@ -0,0 +1,710 @@
import json
import logging
import os
import sys
import threading
import asyncio
from server import PromptServer  # type: ignore
from aiohttp import web
from ..services.settings_manager import settings
from ..utils.usage_stats import UsageStats
from ..utils.lora_metadata import extract_trained_words
from ..config import config
from ..utils.constants import SUPPORTED_MEDIA_EXTENSIONS, NODE_TYPES, DEFAULT_NODE_COLOR
from ..services.service_registry import ServiceRegistry
import re

logger = logging.getLogger(__name__)

standalone_mode = 'nodes' not in sys.modules

# Node registry for tracking active workflow nodes
class NodeRegistry:
    """Thread-safe registry for tracking Lora nodes in active workflows"""

    def __init__(self):
        self._lock = threading.RLock()
        self._nodes = {}  # node_id -> node_info
        self._registry_updated = threading.Event()

    def register_nodes(self, nodes):
        """Register multiple nodes at once, replacing the existing registry"""
        with self._lock:
            # Clear existing registry
            self._nodes.clear()

            # Register all new nodes
            for node in nodes:
                node_id = node['node_id']
                node_type = node.get('type', '')

                # Convert node type name to integer
                type_id = NODE_TYPES.get(node_type, 0)  # 0 for unknown types

                # Handle null bgcolor with default color
                bgcolor = node.get('bgcolor')
                if bgcolor is None:
                    bgcolor = DEFAULT_NODE_COLOR

                self._nodes[node_id] = {
                    'id': node_id,
                    'bgcolor': bgcolor,
                    'title': node.get('title'),
                    'type': type_id,
                    'type_name': node_type
                }

            logger.debug(f"Registered {len(nodes)} nodes in registry")

            # Signal that registry has been updated
            self._registry_updated.set()

    def get_registry(self):
        """Get current registry information"""
        with self._lock:
            return {
                'nodes': dict(self._nodes),  # Return a copy
                'node_count': len(self._nodes)
            }

    def clear_registry(self):
        """Clear the entire registry"""
        with self._lock:
            self._nodes.clear()
            logger.info("Node registry cleared")

    def wait_for_update(self, timeout=1.0):
        """Wait for registry update with timeout"""
        self._registry_updated.clear()
        return self._registry_updated.wait(timeout)

# Global registry instance
node_registry = NodeRegistry()
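
# Illustrative usage sketch (not part of the original file). The node type
# string below is hypothetical; real keys come from NODE_TYPES in
# ..utils.constants.
sample_nodes = [
    {'node_id': 12, 'type': 'SomeLoraLoaderType', 'bgcolor': '#353535', 'title': 'Main LoRA'},
    {'node_id': 27, 'type': 'UnknownType', 'bgcolor': None, 'title': None},  # falls back to DEFAULT_NODE_COLOR
]
node_registry.register_nodes(sample_nodes)
print(node_registry.get_registry()['node_count'])  # -> 2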

class MiscRoutes:
    """Miscellaneous routes for various utility functions"""

    @staticmethod
    def setup_routes(app):
        """Register miscellaneous routes"""
        app.router.add_post('/api/settings', MiscRoutes.update_settings)

        # Add new route for clearing cache
        app.router.add_post('/api/clear-cache', MiscRoutes.clear_cache)

        app.router.add_get('/api/health-check', lambda request: web.json_response({'status': 'ok'}))

        # Usage stats routes
        app.router.add_post('/api/update-usage-stats', MiscRoutes.update_usage_stats)
        app.router.add_get('/api/get-usage-stats', MiscRoutes.get_usage_stats)

        # Lora code update endpoint
        app.router.add_post('/api/update-lora-code', MiscRoutes.update_lora_code)

        # Add new route for getting trained words
        app.router.add_get('/api/trained-words', MiscRoutes.get_trained_words)

        # Add new route for getting model example files
        app.router.add_get('/api/model-example-files', MiscRoutes.get_model_example_files)

        # Node registry endpoints
        app.router.add_post('/api/register-nodes', MiscRoutes.register_nodes)
        app.router.add_get('/api/get-registry', MiscRoutes.get_registry)

        # Add new route for checking if a model exists in the library
        app.router.add_get('/api/check-model-exists', MiscRoutes.check_model_exists)

    @staticmethod
    async def clear_cache(request):
        """Clear all cache files from the cache folder"""
        try:
            # Get the cache folder path (relative to project directory)
            project_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
            cache_folder = os.path.join(project_dir, 'cache')

            # Check if cache folder exists
            if not os.path.exists(cache_folder):
                logger.info("Cache folder does not exist, nothing to clear")
                return web.json_response({'success': True, 'message': 'No cache folder found'})

            # Get list of cache files before deleting for reporting
            cache_files = [f for f in os.listdir(cache_folder) if os.path.isfile(os.path.join(cache_folder, f))]
            deleted_files = []

            # Delete each .msgpack file in the cache folder
            for filename in cache_files:
                if filename.endswith('.msgpack'):
                    file_path = os.path.join(cache_folder, filename)
                    try:
                        os.remove(file_path)
                        deleted_files.append(filename)
                        logger.info(f"Deleted cache file: {filename}")
                    except Exception as e:
                        logger.error(f"Failed to delete {filename}: {e}")
                        return web.json_response({
                            'success': False,
                            'error': f"Failed to delete {filename}: {str(e)}"
                        }, status=500)

            return web.json_response({
                'success': True,
                'message': f"Successfully cleared {len(deleted_files)} cache files",
                'deleted_files': deleted_files
            })

        except Exception as e:
            logger.error(f"Error clearing cache files: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    @staticmethod
    async def update_settings(request):
        """Update application settings"""
        try:
            data = await request.json()

            # Validate and update settings
            for key, value in data.items():
                if value == settings.get(key):
                    # No change, skip
                    continue
                # Special handling for example_images_path - verify path exists
                if key == 'example_images_path' and value:
                    if not os.path.exists(value):
                        return web.json_response({
                            'success': False,
                            'error': f"Path does not exist: {value}"
                        })

                    # Path changed - server restart required for new path to take effect
                    old_path = settings.get('example_images_path')
                    if old_path != value:
                        logger.info(f"Example images path changed to {value} - server restart required")

                # Special handling for base_model_path_mappings - parse JSON string
                if key == 'base_model_path_mappings' and value:
                    try:
                        value = json.loads(value)
                    except json.JSONDecodeError:
                        return web.json_response({
                            'success': False,
                            'error': f"Invalid JSON format for base_model_path_mappings: {value}"
                        })

                # Save to settings
                settings.set(key, value)

            return web.json_response({'success': True})
        except Exception as e:
            logger.error(f"Error updating settings: {e}", exc_info=True)
            return web.Response(status=500, text=str(e))
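
# Illustrative request sketch (not part of the original file). The endpoint
# /api/settings is registered above; the example path is hypothetical, and
# base_model_path_mappings is sent as a JSON string because the handler parses
# it with json.loads.
import asyncio
import aiohttp

async def save_settings(base_url: str) -> None:
    payload = {
        'example_images_path': '/data/example_images',  # hypothetical path; must exist on the server
        'base_model_path_mappings': '{"SD 1.5": "sd15"}',  # JSON string, parsed server-side
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{base_url}/api/settings", json=payload) as resp:
            print(await resp.json())

# asyncio.run(save_settings("http://127.0.0.1:8188"))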
|
||||
|
||||
@staticmethod
|
||||
async def update_usage_stats(request):
|
||||
"""
|
||||
Update usage statistics based on a prompt_id
|
||||
|
||||
Expects a JSON body with:
|
||||
{
|
||||
"prompt_id": "string"
|
||||
}
|
||||
"""
|
||||
try:
|
||||
# Parse the request body
|
||||
data = await request.json()
|
||||
prompt_id = data.get('prompt_id')
|
||||
|
||||
if not prompt_id:
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': 'Missing prompt_id'
|
||||
}, status=400)
|
||||
|
||||
# Call the UsageStats to process this prompt_id synchronously
|
||||
usage_stats = UsageStats()
|
||||
await usage_stats.process_execution(prompt_id)
|
||||
|
||||
return web.json_response({
|
||||
'success': True
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to update usage stats: {e}", exc_info=True)
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}, status=500)
|
||||
|
||||
@staticmethod
|
||||
async def get_usage_stats(request):
|
||||
"""Get current usage statistics"""
|
||||
try:
|
||||
usage_stats = UsageStats()
|
||||
stats = await usage_stats.get_stats()
|
||||
|
||||
# Add version information to help clients handle format changes
|
||||
stats_response = {
|
||||
'success': True,
|
||||
'data': stats,
|
||||
'format_version': 2 # Indicate this is the new format with history
|
||||
}
|
||||
|
||||
return web.json_response(stats_response)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to get usage stats: {e}", exc_info=True)
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}, status=500)
|
||||
|
||||
    @staticmethod
    async def update_lora_code(request):
        """
        Update Lora code in ComfyUI nodes

        Expects a JSON body with:
        {
            "node_ids": [123, 456],  # Optional - List of node IDs to update (for browser mode)
            "lora_code": "<lora:modelname:1.0>",  # The Lora code to send
            "mode": "append"  # or "replace" - whether to append or replace existing code
        }
        """
        try:
            # Parse the request body
            data = await request.json()
            node_ids = data.get('node_ids')
            lora_code = data.get('lora_code', '')
            mode = data.get('mode', 'append')

            if not lora_code:
                return web.json_response({
                    'success': False,
                    'error': 'Missing lora_code parameter'
                }, status=400)

            results = []

            # Desktop mode: no specific node_ids provided
            if node_ids is None:
                try:
                    # Send broadcast message with id=-1 to all Lora Loader nodes
                    PromptServer.instance.send_sync("lora_code_update", {
                        "id": -1,
                        "lora_code": lora_code,
                        "mode": mode
                    })
                    results.append({
                        'node_id': 'broadcast',
                        'success': True
                    })
                except Exception as e:
                    logger.error(f"Error broadcasting lora code: {e}")
                    results.append({
                        'node_id': 'broadcast',
                        'success': False,
                        'error': str(e)
                    })
            else:
                # Browser mode: send to specific nodes
                for node_id in node_ids:
                    try:
                        # Send the message to the frontend
                        PromptServer.instance.send_sync("lora_code_update", {
                            "id": node_id,
                            "lora_code": lora_code,
                            "mode": mode
                        })
                        results.append({
                            'node_id': node_id,
                            'success': True
                        })
                    except Exception as e:
                        logger.error(f"Error sending lora code to node {node_id}: {e}")
                        results.append({
                            'node_id': node_id,
                            'success': False,
                            'error': str(e)
                        })

            return web.json_response({
                'success': True,
                'results': results
            })

        except Exception as e:
            logger.error(f"Failed to update lora code: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

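The two dispatch modes differ only in the id field of the lora_code_update event: -1 broadcasts to every Lora Loader node, while an explicit node_ids list targets individual nodes. A caller sketch (the /api/update-lora-code path is an assumption; route registration is outside this hunk):

import asyncio
import aiohttp

async def push_lora_code(base_url: str, code: str, node_ids=None, mode: str = "append"):
    # node_ids=None triggers the desktop-mode broadcast (id=-1) on the server
    body = {"lora_code": code, "mode": mode}
    if node_ids is not None:
        body["node_ids"] = node_ids  # browser mode: target specific nodes
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{base_url}/api/update-lora-code", json=body) as resp:
            return await resp.json()

# asyncio.run(push_lora_code("http://127.0.0.1:8188", "<lora:myLora:0.8>"))
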
    @staticmethod
    async def get_trained_words(request):
        """
        Get trained words from a safetensors file, sorted by frequency

        Expects a query parameter:
            file_path: Path to the safetensors file
        """
        try:
            # Get file path from query parameters
            file_path = request.query.get('file_path')

            if not file_path:
                return web.json_response({
                    'success': False,
                    'error': 'Missing file_path parameter'
                }, status=400)

            # Check if file exists and is a safetensors file
            if not os.path.exists(file_path):
                return web.json_response({
                    'success': False,
                    'error': f"File not found: {file_path}"
                }, status=404)

            if not file_path.lower().endswith('.safetensors'):
                return web.json_response({
                    'success': False,
                    'error': 'File is not a safetensors file'
                }, status=400)

            # Extract trained words and class_tokens
            trained_words, class_tokens = await extract_trained_words(file_path)

            # Return result with both trained words and class tokens
            return web.json_response({
                'success': True,
                'trained_words': trained_words,
                'class_tokens': class_tokens
            })

        except Exception as e:
            logger.error(f"Failed to get trained words: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    @staticmethod
    async def get_model_example_files(request):
        """
        Get list of example image files for a specific model based on file path

        Expects:
        - file_path in query parameters

        Returns:
        - List of image files with their paths as static URLs
        """
        try:
            # Get the model file path from query parameters
            file_path = request.query.get('file_path')

            if not file_path:
                return web.json_response({
                    'success': False,
                    'error': 'Missing file_path parameter'
                }, status=400)

            # Extract directory and base filename
            model_dir = os.path.dirname(file_path)
            model_filename = os.path.basename(file_path)
            model_name = os.path.splitext(model_filename)[0]

            # Check if the directory exists
            if not os.path.exists(model_dir):
                return web.json_response({
                    'success': False,
                    'error': 'Model directory not found',
                    'files': []
                }, status=404)

            # Look for files matching the pattern modelname.example.<index>.<ext>
            files = []
            pattern = f"{model_name}.example."

            for file in os.listdir(model_dir):
                file_lower = file.lower()
                if file_lower.startswith(pattern.lower()):
                    file_full_path = os.path.join(model_dir, file)
                    if os.path.isfile(file_full_path):
                        # Check if the file is a supported media file
                        file_ext = os.path.splitext(file)[1].lower()
                        if (file_ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or
                                file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']):

                            # Extract the index from the filename
                            try:
                                # Extract the part after '.example.' and before the file extension
                                index_part = file[len(pattern):].split('.')[0]
                                # Try to parse it as an integer
                                index = int(index_part)
                            except (ValueError, IndexError):
                                # If we can't parse the index, use infinity to sort at the end
                                index = float('inf')

                            # Convert file path to static URL
                            static_url = config.get_preview_static_url(file_full_path)

                            files.append({
                                'name': file,
                                'path': static_url,
                                'extension': file_ext,
                                'is_video': file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos'],
                                'index': index
                            })

            # Sort files by their index for consistent ordering
            files.sort(key=lambda x: x['index'])
            # Remove the index field as it's only used for sorting
            for file in files:
                file.pop('index', None)

            return web.json_response({
                'success': True,
                'files': files
            })

        except Exception as e:
            logger.error(f"Failed to get model example files: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

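The handler leans entirely on the modelname.example.<index>.<ext> naming convention; the numeric index exists only to order results and is stripped before the response is sent. A standalone illustration of the index recovery (it mirrors the parsing above, with hypothetical filenames):

def example_index(model_name: str, filename: str) -> float:
    # Mirrors the parsing in get_model_example_files
    pattern = f"{model_name}.example."
    if not filename.lower().startswith(pattern.lower()):
        return float('inf')
    try:
        return int(filename[len(pattern):].split('.')[0])  # "x.example.2.png" -> 2
    except (ValueError, IndexError):
        return float('inf')  # unparseable indices sort last

names = ["myLora.example.2.png", "myLora.example.0.jpg", "myLora.example.final.png"]
print(sorted(names, key=lambda n: example_index("myLora", n)))
# ['myLora.example.0.jpg', 'myLora.example.2.png', 'myLora.example.final.png']
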
    @staticmethod
    async def register_nodes(request):
        """
        Register multiple Lora nodes at once

        Expects a JSON body with:
        {
            "nodes": [
                {
                    "node_id": 123,
                    "bgcolor": "#535",
                    "title": "Lora Loader (LoraManager)"
                },
                ...
            ]
        }
        """
        try:
            data = await request.json()

            # Validate required fields
            nodes = data.get('nodes', [])

            if not isinstance(nodes, list):
                return web.json_response({
                    'success': False,
                    'error': 'nodes must be a list'
                }, status=400)

            # Validate each node
            for i, node in enumerate(nodes):
                if not isinstance(node, dict):
                    return web.json_response({
                        'success': False,
                        'error': f'Node {i} must be an object'
                    }, status=400)

                node_id = node.get('node_id')
                if node_id is None:
                    return web.json_response({
                        'success': False,
                        'error': f'Node {i} missing node_id parameter'
                    }, status=400)

                # Validate node_id is an integer
                try:
                    node['node_id'] = int(node_id)
                except (ValueError, TypeError):
                    return web.json_response({
                        'success': False,
                        'error': f'Node {i} node_id must be an integer'
                    }, status=400)

            # Register all nodes
            node_registry.register_nodes(nodes)

            return web.json_response({
                'success': True,
                'message': f'{len(nodes)} nodes registered successfully'
            })

        except Exception as e:
            logger.error(f"Failed to register nodes: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

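Validation coerces every node_id to int before the batch reaches the registry, so a single malformed entry rejects the whole request. A well-formed payload, for reference:

# Example request body for register_nodes; string IDs are accepted and coerced
payload = {
    "nodes": [
        {"node_id": 123, "bgcolor": "#535", "title": "Lora Loader (LoraManager)"},
        {"node_id": "456"},  # int("456") succeeds, stored as 456
    ]
}
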
    @staticmethod
    async def get_registry(request):
        """Get current node registry information by refreshing from frontend"""
        try:
            # Check if running in standalone mode
            if standalone_mode:
                logger.warning("Registry refresh not available in standalone mode")
                return web.json_response({
                    'success': False,
                    'error': 'Standalone Mode Active',
                    'message': 'Cannot interact with ComfyUI in standalone mode.'
                }, status=503)

            # Send message to frontend to refresh registry
            try:
                PromptServer.instance.send_sync("lora_registry_refresh", {})
                logger.debug("Sent registry refresh request to frontend")
            except Exception as e:
                logger.error(f"Failed to send registry refresh message: {e}")
                return web.json_response({
                    'success': False,
                    'error': 'Communication Error',
                    'message': f'Failed to communicate with ComfyUI frontend: {str(e)}'
                }, status=500)

            # Wait for registry update with timeout
            def wait_for_registry():
                return node_registry.wait_for_update(timeout=1.0)

            # Run the wait in a thread to avoid blocking the event loop
            loop = asyncio.get_event_loop()
            registry_updated = await loop.run_in_executor(None, wait_for_registry)

            if not registry_updated:
                logger.warning("Registry refresh timeout after 1 second")
                return web.json_response({
                    'success': False,
                    'error': 'Timeout Error',
                    'message': 'Registry refresh timeout - ComfyUI frontend may not be responsive'
                }, status=408)

            # Get updated registry
            registry_info = node_registry.get_registry()

            return web.json_response({
                'success': True,
                'data': registry_info
            })

        except Exception as e:
            logger.error(f"Failed to get registry: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': 'Internal Error',
                'message': str(e)
            }, status=500)

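node_registry.wait_for_update blocks, which is why the handler pushes it onto the default executor rather than awaiting it directly. Its implementation is not part of this diff; a plausible shape is a threading.Event cleared when the refresh is broadcast and set when the frontend reports back (purely a sketch under that assumption):

import threading

class NodeRegistrySketch:
    # Assumed internals; the real node_registry is defined elsewhere
    def __init__(self):
        self._updated = threading.Event()
        self._nodes = []

    def begin_refresh(self):
        # Would be called just before broadcasting "lora_registry_refresh"
        self._updated.clear()

    def register_nodes(self, nodes):
        # Called when the frontend answers the refresh broadcast
        self._nodes = nodes
        self._updated.set()

    def wait_for_update(self, timeout: float = 1.0) -> bool:
        # Blocks the executor thread, never the event loop
        return self._updated.wait(timeout)
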
    @staticmethod
    async def check_model_exists(request):
        """
        Check if a model with specified modelId and optionally modelVersionId exists in the library

        Expects query parameters:
        - modelId: int - Civitai model ID (required)
        - modelVersionId: int - Civitai model version ID (optional)

        Returns:
        - If modelVersionId is provided: JSON with a boolean 'exists' field
        - If modelVersionId is not provided: JSON with a list of modelVersionIds that exist in the library
        """
        try:
            # Get the modelId and modelVersionId from query parameters
            model_id_str = request.query.get('modelId')
            model_version_id_str = request.query.get('modelVersionId')

            # Validate modelId parameter (required)
            if not model_id_str:
                return web.json_response({
                    'success': False,
                    'error': 'Missing required parameter: modelId'
                }, status=400)

            try:
                # Convert modelId to integer
                model_id = int(model_id_str)
            except ValueError:
                return web.json_response({
                    'success': False,
                    'error': 'Parameter modelId must be an integer'
                }, status=400)

            # Get all scanners
            lora_scanner = await ServiceRegistry.get_lora_scanner()
            checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
            embedding_scanner = await ServiceRegistry.get_embedding_scanner()

            # If modelVersionId is provided, check for specific version
            if model_version_id_str:
                try:
                    model_version_id = int(model_version_id_str)
                except ValueError:
                    return web.json_response({
                        'success': False,
                        'error': 'Parameter modelVersionId must be an integer'
                    }, status=400)

                # Check lora scanner first
                exists = False
                model_type = None

                if await lora_scanner.check_model_version_exists(model_id, model_version_id):
                    exists = True
                    model_type = 'lora'
                elif checkpoint_scanner and await checkpoint_scanner.check_model_version_exists(model_id, model_version_id):
                    exists = True
                    model_type = 'checkpoint'
                elif embedding_scanner and await embedding_scanner.check_model_version_exists(model_id, model_version_id):
                    exists = True
                    model_type = 'embedding'

                return web.json_response({
                    'success': True,
                    'exists': exists,
                    'modelType': model_type if exists else None
                })

            # If modelVersionId is not provided, return all version IDs for the model
            else:
                lora_versions = await lora_scanner.get_model_versions_by_id(model_id)
                checkpoint_versions = []
                embedding_versions = []

                # Prefer lora first, then checkpoint, then embedding
                if not lora_versions:
                    checkpoint_versions = await checkpoint_scanner.get_model_versions_by_id(model_id)
                if not lora_versions and not checkpoint_versions:
                    embedding_versions = await embedding_scanner.get_model_versions_by_id(model_id)

                model_type = None
                versions = []

                if lora_versions:
                    model_type = 'lora'
                    versions = lora_versions
                elif checkpoint_versions:
                    model_type = 'checkpoint'
                    versions = checkpoint_versions
                elif embedding_versions:
                    model_type = 'embedding'
                    versions = embedding_versions

                return web.json_response({
                    'success': True,
                    'modelId': model_id,
                    'modelType': model_type,
                    'versions': versions
                })

        except Exception as e:
            logger.error(f"Failed to check model existence: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

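Both response shapes share success and modelType; only the version-specific query carries exists. A query sketch (the /api/check-model-exists path is an assumption; route registration is not shown in this diff):

import asyncio
import aiohttp

async def check_model(base_url: str, model_id: int, version_id=None):
    params = {"modelId": str(model_id)}
    if version_id is not None:
        params["modelVersionId"] = str(version_id)
    async with aiohttp.ClientSession() as session:
        async with session.get(f"{base_url}/api/check-model-exists", params=params) as resp:
            data = await resp.json()
    if version_id is not None:
        return data.get("exists", False)  # bool for a specific version
    return data.get("versions", [])      # all locally present versions otherwise

# asyncio.run(check_model("http://127.0.0.1:8188", 12345))
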
py/routes/stats_routes.py (new file, 500 lines)
@@ -0,0 +1,500 @@
import os
import json
import jinja2
from aiohttp import web
import logging
from datetime import datetime, timedelta
from collections import defaultdict, Counter
from typing import Dict, List, Any

from ..config import config
from ..services.settings_manager import settings
from ..services.service_registry import ServiceRegistry
from ..utils.usage_stats import UsageStats

logger = logging.getLogger(__name__)

class StatsRoutes:
    """Route handlers for Statistics page and API endpoints"""

    def __init__(self):
        self.lora_scanner = None
        self.checkpoint_scanner = None
        self.embedding_scanner = None
        self.usage_stats = None
        self.template_env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(config.templates_path),
            autoescape=True
        )

    async def init_services(self):
        """Initialize services from ServiceRegistry"""
        self.lora_scanner = await ServiceRegistry.get_lora_scanner()
        self.checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
        self.embedding_scanner = await ServiceRegistry.get_embedding_scanner()
        self.usage_stats = UsageStats()

    async def handle_stats_page(self, request: web.Request) -> web.Response:
        """Handle GET /statistics request"""
        try:
            # Ensure services are initialized
            await self.init_services()

            # Check if scanners are initializing
            lora_initializing = (
                self.lora_scanner._cache is None or
                (hasattr(self.lora_scanner, 'is_initializing') and self.lora_scanner.is_initializing())
            )

            checkpoint_initializing = (
                self.checkpoint_scanner._cache is None or
                (hasattr(self.checkpoint_scanner, '_is_initializing') and self.checkpoint_scanner._is_initializing)
            )

            embedding_initializing = (
                self.embedding_scanner._cache is None or
                (hasattr(self.embedding_scanner, 'is_initializing') and self.embedding_scanner.is_initializing())
            )

            is_initializing = lora_initializing or checkpoint_initializing or embedding_initializing

            template = self.template_env.get_template('statistics.html')
            rendered = template.render(
                is_initializing=is_initializing,
                settings=settings,
                request=request
            )

            return web.Response(
                text=rendered,
                content_type='text/html'
            )

        except Exception as e:
            logger.error(f"Error handling statistics request: {e}", exc_info=True)
            return web.Response(
                text="Error loading statistics page",
                status=500
            )

    async def get_collection_overview(self, request: web.Request) -> web.Response:
        """Get collection overview statistics"""
        try:
            await self.init_services()

            # Get LoRA statistics
            lora_cache = await self.lora_scanner.get_cached_data()
            lora_count = len(lora_cache.raw_data)
            lora_size = sum(lora.get('size', 0) for lora in lora_cache.raw_data)

            # Get Checkpoint statistics
            checkpoint_cache = await self.checkpoint_scanner.get_cached_data()
            checkpoint_count = len(checkpoint_cache.raw_data)
            checkpoint_size = sum(cp.get('size', 0) for cp in checkpoint_cache.raw_data)

            # Get Embedding statistics
            embedding_cache = await self.embedding_scanner.get_cached_data()
            embedding_count = len(embedding_cache.raw_data)
            embedding_size = sum(emb.get('size', 0) for emb in embedding_cache.raw_data)

            # Get usage statistics
            usage_data = await self.usage_stats.get_stats()

            return web.json_response({
                'success': True,
                'data': {
                    'total_models': lora_count + checkpoint_count + embedding_count,
                    'lora_count': lora_count,
                    'checkpoint_count': checkpoint_count,
                    'embedding_count': embedding_count,
                    'total_size': lora_size + checkpoint_size + embedding_size,
                    'lora_size': lora_size,
                    'checkpoint_size': checkpoint_size,
                    'embedding_size': embedding_size,
                    'total_generations': usage_data.get('total_executions', 0),
                    'unused_loras': self._count_unused_models(lora_cache.raw_data, usage_data.get('loras', {})),
                    'unused_checkpoints': self._count_unused_models(checkpoint_cache.raw_data, usage_data.get('checkpoints', {})),
                    'unused_embeddings': self._count_unused_models(embedding_cache.raw_data, usage_data.get('embeddings', {}))
                }
            })

        except Exception as e:
            logger.error(f"Error getting collection overview: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    async def get_usage_analytics(self, request: web.Request) -> web.Response:
        """Get usage analytics data"""
        try:
            await self.init_services()

            # Get usage statistics
            usage_data = await self.usage_stats.get_stats()

            # Get model data for enrichment
            lora_cache = await self.lora_scanner.get_cached_data()
            checkpoint_cache = await self.checkpoint_scanner.get_cached_data()
            embedding_cache = await self.embedding_scanner.get_cached_data()

            # Create hash to model mapping
            lora_map = {lora['sha256']: lora for lora in lora_cache.raw_data}
            checkpoint_map = {cp['sha256']: cp for cp in checkpoint_cache.raw_data}
            embedding_map = {emb['sha256']: emb for emb in embedding_cache.raw_data}

            # Prepare top used models
            top_loras = self._get_top_used_models(usage_data.get('loras', {}), lora_map, 10)
            top_checkpoints = self._get_top_used_models(usage_data.get('checkpoints', {}), checkpoint_map, 10)
            top_embeddings = self._get_top_used_models(usage_data.get('embeddings', {}), embedding_map, 10)

            # Prepare usage timeline (last 30 days)
            timeline = self._get_usage_timeline(usage_data, 30)

            return web.json_response({
                'success': True,
                'data': {
                    'top_loras': top_loras,
                    'top_checkpoints': top_checkpoints,
                    'top_embeddings': top_embeddings,
                    'usage_timeline': timeline,
                    'total_executions': usage_data.get('total_executions', 0)
                }
            })

        except Exception as e:
            logger.error(f"Error getting usage analytics: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    async def get_base_model_distribution(self, request: web.Request) -> web.Response:
        """Get base model distribution statistics"""
        try:
            await self.init_services()

            # Get model data
            lora_cache = await self.lora_scanner.get_cached_data()
            checkpoint_cache = await self.checkpoint_scanner.get_cached_data()
            embedding_cache = await self.embedding_scanner.get_cached_data()

            # Count by base model
            lora_base_models = Counter(lora.get('base_model', 'Unknown') for lora in lora_cache.raw_data)
            checkpoint_base_models = Counter(cp.get('base_model', 'Unknown') for cp in checkpoint_cache.raw_data)
            embedding_base_models = Counter(emb.get('base_model', 'Unknown') for emb in embedding_cache.raw_data)

            return web.json_response({
                'success': True,
                'data': {
                    'loras': dict(lora_base_models),
                    'checkpoints': dict(checkpoint_base_models),
                    'embeddings': dict(embedding_base_models)
                }
            })

        except Exception as e:
            logger.error(f"Error getting base model distribution: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    async def get_tag_analytics(self, request: web.Request) -> web.Response:
        """Get tag usage analytics"""
        try:
            await self.init_services()

            # Get model data
            lora_cache = await self.lora_scanner.get_cached_data()
            checkpoint_cache = await self.checkpoint_scanner.get_cached_data()
            embedding_cache = await self.embedding_scanner.get_cached_data()

            # Count tag frequencies
            all_tags = []
            for lora in lora_cache.raw_data:
                all_tags.extend(lora.get('tags', []))
            for cp in checkpoint_cache.raw_data:
                all_tags.extend(cp.get('tags', []))
            for emb in embedding_cache.raw_data:
                all_tags.extend(emb.get('tags', []))

            tag_counts = Counter(all_tags)

            # Get top 50 tags
            top_tags = [{'tag': tag, 'count': count} for tag, count in tag_counts.most_common(50)]

            return web.json_response({
                'success': True,
                'data': {
                    'top_tags': top_tags,
                    'total_unique_tags': len(tag_counts)
                }
            })

        except Exception as e:
            logger.error(f"Error getting tag analytics: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    async def get_storage_analytics(self, request: web.Request) -> web.Response:
        """Get storage usage analytics"""
        try:
            await self.init_services()

            # Get usage statistics
            usage_data = await self.usage_stats.get_stats()

            # Get model data
            lora_cache = await self.lora_scanner.get_cached_data()
            checkpoint_cache = await self.checkpoint_scanner.get_cached_data()
            embedding_cache = await self.embedding_scanner.get_cached_data()

            # Create models with usage data
            lora_storage = []
            for lora in lora_cache.raw_data:
                usage_count = 0
                if lora['sha256'] in usage_data.get('loras', {}):
                    usage_count = usage_data['loras'][lora['sha256']].get('total', 0)

                lora_storage.append({
                    'name': lora['model_name'],
                    'size': lora.get('size', 0),
                    'usage_count': usage_count,
                    'folder': lora.get('folder', ''),
                    'base_model': lora.get('base_model', 'Unknown')
                })

            checkpoint_storage = []
            for cp in checkpoint_cache.raw_data:
                usage_count = 0
                if cp['sha256'] in usage_data.get('checkpoints', {}):
                    usage_count = usage_data['checkpoints'][cp['sha256']].get('total', 0)

                checkpoint_storage.append({
                    'name': cp['model_name'],
                    'size': cp.get('size', 0),
                    'usage_count': usage_count,
                    'folder': cp.get('folder', ''),
                    'base_model': cp.get('base_model', 'Unknown')
                })

            embedding_storage = []
            for emb in embedding_cache.raw_data:
                usage_count = 0
                if emb['sha256'] in usage_data.get('embeddings', {}):
                    usage_count = usage_data['embeddings'][emb['sha256']].get('total', 0)

                embedding_storage.append({
                    'name': emb['model_name'],
                    'size': emb.get('size', 0),
                    'usage_count': usage_count,
                    'folder': emb.get('folder', ''),
                    'base_model': emb.get('base_model', 'Unknown')
                })

            # Sort by size
            lora_storage.sort(key=lambda x: x['size'], reverse=True)
            checkpoint_storage.sort(key=lambda x: x['size'], reverse=True)
            embedding_storage.sort(key=lambda x: x['size'], reverse=True)

            return web.json_response({
                'success': True,
                'data': {
                    'loras': lora_storage[:20],  # Top 20 by size
                    'checkpoints': checkpoint_storage[:20],
                    'embeddings': embedding_storage[:20]
                }
            })

        except Exception as e:
            logger.error(f"Error getting storage analytics: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    async def get_insights(self, request: web.Request) -> web.Response:
        """Get smart insights about the collection"""
        try:
            await self.init_services()

            # Get usage statistics
            usage_data = await self.usage_stats.get_stats()

            # Get model data
            lora_cache = await self.lora_scanner.get_cached_data()
            checkpoint_cache = await self.checkpoint_scanner.get_cached_data()
            embedding_cache = await self.embedding_scanner.get_cached_data()

            insights = []

            # Calculate unused models
            unused_loras = self._count_unused_models(lora_cache.raw_data, usage_data.get('loras', {}))
            unused_checkpoints = self._count_unused_models(checkpoint_cache.raw_data, usage_data.get('checkpoints', {}))
            unused_embeddings = self._count_unused_models(embedding_cache.raw_data, usage_data.get('embeddings', {}))

            total_loras = len(lora_cache.raw_data)
            total_checkpoints = len(checkpoint_cache.raw_data)
            total_embeddings = len(embedding_cache.raw_data)

            if total_loras > 0:
                unused_lora_percent = (unused_loras / total_loras) * 100
                if unused_lora_percent > 50:
                    insights.append({
                        'type': 'warning',
                        'title': 'High Number of Unused LoRAs',
                        'description': f'{unused_lora_percent:.1f}% of your LoRAs ({unused_loras}/{total_loras}) have never been used.',
                        'suggestion': 'Consider organizing or archiving unused models to free up storage space.'
                    })

            if total_checkpoints > 0:
                unused_checkpoint_percent = (unused_checkpoints / total_checkpoints) * 100
                if unused_checkpoint_percent > 30:
                    insights.append({
                        'type': 'warning',
                        'title': 'Unused Checkpoints Detected',
                        'description': f'{unused_checkpoint_percent:.1f}% of your checkpoints ({unused_checkpoints}/{total_checkpoints}) have never been used.',
                        'suggestion': 'Review and consider removing checkpoints you no longer need.'
                    })

            if total_embeddings > 0:
                unused_embedding_percent = (unused_embeddings / total_embeddings) * 100
                if unused_embedding_percent > 50:
                    insights.append({
                        'type': 'warning',
                        'title': 'High Number of Unused Embeddings',
                        'description': f'{unused_embedding_percent:.1f}% of your embeddings ({unused_embeddings}/{total_embeddings}) have never been used.',
                        'suggestion': 'Consider organizing or archiving unused embeddings to optimize your collection.'
                    })

            # Storage insights
            total_size = sum(lora.get('size', 0) for lora in lora_cache.raw_data) + \
                         sum(cp.get('size', 0) for cp in checkpoint_cache.raw_data) + \
                         sum(emb.get('size', 0) for emb in embedding_cache.raw_data)

            if total_size > 100 * 1024 * 1024 * 1024:  # 100GB
                insights.append({
                    'type': 'info',
                    'title': 'Large Collection Detected',
                    'description': f'Your model collection is using {self._format_size(total_size)} of storage.',
                    'suggestion': 'Consider using external storage or cloud solutions for better organization.'
                })

            # Recent activity insight
            if usage_data.get('total_executions', 0) > 100:
                insights.append({
                    'type': 'success',
                    'title': 'Active User',
                    'description': f'You\'ve completed {usage_data["total_executions"]} generations so far!',
                    'suggestion': 'Keep exploring and creating amazing content with your models.'
                })

            return web.json_response({
                'success': True,
                'data': {
                    'insights': insights
                }
            })

        except Exception as e:
            logger.error(f"Error getting insights: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

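To make the thresholds concrete: 70 unused out of 120 LoRAs is 58.3%, which clears the 50% bar and emits the warning; the checkpoint rule fires earlier, at 30%. A quick check of the arithmetic:

# Threshold arithmetic used by get_insights above
total_loras, unused_loras = 120, 70
assert (unused_loras / total_loras) * 100 > 50   # 58.3% -> LoRA warning

total_ckpts, unused_ckpts = 10, 4
assert (unused_ckpts / total_ckpts) * 100 > 30   # 40.0% -> checkpoint warning
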
    def _count_unused_models(self, models: List[Dict], usage_data: Dict) -> int:
        """Count models that have never been used"""
        used_hashes = set(usage_data.keys())
        unused_count = 0

        for model in models:
            if model.get('sha256') not in used_hashes:
                unused_count += 1

        return unused_count

    def _get_top_used_models(self, usage_data: Dict, model_map: Dict, limit: int) -> List[Dict]:
        """Get top used models with their metadata"""
        sorted_usage = sorted(usage_data.items(), key=lambda x: x[1].get('total', 0), reverse=True)

        top_models = []
        for sha256, usage_info in sorted_usage[:limit]:
            if sha256 in model_map:
                model = model_map[sha256]
                top_models.append({
                    'name': model['model_name'],
                    'usage_count': usage_info.get('total', 0),
                    'base_model': model.get('base_model', 'Unknown'),
                    'preview_url': config.get_preview_static_url(model.get('preview_url', '')),
                    'folder': model.get('folder', '')
                })

        return top_models

    def _get_usage_timeline(self, usage_data: Dict, days: int) -> List[Dict]:
        """Get usage timeline for the past N days"""
        timeline = []
        today = datetime.now()

        for i in range(days):
            date = today - timedelta(days=i)
            date_str = date.strftime('%Y-%m-%d')

            lora_usage = 0
            checkpoint_usage = 0
            embedding_usage = 0

            # Count usage for this date
            for model_usage in usage_data.get('loras', {}).values():
                if isinstance(model_usage, dict) and 'history' in model_usage:
                    lora_usage += model_usage['history'].get(date_str, 0)

            for model_usage in usage_data.get('checkpoints', {}).values():
                if isinstance(model_usage, dict) and 'history' in model_usage:
                    checkpoint_usage += model_usage['history'].get(date_str, 0)

            for model_usage in usage_data.get('embeddings', {}).values():
                if isinstance(model_usage, dict) and 'history' in model_usage:
                    embedding_usage += model_usage['history'].get(date_str, 0)

            timeline.append({
                'date': date_str,
                'lora_usage': lora_usage,
                'checkpoint_usage': checkpoint_usage,
                'embedding_usage': embedding_usage,
                'total_usage': lora_usage + checkpoint_usage + embedding_usage
            })

        return list(reversed(timeline))  # Oldest to newest

    def _format_size(self, size_bytes: int) -> str:
        """Format file size in human readable format"""
        for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
            if size_bytes < 1024.0:
                return f"{size_bytes:.1f} {unit}"
            size_bytes /= 1024.0
        return f"{size_bytes:.1f} PB"

    def setup_routes(self, app: web.Application):
        """Register routes with the application"""
        # Add an app startup handler to initialize services
        app.on_startup.append(self._on_startup)

        # Register page route
        app.router.add_get('/statistics', self.handle_stats_page)

        # Register API routes
        app.router.add_get('/api/stats/collection-overview', self.get_collection_overview)
        app.router.add_get('/api/stats/usage-analytics', self.get_usage_analytics)
        app.router.add_get('/api/stats/base-model-distribution', self.get_base_model_distribution)
        app.router.add_get('/api/stats/tag-analytics', self.get_tag_analytics)
        app.router.add_get('/api/stats/storage-analytics', self.get_storage_analytics)
        app.router.add_get('/api/stats/insights', self.get_insights)

    async def _on_startup(self, app):
        """Initialize services when the app starts"""
        await self.init_services()

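Wiring the class into a server then amounts to instantiating it and calling setup_routes, which attaches the page route, the six API routes, and the startup hook. A minimal sketch, assuming a bare aiohttp application outside ComfyUI:

from aiohttp import web

def build_app() -> web.Application:
    app = web.Application()
    StatsRoutes().setup_routes(app)  # /statistics plus /api/stats/*
    return app

# web.run_app(build_app(), port=8188)
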
@@ -2,8 +2,13 @@ import os
import aiohttp
import logging
import toml
+import git
+import zipfile
+import shutil
+import tempfile
from aiohttp import web
-from typing import Dict, Any, List
+from typing import Dict, List


logger = logging.getLogger(__name__)

@@ -13,7 +18,9 @@ class UpdateRoutes:
    @staticmethod
    def setup_routes(app):
        """Register update check routes"""
-       app.router.add_get('/loras/api/check-updates', UpdateRoutes.check_updates)
+       app.router.add_get('/api/check-updates', UpdateRoutes.check_updates)
+       app.router.add_get('/api/version-info', UpdateRoutes.get_version_info)
+       app.router.add_post('/api/perform-update', UpdateRoutes.perform_update)

    @staticmethod
    async def check_updates(request):
|
||||
Returns update status and version information
|
||||
"""
|
||||
try:
|
||||
nightly = request.query.get('nightly', 'false').lower() == 'true'
|
||||
|
||||
# Read local version from pyproject.toml
|
||||
local_version = UpdateRoutes._get_local_version()
|
||||
|
||||
# Get git info (commit hash, branch)
|
||||
git_info = UpdateRoutes._get_git_info()
|
||||
|
||||
# Fetch remote version from GitHub
|
||||
remote_version, changelog = await UpdateRoutes._get_remote_version()
|
||||
if nightly:
|
||||
remote_version, changelog = await UpdateRoutes._get_nightly_version()
|
||||
else:
|
||||
remote_version, changelog = await UpdateRoutes._get_remote_version()
|
||||
|
||||
# Compare versions
|
||||
update_available = UpdateRoutes._compare_versions(
|
||||
local_version.replace('v', ''),
|
||||
remote_version.replace('v', '')
|
||||
)
|
||||
if nightly:
|
||||
# For nightly, compare commit hashes
|
||||
update_available = UpdateRoutes._compare_nightly_versions(git_info, remote_version)
|
||||
else:
|
||||
# For stable, compare semantic versions
|
||||
update_available = UpdateRoutes._compare_versions(
|
||||
local_version.replace('v', ''),
|
||||
remote_version.replace('v', '')
|
||||
)
|
||||
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'current_version': local_version,
|
||||
'latest_version': remote_version,
|
||||
'update_available': update_available,
|
||||
'changelog': changelog
|
||||
'changelog': changelog,
|
||||
'git_info': git_info,
|
||||
'nightly': nightly
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
@@ -48,6 +70,279 @@ class UpdateRoutes:
                'success': False,
                'error': str(e)
            })

    @staticmethod
    async def get_version_info(request):
        """
        Returns the current version in the format 'version-short_hash'
        """
        try:
            # Read local version from pyproject.toml
            local_version = UpdateRoutes._get_local_version().replace('v', '')

            # Get git info (commit hash, branch)
            git_info = UpdateRoutes._get_git_info()
            short_hash = git_info['short_hash']

            # Format: version-short_hash
            version_string = f"{local_version}-{short_hash}"

            return web.json_response({
                'success': True,
                'version': version_string
            })

        except Exception as e:
            logger.error(f"Failed to get version info: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            })

    @staticmethod
    async def perform_update(request):
        """
        Perform Git-based update to latest release tag or main branch.
        If .git is missing, fall back to ZIP download.
        """
        try:
            body = await request.json() if request.has_body else {}
            nightly = body.get('nightly', False)

            current_dir = os.path.dirname(os.path.abspath(__file__))
            plugin_root = os.path.dirname(os.path.dirname(current_dir))

            settings_path = os.path.join(plugin_root, 'settings.json')
            settings_backup = None
            if os.path.exists(settings_path):
                with open(settings_path, 'r', encoding='utf-8') as f:
                    settings_backup = f.read()
                logger.info("Backed up settings.json")

            git_folder = os.path.join(plugin_root, '.git')
            if os.path.exists(git_folder):
                # Git update
                success, new_version = await UpdateRoutes._perform_git_update(plugin_root, nightly)
            else:
                # Fallback: Download ZIP and replace files
                success, new_version = await UpdateRoutes._download_and_replace_zip(plugin_root)

            if settings_backup and success:
                with open(settings_path, 'w', encoding='utf-8') as f:
                    f.write(settings_backup)
                logger.info("Restored settings.json")

            if success:
                return web.json_response({
                    'success': True,
                    'message': f'Successfully updated to {new_version}',
                    'new_version': new_version
                })
            else:
                return web.json_response({
                    'success': False,
                    'error': 'Failed to complete update'
                })

        except Exception as e:
            logger.error(f"Failed to perform update: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            })

    @staticmethod
    async def _download_and_replace_zip(plugin_root: str) -> tuple[bool, str]:
        """
        Download latest release ZIP from GitHub and replace plugin files.
        Skips settings.json. Writes extracted file list to .tracking.
        """
        repo_owner = "willmiao"
        repo_name = "ComfyUI-Lora-Manager"
        github_api = f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/latest"

        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(github_api) as resp:
                    if resp.status != 200:
                        logger.error(f"Failed to fetch release info: {resp.status}")
                        return False, ""
                    data = await resp.json()
                    zip_url = data.get("zipball_url")
                    version = data.get("tag_name", "unknown")

                # Download ZIP
                async with session.get(zip_url) as zip_resp:
                    if zip_resp.status != 200:
                        logger.error(f"Failed to download ZIP: {zip_resp.status}")
                        return False, ""
                    with tempfile.NamedTemporaryFile(delete=False, suffix=".zip") as tmp_zip:
                        tmp_zip.write(await zip_resp.read())
                        zip_path = tmp_zip.name

            UpdateRoutes._clean_plugin_folder(plugin_root, skip_files=['settings.json'])

            # Extract ZIP to temp dir
            with tempfile.TemporaryDirectory() as tmp_dir:
                with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                    zip_ref.extractall(tmp_dir)
                # Find extracted folder (GitHub ZIP contains a root folder)
                extracted_root = next(os.scandir(tmp_dir)).path

                # Copy files, skipping settings.json
                for item in os.listdir(extracted_root):
                    src = os.path.join(extracted_root, item)
                    dst = os.path.join(plugin_root, item)
                    if os.path.isdir(src):
                        if os.path.exists(dst):
                            shutil.rmtree(dst)
                        shutil.copytree(src, dst, ignore=shutil.ignore_patterns('settings.json'))
                    else:
                        if item == 'settings.json':
                            continue
                        shutil.copy2(src, dst)

                # Write .tracking file: list all files under extracted_root, relative to extracted_root,
                # for ComfyUI Manager to work properly
                tracking_info_file = os.path.join(plugin_root, '.tracking')
                tracking_files = []
                for root, dirs, files in os.walk(extracted_root):
                    for file in files:
                        rel_path = os.path.relpath(os.path.join(root, file), extracted_root)
                        tracking_files.append(rel_path.replace("\\", "/"))
                with open(tracking_info_file, "w", encoding='utf-8') as file:
                    file.write('\n'.join(tracking_files))

            os.remove(zip_path)
            logger.info(f"Updated plugin via ZIP to {version}")
            return True, version

        except Exception as e:
            logger.error(f"ZIP update failed: {e}", exc_info=True)
            return False, ""

    @staticmethod
    def _clean_plugin_folder(plugin_root, skip_files=None):
        skip_files = skip_files or []
        for item in os.listdir(plugin_root):
            if item in skip_files:
                continue
            path = os.path.join(plugin_root, item)
            if os.path.isdir(path):
                shutil.rmtree(path)
            else:
                os.remove(path)

    @staticmethod
    async def _get_nightly_version() -> tuple[str, List[str]]:
        """
        Fetch latest commit from main branch
        """
        repo_owner = "willmiao"
        repo_name = "ComfyUI-Lora-Manager"

        # Use GitHub API to fetch the latest commit from main branch
        github_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/commits/main"

        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(github_url, headers={'Accept': 'application/vnd.github+json'}) as response:
                    if response.status != 200:
                        logger.warning(f"Failed to fetch GitHub commit: {response.status}")
                        return "main", []

                    data = await response.json()
                    commit_sha = data.get('sha', '')[:7]  # Short hash
                    commit_message = data.get('commit', {}).get('message', '')

                    # Format as "main-{short_hash}"
                    version = f"main-{commit_sha}"

                    # Use commit message as changelog
                    changelog = [commit_message] if commit_message else []

                    return version, changelog

        except Exception as e:
            logger.error(f"Error fetching nightly version: {e}", exc_info=True)
            return "main", []

    @staticmethod
    def _compare_nightly_versions(local_git_info: Dict[str, str], remote_version: str) -> bool:
        """
        Compare local commit hash with remote main branch
        """
        try:
            local_hash = local_git_info.get('short_hash', 'unknown')
            if local_hash == 'unknown':
                return True  # Assume update available if we can't get local hash

            # Extract remote hash from version string (format: "main-{hash}")
            if '-' in remote_version:
                remote_hash = remote_version.split('-')[-1]
                return local_hash != remote_hash

            return True  # Default to update available

        except Exception as e:
            logger.error(f"Error comparing nightly versions: {e}")
            return False

    @staticmethod
    async def _perform_git_update(plugin_root: str, nightly: bool = False) -> tuple[bool, str]:
        """
        Perform Git-based update using GitPython

        Args:
            plugin_root: Path to the plugin root directory
            nightly: Whether to update to main branch or latest release

        Returns:
            tuple: (success, new_version)
        """
        try:
            # Open the Git repository
            repo = git.Repo(plugin_root)

            # Fetch latest changes
            origin = repo.remotes.origin
            origin.fetch()

            if nightly:
                # Switch to main branch and pull latest
                main_branch = 'main'
                if main_branch not in [branch.name for branch in repo.branches]:
                    # Create local main branch if it doesn't exist
                    repo.create_head(main_branch, origin.refs.main)

                repo.heads[main_branch].checkout()
                origin.pull(main_branch)

                # Get new commit hash
                new_version = f"main-{repo.head.commit.hexsha[:7]}"

            else:
                # Get latest release tag
                tags = sorted(repo.tags, key=lambda t: t.commit.committed_datetime, reverse=True)
                if not tags:
                    logger.error("No tags found in repository")
                    return False, ""

                latest_tag = tags[0]

                # Checkout to latest tag
                repo.git.checkout(latest_tag.name)

                new_version = latest_tag.name

            logger.info(f"Successfully updated to {new_version}")
            return True, new_version

        except git.exc.GitError as e:
            logger.error(f"Git error during update: {e}")
            return False, ""
        except Exception as e:
            logger.error(f"Error during Git update: {e}")
            return False, ""

    @staticmethod
    def _get_local_version() -> str:
@@ -72,6 +367,35 @@ class UpdateRoutes:
            logger.error(f"Failed to get local version: {e}", exc_info=True)
            return "v0.0.0"

    @staticmethod
    def _get_git_info() -> Dict[str, str]:
        """Get Git repository information"""
        current_dir = os.path.dirname(os.path.abspath(__file__))
        plugin_root = os.path.dirname(os.path.dirname(current_dir))

        git_info = {
            'commit_hash': 'unknown',
            'short_hash': 'stable',
            'branch': 'unknown',
            'commit_date': 'unknown'
        }

        try:
            # Check if we're in a git repository
            if not os.path.exists(os.path.join(plugin_root, '.git')):
                return git_info

            repo = git.Repo(plugin_root)
            commit = repo.head.commit
            git_info['commit_hash'] = commit.hexsha
            git_info['short_hash'] = commit.hexsha[:7]
            git_info['branch'] = repo.active_branch.name if not repo.head.is_detached else 'detached'
            git_info['commit_date'] = commit.committed_datetime.strftime('%Y-%m-%d')
        except Exception as e:
            logger.warning(f"Error getting git info: {e}")

        return git_info

    @staticmethod
    async def _get_remote_version() -> tuple[str, List[str]]:
        """
@@ -150,11 +474,16 @@ class UpdateRoutes:
        """
        Compare two semantic version strings
        Returns True if version2 is newer than version1
+       Ignores any suffixes after '-' (e.g., -bugfix, -alpha)
        """
        try:
+           # Clean version strings - remove any suffix after '-'
+           v1_clean = version1.split('-')[0]
+           v2_clean = version2.split('-')[0]
+
            # Split versions into components
-           v1_parts = [int(x) for x in version1.split('.')]
-           v2_parts = [int(x) for x in version2.split('.')]
+           v1_parts = [int(x) for x in v1_clean.split('.')]
+           v2_parts = [int(x) for x in v2_clean.split('.')]

            # Ensure both have 3 components (major.minor.patch)
            while len(v1_parts) < 3:

py/services/base_model_service.py (new file, 275 lines)
@@ -0,0 +1,275 @@
from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Type
import logging

from ..utils.models import BaseModelMetadata
from ..utils.constants import NSFW_LEVELS
from .settings_manager import settings
from ..utils.utils import fuzzy_match

logger = logging.getLogger(__name__)

class BaseModelService(ABC):
    """Base service class for all model types"""

    def __init__(self, model_type: str, scanner, metadata_class: Type[BaseModelMetadata]):
        """Initialize the service

        Args:
            model_type: Type of model (lora, checkpoint, etc.)
            scanner: Model scanner instance
            metadata_class: Metadata class for this model type
        """
        self.model_type = model_type
        self.scanner = scanner
        self.metadata_class = metadata_class

    async def get_paginated_data(self, page: int, page_size: int, sort_by: str = 'name',
                                 folder: str = None, search: str = None, fuzzy_search: bool = False,
                                 base_models: list = None, tags: list = None,
                                 search_options: dict = None, hash_filters: dict = None,
                                 favorites_only: bool = False, **kwargs) -> Dict:
        """Get paginated and filtered model data

        Args:
            page: Page number (1-based)
            page_size: Number of items per page
            sort_by: Sort criteria, e.g. 'name', 'name:asc', 'name:desc', 'date', 'date:asc', 'date:desc'
            folder: Folder filter
            search: Search term
            fuzzy_search: Whether to use fuzzy search
            base_models: List of base models to filter by
            tags: List of tags to filter by
            search_options: Search options dict
            hash_filters: Hash filtering options
            favorites_only: Filter for favorites only
            **kwargs: Additional model-specific filters

        Returns:
            Dict containing paginated results
        """
        cache = await self.scanner.get_cached_data()

        # Parse sort_by into sort_key and order
        if ':' in sort_by:
            sort_key, order = sort_by.split(':', 1)
            sort_key = sort_key.strip()
            order = order.strip().lower()
            if order not in ('asc', 'desc'):
                order = 'asc'
        else:
            sort_key = sort_by.strip()
            order = 'asc'

        # Get default search options if not provided
        if search_options is None:
            search_options = {
                'filename': True,
                'modelname': True,
                'tags': False,
                'recursive': False,
            }

        # Get the base data set using new sort logic
        filtered_data = await cache.get_sorted_data(sort_key, order)

        # Apply hash filtering if provided (highest priority)
        if hash_filters:
            filtered_data = await self._apply_hash_filters(filtered_data, hash_filters)
            # Jump to pagination for hash filters
            return self._paginate(filtered_data, page, page_size)

        # Apply common filters
        filtered_data = await self._apply_common_filters(
            filtered_data, folder, base_models, tags, favorites_only, search_options
        )

        # Apply search filtering
        if search:
            filtered_data = await self._apply_search_filters(
                filtered_data, search, fuzzy_search, search_options
            )

        # Apply model-specific filters
        filtered_data = await self._apply_specific_filters(filtered_data, **kwargs)

        return self._paginate(filtered_data, page, page_size)

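The sort_by grammar is key[:order], with order defaulting to asc and anything unrecognized coerced back to asc, and hash filters short-circuit every other filter including search. Illustrative calls (service stands in for any concrete subclass instance):

async def demo(service):
    # 'date:desc' parses to sort_key='date', order='desc'
    newest = await service.get_paginated_data(page=1, page_size=25, sort_by='date:desc')

    # hash_filters returns immediately after pagination; search is never applied
    exact = await service.get_paginated_data(
        page=1, page_size=25,
        hash_filters={'single_hash': 'abc123'},
        search='ignored when hash_filters is set',
    )
    return newest['total'], exact['items']
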
    async def _apply_hash_filters(self, data: List[Dict], hash_filters: Dict) -> List[Dict]:
        """Apply hash-based filtering"""
        single_hash = hash_filters.get('single_hash')
        multiple_hashes = hash_filters.get('multiple_hashes')

        if single_hash:
            # Filter by single hash
            single_hash = single_hash.lower()
            return [
                item for item in data
                if item.get('sha256', '').lower() == single_hash
            ]
        elif multiple_hashes:
            # Filter by multiple hashes
            hash_set = set(hash.lower() for hash in multiple_hashes)
            return [
                item for item in data
                if item.get('sha256', '').lower() in hash_set
            ]

        return data

    async def _apply_common_filters(self, data: List[Dict], folder: str = None,
                                    base_models: list = None, tags: list = None,
                                    favorites_only: bool = False, search_options: dict = None) -> List[Dict]:
        """Apply common filters that work across all model types"""
        # Apply SFW filtering if enabled in settings
        if settings.get('show_only_sfw', False):
            data = [
                item for item in data
                if not item.get('preview_nsfw_level') or item.get('preview_nsfw_level') < NSFW_LEVELS['R']
            ]

        # Apply favorites filtering if enabled
        if favorites_only:
            data = [
                item for item in data
                if item.get('favorite', False) is True
            ]

        # Apply folder filtering
        if folder is not None:
            if search_options and search_options.get('recursive', False):
                # Recursive folder filtering - include all subfolders
                data = [
                    item for item in data
                    if item['folder'].startswith(folder)
                ]
            else:
                # Exact folder filtering
                data = [
                    item for item in data
                    if item['folder'] == folder
                ]

        # Apply base model filtering
        if base_models and len(base_models) > 0:
            data = [
                item for item in data
                if item.get('base_model') in base_models
            ]

        # Apply tag filtering
        if tags and len(tags) > 0:
            data = [
                item for item in data
                if any(tag in item.get('tags', []) for tag in tags)
            ]

        return data

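Note that recursive mode is a plain string-prefix test, so folder='anime' also matches a sibling folder named 'anime-extra', while exact mode compares the whole string. A small demonstration of the difference:

# Prefix vs. exact folder matching, as implemented above
items = [{'folder': 'anime'}, {'folder': 'anime/characters'}, {'folder': 'anime-extra'}]

recursive = [i for i in items if i['folder'].startswith('anime')]
exact = [i for i in items if i['folder'] == 'anime']

print([i['folder'] for i in recursive])  # ['anime', 'anime/characters', 'anime-extra']
print([i['folder'] for i in exact])      # ['anime']
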
    async def _apply_search_filters(self, data: List[Dict], search: str,
                                    fuzzy_search: bool, search_options: dict) -> List[Dict]:
        """Apply search filtering"""
        search_results = []

        for item in data:
            # Search by file name
            if search_options.get('filename', True):
                if fuzzy_search:
                    if fuzzy_match(item.get('file_name', ''), search):
                        search_results.append(item)
                        continue
                elif search.lower() in item.get('file_name', '').lower():
                    search_results.append(item)
                    continue

            # Search by model name
            if search_options.get('modelname', True):
                if fuzzy_search:
                    if fuzzy_match(item.get('model_name', ''), search):
                        search_results.append(item)
                        continue
                elif search.lower() in item.get('model_name', '').lower():
                    search_results.append(item)
                    continue

            # Search by tags
            if search_options.get('tags', False) and 'tags' in item:
                if any((fuzzy_match(tag, search) if fuzzy_search else search.lower() in tag.lower())
                       for tag in item['tags']):
                    search_results.append(item)
                    continue

            # Search by creator
            civitai = item.get('civitai')
            creator_username = ''
            if civitai and isinstance(civitai, dict):
                creator = civitai.get('creator')
                if creator and isinstance(creator, dict):
                    creator_username = creator.get('username', '')
            if search_options.get('creator', False) and creator_username:
                if fuzzy_search:
                    if fuzzy_match(creator_username, search):
                        search_results.append(item)
                        continue
                elif search.lower() in creator_username.lower():
                    search_results.append(item)
                    continue

        return search_results

    async def _apply_specific_filters(self, data: List[Dict], **kwargs) -> List[Dict]:
        """Apply model-specific filters - to be overridden by subclasses if needed"""
        return data

    def _paginate(self, data: List[Dict], page: int, page_size: int) -> Dict:
        """Apply pagination to filtered data"""
        total_items = len(data)
        start_idx = (page - 1) * page_size
        end_idx = min(start_idx + page_size, total_items)

        return {
            'items': data[start_idx:end_idx],
            'total': total_items,
            'page': page,
            'page_size': page_size,
            'total_pages': (total_items + page_size - 1) // page_size
        }

    @abstractmethod
    async def format_response(self, model_data: Dict) -> Dict:
        """Format model data for API response - must be implemented by subclasses"""
        pass

    # Common service methods that delegate to scanner
    async def get_top_tags(self, limit: int = 20) -> List[Dict]:
        """Get top tags sorted by frequency"""
        return await self.scanner.get_top_tags(limit)

    async def get_base_models(self, limit: int = 20) -> List[Dict]:
        """Get base models sorted by frequency"""
        return await self.scanner.get_base_models(limit)

    def has_hash(self, sha256: str) -> bool:
        """Check if a model with given hash exists"""
        return self.scanner.has_hash(sha256)

    def get_path_by_hash(self, sha256: str) -> Optional[str]:
        """Get file path for a model by its hash"""
        return self.scanner.get_path_by_hash(sha256)

    def get_hash_by_path(self, file_path: str) -> Optional[str]:
        """Get hash for a model by its file path"""
        return self.scanner.get_hash_by_path(file_path)

    async def scan_models(self, force_refresh: bool = False, rebuild_cache: bool = False):
        """Trigger model scanning"""
        return await self.scanner.get_cached_data(force_refresh=force_refresh, rebuild_cache=rebuild_cache)

    async def get_model_info_by_name(self, name: str):
        """Get model information by name"""
        return await self.scanner.get_model_info_by_name(name)

    def get_model_roots(self) -> List[str]:
        """Get model root directories"""
        return self.scanner.get_model_roots()

py/services/checkpoint_scanner.py (new file, 34 lines)
@@ -0,0 +1,34 @@
import logging
from typing import List

from ..utils.models import CheckpointMetadata
from ..config import config
from .model_scanner import ModelScanner
from .model_hash_index import ModelHashIndex

logger = logging.getLogger(__name__)


class CheckpointScanner(ModelScanner):
    """Service for scanning and managing checkpoint files"""

    def __init__(self):
        # Define supported file extensions
        file_extensions = {'.safetensors', '.ckpt', '.pt', '.pth', '.sft', '.gguf'}
        super().__init__(
            model_type="checkpoint",
            model_class=CheckpointMetadata,
            file_extensions=file_extensions,
            hash_index=ModelHashIndex()
        )

    def adjust_metadata(self, metadata, file_path, root_path):
        if hasattr(metadata, "model_type"):
            if root_path in config.checkpoints_roots:
                metadata.model_type = "checkpoint"
            elif root_path in config.unet_roots:
                metadata.model_type = "diffusion_model"
        return metadata

    def get_model_roots(self) -> List[str]:
        """Get checkpoint root directories"""
        return config.base_models_roots
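# Editor's note (sketch, not part of the commit): adjust_metadata re-labels a
# file by the root it was scanned from - checkpoints_roots keeps "checkpoint",
# unet_roots switches to "diffusion_model", and any other root leaves the
# metadata untouched.
#
#   meta = scanner.adjust_metadata(meta, file_path, root_path)
#   # meta.model_type == 'diffusion_model' when root_path is in config.unet_roots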
py/services/checkpoint_service.py (new file, 51 lines)
@@ -0,0 +1,51 @@
import os
import logging
from typing import Dict, List, Optional

from .base_model_service import BaseModelService
from ..utils.models import CheckpointMetadata
from ..config import config
from ..utils.routes_common import ModelRouteUtils

logger = logging.getLogger(__name__)


class CheckpointService(BaseModelService):
    """Checkpoint-specific service implementation"""

    def __init__(self, scanner):
        """Initialize Checkpoint service

        Args:
            scanner: Checkpoint scanner instance
        """
        super().__init__("checkpoint", scanner, CheckpointMetadata)

    async def format_response(self, checkpoint_data: Dict) -> Dict:
        """Format Checkpoint data for API response"""
        return {
            "model_name": checkpoint_data["model_name"],
            "file_name": checkpoint_data["file_name"],
            "preview_url": config.get_preview_static_url(checkpoint_data.get("preview_url", "")),
            "preview_nsfw_level": checkpoint_data.get("preview_nsfw_level", 0),
            "base_model": checkpoint_data.get("base_model", ""),
            "folder": checkpoint_data["folder"],
            "sha256": checkpoint_data.get("sha256", ""),
            "file_path": checkpoint_data["file_path"].replace(os.sep, "/"),
            "file_size": checkpoint_data.get("size", 0),
            "modified": checkpoint_data.get("modified", ""),
            "tags": checkpoint_data.get("tags", []),
            "modelDescription": checkpoint_data.get("modelDescription", ""),
            "from_civitai": checkpoint_data.get("from_civitai", True),
            "notes": checkpoint_data.get("notes", ""),
            "model_type": checkpoint_data.get("model_type", "checkpoint"),
            "favorite": checkpoint_data.get("favorite", False),
            "civitai": ModelRouteUtils.filter_civitai_data(checkpoint_data.get("civitai", {}))
        }

    def find_duplicate_hashes(self) -> Dict:
        """Find Checkpoints with duplicate SHA256 hashes"""
        return self.scanner._hash_index.get_duplicate_hashes()

    def find_duplicate_filenames(self) -> Dict:
        """Find Checkpoints with conflicting filenames"""
        return self.scanner._hash_index.get_duplicate_filenames()
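# Editor's sketch (not part of the commit): format_response flattens a cached
# record into the API shape; note that file_path is normalised to forward
# slashes and the raw civitai blob is reduced via
# ModelRouteUtils.filter_civitai_data before it leaves the service.
#
#   resp = await checkpoint_service.format_response(record)
#   resp['file_path']  # always '/'-separated, even on Windows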
@@ -1,31 +1,73 @@
from datetime import datetime
import aiohttp
import os
import json
import logging
import asyncio
from email.parser import Parser
from typing import Optional, Dict, Tuple, List
from urllib.parse import unquote
from ..utils.models import LoraMetadata

logger = logging.getLogger(__name__)

class CivitaiClient:
    _instance = None
    _lock = asyncio.Lock()

    @classmethod
    async def get_instance(cls):
        """Get singleton instance of CivitaiClient"""
        async with cls._lock:
            if cls._instance is None:
                cls._instance = cls()
            return cls._instance

    def __init__(self):
        # Check if already initialized for singleton pattern
        if hasattr(self, '_initialized'):
            return
        self._initialized = True

        self.base_url = "https://civitai.com/api/v1"
        self.headers = {
            'User-Agent': 'ComfyUI-LoRA-Manager/1.0'
        }
        self._session = None
        self._session_created_at = None
        # Set default buffer size to 1MB for higher throughput
        self.chunk_size = 1024 * 1024

    @property
    async def session(self) -> aiohttp.ClientSession:
        """Lazy initialize the session"""
        if self._session is None:
            connector = aiohttp.TCPConnector(ssl=True)
            trust_env = True  # Allow using proxy settings from system environment variables
            self._session = aiohttp.ClientSession(connector=connector, trust_env=trust_env)
            # Optimize TCP connection parameters
            connector = aiohttp.TCPConnector(
                ssl=True,
                limit=8,  # Increase from 3 to 8 for better parallelism
                ttl_dns_cache=300,  # Enable DNS caching with reasonable timeout
                force_close=False,  # Keep connections for reuse
                enable_cleanup_closed=True
            )
            trust_env = True  # Allow using system environment proxy settings
            # Configure timeout parameters - increase read timeout for large files
            timeout = aiohttp.ClientTimeout(total=None, connect=60, sock_read=120)
            self._session = aiohttp.ClientSession(
                connector=connector,
                trust_env=trust_env,
                timeout=timeout
            )
            self._session_created_at = datetime.now()
        return self._session

    async def _ensure_fresh_session(self):
        """Refresh session if it's been open too long"""
        if self._session is not None:
            if not hasattr(self, '_session_created_at') or \
               (datetime.now() - self._session_created_at).total_seconds() > 300:  # 5 minutes
                await self.close()
                self._session = None

        return await self.session

    def _parse_content_disposition(self, header: str) -> str:
        """Parse filename from content-disposition header"""
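# Editor's sketch (not part of the commit): the lock-guarded get_instance()
# plus the _initialized flag in __init__ yield one shared client per process,
# and the awaitable `session` property builds the pooled aiohttp session on
# first use.
#
#   client = await CivitaiClient.get_instance()
#   session = await client.session  # created once, then reused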
@@ -71,9 +113,15 @@ class CivitaiClient:
        Returns:
            Tuple[bool, str]: (success, save_path or error message)
        """
        session = await self.session
        logger.debug(f"Resolving DNS for: {url}")
        session = await self._ensure_fresh_session()
        try:
            headers = self._get_request_headers()

            # Add Range header to allow resumable downloads
            headers['Accept-Encoding'] = 'identity'  # Disable compression for better chunked downloads

            logger.debug(f"Starting download from: {url}")
            async with session.get(url, headers=headers, allow_redirects=True) as response:
                if response.status != 200:
                    # Handle 401 unauthorized responses
@@ -88,6 +136,7 @@ class CivitaiClient:
                    return False, "Access forbidden: You don't have permission to download this file."

                    # Generic error response for other status codes
                    logger.error(f"Download failed for {url} with status {response.status}")
                    return False, f"Download failed with status {response.status}"

                # Get filename from content-disposition header
@@ -101,16 +150,23 @@ class CivitaiClient:
                # Get total file size for progress calculation
                total_size = int(response.headers.get('content-length', 0))
                current_size = 0
                last_progress_report_time = datetime.now()

                # Stream download to file with progress updates
                # Stream download to file with progress updates using larger buffer
                with open(save_path, 'wb') as f:
                    async for chunk in response.content.iter_chunked(8192):
                    async for chunk in response.content.iter_chunked(self.chunk_size):
                        if chunk:
                            f.write(chunk)
                            current_size += len(chunk)
                            if progress_callback and total_size:

                            # Limit progress update frequency to reduce overhead
                            now = datetime.now()
                            time_diff = (now - last_progress_report_time).total_seconds()

                            if progress_callback and total_size and time_diff >= 1.0:
                                progress = (current_size / total_size) * 100
                                await progress_callback(progress)
                                last_progress_report_time = now

                # Ensure 100% progress is reported
                if progress_callback:
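# Editor's sketch (not part of the commit): any coroutine accepting a
# percentage works as progress_callback; with the throttle above it fires at
# most about once per second, plus the final 100% report.
#
#   async def log_progress(pct: float) -> None:
#       print(f"download: {pct:.0f}%")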
@@ -118,13 +174,16 @@ class CivitaiClient:

            return True, save_path

        except aiohttp.ClientError as e:
            logger.error(f"Network error during download: {e}")
            return False, f"Network error: {str(e)}"
        except Exception as e:
            logger.error(f"Download error: {e}")
            return False, str(e)

    async def get_model_by_hash(self, model_hash: str) -> Optional[Dict]:
        try:
            session = await self.session
            session = await self._ensure_fresh_session()
            async with session.get(f"{self.base_url}/model-versions/by-hash/{model_hash}") as response:
                if response.status == 200:
                    return await response.json()
@@ -135,7 +194,7 @@ class CivitaiClient:

    async def download_preview_image(self, image_url: str, save_path: str):
        try:
            session = await self.session
            session = await self._ensure_fresh_session()
            async with session.get(image_url) as response:
                if response.status == 200:
                    content = await response.read()
@@ -150,33 +209,111 @@ class CivitaiClient:
    async def get_model_versions(self, model_id: str) -> List[Dict]:
        """Get all versions of a model with local availability info"""
        try:
            session = await self.session  # Await session acquisition
            session = await self._ensure_fresh_session()  # Use fresh session
            async with session.get(f"{self.base_url}/models/{model_id}") as response:
                if response.status != 200:
                    return None
                data = await response.json()
                return data.get('modelVersions', [])
                # Also return model type along with versions
                return {
                    'modelVersions': data.get('modelVersions', []),
                    'type': data.get('type', '')
                }
        except Exception as e:
            logger.error(f"Error fetching model versions: {e}")
            return None

    async def get_model_version_info(self, version_id: str) -> Optional[Dict]:
        """Fetch model version metadata from Civitai"""

    async def get_model_version(self, model_id: int, version_id: int = None) -> Optional[Dict]:
        """Get specific model version with additional metadata

        Args:
            model_id: The Civitai model ID
            version_id: Optional specific version ID to retrieve

        Returns:
            Optional[Dict]: The model version data with additional fields or None if not found
        """
        try:
            session = await self.session
            session = await self._ensure_fresh_session()

            # Step 1: Get model data to find version_id if not provided and get additional metadata
            async with session.get(f"{self.base_url}/models/{model_id}") as response:
                if response.status != 200:
                    return None

                data = await response.json()
                model_versions = data.get('modelVersions', [])

            # Step 2: Determine the version_id to use
            target_version_id = version_id
            if target_version_id is None:
                target_version_id = model_versions[0].get('id')

            # Step 3: Get detailed version info using the version_id
            headers = self._get_request_headers()
            async with session.get(f"{self.base_url}/model-versions/{target_version_id}", headers=headers) as response:
                if response.status != 200:
                    return None

                version = await response.json()

            # Step 4: Enrich version_info with model data
            # Add description and tags from model data
            version['model']['description'] = data.get("description")
            version['model']['tags'] = data.get("tags", [])

            # Add creator from model data
            version['creator'] = data.get("creator")

            return version

        except Exception as e:
            logger.error(f"Error fetching model version: {e}")
            return None

    async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
        """Fetch model version metadata from Civitai

        Args:
            version_id: The Civitai model version ID

        Returns:
            Tuple[Optional[Dict], Optional[str]]: A tuple containing:
            - The model version data or None if not found
            - An error message if there was an error, or None on success
        """
        try:
            session = await self._ensure_fresh_session()
            url = f"{self.base_url}/model-versions/{version_id}"
            headers = self._get_request_headers()

            logger.debug(f"Resolving DNS for model version info: {url}")
            async with session.get(url, headers=headers) as response:
                if response.status == 200:
                    return await response.json()
            return None
                    logger.debug(f"Successfully fetched model version info for: {version_id}")
                    return await response.json(), None

                # Handle specific error cases
                if response.status == 404:
                    # Try to parse the error message
                    try:
                        error_data = await response.json()
                        error_msg = error_data.get('error', f"Model not found (status 404)")
                        logger.warning(f"Model version not found: {version_id} - {error_msg}")
                        return None, error_msg
                    except:
                        return None, "Model not found (status 404)"

                # Other error cases
                logger.error(f"Failed to fetch model info for {version_id} (status {response.status})")
                return None, f"Failed to fetch model info (status {response.status})"
        except Exception as e:
            logger.error(f"Error fetching model version info: {e}")
            return None
            error_msg = f"Error fetching model version info: {e}"
            logger.error(error_msg)
            return None, error_msg

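# Editor's sketch (not part of the commit): with the new (data, error) return
# shape, callers unpack a tuple instead of testing a bare dict.
#
#   info, err = await client.get_model_version_info(version_id)
#   if info is None:
#       logger.warning("version lookup failed: %s", err)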
    async def get_model_metadata(self, model_id: str) -> Tuple[Optional[Dict], int]:
        """Fetch model metadata (description and tags) from Civitai API
        """Fetch model metadata (description, tags, and creator info) from Civitai API

        Args:
            model_id: The Civitai model ID
@@ -187,7 +324,7 @@ class CivitaiClient:
            - The HTTP status code from the request
        """
        try:
            session = await self.session
            session = await self._ensure_fresh_session()
            headers = self._get_request_headers()
            url = f"{self.base_url}/models/{model_id}"

@@ -203,10 +340,14 @@ class CivitaiClient:
            # Extract relevant metadata
            metadata = {
                "description": data.get("description") or "No model description available",
                "tags": data.get("tags", [])
                "tags": data.get("tags", []),
                "creator": {
                    "username": data.get("creator", {}).get("username"),
                    "image": data.get("creator", {}).get("image")
                }
            }

            if metadata["description"] or metadata["tags"]:
            if metadata["description"] or metadata["tags"] or metadata["creator"]["username"]:
                return metadata, status_code
            else:
                logger.warning(f"No metadata found for model {model_id}")
@@ -231,10 +372,11 @@ class CivitaiClient:
    async def _get_hash_from_civitai(self, model_version_id: str) -> Optional[str]:
        """Get hash from Civitai API"""
        try:
            if not self._session:
            session = await self._ensure_fresh_session()
            if not session:
                return None

            version_info = await self._session.get(f"{self.base_url}/model-versions/{model_version_id}")
            version_info = await session.get(f"{self.base_url}/model-versions/{model_version_id}")

            if not version_info or not version_info.json().get('files'):
                return None
@@ -250,3 +392,34 @@ class CivitaiClient:
        except Exception as e:
            logger.error(f"Error getting hash from Civitai: {e}")
            return None

    async def get_image_info(self, image_id: str) -> Optional[Dict]:
        """Fetch image information from Civitai API

        Args:
            image_id: The Civitai image ID

        Returns:
            Optional[Dict]: The image data or None if not found
        """
        try:
            session = await self._ensure_fresh_session()
            headers = self._get_request_headers()
            url = f"{self.base_url}/images?imageId={image_id}&nsfw=X"

            logger.debug(f"Fetching image info for ID: {image_id}")
            async with session.get(url, headers=headers) as response:
                if response.status == 200:
                    data = await response.json()
                    if data and "items" in data and len(data["items"]) > 0:
                        logger.debug(f"Successfully fetched image info for ID: {image_id}")
                        return data["items"][0]
                    logger.warning(f"No image found with ID: {image_id}")
                    return None

                logger.error(f"Failed to fetch image info for ID: {image_id} (status {response.status})")
                return None
        except Exception as e:
            error_msg = f"Error fetching image info: {e}"
            logger.error(error_msg)
            return None
@@ -1,47 +1,264 @@
import logging
import os
import json
from typing import Optional, Dict
from .civitai_client import CivitaiClient
from .file_monitor import LoraFileMonitor
from ..utils.models import LoraMetadata
import asyncio
from collections import OrderedDict
import uuid
from typing import Dict
from ..utils.models import LoraMetadata, CheckpointMetadata, EmbeddingMetadata
from ..utils.constants import CARD_PREVIEW_WIDTH, VALID_LORA_TYPES, CIVITAI_MODEL_TAGS
from ..utils.exif_utils import ExifUtils
from ..utils.metadata_manager import MetadataManager
from .service_registry import ServiceRegistry
from .settings_manager import settings

# Download to temporary file first
import tempfile

logger = logging.getLogger(__name__)

class DownloadManager:
    def __init__(self, file_monitor: Optional[LoraFileMonitor] = None):
        self.civitai_client = CivitaiClient()
        self.file_monitor = file_monitor
    _instance = None
    _lock = asyncio.Lock()

    @classmethod
    async def get_instance(cls):
        """Get singleton instance of DownloadManager"""
        async with cls._lock:
            if cls._instance is None:
                cls._instance = cls()
            return cls._instance

    async def download_from_civitai(self, download_url: str = None, model_hash: str = None,
                                    model_version_id: str = None, save_dir: str = None,
                                    relative_path: str = '', progress_callback=None) -> Dict:
    def __init__(self):
        # Check if already initialized for singleton pattern
        if hasattr(self, '_initialized'):
            return
        self._initialized = True

        self._civitai_client = None  # Will be lazily initialized
        # Add download management
        self._active_downloads = OrderedDict()  # download_id -> download_info
        self._download_semaphore = asyncio.Semaphore(5)  # Limit concurrent downloads
        self._download_tasks = {}  # download_id -> asyncio.Task

    async def _get_civitai_client(self):
        """Lazily initialize CivitaiClient from registry"""
        if self._civitai_client is None:
            self._civitai_client = await ServiceRegistry.get_civitai_client()
        return self._civitai_client

    async def _get_lora_scanner(self):
        """Get the lora scanner from registry"""
        return await ServiceRegistry.get_lora_scanner()

    async def _get_checkpoint_scanner(self):
        """Get the checkpoint scanner from registry"""
        return await ServiceRegistry.get_checkpoint_scanner()

    async def download_from_civitai(self, model_id: int, model_version_id: int,
                                    save_dir: str = None, relative_path: str = '',
                                    progress_callback=None, use_default_paths: bool = False,
                                    download_id: str = None) -> Dict:
        """Download model from Civitai with task tracking and concurrency control

        Args:
            model_id: Civitai model ID
            model_version_id: Civitai model version ID
            save_dir: Directory to save the model
            relative_path: Relative path within save_dir
            progress_callback: Callback function for progress updates
            use_default_paths: Flag to use default paths
            download_id: Unique identifier for this download task

        Returns:
            Dict with download result
        """
        # Use provided download_id or generate new one
        task_id = download_id or str(uuid.uuid4())

        # Register download task in tracking dict
        self._active_downloads[task_id] = {
            'model_id': model_id,
            'model_version_id': model_version_id,
            'progress': 0,
            'status': 'queued'
        }

        # Create tracking task
        download_task = asyncio.create_task(
            self._download_with_semaphore(
                task_id, model_id, model_version_id, save_dir,
                relative_path, progress_callback, use_default_paths
            )
        )

        # Store task for tracking and cancellation
        self._download_tasks[task_id] = download_task

        try:
            # Wait for download to complete
            result = await download_task
            result['download_id'] = task_id  # Include download_id in result
            return result
        except asyncio.CancelledError:
            return {'success': False, 'error': 'Download was cancelled', 'download_id': task_id}
        finally:
            # Clean up task reference
            if task_id in self._download_tasks:
                del self._download_tasks[task_id]

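# Editor's sketch (not part of the commit): a typical call against the new
# signature; the ids are placeholders. The returned dict echoes the
# download_id so a UI can later poll or cancel the task.
#
#   dm = await DownloadManager.get_instance()
#   result = await dm.download_from_civitai(
#       model_id=12345, model_version_id=67890,
#       use_default_paths=True, download_id='ui-task-1')
#   assert result['download_id'] == 'ui-task-1'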
    async def _download_with_semaphore(self, task_id: str, model_id: int, model_version_id: int,
                                       save_dir: str, relative_path: str,
                                       progress_callback=None, use_default_paths: bool = False):
        """Execute download with semaphore to limit concurrency"""
        # Update status to waiting
        if task_id in self._active_downloads:
            self._active_downloads[task_id]['status'] = 'waiting'

        # Wrap progress callback to track progress in active_downloads
        original_callback = progress_callback
        async def tracking_callback(progress):
            if task_id in self._active_downloads:
                self._active_downloads[task_id]['progress'] = progress
            if original_callback:
                await original_callback(progress)

        # Acquire semaphore to limit concurrent downloads
        try:
            async with self._download_semaphore:
                # Update status to downloading
                if task_id in self._active_downloads:
                    self._active_downloads[task_id]['status'] = 'downloading'

                # Use original download implementation
                try:
                    # Check for cancellation before starting
                    if asyncio.current_task().cancelled():
                        raise asyncio.CancelledError()

                    result = await self._execute_original_download(
                        model_id, model_version_id, save_dir,
                        relative_path, tracking_callback, use_default_paths,
                        task_id
                    )

                    # Update status based on result
                    if task_id in self._active_downloads:
                        self._active_downloads[task_id]['status'] = 'completed' if result['success'] else 'failed'
                        if not result['success']:
                            self._active_downloads[task_id]['error'] = result.get('error', 'Unknown error')

                    return result
                except asyncio.CancelledError:
                    # Handle cancellation
                    if task_id in self._active_downloads:
                        self._active_downloads[task_id]['status'] = 'cancelled'
                    logger.info(f"Download cancelled for task {task_id}")
                    raise
                except Exception as e:
                    # Handle other errors
                    logger.error(f"Download error for task {task_id}: {str(e)}", exc_info=True)
                    if task_id in self._active_downloads:
                        self._active_downloads[task_id]['status'] = 'failed'
                        self._active_downloads[task_id]['error'] = str(e)
                    return {'success': False, 'error': str(e)}
        finally:
            # Schedule cleanup of download record after delay
            asyncio.create_task(self._cleanup_download_record(task_id))

    async def _cleanup_download_record(self, task_id: str):
        """Keep completed downloads in history for a short time"""
        await asyncio.sleep(600)  # Keep for 10 minutes
        if task_id in self._active_downloads:
            del self._active_downloads[task_id]

    async def _execute_original_download(self, model_id, model_version_id, save_dir,
                                         relative_path, progress_callback, use_default_paths,
                                         download_id=None):
        """Wrapper for original download_from_civitai implementation"""
        try:
            # Check if model version already exists in library
            if model_version_id is not None:
                # Check both scanners
                lora_scanner = await self._get_lora_scanner()
                checkpoint_scanner = await self._get_checkpoint_scanner()

                # Check lora scanner first
                if await lora_scanner.check_model_version_exists(model_id, model_version_id):
                    return {'success': False, 'error': 'Model version already exists in lora library'}

                # Check checkpoint scanner
                if await checkpoint_scanner.check_model_version_exists(model_id, model_version_id):
                    return {'success': False, 'error': 'Model version already exists in checkpoint library'}

            # Get civitai client
            civitai_client = await self._get_civitai_client()

            # Get version info based on the provided identifier
            version_info = await civitai_client.get_model_version(model_id, model_version_id)

            if not version_info:
                return {'success': False, 'error': 'Failed to fetch model metadata'}

            model_type_from_info = version_info.get('model', {}).get('type', '').lower()
            if model_type_from_info == 'checkpoint':
                model_type = 'checkpoint'
            elif model_type_from_info in VALID_LORA_TYPES:
                model_type = 'lora'
            elif model_type_from_info == 'textualinversion':
                model_type = 'embedding'
            else:
                return {'success': False, 'error': f'Model type "{model_type_from_info}" is not supported for download'}

            # Case 2: model_version_id was None, check after getting version_info
            if model_version_id is None:
                version_model_id = version_info.get('modelId')
                version_id = version_info.get('id')

                if model_type == 'lora':
                    # Check lora scanner
                    lora_scanner = await self._get_lora_scanner()
                    if await lora_scanner.check_model_version_exists(version_model_id, version_id):
                        return {'success': False, 'error': 'Model version already exists in lora library'}
                elif model_type == 'checkpoint':
                    # Check checkpoint scanner
                    checkpoint_scanner = await self._get_checkpoint_scanner()
                    if await checkpoint_scanner.check_model_version_exists(version_model_id, version_id):
                        return {'success': False, 'error': 'Model version already exists in checkpoint library'}
                elif model_type == 'embedding':
                    # Embeddings are not checked in scanners, but we can still check if it exists
                    embedding_scanner = await ServiceRegistry.get_embedding_scanner()
                    if await embedding_scanner.check_model_version_exists(version_model_id, version_id):
                        return {'success': False, 'error': 'Model version already exists in embedding library'}

            # Handle use_default_paths
            if use_default_paths:
                # Set save_dir based on model type
                if model_type == 'checkpoint':
                    default_path = settings.get('default_checkpoint_root')
                    if not default_path:
                        return {'success': False, 'error': 'Default checkpoint root path not set in settings'}
                    save_dir = default_path
                elif model_type == 'lora':
                    default_path = settings.get('default_lora_root')
                    if not default_path:
                        return {'success': False, 'error': 'Default lora root path not set in settings'}
                    save_dir = default_path
                elif model_type == 'embedding':
                    default_path = settings.get('default_embedding_root')
                    if not default_path:
                        return {'success': False, 'error': 'Default embedding root path not set in settings'}
                    save_dir = default_path

                # Calculate relative path using template
                relative_path = self._calculate_relative_path(version_info)

            # Update save directory with relative path if provided
            if relative_path:
                save_dir = os.path.join(save_dir, relative_path)
                # Create directory if it doesn't exist
                os.makedirs(save_dir, exist_ok=True)

            # Get version info based on the provided identifier
            version_info = None

            if download_url:
                # Extract version ID from download URL
                version_id = download_url.split('/')[-1]
                version_info = await self.civitai_client.get_model_version_info(version_id)
            elif model_version_id:
                # Use model version ID directly
                version_info = await self.civitai_client.get_model_version_info(model_version_id)
            elif model_hash:
                # Get model by hash
                version_info = await self.civitai_client.get_model_by_hash(model_hash)

            if not version_info:
                return {'success': False, 'error': 'Failed to fetch model metadata'}

            # Check if this is an early access LoRA
            # Check if this is an early access model
            if version_info.get('earlyAccessEndsAt'):
                early_access_date = version_info.get('earlyAccessEndsAt', '')
                # Convert to a readable date if possible
@@ -49,12 +266,12 @@ class DownloadManager:
                from datetime import datetime
                date_obj = datetime.fromisoformat(early_access_date.replace('Z', '+00:00'))
                formatted_date = date_obj.strftime('%Y-%m-%d')
                early_access_msg = f"This LoRA requires early access payment (until {formatted_date}). "
                early_access_msg = f"This model requires early access payment (until {formatted_date}). "
            except:
                early_access_msg = "This LoRA requires early access payment. "
                early_access_msg = "This model requires early access payment. "

            early_access_msg += "Please ensure you have purchased early access and are logged in to Civitai."
            logger.warning(f"Early access LoRA detected: {version_info.get('name', 'Unknown')}")
            logger.warning(f"Early access model detected: {version_info.get('name', 'Unknown')}")

            # We'll still try to download, but log a warning and prepare for potential failure
            if progress_callback:
@@ -64,50 +281,36 @@ class DownloadManager:
            if progress_callback:
                await progress_callback(0)

            # 2. Get file information
            file_info = next((f for f in version_info.get('files', []) if f.get('primary')), None)
            if not file_info:
                return {'success': False, 'error': 'No primary file found in metadata'}

            # 3. Prepare download
            file_name = file_info['name']
            save_path = os.path.join(save_dir, file_name)
            file_size = file_info.get('sizeKB', 0) * 1024

            # 4. Notify the file monitor system - use the normalized path and file size
            if self.file_monitor and self.file_monitor.handler:
                # Add both the normalized path and potential alternative paths
                normalized_path = save_path.replace(os.sep, '/')
                self.file_monitor.handler.add_ignore_path(normalized_path, file_size)

                # Also add the path with file extension variations (.safetensors)
                if not normalized_path.endswith('.safetensors'):
                    safetensors_path = os.path.splitext(normalized_path)[0] + '.safetensors'
                    self.file_monitor.handler.add_ignore_path(safetensors_path, file_size)

                logger.debug(f"Added download path to ignore list: {normalized_path} (size: {file_size} bytes)")

            # 5. Prepare metadata
            metadata = LoraMetadata.from_civitai_info(version_info, file_info, save_path)
            # 5. Prepare metadata based on model type
            if model_type == "checkpoint":
                metadata = CheckpointMetadata.from_civitai_info(version_info, file_info, save_path)
                logger.info(f"Creating CheckpointMetadata for {file_name}")
            elif model_type == "lora":
                metadata = LoraMetadata.from_civitai_info(version_info, file_info, save_path)
                logger.info(f"Creating LoraMetadata for {file_name}")
            elif model_type == "embedding":
                metadata = EmbeddingMetadata.from_civitai_info(version_info, file_info, save_path)
                logger.info(f"Creating EmbeddingMetadata for {file_name}")

            # 5.1 Fetch and update the model's tags and description
            model_id = version_info.get('modelId')
            if model_id:
                model_metadata, _ = await self.civitai_client.get_model_metadata(str(model_id))
                if model_metadata:
                    if model_metadata.get("tags"):
                        metadata.tags = model_metadata.get("tags", [])
                    if model_metadata.get("description"):
                        metadata.modelDescription = model_metadata.get("description", "")

            # 6. Start download process
            result = await self._execute_download(
                download_url=file_info.get('downloadUrl', ''),
                save_dir=save_dir,
                metadata=metadata,
                version_info=version_info,
                relative_path=relative_path,
                progress_callback=progress_callback
                progress_callback=progress_callback,
                model_type=model_type,
                download_id=download_id
            )

            return result
@@ -120,13 +323,63 @@ class DownloadManager:
            return {'success': False, 'error': f"Early access restriction: {str(e)}. Please ensure you have purchased early access and are logged in to Civitai."}
        return {'success': False, 'error': str(e)}

    def _calculate_relative_path(self, version_info: Dict) -> str:
        """Calculate relative path using template from settings

        Args:
            version_info: Version info from Civitai API

        Returns:
            Relative path string
        """
        # Get path template from settings, default to '{base_model}/{first_tag}'
        path_template = settings.get('download_path_template', '{base_model}/{first_tag}')

        # If template is empty, return empty path (flat structure)
        if not path_template:
            return ''

        # Get base model name
        base_model = version_info.get('baseModel', '')

        # Apply mapping if available
        base_model_mappings = settings.get('base_model_path_mappings', {})
        mapped_base_model = base_model_mappings.get(base_model, base_model)

        # Get model tags
        model_tags = version_info.get('model', {}).get('tags', [])

        # Find the first Civitai model tag that exists in model_tags
        first_tag = ''
        for civitai_tag in CIVITAI_MODEL_TAGS:
            if civitai_tag in model_tags:
                first_tag = civitai_tag
                break

        # If no Civitai model tag found, fallback to first tag
        if not first_tag and model_tags:
            first_tag = model_tags[0]

        # Format the template with available data
        formatted_path = path_template
        formatted_path = formatted_path.replace('{base_model}', mapped_base_model)
        formatted_path = formatted_path.replace('{first_tag}', first_tag)

        return formatted_path

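# Editor's sketch (not part of the commit): the template substitution above,
# standalone. With the default '{base_model}/{first_tag}' template, a version
# whose baseModel is 'SDXL 1.0' and whose first matching tag is 'style'
# resolves to 'SDXL 1.0/style' (after any base_model_path_mappings override).
def _render_template(template: str, base_model: str, first_tag: str) -> str:
    return template.replace('{base_model}', base_model).replace('{first_tag}', first_tag)

assert _render_template('{base_model}/{first_tag}', 'SDXL 1.0', 'style') == 'SDXL 1.0/style'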
    async def _execute_download(self, download_url: str, save_dir: str,
                                metadata: LoraMetadata, version_info: Dict,
                                relative_path: str, progress_callback=None) -> Dict:
                                metadata, version_info: Dict,
                                relative_path: str, progress_callback=None,
                                model_type: str = "lora", download_id: str = None) -> Dict:
        """Execute the actual download process including preview images and model files"""
        try:
            civitai_client = await self._get_civitai_client()
            save_path = metadata.file_path
            metadata_path = os.path.splitext(save_path)[0] + '.metadata.json'

            # Store file path in active_downloads for potential cleanup
            if download_id and download_id in self._active_downloads:
                self._active_downloads[download_id]['file_path'] = save_path

            # Download preview image if available
            images = version_info.get('images', [])
@@ -135,20 +388,57 @@ class DownloadManager:
            if progress_callback:
                await progress_callback(1)  # 1% progress for starting preview download

            preview_ext = '.mp4' if images[0].get('type') == 'video' else '.png'
            preview_path = os.path.splitext(save_path)[0] + '.preview' + preview_ext
            if await self.civitai_client.download_preview_image(images[0]['url'], preview_path):
                metadata.preview_url = preview_path.replace(os.sep, '/')
                metadata.preview_nsfw_level = images[0].get('nsfwLevel', 0)
            with open(metadata_path, 'w', encoding='utf-8') as f:
                json.dump(metadata.to_dict(), f, indent=2, ensure_ascii=False)
            # Check if it's a video or an image
            is_video = images[0].get('type') == 'video'

            if is_video:
                # For videos, use .mp4 extension
                preview_ext = '.mp4'
                preview_path = os.path.splitext(save_path)[0] + preview_ext

                # Download video directly
                if await civitai_client.download_preview_image(images[0]['url'], preview_path):
                    metadata.preview_url = preview_path.replace(os.sep, '/')
                    metadata.preview_nsfw_level = images[0].get('nsfwLevel', 0)
            else:
                # For images, use WebP format for better performance
                with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as temp_file:
                    temp_path = temp_file.name

                # Download the original image to temp path
                if await civitai_client.download_preview_image(images[0]['url'], temp_path):
                    # Optimize and convert to WebP
                    preview_path = os.path.splitext(save_path)[0] + '.webp'

                    # Use ExifUtils to optimize and convert the image
                    optimized_data, _ = ExifUtils.optimize_image(
                        image_data=temp_path,
                        target_width=CARD_PREVIEW_WIDTH,
                        format='webp',
                        quality=85,
                        preserve_metadata=False
                    )

                    # Save the optimized image
                    with open(preview_path, 'wb') as f:
                        f.write(optimized_data)

                    # Update metadata
                    metadata.preview_url = preview_path.replace(os.sep, '/')
                    metadata.preview_nsfw_level = images[0].get('nsfwLevel', 0)

                # Remove temporary file
                try:
                    os.unlink(temp_path)
                except Exception as e:
                    logger.warning(f"Failed to delete temp file: {e}")

            # Report preview download completion
            if progress_callback:
                await progress_callback(3)  # 3% progress after preview download

            # Download model file with progress tracking
            success, result = await self.civitai_client._download_file(
            success, result = await civitai_client._download_file(
                download_url,
                save_dir,
                os.path.basename(save_path),
|
||||
os.remove(path)
|
||||
return {'success': False, 'error': result}
|
||||
|
||||
# 4. 更新文件信息(大小和修改时间)
|
||||
# 4. Update file information (size and modified time)
|
||||
metadata.update_file_info(save_path)
|
||||
|
||||
# 5. 最终更新元数据
|
||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(metadata.to_dict(), f, indent=2, ensure_ascii=False)
|
||||
# 5. Final metadata update
|
||||
await MetadataManager.save_metadata(save_path, metadata, True)
|
||||
|
||||
# 6. update lora cache
|
||||
cache = await self.file_monitor.scanner.get_cached_data()
|
||||
# 6. Update cache based on model type
|
||||
if model_type == "checkpoint":
|
||||
scanner = await self._get_checkpoint_scanner()
|
||||
logger.info(f"Updating checkpoint cache for {save_path}")
|
||||
elif model_type == "lora":
|
||||
scanner = await self._get_lora_scanner()
|
||||
logger.info(f"Updating lora cache for {save_path}")
|
||||
elif model_type == "embedding":
|
||||
scanner = await ServiceRegistry.get_embedding_scanner()
|
||||
logger.info(f"Updating embedding cache for {save_path}")
|
||||
|
||||
# Convert metadata to dictionary
|
||||
metadata_dict = metadata.to_dict()
|
||||
metadata_dict['folder'] = relative_path
|
||||
cache.raw_data.append(metadata_dict)
|
||||
await cache.resort()
|
||||
all_folders = set(cache.folders)
|
||||
all_folders.add(relative_path)
|
||||
cache.folders = sorted(list(all_folders), key=lambda x: x.lower())
|
||||
|
||||
# Update the hash index with the new LoRA entry
|
||||
self.file_monitor.scanner._hash_index.add_entry(metadata_dict['sha256'], metadata_dict['file_path'])
|
||||
|
||||
# Update the hash index with the new LoRA entry
|
||||
self.file_monitor.scanner._hash_index.add_entry(metadata_dict['sha256'], metadata_dict['file_path'])
|
||||
# Add model to cache and save to disk in a single operation
|
||||
await scanner.add_model_to_cache(metadata_dict, relative_path)
|
||||
|
||||
# Report 100% completion
|
||||
if progress_callback:
|
||||
@@ -211,4 +501,86 @@ class DownloadManager:
            if progress_callback:
                # Scale file progress to 3-100 range (after preview download)
                overall_progress = 3 + (file_progress * 0.97)  # 97% of progress for file download
                await progress_callback(round(overall_progress))
                await progress_callback(round(overall_progress))

    async def cancel_download(self, download_id: str) -> Dict:
        """Cancel an active download by download_id

        Args:
            download_id: The unique identifier of the download task

        Returns:
            Dict: Status of the cancellation operation
        """
        if download_id not in self._download_tasks:
            return {'success': False, 'error': 'Download task not found'}

        try:
            # Get the task and cancel it
            task = self._download_tasks[download_id]
            task.cancel()

            # Update status in active downloads
            if download_id in self._active_downloads:
                self._active_downloads[download_id]['status'] = 'cancelling'

            # Wait briefly for the task to acknowledge cancellation
            try:
                await asyncio.wait_for(asyncio.shield(task), timeout=2.0)
            except (asyncio.CancelledError, asyncio.TimeoutError):
                pass

            # Clean up partial downloads
            download_info = self._active_downloads.get(download_id)
            if download_info and 'file_path' in download_info:
                # Delete the partial file
                file_path = download_info['file_path']
                if os.path.exists(file_path):
                    try:
                        os.unlink(file_path)
                        logger.debug(f"Deleted partial download: {file_path}")
                    except Exception as e:
                        logger.error(f"Error deleting partial file: {e}")

                # Delete metadata file if exists
                metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
                if os.path.exists(metadata_path):
                    try:
                        os.unlink(metadata_path)
                    except Exception as e:
                        logger.error(f"Error deleting metadata file: {e}")

                # Delete preview file if exists (.webp or .mp4)
                for preview_ext in ['.webp', '.mp4']:
                    preview_path = os.path.splitext(file_path)[0] + preview_ext
                    if os.path.exists(preview_path):
                        try:
                            os.unlink(preview_path)
                            logger.debug(f"Deleted preview file: {preview_path}")
                        except Exception as e:
                            logger.error(f"Error deleting preview file: {e}")

            return {'success': True, 'message': 'Download cancelled successfully'}
        except Exception as e:
            logger.error(f"Error cancelling download: {e}", exc_info=True)
            return {'success': False, 'error': str(e)}

    async def get_active_downloads(self) -> Dict:
        """Get information about all active downloads

        Returns:
            Dict: List of active downloads and their status
        """
        return {
            'downloads': [
                {
                    'download_id': task_id,
                    'model_id': info.get('model_id'),
                    'model_version_id': info.get('model_version_id'),
                    'progress': info.get('progress', 0),
                    'status': info.get('status', 'unknown'),
                    'error': info.get('error', None)
                }
                for task_id, info in self._active_downloads.items()
            ]
        }
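# Editor's sketch (not part of the commit): cancelling and polling reuse the
# download_id handed back by download_from_civitai; cancel_download also
# removes the partial file, its .metadata.json, and any .webp/.mp4 preview.
#
#   status = await dm.get_active_downloads()
#   await dm.cancel_download('ui-task-1')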
py/services/embedding_scanner.py (new file, 26 lines)
@@ -0,0 +1,26 @@
import logging
from typing import List

from ..utils.models import EmbeddingMetadata
from ..config import config
from .model_scanner import ModelScanner
from .model_hash_index import ModelHashIndex

logger = logging.getLogger(__name__)


class EmbeddingScanner(ModelScanner):
    """Service for scanning and managing embedding files"""

    def __init__(self):
        # Define supported file extensions
        file_extensions = {'.ckpt', '.pt', '.pt2', '.bin', '.pth', '.safetensors', '.pkl', '.sft'}
        super().__init__(
            model_type="embedding",
            model_class=EmbeddingMetadata,
            file_extensions=file_extensions,
            hash_index=ModelHashIndex()
        )

    def get_model_roots(self) -> List[str]:
        """Get embedding root directories"""
        return config.embeddings_roots
py/services/embedding_service.py (new file, 51 lines)
@@ -0,0 +1,51 @@
import os
import logging
from typing import Dict, List, Optional

from .base_model_service import BaseModelService
from ..utils.models import EmbeddingMetadata
from ..config import config
from ..utils.routes_common import ModelRouteUtils

logger = logging.getLogger(__name__)


class EmbeddingService(BaseModelService):
    """Embedding-specific service implementation"""

    def __init__(self, scanner):
        """Initialize Embedding service

        Args:
            scanner: Embedding scanner instance
        """
        super().__init__("embedding", scanner, EmbeddingMetadata)

    async def format_response(self, embedding_data: Dict) -> Dict:
        """Format Embedding data for API response"""
        return {
            "model_name": embedding_data["model_name"],
            "file_name": embedding_data["file_name"],
            "preview_url": config.get_preview_static_url(embedding_data.get("preview_url", "")),
            "preview_nsfw_level": embedding_data.get("preview_nsfw_level", 0),
            "base_model": embedding_data.get("base_model", ""),
            "folder": embedding_data["folder"],
            "sha256": embedding_data.get("sha256", ""),
            "file_path": embedding_data["file_path"].replace(os.sep, "/"),
            "file_size": embedding_data.get("size", 0),
            "modified": embedding_data.get("modified", ""),
            "tags": embedding_data.get("tags", []),
            "modelDescription": embedding_data.get("modelDescription", ""),
            "from_civitai": embedding_data.get("from_civitai", True),
            "notes": embedding_data.get("notes", ""),
            "model_type": embedding_data.get("model_type", "embedding"),
            "favorite": embedding_data.get("favorite", False),
            "civitai": ModelRouteUtils.filter_civitai_data(embedding_data.get("civitai", {}))
        }

    def find_duplicate_hashes(self) -> Dict:
        """Find Embeddings with duplicate SHA256 hashes"""
        return self.scanner._hash_index.get_duplicate_hashes()

    def find_duplicate_filenames(self) -> Dict:
        """Find Embeddings with conflicting filenames"""
        return self.scanner._hash_index.get_duplicate_filenames()
@@ -1,250 +0,0 @@
from operator import itemgetter
import os
import logging
import asyncio
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler, FileCreatedEvent, FileDeletedEvent
from typing import List
from threading import Lock
from .lora_scanner import LoraScanner
from ..config import config

logger = logging.getLogger(__name__)

class LoraFileHandler(FileSystemEventHandler):
    """Handler for LoRA file system events"""

    def __init__(self, scanner: LoraScanner, loop: asyncio.AbstractEventLoop):
        self.scanner = scanner
        self.loop = loop  # Keep a reference to the event loop
        self.pending_changes = set()  # Changes waiting to be processed
        self.lock = Lock()  # Thread-safety lock
        self.update_task = None  # Async update task
        self._ignore_paths = {}  # Change to dictionary to store expiration times
        self._min_ignore_timeout = 5  # minimum timeout in seconds
        self._download_speed = 1024 * 1024  # assume 1MB/s as base speed

    def _should_ignore(self, path: str) -> bool:
        """Check if path should be ignored"""
        real_path = os.path.realpath(path)  # Resolve any symbolic links
        normalized_path = real_path.replace(os.sep, '/')

        # Also check with backslashes for Windows compatibility
        alt_path = real_path.replace('/', '\\')

        # Use the injected event loop instead of trying to fetch the current thread's loop
        current_time = self.loop.time()

        # Check if path is in ignore list and not expired
        if normalized_path in self._ignore_paths and self._ignore_paths[normalized_path] > current_time:
            return True

        # Also check alternative path format
        if alt_path in self._ignore_paths and self._ignore_paths[alt_path] > current_time:
            return True

        return False

    def add_ignore_path(self, path: str, file_size: int = 0):
        """Add path to ignore list with dynamic timeout based on file size"""
        real_path = os.path.realpath(path)  # Resolve any symbolic links
        normalized_path = real_path.replace(os.sep, '/')

        # Calculate timeout based on file size
        # For small files, use minimum timeout
        # For larger files, estimate download time + buffer
        if file_size > 0:
            # Estimate download time in seconds (size / speed) + buffer
            estimated_time = (file_size / self._download_speed) + 10
            timeout = max(self._min_ignore_timeout, estimated_time)
        else:
            timeout = self._min_ignore_timeout

        current_time = self.loop.time()
        expiration_time = current_time + timeout

        # Store both normalized and alternative path formats
        self._ignore_paths[normalized_path] = expiration_time

        # Also store with backslashes for Windows compatibility
        alt_path = real_path.replace('/', '\\')
        self._ignore_paths[alt_path] = expiration_time

        logger.debug(f"Added ignore path: {normalized_path} (expires in {timeout:.1f}s)")

        self.loop.call_later(
            timeout,
            self._remove_ignore_path,
            normalized_path
        )

    def _remove_ignore_path(self, path: str):
        """Remove path from ignore list after timeout"""
        if path in self._ignore_paths:
            del self._ignore_paths[path]
            logger.debug(f"Removed ignore path: {path}")

        # Also remove alternative path format
        alt_path = path.replace('/', '\\')
        if alt_path in self._ignore_paths:
            del self._ignore_paths[alt_path]

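# Editor's sketch (not part of the commit): the dynamic ignore timeout above,
# standalone. At the assumed 1 MB/s base speed a 100 MB download is ignored
# for about 110 s; zero-size entries fall back to the 5 s minimum.
def _ignore_timeout(file_size: int, speed: int = 1024 * 1024, minimum: int = 5) -> float:
    return max(minimum, (file_size / speed) + 10) if file_size > 0 else minimum

assert _ignore_timeout(0) == 5
assert _ignore_timeout(100 * 1024 * 1024) == 110.0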
    def on_created(self, event):
        if event.is_directory or not event.src_path.endswith('.safetensors'):
            return
        if self._should_ignore(event.src_path):
            return
        logger.info(f"LoRA file created: {event.src_path}")
        self._schedule_update('add', event.src_path)

    def on_deleted(self, event):
        if event.is_directory or not event.src_path.endswith('.safetensors'):
            return
        if self._should_ignore(event.src_path):
            return
        logger.info(f"LoRA file deleted: {event.src_path}")
        self._schedule_update('remove', event.src_path)

    def _schedule_update(self, action: str, file_path: str):  # file_path is a real path
        """Schedule a cache update"""
        with self.lock:
            # Map the path using the helper in config
            mapped_path = config.map_path_to_link(file_path)
            normalized_path = mapped_path.replace(os.sep, '/')
            self.pending_changes.add((action, normalized_path))

            self.loop.call_soon_threadsafe(self._create_update_task)

    def _create_update_task(self):
        """Create update task in the event loop"""
        if self.update_task is None or self.update_task.done():
            self.update_task = asyncio.create_task(self._process_changes())

    async def _process_changes(self, delay: float = 2.0):
        """Process pending changes with debouncing"""
        await asyncio.sleep(delay)

        try:
            with self.lock:
                changes = self.pending_changes.copy()
                self.pending_changes.clear()

            if not changes:
                return

            logger.info(f"Processing {len(changes)} file changes")

            cache = await self.scanner.get_cached_data()
            needs_resort = False
            new_folders = set()

            for action, file_path in changes:
                try:
                    if action == 'add':
                        # Scan new file
                        lora_data = await self.scanner.scan_single_lora(file_path)
                        if lora_data:
                            # Update tags count
                            for tag in lora_data.get('tags', []):
                                self.scanner._tags_count[tag] = self.scanner._tags_count.get(tag, 0) + 1

                            cache.raw_data.append(lora_data)
                            new_folders.add(lora_data['folder'])
                            # Update hash index
                            if 'sha256' in lora_data:
                                self.scanner._hash_index.add_entry(
                                    lora_data['sha256'],
                                    lora_data['file_path']
                                )
                            needs_resort = True

                    elif action == 'remove':
                        # Find the lora to remove so we can update tags count
                        lora_to_remove = next((item for item in cache.raw_data if item['file_path'] == file_path), None)
                        if lora_to_remove:
                            # Update tags count by reducing counts
                            for tag in lora_to_remove.get('tags', []):
                                if tag in self.scanner._tags_count:
                                    self.scanner._tags_count[tag] = max(0, self.scanner._tags_count[tag] - 1)
                                    if self.scanner._tags_count[tag] == 0:
                                        del self.scanner._tags_count[tag]

                        # Remove from cache and hash index
                        logger.info(f"Removing {file_path} from cache")
                        self.scanner._hash_index.remove_by_path(file_path)
                        cache.raw_data = [
                            item for item in cache.raw_data
                            if item['file_path'] != file_path
                        ]
                        needs_resort = True

                except Exception as e:
                    logger.error(f"Error processing {action} for {file_path}: {e}")

            if needs_resort:
                await cache.resort()

                # Update folder list
                all_folders = set(cache.folders) | new_folders
                cache.folders = sorted(list(all_folders), key=lambda x: x.lower())

        except Exception as e:
            logger.error(f"Error in process_changes: {e}")


class LoraFileMonitor:
    """Monitor for LoRA file changes"""

    def __init__(self, scanner: LoraScanner, roots: List[str]):
        self.scanner = scanner
        scanner.set_file_monitor(self)
        self.observer = Observer()
        self.loop = asyncio.get_event_loop()
        self.handler = LoraFileHandler(scanner, self.loop)

        # Reuse the existing path mappings
        self.monitor_paths = set()
        for root in roots:
            self.monitor_paths.add(os.path.realpath(root).replace(os.sep, '/'))

        # Add all mapped target paths
        for target_path in config._path_mappings.keys():
            self.monitor_paths.add(target_path)

    def start(self):
        """Start monitoring"""
        for path_info in self.monitor_paths:
            try:
                if isinstance(path_info, tuple):
                    # For links, monitor the target path
                    _, target_path = path_info
                    self.observer.schedule(self.handler, target_path, recursive=True)
                    logger.info(f"Started monitoring target path: {target_path}")
                else:
                    # For regular paths, monitor them directly
                    self.observer.schedule(self.handler, path_info, recursive=True)
                    logger.info(f"Started monitoring: {path_info}")
            except Exception as e:
                logger.error(f"Error monitoring {path_info}: {e}")

        self.observer.start()

    def stop(self):
        """Stop monitoring"""
        self.observer.stop()
        self.observer.join()

    def rescan_links(self):
        """Rescan links (called when a new link is added)"""
        new_paths = set()
        for path in self.monitor_paths.copy():
            self._add_link_targets(path)

        # Add newly discovered paths to the monitor
        new_paths = self.monitor_paths - set(self.observer.watches.keys())
        for path in new_paths:
            try:
                self.observer.schedule(self.handler, path, recursive=True)
                logger.info(f"Added new monitoring path: {path}")
            except Exception as e:
                logger.error(f"Error adding new monitor for {path}: {e}")
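The handler above coalesces bursts of watchdog events by sleeping before draining pending_changes, so a flood of filesystem notifications becomes a single batch. A minimal standalone sketch of the same debounce idea, with hypothetical names throughout (not the project's classes):

import asyncio

class Debouncer:
    def __init__(self, delay: float = 2.0):
        self.delay = delay
        self.pending = set()
        self.task = None

    def submit(self, item):
        self.pending.add(item)
        # Only spawn a new drain task if none is already in flight
        if self.task is None or self.task.done():
            self.task = asyncio.create_task(self._drain())

    async def _drain(self):
        await asyncio.sleep(self.delay)  # let more events accumulate first
        batch, self.pending = self.pending, set()
        print(f"processing {len(batch)} coalesced events")

async def main():
    d = Debouncer(delay=0.1)
    for i in range(100):
        d.submit(('add', f'file_{i % 3}.safetensors'))  # duplicates coalesce
    await asyncio.sleep(0.3)

asyncio.run(main())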
@@ -1,64 +0,0 @@
import asyncio
from typing import List, Dict
from dataclasses import dataclass
from operator import itemgetter


@dataclass
class LoraCache:
    """Cache structure for LoRA data"""
    raw_data: List[Dict]
    sorted_by_name: List[Dict]
    sorted_by_date: List[Dict]
    folders: List[str]

    def __post_init__(self):
        self._lock = asyncio.Lock()

    async def resort(self, name_only: bool = False):
        """Resort all cached data views"""
        async with self._lock:
            self.sorted_by_name = sorted(
                self.raw_data,
                key=lambda x: x['model_name'].lower()  # Case-insensitive sort
            )
            if not name_only:
                self.sorted_by_date = sorted(
                    self.raw_data,
                    key=itemgetter('modified'),
                    reverse=True
                )
            # Update folder list
            all_folders = set(l['folder'] for l in self.raw_data)
            self.folders = sorted(list(all_folders), key=lambda x: x.lower())

    async def update_preview_url(self, file_path: str, preview_url: str) -> bool:
        """Update preview_url for a specific lora in all cached data

        Args:
            file_path: The file path of the lora to update
            preview_url: The new preview URL

        Returns:
            bool: True if the update was successful, False if the lora wasn't found
        """
        async with self._lock:
            # Update in raw_data
            for item in self.raw_data:
                if item['file_path'] == file_path:
                    item['preview_url'] = preview_url
                    break
            else:
                return False  # Lora not found

            # Update in sorted lists (references to the same dict objects)
            for item in self.sorted_by_name:
                if item['file_path'] == file_path:
                    item['preview_url'] = preview_url
                    break

            for item in self.sorted_by_date:
                if item['file_path'] == file_path:
                    item['preview_url'] = preview_url
                    break

            return True
@@ -1,54 +0,0 @@
from typing import Dict, Optional
import logging
from dataclasses import dataclass

logger = logging.getLogger(__name__)


@dataclass
class LoraHashIndex:
    """Index for mapping LoRA file hashes to their file paths"""

    def __init__(self):
        self._hash_to_path: Dict[str, str] = {}

    def add_entry(self, sha256: str, file_path: str) -> None:
        """Add or update a hash -> path mapping"""
        if not sha256 or not file_path:
            return
        # Always store lowercase hashes for consistency
        self._hash_to_path[sha256.lower()] = file_path

    def remove_entry(self, sha256: str) -> None:
        """Remove a hash entry"""
        if sha256:
            self._hash_to_path.pop(sha256.lower(), None)

    def remove_by_path(self, file_path: str) -> None:
        """Remove entry by file path"""
        for sha256, path in list(self._hash_to_path.items()):
            if path == file_path:
                del self._hash_to_path[sha256]
                break

    def get_path(self, sha256: str) -> Optional[str]:
        """Get file path for a given hash"""
        if not sha256:
            return None
        return self._hash_to_path.get(sha256.lower())

    def get_hash(self, file_path: str) -> Optional[str]:
        """Get hash for a given file path"""
        for sha256, path in self._hash_to_path.items():
            if path == file_path:
                return sha256
        return None

    def has_hash(self, sha256: str) -> bool:
        """Check if hash exists in index"""
        if not sha256:
            return False
        return sha256.lower() in self._hash_to_path

    def clear(self) -> None:
        """Clear all entries"""
        self._hash_to_path.clear()
@@ -1,715 +1,32 @@
import json
import os
import logging
import asyncio
import shutil
import time
from typing import List, Dict, Optional
from typing import List

from ..utils.models import LoraMetadata
from ..config import config
from ..utils.file_utils import load_metadata, get_file_info, normalize_path, find_preview_file, save_metadata
from ..utils.lora_metadata import extract_lora_metadata
from .lora_cache import LoraCache
from .lora_hash_index import LoraHashIndex
from .settings_manager import settings
from ..utils.constants import NSFW_LEVELS
from ..utils.utils import fuzzy_match
from .model_scanner import ModelScanner
from .model_hash_index import ModelHashIndex  # Changed from LoraHashIndex to ModelHashIndex
import sys

logger = logging.getLogger(__name__)

class LoraScanner:
class LoraScanner(ModelScanner):
    """Service for scanning and managing LoRA files"""

    _instance = None
    _lock = asyncio.Lock()

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance
    def __init__(self):
        # Ensure initialization only runs once
        if not hasattr(self, '_initialized'):
            self._cache: Optional[LoraCache] = None
            self._hash_index = LoraHashIndex()
            self._initialization_lock = asyncio.Lock()
            self._initialization_task: Optional[asyncio.Task] = None
            self._initialized = True
            self.file_monitor = None  # Add this line
            self._tags_count = {}  # Add a dictionary to store tag counts

    def set_file_monitor(self, monitor):
        """Set file monitor instance"""
        self.file_monitor = monitor

    @classmethod
    async def get_instance(cls):
        """Get singleton instance with async support"""
        async with cls._lock:
            if cls._instance is None:
                cls._instance = cls()
            return cls._instance

    async def get_cached_data(self, force_refresh: bool = False) -> LoraCache:
        """Get cached LoRA data, refresh if needed"""
        async with self._initialization_lock:

            # If the cache is not initialized but the request still needs a response, return an empty cache
            if self._cache is None and not force_refresh:
                return LoraCache(
                    raw_data=[],
                    sorted_by_name=[],
                    sorted_by_date=[],
                    folders=[]
                )

            # If initialization is in progress, wait for it to complete
            if self._initialization_task and not self._initialization_task.done():
                try:
                    await self._initialization_task
                except Exception as e:
                    logger.error(f"Cache initialization failed: {e}")
                    self._initialization_task = None

            if (self._cache is None or force_refresh):

                # Create a new initialization task
                if not self._initialization_task or self._initialization_task.done():
                    self._initialization_task = asyncio.create_task(self._initialize_cache())

                try:
                    await self._initialization_task
                except Exception as e:
                    logger.error(f"Cache initialization failed: {e}")
                    # If a cache already exists, keep using the old one
                    if self._cache is None:
                        raise  # Re-raise if there is no cache at all
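The method above serializes initialization behind a lock and a single shared task, so concurrent callers all await one scan instead of each starting their own. A reduced sketch of that pattern, with hypothetical names (LazyCache and _init stand in for the scanner and its directory scan):

import asyncio

class LazyCache:
    def __init__(self):
        self._lock = asyncio.Lock()
        self._task = None
        self._value = None

    async def get(self):
        async with self._lock:
            if self._value is None:
                # Reuse an in-flight initialization instead of starting a second one
                if self._task is None or self._task.done():
                    self._task = asyncio.create_task(self._init())
                self._value = await self._task
            return self._value

    async def _init(self):
        await asyncio.sleep(0.1)  # stands in for the expensive directory scan
        return {"loras": 42}

async def main():
    cache = LazyCache()
    # Both callers end up awaiting the same initialization task
    a, b = await asyncio.gather(cache.get(), cache.get())
    print(a is b)  # True: one shared result

asyncio.run(main())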
    async def _initialize_cache(self) -> None:
        """Initialize or refresh the cache"""
        try:
            start_time = time.time()
            # Clear existing hash index
            self._hash_index.clear()

            # Clear existing tags count
            self._tags_count = {}

            # Scan for new data
            raw_data = await self.scan_all_loras()

            # Build hash index and tags count
            for lora_data in raw_data:
                if 'sha256' in lora_data and 'file_path' in lora_data:
                    self._hash_index.add_entry(lora_data['sha256'].lower(), lora_data['file_path'])

                # Count tags
                if 'tags' in lora_data and lora_data['tags']:
                    for tag in lora_data['tags']:
                        self._tags_count[tag] = self._tags_count.get(tag, 0) + 1

            # Update cache
            self._cache = LoraCache(
                raw_data=raw_data,
                sorted_by_name=[],
                sorted_by_date=[],
                folders=[]
            )

            # Call resort to create the sorted views
            await self._cache.resort()

            self._initialization_task = None
            logger.info(f"LoRA Manager: Cache initialization completed in {time.time() - start_time:.2f} seconds, found {len(raw_data)} loras")
        except Exception as e:
            logger.error(f"LoRA Manager: Error initializing cache: {e}")
            self._cache = LoraCache(
                raw_data=[],
                sorted_by_name=[],
                sorted_by_date=[],
                folders=[]
            )
    async def get_paginated_data(self, page: int, page_size: int, sort_by: str = 'name',
                                 folder: str = None, search: str = None, fuzzy: bool = False,
                                 base_models: list = None, tags: list = None,
                                 search_options: dict = None) -> Dict:
        """Get paginated and filtered lora data
        # Define supported file extensions
        file_extensions = {'.safetensors'}

        Args:
            page: Current page number (1-based)
            page_size: Number of items per page
            sort_by: Sort method ('name' or 'date')
            folder: Filter by folder path
            search: Search term
            fuzzy: Use fuzzy matching for search
            base_models: List of base models to filter by
            tags: List of tags to filter by
            search_options: Dictionary with search options (filename, modelname, tags, recursive)
        """
        cache = await self.get_cached_data()

        # Get default search options if not provided
        if search_options is None:
            search_options = {
                'filename': True,
                'modelname': True,
                'tags': False,
                'recursive': False
            }

        # Get the base data set
        filtered_data = cache.sorted_by_date if sort_by == 'date' else cache.sorted_by_name

        # Apply SFW filtering if enabled
        if settings.get('show_only_sfw', False):
            filtered_data = [
                item for item in filtered_data
                if not item.get('preview_nsfw_level') or item.get('preview_nsfw_level') < NSFW_LEVELS['R']
            ]

        # Apply folder filtering
        if folder is not None:
            if search_options.get('recursive', False):
                # Recursive mode: match all paths starting with this folder
                filtered_data = [
                    item for item in filtered_data
                    if item['folder'].startswith(folder + '/') or item['folder'] == folder
                ]
            else:
                # Non-recursive mode: match exact folder
                filtered_data = [
                    item for item in filtered_data
                    if item['folder'] == folder
                ]

        # Apply base model filtering
        if base_models and len(base_models) > 0:
            filtered_data = [
                item for item in filtered_data
                if item.get('base_model') in base_models
            ]

        # Apply tag filtering
        if tags and len(tags) > 0:
            filtered_data = [
                item for item in filtered_data
                if any(tag in item.get('tags', []) for tag in tags)
            ]

        # Apply search filtering
        if search:
            search_results = []
            for item in filtered_data:
                # Check filename if enabled
                if search_options.get('filename', True):
                    if fuzzy:
                        if fuzzy_match(item.get('file_name', ''), search):
                            search_results.append(item)
                            continue
                    else:
                        if search.lower() in item.get('file_name', '').lower():
                            search_results.append(item)
                            continue

                # Check model name if enabled
                if search_options.get('modelname', True):
                    if fuzzy:
                        if fuzzy_match(item.get('model_name', ''), search):
                            search_results.append(item)
                            continue
                    else:
                        if search.lower() in item.get('model_name', '').lower():
                            search_results.append(item)
                            continue

                # Check tags if enabled
                if search_options.get('tags', False) and item.get('tags'):
                    found_tag = False
                    for tag in item['tags']:
                        if fuzzy:
                            if fuzzy_match(tag, search):
                                found_tag = True
                                break
                        else:
                            if search.lower() in tag.lower():
                                found_tag = True
                                break
                    if found_tag:
                        search_results.append(item)
                        continue

            filtered_data = search_results

        # Calculate pagination
        total_items = len(filtered_data)
        start_idx = (page - 1) * page_size
        end_idx = min(start_idx + page_size, total_items)

        result = {
            'items': filtered_data[start_idx:end_idx],
            'total': total_items,
            'page': page,
            'page_size': page_size,
            'total_pages': (total_items + page_size - 1) // page_size
        }

        return result
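The pagination arithmetic above, shown in isolation: with 23 items and page_size=10, total_pages = (23 + 10 - 1) // 10 = 3, and page 3 holds items 20..22.

items = list(range(23))
page, page_size = 3, 10
total = len(items)
start = (page - 1) * page_size        # 20
end = min(start + page_size, total)   # 23
print(items[start:end], (total + page_size - 1) // page_size)  # [20, 21, 22] 3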
    def invalidate_cache(self):
        """Invalidate the current cache"""
        self._cache = None

    async def scan_all_loras(self) -> List[Dict]:
        """Scan all LoRA directories and return metadata"""
        all_loras = []

        # Scan each directory asynchronously
        scan_tasks = []
        for loras_root in config.loras_roots:
            task = asyncio.create_task(self._scan_directory(loras_root))
            scan_tasks.append(task)

        for task in scan_tasks:
            try:
                loras = await task
                all_loras.extend(loras)
            except Exception as e:
                logger.error(f"Error scanning directory: {e}")

        return all_loras

    async def _scan_directory(self, root_path: str) -> List[Dict]:
        """Scan a single directory for LoRA files"""
        loras = []
        original_root = root_path  # Keep the original root path

        async def scan_recursive(path: str, visited_paths: set):
            """Recursively scan a directory while avoiding symlink loops"""
            try:
                real_path = os.path.realpath(path)
                if real_path in visited_paths:
                    logger.debug(f"Skipping already visited path: {path}")
                    return
                visited_paths.add(real_path)

                with os.scandir(path) as it:
                    entries = list(it)
                    for entry in entries:
                        try:
                            if entry.is_file(follow_symlinks=True) and entry.name.endswith('.safetensors'):
                                # Use the original path rather than the resolved real path
                                file_path = entry.path.replace(os.sep, "/")
                                await self._process_single_file(file_path, original_root, loras)
                                await asyncio.sleep(0)
                            elif entry.is_dir(follow_symlinks=True):
                                # For directories, continue scanning with the original path
                                await scan_recursive(entry.path, visited_paths)
                        except Exception as e:
                            logger.error(f"Error processing entry {entry.path}: {e}")
            except Exception as e:
                logger.error(f"Error scanning {path}: {e}")

        await scan_recursive(root_path, set())
        return loras
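The cycle guard above works because os.path.realpath() collapses symlinks, so a directory reachable through several links resolves to one canonical path. A standalone illustration with a hypothetical layout:

import os

visited = set()

def visit(path):
    real = os.path.realpath(path)
    if real in visited:
        return False  # already seen through another route
    visited.add(real)
    return True

print(visit('/models/loras'))      # True
print(visit('/models/loras/./.'))  # False: normalizes to the same real path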
    async def _process_single_file(self, file_path: str, root_path: str, loras: list):
        """Process a single file and append it to the result list"""
        try:
            result = await self._process_lora_file(file_path, root_path)
            if result:
                loras.append(result)
        except Exception as e:
            logger.error(f"Error processing {file_path}: {e}")

    async def _process_lora_file(self, file_path: str, root_path: str) -> Dict:
        """Process a single LoRA file and return its metadata"""
        # Try loading existing metadata
        metadata = await load_metadata(file_path)

        if metadata is None:
            # Try to find and use a .civitai.info file first
            civitai_info_path = f"{os.path.splitext(file_path)[0]}.civitai.info"
            if os.path.exists(civitai_info_path):
                try:
                    with open(civitai_info_path, 'r', encoding='utf-8') as f:
                        version_info = json.load(f)

                    file_info = next((f for f in version_info.get('files', []) if f.get('primary')), None)
                    if file_info:
                        # Create a minimal file_info with the required fields
                        file_name = os.path.splitext(os.path.basename(file_path))[0]
                        file_info['name'] = file_name

                        # Use from_civitai_info to create metadata
                        metadata = LoraMetadata.from_civitai_info(version_info, file_info, file_path)
                        metadata.preview_url = find_preview_file(file_name, os.path.dirname(file_path))
                        await save_metadata(file_path, metadata)
                        logger.debug(f"Created metadata from .civitai.info for {file_path}")
                except Exception as e:
                    logger.error(f"Error creating metadata from .civitai.info for {file_path}: {e}")

            # If still no metadata, create new metadata using get_file_info
            if metadata is None:
                metadata = await get_file_info(file_path)

        # Convert to dict and add folder info
        lora_data = metadata.to_dict()
        # Try to fetch missing metadata from Civitai if needed
        await self._fetch_missing_metadata(file_path, lora_data)
        rel_path = os.path.relpath(file_path, root_path)
        folder = os.path.dirname(rel_path)
        lora_data['folder'] = folder.replace(os.path.sep, '/')

        return lora_data
    async def _fetch_missing_metadata(self, file_path: str, lora_data: Dict) -> None:
        """Fetch missing description and tags from Civitai if needed

        Args:
            file_path: Path to the lora file
            lora_data: Lora metadata dictionary to update
        """
        try:
            # Skip if already marked as deleted on Civitai
            if lora_data.get('civitai_deleted', False):
                logger.debug(f"Skipping metadata fetch for {file_path}: marked as deleted on Civitai")
                return

            # Check if we need to fetch additional metadata from Civitai
            needs_metadata_update = False
            model_id = None

            # Check if we have a Civitai model ID but missing metadata
            if lora_data.get('civitai'):
                # Try to get the model ID directly from the correct location
                model_id = lora_data['civitai'].get('modelId')

                if model_id:
                    model_id = str(model_id)
                    # Check if tags are missing or empty
                    tags_missing = not lora_data.get('tags') or len(lora_data.get('tags', [])) == 0

                    # Check if description is missing or empty
                    desc_missing = not lora_data.get('modelDescription') or lora_data.get('modelDescription') in (None, "")

                    needs_metadata_update = tags_missing or desc_missing

            # Fetch missing metadata if needed
            if needs_metadata_update and model_id:
                logger.debug(f"Fetching missing metadata for {file_path} with model ID {model_id}")
                from ..services.civitai_client import CivitaiClient
                client = CivitaiClient()

                # Get metadata and status code
                model_metadata, status_code = await client.get_model_metadata(model_id)
                await client.close()

                # Handle 404 status (model deleted from Civitai)
                if status_code == 404:
                    logger.warning(f"Model {model_id} appears to be deleted from Civitai (404 response)")
                    # Mark as deleted to avoid future API calls
                    lora_data['civitai_deleted'] = True

                    # Save the updated metadata back to file
                    metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
                    with open(metadata_path, 'w', encoding='utf-8') as f:
                        json.dump(lora_data, f, indent=2, ensure_ascii=False)

                # Process valid metadata if available
                elif model_metadata:
                    logger.debug(f"Updating metadata for {file_path} with model ID {model_id}")

                    # Update tags if they were missing
                    if model_metadata.get('tags') and (not lora_data.get('tags') or len(lora_data.get('tags', [])) == 0):
                        lora_data['tags'] = model_metadata['tags']

                    # Update description if it was missing
                    if model_metadata.get('description') and (not lora_data.get('modelDescription') or lora_data.get('modelDescription') in (None, "")):
                        lora_data['modelDescription'] = model_metadata['description']

                    # Save the updated metadata back to file
                    metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
                    with open(metadata_path, 'w', encoding='utf-8') as f:
                        json.dump(lora_data, f, indent=2, ensure_ascii=False)
        except Exception as e:
            logger.error(f"Failed to update metadata from Civitai for {file_path}: {e}")

    async def update_preview_in_cache(self, file_path: str, preview_url: str) -> bool:
        """Update preview URL in cache for a specific lora

        Args:
            file_path: The file path of the lora to update
            preview_url: The new preview URL

        Returns:
            bool: True if the update was successful, False if cache doesn't exist or lora wasn't found
        """
        if self._cache is None:
            return False

        return await self._cache.update_preview_url(file_path, preview_url)
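The sidecar path construction used above, standalone: metadata is written next to the model file with a .metadata.json suffix (the path here is a hypothetical example).

import os

file_path = '/loras/style/foo.safetensors'
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
print(metadata_path)  # /loras/style/foo.metadata.json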
    async def scan_single_lora(self, file_path: str) -> Optional[Dict]:
        """Scan a single LoRA file and return its metadata"""
        try:
            if not os.path.exists(os.path.realpath(file_path)):
                return None

            # Get basic file information
            metadata = await get_file_info(file_path)
            if not metadata:
                return None

            folder = self._calculate_folder(file_path)

            # Make sure the folder field exists
            metadata_dict = metadata.to_dict()
            metadata_dict['folder'] = folder or ''

            return metadata_dict

        except Exception as e:
            logger.error(f"Error scanning {file_path}: {e}")
            return None

    def _calculate_folder(self, file_path: str) -> str:
        """Calculate the folder path for a LoRA file"""
        # Use the original path to compute the relative path
        for root in config.loras_roots:
            if file_path.startswith(root):
                rel_path = os.path.relpath(file_path, root)
                return os.path.dirname(rel_path).replace(os.path.sep, '/')
        return ''
    async def move_model(self, source_path: str, target_path: str) -> bool:
        """Move a model and its associated files to a new location"""
        try:
            # Keep the original path format
            source_path = source_path.replace(os.sep, '/')
            target_path = target_path.replace(os.sep, '/')

            # The rest of the code stays the same
            base_name = os.path.splitext(os.path.basename(source_path))[0]
            source_dir = os.path.dirname(source_path)

            os.makedirs(target_path, exist_ok=True)

            target_lora = os.path.join(target_path, f"{base_name}.safetensors").replace(os.sep, '/')

            # Use real paths for the file operations
            real_source = os.path.realpath(source_path)
            real_target = os.path.realpath(target_lora)

            file_size = os.path.getsize(real_source)

            if self.file_monitor:
                self.file_monitor.handler.add_ignore_path(
                    real_source,
                    file_size
                )
                self.file_monitor.handler.add_ignore_path(
                    real_target,
                    file_size
                )

            # Use real paths for the file operations
            shutil.move(real_source, real_target)

            # Move associated files
            source_metadata = os.path.join(source_dir, f"{base_name}.metadata.json")
            if os.path.exists(source_metadata):
                target_metadata = os.path.join(target_path, f"{base_name}.metadata.json")
                shutil.move(source_metadata, target_metadata)
                metadata = await self._update_metadata_paths(target_metadata, target_lora)

            # Move preview file if it exists
            preview_extensions = ['.preview.png', '.preview.jpeg', '.preview.jpg', '.preview.mp4',
                                  '.png', '.jpeg', '.jpg', '.mp4']
            for ext in preview_extensions:
                source_preview = os.path.join(source_dir, f"{base_name}{ext}")
                if os.path.exists(source_preview):
                    target_preview = os.path.join(target_path, f"{base_name}{ext}")
                    shutil.move(source_preview, target_preview)
                    break

            # Update cache
            await self.update_single_lora_cache(source_path, target_lora, metadata)

            return True

        except Exception as e:
            logger.error(f"Error moving model: {e}", exc_info=True)
            return False
    async def update_single_lora_cache(self, original_path: str, new_path: str, metadata: Dict) -> bool:
        cache = await self.get_cached_data()

        # Find the existing item to remove its tags from the count
        existing_item = next((item for item in cache.raw_data if item['file_path'] == original_path), None)
        if existing_item and 'tags' in existing_item:
            for tag in existing_item.get('tags', []):
                if tag in self._tags_count:
                    self._tags_count[tag] = max(0, self._tags_count[tag] - 1)
                    if self._tags_count[tag] == 0:
                        del self._tags_count[tag]

        # Remove old path from hash index if it exists
        self._hash_index.remove_by_path(original_path)

        # Remove the old entry from raw_data
        cache.raw_data = [
            item for item in cache.raw_data
            if item['file_path'] != original_path
        ]

        if metadata:
            # If this is an update to an existing path (not a move), ensure folder is preserved
            if original_path == new_path:
                # Find the folder from existing entries or calculate it
                existing_folder = next((item['folder'] for item in cache.raw_data
                                        if item['file_path'] == original_path), None)
                if existing_folder:
                    metadata['folder'] = existing_folder
                else:
                    metadata['folder'] = self._calculate_folder(new_path)
            else:
                # For moved files, recalculate the folder
                metadata['folder'] = self._calculate_folder(new_path)

            # Add the updated metadata to raw_data
            cache.raw_data.append(metadata)

            # Update hash index with new path
            if 'sha256' in metadata:
                self._hash_index.add_entry(metadata['sha256'].lower(), new_path)

            # Update folders list
            all_folders = set(item['folder'] for item in cache.raw_data)
            cache.folders = sorted(list(all_folders), key=lambda x: x.lower())

            # Update tags count with the new/updated tags
            if 'tags' in metadata:
                for tag in metadata.get('tags', []):
                    self._tags_count[tag] = self._tags_count.get(tag, 0) + 1

        # Resort cache
        await cache.resort()

        return True
    async def _update_metadata_paths(self, metadata_path: str, lora_path: str) -> Dict:
        """Update file paths in metadata file"""
        try:
            with open(metadata_path, 'r', encoding='utf-8') as f:
                metadata = json.load(f)

            # Update file_path
            metadata['file_path'] = lora_path.replace(os.sep, '/')

            # Update preview_url if it exists
            if 'preview_url' in metadata:
                preview_dir = os.path.dirname(lora_path)
                preview_name = os.path.splitext(os.path.basename(metadata['preview_url']))[0]
                preview_ext = os.path.splitext(metadata['preview_url'])[1]
                new_preview_path = os.path.join(preview_dir, f"{preview_name}{preview_ext}")
                metadata['preview_url'] = new_preview_path.replace(os.sep, '/')

            # Save updated metadata
            with open(metadata_path, 'w', encoding='utf-8') as f:
                json.dump(metadata, f, indent=2, ensure_ascii=False)

            return metadata

        except Exception as e:
            logger.error(f"Error updating metadata paths: {e}", exc_info=True)

    # Add new methods for hash index functionality
    def has_lora_hash(self, sha256: str) -> bool:
        """Check if a LoRA with the given hash exists"""
        return self._hash_index.has_hash(sha256.lower())

    def get_lora_path_by_hash(self, sha256: str) -> Optional[str]:
        """Get file path for a LoRA by its hash"""
        return self._hash_index.get_path(sha256.lower())

    def get_lora_hash_by_path(self, file_path: str) -> Optional[str]:
        """Get hash for a LoRA by its file path"""
        return self._hash_index.get_hash(file_path)

    def get_preview_url_by_hash(self, sha256: str) -> Optional[str]:
        """Get preview static URL for a LoRA by its hash"""
        # Get the file path first
        file_path = self._hash_index.get_path(sha256.lower())
        if not file_path:
            return None

        # Determine the preview file path (typically same name with a different extension)
        base_name = os.path.splitext(file_path)[0]
        preview_extensions = ['.preview.png', '.preview.jpeg', '.preview.jpg', '.preview.mp4',
                              '.png', '.jpeg', '.jpg', '.mp4']

        for ext in preview_extensions:
            preview_path = f"{base_name}{ext}"
            if os.path.exists(preview_path):
                # Convert to static URL using config
                return config.get_preview_static_url(preview_path)

        return None

    # Add new method to get top tags
    async def get_top_tags(self, limit: int = 20) -> List[Dict[str, any]]:
        """Get top tags sorted by count

        Args:
            limit: Maximum number of tags to return

        Returns:
            List of dictionaries with tag name and count, sorted by count
        """
        # Make sure cache is initialized
        await self.get_cached_data()

        # Sort tags by count in descending order
        sorted_tags = sorted(
            [{"tag": tag, "count": count} for tag, count in self._tags_count.items()],
            key=lambda x: x['count'],
            reverse=True
        # Initialize parent class with ModelHashIndex
        super().__init__(
            model_type="lora",
            model_class=LoraMetadata,
            file_extensions=file_extensions,
            hash_index=ModelHashIndex()  # Changed from LoraHashIndex to ModelHashIndex
        )

        # Return limited number
        return sorted_tags[:limit]
    async def get_base_models(self, limit: int = 20) -> List[Dict[str, any]]:
        """Get base models used in loras sorted by frequency

        Args:
            limit: Maximum number of base models to return

        Returns:
            List of dictionaries with base model name and count, sorted by count
        """
        # Make sure cache is initialized
        cache = await self.get_cached_data()

        # Count base model occurrences
        base_model_counts = {}
        for lora in cache.raw_data:
            if 'base_model' in lora and lora['base_model']:
                base_model = lora['base_model']
                base_model_counts[base_model] = base_model_counts.get(base_model, 0) + 1

        # Sort base models by count
        sorted_models = [{'name': model, 'count': count} for model, count in base_model_counts.items()]
        sorted_models.sort(key=lambda x: x['count'], reverse=True)

        # Return limited number
        return sorted_models[:limit]

    def get_model_roots(self) -> List[str]:
        """Get lora root directories"""
        return config.loras_roots

    async def diagnose_hash_index(self):
        """Diagnostic method to verify hash index functionality"""
@@ -746,19 +63,3 @@ class LoraScanner:
        test_hash_result = self._hash_index.get_hash(test_path)
        print(f"Test reverse lookup: {test_path} -> {test_hash_result[:8]}...\n\n", file=sys.stderr)

    async def get_lora_info_by_name(self, name):
        """Get LoRA information by name"""
        try:
            # Get cached data
            cache = await self.get_cached_data()

            # Find the LoRA by name
            for lora in cache.raw_data:
                if lora.get("file_name") == name:
                    return lora

            return None
        except Exception as e:
            logger.error(f"Error getting LoRA info by name: {e}", exc_info=True)
            return None
py/services/lora_service.py (new file, 212 lines)
@@ -0,0 +1,212 @@
import os
import logging
from typing import Dict, List, Optional

from .base_model_service import BaseModelService
from ..utils.models import LoraMetadata
from ..config import config
from ..utils.routes_common import ModelRouteUtils

logger = logging.getLogger(__name__)

class LoraService(BaseModelService):
    """LoRA-specific service implementation"""

    def __init__(self, scanner):
        """Initialize LoRA service

        Args:
            scanner: LoRA scanner instance
        """
        super().__init__("lora", scanner, LoraMetadata)

    async def format_response(self, lora_data: Dict) -> Dict:
        """Format LoRA data for API response"""
        return {
            "model_name": lora_data["model_name"],
            "file_name": lora_data["file_name"],
            "preview_url": config.get_preview_static_url(lora_data.get("preview_url", "")),
            "preview_nsfw_level": lora_data.get("preview_nsfw_level", 0),
            "base_model": lora_data.get("base_model", ""),
            "folder": lora_data["folder"],
            "sha256": lora_data.get("sha256", ""),
            "file_path": lora_data["file_path"].replace(os.sep, "/"),
            "file_size": lora_data.get("size", 0),
            "modified": lora_data.get("modified", ""),
            "tags": lora_data.get("tags", []),
            "modelDescription": lora_data.get("modelDescription", ""),
            "from_civitai": lora_data.get("from_civitai", True),
            "usage_tips": lora_data.get("usage_tips", ""),
            "notes": lora_data.get("notes", ""),
            "favorite": lora_data.get("favorite", False),
            "civitai": ModelRouteUtils.filter_civitai_data(lora_data.get("civitai", {}))
        }

    async def _apply_specific_filters(self, data: List[Dict], **kwargs) -> List[Dict]:
        """Apply LoRA-specific filters"""
        # Handle first_letter filter for LoRAs
        first_letter = kwargs.get('first_letter')
        if first_letter:
            data = self._filter_by_first_letter(data, first_letter)

        return data
    def _filter_by_first_letter(self, data: List[Dict], letter: str) -> List[Dict]:
        """Filter data by first letter of model name

        Special handling:
        - '#': Numbers (0-9)
        - '@': Special characters (not alphanumeric)
        - '漢': CJK characters
        """
        filtered_data = []

        for lora in data:
            model_name = lora.get('model_name', '')
            if not model_name:
                continue

            first_char = model_name[0].upper()

            if letter == '#' and first_char.isdigit():
                filtered_data.append(lora)
            elif letter == '@' and not first_char.isalnum():
                # Special characters (not alphanumeric)
                filtered_data.append(lora)
            elif letter == '漢' and self._is_cjk_character(first_char):
                # CJK characters
                filtered_data.append(lora)
            elif letter.upper() == first_char:
                # Regular alphabet matching
                filtered_data.append(lora)

        return filtered_data

    def _is_cjk_character(self, char: str) -> bool:
        """Check if character is a CJK character"""
        # Define Unicode ranges for CJK characters
        cjk_ranges = [
            (0x4E00, 0x9FFF),    # CJK Unified Ideographs
            (0x3400, 0x4DBF),    # CJK Unified Ideographs Extension A
            (0x20000, 0x2A6DF),  # CJK Unified Ideographs Extension B
            (0x2A700, 0x2B73F),  # CJK Unified Ideographs Extension C
            (0x2B740, 0x2B81F),  # CJK Unified Ideographs Extension D
            (0x2B820, 0x2CEAF),  # CJK Unified Ideographs Extension E
            (0x2CEB0, 0x2EBEF),  # CJK Unified Ideographs Extension F
            (0x30000, 0x3134F),  # CJK Unified Ideographs Extension G
            (0xF900, 0xFAFF),    # CJK Compatibility Ideographs
            (0x3300, 0x33FF),    # CJK Compatibility
            (0x3200, 0x32FF),    # Enclosed CJK Letters and Months
            (0x3100, 0x312F),    # Bopomofo
            (0x31A0, 0x31BF),    # Bopomofo Extended
            (0x3040, 0x309F),    # Hiragana
            (0x30A0, 0x30FF),    # Katakana
            (0x31F0, 0x31FF),    # Katakana Phonetic Extensions
            (0xAC00, 0xD7AF),    # Hangul Syllables
            (0x1100, 0x11FF),    # Hangul Jamo
            (0xA960, 0xA97F),    # Hangul Jamo Extended-A
            (0xD7B0, 0xD7FF),    # Hangul Jamo Extended-B
        ]

        code_point = ord(char)
        return any(start <= code_point <= end for start, end in cjk_ranges)
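A quick standalone check of the range test above (using only a hypothetical subset of the ranges, not the full list):

def is_cjk(char: str) -> bool:
    cjk_ranges = [(0x4E00, 0x9FFF), (0x3040, 0x309F), (0xAC00, 0xD7AF)]  # subset
    cp = ord(char)
    return any(start <= cp <= end for start, end in cjk_ranges)

print(is_cjk('漢'), is_cjk('あ'), is_cjk('한'), is_cjk('A'))  # True True True False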
    # LoRA-specific methods
    async def get_letter_counts(self) -> Dict[str, int]:
        """Get count of LoRAs for each letter of the alphabet"""
        cache = await self.scanner.get_cached_data()
        data = cache.raw_data

        # Define letter categories
        letters = {
            '#': 0,  # Numbers
            'A': 0, 'B': 0, 'C': 0, 'D': 0, 'E': 0, 'F': 0, 'G': 0, 'H': 0,
            'I': 0, 'J': 0, 'K': 0, 'L': 0, 'M': 0, 'N': 0, 'O': 0, 'P': 0,
            'Q': 0, 'R': 0, 'S': 0, 'T': 0, 'U': 0, 'V': 0, 'W': 0, 'X': 0,
            'Y': 0, 'Z': 0,
            '@': 0,  # Special characters
            '漢': 0  # CJK characters
        }

        # Count models for each letter
        for lora in data:
            model_name = lora.get('model_name', '')
            if not model_name:
                continue

            first_char = model_name[0].upper()

            if first_char.isdigit():
                letters['#'] += 1
            elif first_char in letters:
                letters[first_char] += 1
            elif self._is_cjk_character(first_char):
                letters['漢'] += 1
            elif not first_char.isalnum():
                letters['@'] += 1

        return letters

    async def get_lora_notes(self, lora_name: str) -> Optional[str]:
        """Get notes for a specific LoRA file"""
        cache = await self.scanner.get_cached_data()

        for lora in cache.raw_data:
            if lora['file_name'] == lora_name:
                return lora.get('notes', '')

        return None

    async def get_lora_trigger_words(self, lora_name: str) -> List[str]:
        """Get trigger words for a specific LoRA file"""
        cache = await self.scanner.get_cached_data()

        for lora in cache.raw_data:
            if lora['file_name'] == lora_name:
                civitai_data = lora.get('civitai', {})
                return civitai_data.get('trainedWords', [])

        return []

    async def get_lora_preview_url(self, lora_name: str) -> Optional[str]:
        """Get the static preview URL for a LoRA file"""
        cache = await self.scanner.get_cached_data()

        for lora in cache.raw_data:
            if lora['file_name'] == lora_name:
                preview_url = lora.get('preview_url')
                if preview_url:
                    return config.get_preview_static_url(preview_url)

        return None

    async def get_lora_civitai_url(self, lora_name: str) -> Dict[str, Optional[str]]:
        """Get the Civitai URL for a LoRA file"""
        cache = await self.scanner.get_cached_data()

        for lora in cache.raw_data:
            if lora['file_name'] == lora_name:
                civitai_data = lora.get('civitai', {})
                model_id = civitai_data.get('modelId')
                version_id = civitai_data.get('id')

                if model_id:
                    civitai_url = f"https://civitai.com/models/{model_id}"
                    if version_id:
                        civitai_url += f"?modelVersionId={version_id}"

                    return {
                        'civitai_url': civitai_url,
                        'model_id': str(model_id),
                        'version_id': str(version_id) if version_id else None
                    }

        return {'civitai_url': None, 'model_id': None, 'version_id': None}

    def find_duplicate_hashes(self) -> Dict:
        """Find LoRAs with duplicate SHA256 hashes"""
        return self.scanner._hash_index.get_duplicate_hashes()

    def find_duplicate_filenames(self) -> Dict:
        """Find LoRAs with conflicting filenames"""
        return self.scanner._hash_index.get_duplicate_filenames()
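The URL construction in get_lora_civitai_url, shown standalone with hypothetical sample IDs:

model_id, version_id = 12345, 67890  # hypothetical Civitai identifiers
url = f"https://civitai.com/models/{model_id}"
if version_id:
    url += f"?modelVersionId={version_id}"
print(url)  # https://civitai.com/models/12345?modelVersionId=67890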
py/services/model_cache.py (new file, 104 lines)
@@ -0,0 +1,104 @@
import asyncio
from typing import List, Dict, Tuple
from dataclasses import dataclass
from operator import itemgetter
from natsort import natsorted

# Supported sort modes: (sort_key, order)
# order: 'asc' for ascending, 'desc' for descending
SUPPORTED_SORT_MODES = [
    ('name', 'asc'),
    ('name', 'desc'),
    ('date', 'asc'),
    ('date', 'desc'),
    ('size', 'asc'),
    ('size', 'desc'),
]

@dataclass
class ModelCache:
    """Cache structure for model data with extensible sorting"""
    raw_data: List[Dict]
    folders: List[str]

    def __post_init__(self):
        self._lock = asyncio.Lock()
        # Cache for last sort: (sort_key, order) -> sorted list
        self._last_sort: Tuple[str, str] = (None, None)
        self._last_sorted_data: List[Dict] = []
        # Default sort on init
        asyncio.create_task(self.resort())

    async def resort(self):
        """Resort cached data according to the last sort mode if set"""
        async with self._lock:
            if self._last_sort != (None, None):
                sort_key, order = self._last_sort
                sorted_data = self._sort_data(self.raw_data, sort_key, order)
                self._last_sorted_data = sorted_data
            # else: do nothing

            # Update folder list
            all_folders = set(l['folder'] for l in self.raw_data)
            self.folders = sorted(list(all_folders), key=lambda x: x.lower())

    def _sort_data(self, data: List[Dict], sort_key: str, order: str) -> List[Dict]:
        """Sort data by sort_key and order"""
        reverse = (order == 'desc')
        if sort_key == 'name':
            # Natural sort by model_name, case-insensitive
            return natsorted(
                data,
                key=lambda x: x['model_name'].lower(),
                reverse=reverse
            )
        elif sort_key == 'date':
            # Sort by modified timestamp
            return sorted(
                data,
                key=itemgetter('modified'),
                reverse=reverse
            )
        elif sort_key == 'size':
            # Sort by file size
            return sorted(
                data,
                key=itemgetter('size'),
                reverse=reverse
            )
        else:
            # Fallback: no sort
            return list(data)

    async def get_sorted_data(self, sort_key: str = 'name', order: str = 'asc') -> List[Dict]:
        """Get sorted data by sort_key and order, using the cache if possible"""
        async with self._lock:
            if (sort_key, order) == self._last_sort:
                return self._last_sorted_data
            sorted_data = self._sort_data(self.raw_data, sort_key, order)
            self._last_sort = (sort_key, order)
            self._last_sorted_data = sorted_data
            return sorted_data

    async def update_preview_url(self, file_path: str, preview_url: str, preview_nsfw_level: int) -> bool:
        """Update preview_url for a specific model in all cached data

        Args:
            file_path: The file path of the model to update
            preview_url: The new preview URL
            preview_nsfw_level: The NSFW level of the preview

        Returns:
            bool: True if the update was successful, False if the model wasn't found
        """
        async with self._lock:
            # Update in raw_data
            for item in self.raw_data:
                if item['file_path'] == file_path:
                    item['preview_url'] = preview_url
                    item['preview_nsfw_level'] = preview_nsfw_level
                    break
            else:
                return False  # Model not found

            return True
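An illustrative usage sketch, assuming the ModelCache above is importable; note that __post_init__ schedules a task, so construction must happen inside a running event loop. The sample records are hypothetical.

import asyncio

async def main():
    cache = ModelCache(
        raw_data=[
            {'model_name': 'b2', 'folder': 'x', 'modified': 2.0, 'size': 10, 'file_path': 'x/b2'},
            {'model_name': 'b10', 'folder': 'x', 'modified': 1.0, 'size': 30, 'file_path': 'x/b10'},
        ],
        folders=[],
    )
    by_name = await cache.get_sorted_data('name', 'asc')
    # natsorted puts 'b2' before 'b10' (numeric-aware), unlike plain sorted()
    print([m['model_name'] for m in by_name])  # ['b2', 'b10']
    by_size = await cache.get_sorted_data('size', 'desc')
    print([m['size'] for m in by_size])        # [30, 10]

asyncio.run(main())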
py/services/model_hash_index.py (new file, 229 lines)
@@ -0,0 +1,229 @@
from typing import Dict, Optional, Set, List
import os

class ModelHashIndex:
    """Index for looking up models by hash or filename"""

    def __init__(self):
        self._hash_to_path: Dict[str, str] = {}
        self._filename_to_hash: Dict[str, str] = {}
        # New data structures for tracking duplicates
        self._duplicate_hashes: Dict[str, List[str]] = {}     # sha256 -> list of paths
        self._duplicate_filenames: Dict[str, List[str]] = {}  # filename -> list of paths

    def add_entry(self, sha256: str, file_path: str) -> None:
        """Add or update hash index entry"""
        if not sha256 or not file_path:
            return

        # Ensure hash is lowercase for consistency
        sha256 = sha256.lower()

        # Extract filename without extension
        filename = self._get_filename_from_path(file_path)

        # Track duplicates by hash
        if sha256 in self._hash_to_path:
            old_path = self._hash_to_path[sha256]
            if old_path != file_path:  # Only record if it's actually a different path
                if sha256 not in self._duplicate_hashes:
                    self._duplicate_hashes[sha256] = [old_path]
                if file_path not in self._duplicate_hashes.get(sha256, []):
                    self._duplicate_hashes.setdefault(sha256, []).append(file_path)

        # Track duplicates by filename
        if filename in self._filename_to_hash:
            old_hash = self._filename_to_hash[filename]
            if old_hash != sha256:  # Different models with the same name
                old_path = self._hash_to_path.get(old_hash)
                if old_path:
                    if filename not in self._duplicate_filenames:
                        self._duplicate_filenames[filename] = [old_path]
                    if file_path not in self._duplicate_filenames.get(filename, []):
                        self._duplicate_filenames.setdefault(filename, []).append(file_path)

        # Remove old path mapping if hash exists
        if sha256 in self._hash_to_path:
            old_path = self._hash_to_path[sha256]
            old_filename = self._get_filename_from_path(old_path)
            if old_filename in self._filename_to_hash:
                del self._filename_to_hash[old_filename]

        # Remove old hash mapping if filename exists
        if filename in self._filename_to_hash:
            old_hash = self._filename_to_hash[filename]
            if old_hash in self._hash_to_path:
                del self._hash_to_path[old_hash]

        # Add new mappings
        self._hash_to_path[sha256] = file_path
        self._filename_to_hash[filename] = sha256

    def _get_filename_from_path(self, file_path: str) -> str:
        """Extract filename without extension from path"""
        return os.path.splitext(os.path.basename(file_path))[0]

    def remove_by_path(self, file_path: str, hash_val: str = None) -> None:
        """Remove entry by file path"""
        filename = self._get_filename_from_path(file_path)

        # Find the hash for this file path
        if hash_val is None:
            for h, p in self._hash_to_path.items():
                if p == file_path:
                    hash_val = h
                    break

        # If we didn't find a hash, nothing to do
        if not hash_val:
            return

        # Update duplicates tracking for hash
        if hash_val in self._duplicate_hashes:
            # Remove the current path from duplicates
            self._duplicate_hashes[hash_val] = [p for p in self._duplicate_hashes[hash_val] if p != file_path]

            # Update or remove hash mapping based on remaining duplicates
            if len(self._duplicate_hashes[hash_val]) > 0:
                # Replace with one of the remaining paths
                new_path = self._duplicate_hashes[hash_val][0]
                new_filename = self._get_filename_from_path(new_path)

                # Update hash-to-path mapping
                self._hash_to_path[hash_val] = new_path

                # IMPORTANT: Update filename-to-hash mapping for consistency
                # Remove old filename mapping if it points to this hash
                if filename in self._filename_to_hash and self._filename_to_hash[filename] == hash_val:
                    del self._filename_to_hash[filename]

                # Add new filename mapping
                self._filename_to_hash[new_filename] = hash_val

                # If only one duplicate is left, remove from duplicates tracking
                if len(self._duplicate_hashes[hash_val]) == 1:
                    del self._duplicate_hashes[hash_val]
            else:
                # No duplicates left, remove hash entry completely
                del self._duplicate_hashes[hash_val]
                del self._hash_to_path[hash_val]

                # Remove corresponding filename entry if it points to this hash
                if filename in self._filename_to_hash and self._filename_to_hash[filename] == hash_val:
                    del self._filename_to_hash[filename]
        else:
            # No duplicates, simply remove the hash entry
            del self._hash_to_path[hash_val]

            # Remove corresponding filename entry if it points to this hash
            if filename in self._filename_to_hash and self._filename_to_hash[filename] == hash_val:
                del self._filename_to_hash[filename]

        # Update duplicates tracking for filename
        if filename in self._duplicate_filenames:
            # Remove the current path from duplicates
            self._duplicate_filenames[filename] = [p for p in self._duplicate_filenames[filename] if p != file_path]

            # Update or remove filename mapping based on remaining duplicates
            if len(self._duplicate_filenames[filename]) > 0:
                # Get the hash for the first remaining duplicate path
                first_dup_path = self._duplicate_filenames[filename][0]
                first_dup_hash = None
                for h, p in self._hash_to_path.items():
                    if p == first_dup_path:
                        first_dup_hash = h
                        break

                # Update the filename to hash mapping if we found a hash
                if first_dup_hash:
                    self._filename_to_hash[filename] = first_dup_hash

                # If only one duplicate is left, remove from duplicates tracking
                if len(self._duplicate_filenames[filename]) == 1:
                    del self._duplicate_filenames[filename]
            else:
                # No duplicates left, remove filename entry completely
                del self._duplicate_filenames[filename]
                if filename in self._filename_to_hash:
                    del self._filename_to_hash[filename]

    def remove_by_hash(self, sha256: str) -> None:
        """Remove entry by hash"""
        sha256 = sha256.lower()
        if sha256 not in self._hash_to_path:
            return

        # Get the path and filename
        path = self._hash_to_path[sha256]
        filename = self._get_filename_from_path(path)

        # Get all paths for this hash (including duplicates)
        paths_to_remove = [path]
        if sha256 in self._duplicate_hashes:
            paths_to_remove.extend(self._duplicate_hashes[sha256])
            del self._duplicate_hashes[sha256]

        # Remove hash-to-path mapping
        del self._hash_to_path[sha256]

        # Update filename-to-hash and duplicate filenames for all paths
        for path_to_remove in paths_to_remove:
            fname = self._get_filename_from_path(path_to_remove)

            # If this filename maps to the hash we're removing, remove it
            if fname in self._filename_to_hash and self._filename_to_hash[fname] == sha256:
                del self._filename_to_hash[fname]

            # Update duplicate filenames tracking
            if fname in self._duplicate_filenames:
                self._duplicate_filenames[fname] = [p for p in self._duplicate_filenames[fname] if p != path_to_remove]

                if not self._duplicate_filenames[fname]:
                    del self._duplicate_filenames[fname]
                elif len(self._duplicate_filenames[fname]) == 1:
                    # If only one entry remains, it's no longer a duplicate
                    del self._duplicate_filenames[fname]

    def has_hash(self, sha256: str) -> bool:
        """Check if hash exists in index"""
        return sha256.lower() in self._hash_to_path

    def get_path(self, sha256: str) -> Optional[str]:
        """Get file path for a hash"""
        return self._hash_to_path.get(sha256.lower())

    def get_hash(self, file_path: str) -> Optional[str]:
        """Get hash for a file path"""
        filename = self._get_filename_from_path(file_path)
        return self._filename_to_hash.get(filename)

    def get_hash_by_filename(self, filename: str) -> Optional[str]:
        """Get hash for a filename without extension"""
        return self._filename_to_hash.get(filename)

    def clear(self) -> None:
        """Clear all entries"""
        self._hash_to_path.clear()
        self._filename_to_hash.clear()
        self._duplicate_hashes.clear()
        self._duplicate_filenames.clear()

    def get_all_hashes(self) -> Set[str]:
        """Get all hashes in the index"""
        return set(self._hash_to_path.keys())

    def get_all_filenames(self) -> Set[str]:
        """Get all filenames in the index"""
        return set(self._filename_to_hash.keys())

    def get_duplicate_hashes(self) -> Dict[str, List[str]]:
        """Get dictionary of duplicate hashes and their paths"""
        return self._duplicate_hashes

    def get_duplicate_filenames(self) -> Dict[str, List[str]]:
        """Get dictionary of duplicate filenames and their paths"""
        return self._duplicate_filenames

    def __len__(self) -> int:
        """Get number of entries"""
        return len(self._hash_to_path)
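An illustrative round trip through the duplicate tracking above, assuming the ModelHashIndex class is importable; the hash string and paths are hypothetical:

idx = ModelHashIndex()
idx.add_entry('ABCD' * 16, '/loras/style_a.safetensors')
idx.add_entry('abcd' * 16, '/loras/backup/style_a_copy.safetensors')  # same hash, new path
print(idx.get_path('ABCD' * 16))   # lookup is case-insensitive
print(idx.get_duplicate_hashes())  # both paths recorded under the lowercase hash
idx.remove_by_path('/loras/backup/style_a_copy.safetensors')
print(len(idx))                    # 1: the surviving path is promoted back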
py/services/model_scanner.py (new file, 1206 lines)
File diff suppressed because it is too large
py/services/model_service_factory.py (new file, 142 lines)
@@ -0,0 +1,142 @@
from typing import Dict, Type, Any
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class ModelServiceFactory:
|
||||
"""Factory for managing model services and routes"""
|
||||
|
||||
_services: Dict[str, Type] = {}
|
||||
_routes: Dict[str, Type] = {}
|
||||
_initialized_services: Dict[str, Any] = {}
|
||||
_initialized_routes: Dict[str, Any] = {}
|
||||
|
||||
@classmethod
|
||||
def register_model_type(cls, model_type: str, service_class: Type, route_class: Type):
|
||||
"""Register a new model type with its service and route classes
|
||||
|
||||
Args:
|
||||
model_type: The model type identifier (e.g., 'lora', 'checkpoint')
|
||||
service_class: The service class for this model type
|
||||
route_class: The route class for this model type
|
||||
"""
|
||||
cls._services[model_type] = service_class
|
||||
cls._routes[model_type] = route_class
|
||||
logger.info(f"Registered model type '{model_type}' with service {service_class.__name__} and routes {route_class.__name__}")
|
||||
|
||||
@classmethod
|
||||
def get_service_class(cls, model_type: str) -> Type:
|
||||
"""Get service class for a model type
|
||||
|
||||
Args:
|
||||
model_type: The model type identifier
|
||||
|
||||
Returns:
|
||||
The service class for the model type
|
||||
|
||||
Raises:
|
||||
ValueError: If model type is not registered
|
||||
"""
|
||||
if model_type not in cls._services:
|
||||
raise ValueError(f"Unknown model type: {model_type}")
|
||||
return cls._services[model_type]
|
||||
|
||||
@classmethod
|
||||
def get_route_class(cls, model_type: str) -> Type:
|
||||
"""Get route class for a model type
|
||||
|
||||
Args:
|
||||
model_type: The model type identifier
|
||||
|
||||
Returns:
|
||||
The route class for the model type
|
||||
|
||||
Raises:
|
||||
ValueError: If model type is not registered
|
||||
"""
|
||||
if model_type not in cls._routes:
|
||||
raise ValueError(f"Unknown model type: {model_type}")
|
||||
return cls._routes[model_type]
|
||||
|
||||
@classmethod
|
||||
def get_route_instance(cls, model_type: str):
|
||||
"""Get or create route instance for a model type
|
||||
|
||||
Args:
|
||||
model_type: The model type identifier
|
||||
|
||||
Returns:
|
||||
The route instance for the model type
|
||||
"""
|
||||
if model_type not in cls._initialized_routes:
|
||||
route_class = cls.get_route_class(model_type)
|
||||
cls._initialized_routes[model_type] = route_class()
|
||||
return cls._initialized_routes[model_type]
|
||||
|
||||
@classmethod
|
||||
def setup_all_routes(cls, app):
|
||||
"""Setup routes for all registered model types
|
||||
|
||||
Args:
|
||||
app: The aiohttp application instance
|
||||
"""
|
||||
logger.info(f"Setting up routes for {len(cls._services)} registered model types")
|
||||
|
||||
for model_type in cls._services.keys():
|
||||
try:
|
||||
routes_instance = cls.get_route_instance(model_type)
|
||||
routes_instance.setup_routes(app)
|
||||
logger.info(f"Successfully set up routes for {model_type}")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to setup routes for {model_type}: {e}", exc_info=True)
|
||||
|
||||
@classmethod
|
||||
def get_registered_types(cls) -> list:
|
||||
"""Get list of all registered model types
|
||||
|
||||
Returns:
|
||||
List of registered model type identifiers
|
||||
"""
|
||||
return list(cls._services.keys())
|
||||
|
||||
@classmethod
|
||||
def is_registered(cls, model_type: str) -> bool:
|
||||
"""Check if a model type is registered
|
||||
|
||||
Args:
|
||||
model_type: The model type identifier
|
||||
|
||||
Returns:
|
||||
True if the model type is registered, False otherwise
|
||||
"""
|
||||
return model_type in cls._services
|
||||
|
||||
@classmethod
|
||||
def clear_registrations(cls):
|
||||
"""Clear all registrations - mainly for testing purposes"""
|
||||
cls._services.clear()
|
||||
cls._routes.clear()
|
||||
cls._initialized_services.clear()
|
||||
cls._initialized_routes.clear()
|
||||
logger.info("Cleared all model type registrations")
|
||||
|
||||
|
||||
def register_default_model_types():
|
||||
"""Register the default model types (LoRA, Checkpoint, and Embedding)"""
|
||||
from ..services.lora_service import LoraService
|
||||
from ..services.checkpoint_service import CheckpointService
|
||||
from ..services.embedding_service import EmbeddingService
|
||||
from ..routes.lora_routes import LoraRoutes
|
||||
from ..routes.checkpoint_routes import CheckpointRoutes
|
||||
from ..routes.embedding_routes import EmbeddingRoutes
|
||||
|
||||
# Register LoRA model type
|
||||
ModelServiceFactory.register_model_type('lora', LoraService, LoraRoutes)
|
||||
|
||||
# Register Checkpoint model type
|
||||
ModelServiceFactory.register_model_type('checkpoint', CheckpointService, CheckpointRoutes)
|
||||
|
||||
# Register Embedding model type
|
||||
ModelServiceFactory.register_model_type('embedding', EmbeddingService, EmbeddingRoutes)
|
||||
|
||||
logger.info("Registered default model types: lora, checkpoint, embedding")
|
||||
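Taken together, `register_default_model_types()` plus `ModelServiceFactory.setup_all_routes()` is all a startup path needs. A minimal usage sketch, assuming a bare aiohttp app (the real plugin wires this into ComfyUI's own server instead):

```python
# Illustrative wiring only; in the plugin the routes attach to ComfyUI's aiohttp app.
from aiohttp import web

app = web.Application()
register_default_model_types()             # registers 'lora', 'checkpoint' and 'embedding'
ModelServiceFactory.setup_all_routes(app)  # each route class binds its endpoints onto app

assert ModelServiceFactory.is_registered('lora')
print(ModelServiceFactory.get_registered_types())  # ['lora', 'checkpoint', 'embedding']
```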
@@ -2,6 +2,7 @@ import asyncio
 from typing import List, Dict
 from dataclasses import dataclass
 from operator import itemgetter
+from natsort import natsorted

 @dataclass
 class RecipeCache:
@@ -16,7 +17,7 @@ class RecipeCache:
     async def resort(self, name_only: bool = False):
         """Resort all cached data views"""
         async with self._lock:
-            self.sorted_by_name = sorted(
+            self.sorted_by_name = natsorted(
                 self.raw_data,
                 key=lambda x: x.get('title', '').lower()  # Case-insensitive sort
             )
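The `sorted` → `natsorted` switch gives numbered recipe titles a human ordering instead of a lexicographic one; a quick comparison:

```python
from natsort import natsorted

titles = ['Recipe 10', 'Recipe 2', 'Recipe 1']
print(sorted(titles, key=str.lower))     # ['Recipe 1', 'Recipe 10', 'Recipe 2'] (lexicographic)
print(natsorted(titles, key=str.lower))  # ['Recipe 1', 'Recipe 2', 'Recipe 10'] (natural order)
```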
@@ -2,12 +2,14 @@ import os
 import logging
 import asyncio
 import json
 import time
 from typing import List, Dict, Optional, Any, Tuple
 from ..config import config
 from .recipe_cache import RecipeCache
+from .service_registry import ServiceRegistry
 from .lora_scanner import LoraScanner
 from .civitai_client import CivitaiClient
 from ..utils.utils import fuzzy_match
+from natsort import natsorted
+import sys

 logger = logging.getLogger(__name__)

@@ -18,11 +20,22 @@ class RecipeScanner:
     _instance = None
     _lock = asyncio.Lock()

+    @classmethod
+    async def get_instance(cls, lora_scanner: Optional[LoraScanner] = None):
+        """Get singleton instance of RecipeScanner"""
+        async with cls._lock:
+            if cls._instance is None:
+                if not lora_scanner:
+                    # Get lora scanner from service registry if not provided
+                    lora_scanner = await ServiceRegistry.get_lora_scanner()
+                cls._instance = cls(lora_scanner)
+            return cls._instance
+
     def __new__(cls, lora_scanner: Optional[LoraScanner] = None):
         if cls._instance is None:
             cls._instance = super().__new__(cls)
             cls._instance._lora_scanner = lora_scanner
-            cls._instance._civitai_client = CivitaiClient()
+            cls._instance._civitai_client = None  # Will be lazily initialized
         return cls._instance

     def __init__(self, lora_scanner: Optional[LoraScanner] = None):
@@ -35,9 +48,148 @@ class RecipeScanner:
         if lora_scanner:
             self._lora_scanner = lora_scanner
         self._initialized = True

         # Initialization will be scheduled by LoraManager

+    async def _get_civitai_client(self):
+        """Lazily initialize CivitaiClient from registry"""
+        if self._civitai_client is None:
+            self._civitai_client = await ServiceRegistry.get_civitai_client()
+        return self._civitai_client
+
+    async def initialize_in_background(self) -> None:
+        """Initialize cache in background using thread pool"""
+        try:
+            # Set initial empty cache to avoid None reference errors
+            if self._cache is None:
+                self._cache = RecipeCache(
+                    raw_data=[],
+                    sorted_by_name=[],
+                    sorted_by_date=[]
+                )
+
+            # Mark as initializing to prevent concurrent initializations
+            self._is_initializing = True
+
+            try:
+                # Start timer
+                start_time = time.time()
+
+                # Use thread pool to execute CPU-intensive operations
+                loop = asyncio.get_event_loop()
+                cache = await loop.run_in_executor(
+                    None,  # Use default thread pool
+                    self._initialize_recipe_cache_sync  # Run synchronous version in thread
+                )
+
+                # Calculate elapsed time and log it
+                elapsed_time = time.time() - start_time
+                recipe_count = len(cache.raw_data) if cache and hasattr(cache, 'raw_data') else 0
+                logger.info(f"Recipe cache initialized in {elapsed_time:.2f} seconds. Found {recipe_count} recipes")
+            finally:
+                # Mark initialization as complete regardless of outcome
+                self._is_initializing = False
+        except Exception as e:
+            logger.error(f"Recipe Scanner: Error initializing cache in background: {e}")
+
+    def _initialize_recipe_cache_sync(self):
+        """Synchronous version of recipe cache initialization for thread pool execution"""
+        try:
+            # Create a new event loop for this thread
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+
+            # Create a synchronous method to bypass the async lock
+            def sync_initialize_cache():
+                # We need to implement scan_all_recipes logic synchronously here
+                # instead of calling the async method to avoid event loop issues
+                recipes = []
+                recipes_dir = self.recipes_dir
+
+                if not recipes_dir or not os.path.exists(recipes_dir):
+                    logger.warning(f"Recipes directory not found: {recipes_dir}")
+                    return recipes
+
+                # Get all recipe JSON files in the recipes directory
+                recipe_files = []
+                for root, _, files in os.walk(recipes_dir):
+                    recipe_count = sum(1 for f in files if f.lower().endswith('.recipe.json'))
+                    if recipe_count > 0:
+                        for file in files:
+                            if file.lower().endswith('.recipe.json'):
+                                recipe_files.append(os.path.join(root, file))
+
+                # Process each recipe file
+                for recipe_path in recipe_files:
+                    try:
+                        with open(recipe_path, 'r', encoding='utf-8') as f:
+                            recipe_data = json.load(f)
+
+                        # Validate recipe data
+                        if not recipe_data or not isinstance(recipe_data, dict):
+                            logger.warning(f"Invalid recipe data in {recipe_path}")
+                            continue
+
+                        # Ensure required fields exist
+                        required_fields = ['id', 'file_path', 'title']
+                        if not all(field in recipe_data for field in required_fields):
+                            logger.warning(f"Missing required fields in {recipe_path}")
+                            continue
+
+                        # Ensure the image file exists
+                        image_path = recipe_data.get('file_path')
+                        if not os.path.exists(image_path):
+                            recipe_dir = os.path.dirname(recipe_path)
+                            image_filename = os.path.basename(image_path)
+                            alternative_path = os.path.join(recipe_dir, image_filename)
+                            if os.path.exists(alternative_path):
+                                recipe_data['file_path'] = alternative_path
+
+                        # Ensure loras array exists
+                        if 'loras' not in recipe_data:
+                            recipe_data['loras'] = []
+
+                        # Ensure gen_params exists
+                        if 'gen_params' not in recipe_data:
+                            recipe_data['gen_params'] = {}
+
+                        # Add to list without async operations
+                        recipes.append(recipe_data)
+                    except Exception as e:
+                        logger.error(f"Error loading recipe file {recipe_path}: {e}")
+                        import traceback
+                        traceback.print_exc(file=sys.stderr)
+
+                # Update cache with the collected data
+                self._cache.raw_data = recipes
+
+                # Create a simplified resort function that doesn't use await
+                if hasattr(self._cache, "resort"):
+                    try:
+                        # Sort by name
+                        self._cache.sorted_by_name = natsorted(
+                            self._cache.raw_data,
+                            key=lambda x: x.get('title', '').lower()
+                        )
+
+                        # Sort by date (modified or created)
+                        self._cache.sorted_by_date = sorted(
+                            self._cache.raw_data,
+                            key=lambda x: x.get('modified', x.get('created_date', 0)),
+                            reverse=True
+                        )
+                    except Exception as e:
+                        logger.error(f"Error sorting recipe cache: {e}")
+
+                return self._cache
+
+            # Run our sync initialization that avoids lock conflicts
+            return sync_initialize_cache()
+        except Exception as e:
+            logger.error(f"Error in thread-based recipe cache initialization: {e}")
+            return self._cache if hasattr(self, '_cache') else None
+        finally:
+            # Clean up the event loop
+            loop.close()

     @property
     def recipes_dir(self) -> str:
         """Get path to recipes directory"""
@@ -60,49 +212,48 @@ class RecipeScanner:
         if self._is_initializing and not force_refresh:
             return self._cache or RecipeCache(raw_data=[], sorted_by_name=[], sorted_by_date=[])

-        # Try to acquire the lock with a timeout to prevent deadlocks
-        try:
-            async with self._initialization_lock:
-                # Check again after acquiring the lock
-                if self._cache is not None and not force_refresh:
-                    return self._cache
-
-                # Mark as initializing to prevent concurrent initializations
-                self._is_initializing = True
-
-                try:
-                    # Remove dependency on lora scanner initialization
-                    # Scan for recipe data directly
-                    raw_data = await self.scan_all_recipes()
+        # If force refresh is requested, initialize the cache directly
+        if force_refresh:
+            # Try to acquire the lock with a timeout to prevent deadlocks
+            try:
+                async with self._initialization_lock:
+                    # Mark as initializing to prevent concurrent initializations
+                    self._is_initializing = True

-                    # Update cache
-                    self._cache = RecipeCache(
-                        raw_data=raw_data,
-                        sorted_by_name=[],
-                        sorted_by_date=[]
-                    )
+                    try:
+                        # Scan for recipe data directly
+                        raw_data = await self.scan_all_recipes()
+
+                        # Update cache
+                        self._cache = RecipeCache(
+                            raw_data=raw_data,
+                            sorted_by_name=[],
+                            sorted_by_date=[]
+                        )

-                    # Resort cache
-                    await self._cache.resort()
-
-                    return self._cache
+                        # Resort cache
+                        await self._cache.resort()
+
+                        return self._cache

-                except Exception as e:
-                    logger.error(f"Recipe Manager: Error initializing cache: {e}", exc_info=True)
-                    # Create empty cache on error
-                    self._cache = RecipeCache(
-                        raw_data=[],
-                        sorted_by_name=[],
-                        sorted_by_date=[]
-                    )
-                    return self._cache
-                finally:
-                    # Mark initialization as complete
-                    self._is_initializing = False
+                    except Exception as e:
+                        logger.error(f"Recipe Manager: Error initializing cache: {e}", exc_info=True)
+                        # Create empty cache on error
+                        self._cache = RecipeCache(
+                            raw_data=[],
+                            sorted_by_name=[],
+                            sorted_by_date=[]
+                        )
+                        return self._cache
+                    finally:
+                        # Mark initialization as complete
+                        self._is_initializing = False

-        except Exception as e:
-            logger.error(f"Unexpected error in get_cached_data: {e}")
-            return self._cache or RecipeCache(raw_data=[], sorted_by_name=[], sorted_by_date=[])
+            except Exception as e:
+                logger.error(f"Unexpected error in get_cached_data: {e}")
+
+        # Return the cache (may be empty or partially initialized)
+        return self._cache or RecipeCache(raw_data=[], sorted_by_name=[], sorted_by_date=[])
     async def scan_all_recipes(self) -> List[Dict]:
         """Scan all recipe JSON files and return metadata"""
@@ -171,6 +322,20 @@ class RecipeScanner:

             # Update lora information with local paths and availability
             await self._update_lora_information(recipe_data)

+            # Calculate and update fingerprint if missing
+            if 'loras' in recipe_data and 'fingerprint' not in recipe_data:
+                from ..utils.utils import calculate_recipe_fingerprint
+                fingerprint = calculate_recipe_fingerprint(recipe_data['loras'])
+                recipe_data['fingerprint'] = fingerprint
+
+                # Write updated recipe data back to file
+                try:
+                    with open(recipe_path, 'w', encoding='utf-8') as f:
+                        json.dump(recipe_data, f, indent=4, ensure_ascii=False)
+                    logger.info(f"Added fingerprint to recipe: {recipe_path}")
+                except Exception as e:
+                    logger.error(f"Error writing updated recipe with fingerprint: {e}")

             return recipe_data
         except Exception as e:
@@ -191,6 +356,10 @@ class RecipeScanner:
             metadata_updated = False

             for lora in recipe_data['loras']:
+                # Skip deleted loras that were already marked
+                if lora.get('isDeleted', False):
+                    continue
+
                 # Skip if already has complete information
                 if 'hash' in lora and 'file_name' in lora and lora['file_name']:
                     continue
@@ -206,10 +375,17 @@ class RecipeScanner:
                     metadata_updated = True
                 else:
                     # If not in cache, fetch from Civitai
-                    hash_from_civitai = await self._get_hash_from_civitai(model_version_id)
-                    if hash_from_civitai:
-                        lora['hash'] = hash_from_civitai
-                        metadata_updated = True
+                    result = await self._get_hash_from_civitai(model_version_id)
+                    if isinstance(result, tuple):
+                        hash_from_civitai, is_deleted = result
+                        if hash_from_civitai:
+                            lora['hash'] = hash_from_civitai
+                            metadata_updated = True
+                        elif is_deleted:
+                            # Mark the lora as deleted if it was not found on Civitai
+                            lora['isDeleted'] = True
+                            logger.warning(f"Marked lora with modelVersionId {model_version_id} as deleted")
+                            metadata_updated = True
                     else:
                         logger.debug(f"Could not get hash for modelVersionId {model_version_id}")

@@ -217,8 +393,8 @@ class RecipeScanner:
             if 'hash' in lora and (not lora.get('file_name') or not lora['file_name']):
                 hash_value = lora['hash']

-                if self._lora_scanner.has_lora_hash(hash_value):
-                    lora_path = self._lora_scanner.get_lora_path_by_hash(hash_value)
+                if self._lora_scanner.has_hash(hash_value):
+                    lora_path = self._lora_scanner.get_path_by_hash(hash_value)
                     if lora_path:
                         file_name = os.path.splitext(os.path.basename(lora_path))[0]
                         lora['file_name'] = file_name
@@ -255,42 +431,32 @@ class RecipeScanner:
     async def _get_hash_from_civitai(self, model_version_id: str) -> Optional[str]:
         """Get hash from Civitai API"""
         try:
-            if not self._civitai_client:
+            # Get CivitaiClient from ServiceRegistry
+            civitai_client = await self._get_civitai_client()
+            if not civitai_client:
+                logger.error("Failed to get CivitaiClient from ServiceRegistry")
                 return None

-            version_info = await self._civitai_client.get_model_version_info(model_version_id)
+            version_info, error_msg = await civitai_client.get_model_version_info(model_version_id)

-            if not version_info or not version_info.get('files'):
-                logger.debug(f"No files found in version info for ID: {model_version_id}")
-                return None
-
+            if not version_info:
+                if error_msg and "model not found" in error_msg.lower():
+                    logger.warning(f"Model with version ID {model_version_id} was not found on Civitai - marking as deleted")
+                    return None, True  # Return None hash and True for isDeleted flag
+                else:
+                    logger.debug(f"Could not get hash for modelVersionId {model_version_id}: {error_msg}")
+                    return None, False  # Return None hash but not marked as deleted

             # Get hash from the first file
             for file_info in version_info.get('files', []):
                 if file_info.get('hashes', {}).get('SHA256'):
-                    return file_info['hashes']['SHA256']
+                    return file_info['hashes']['SHA256'], False  # Return hash with False for isDeleted flag

             logger.debug(f"No SHA256 hash found in version info for ID: {model_version_id}")
-            return None
+            return None, False
         except Exception as e:
             logger.error(f"Error getting hash from Civitai: {e}")
-            return None
-
-    async def _get_model_version_name(self, model_version_id: str) -> Optional[str]:
-        """Get model version name from Civitai API"""
-        try:
-            if not self._civitai_client:
-                return None
-
-            version_info = await self._civitai_client.get_model_version_info(model_version_id)
-
-            if version_info and 'name' in version_info:
-                return version_info['name']
-
-            logger.debug(f"No version name found for modelVersionId {model_version_id}")
-            return None
-        except Exception as e:
-            logger.error(f"Error getting model version name from Civitai: {e}")
-            return None
+            return None, False
     async def _determine_base_model(self, loras: List[Dict]) -> Optional[str]:
         """Determine the most common base model among LoRAs"""
@@ -299,7 +465,7 @@ class RecipeScanner:
         # Count occurrences of each base model
         for lora in loras:
             if 'hash' in lora:
-                lora_path = self._lora_scanner.get_lora_path_by_hash(lora['hash'])
+                lora_path = self._lora_scanner.get_path_by_hash(lora['hash'])
                 if lora_path:
                     base_model = await self._get_base_model_for_lora(lora_path)
                     if base_model:
@@ -330,7 +496,7 @@ class RecipeScanner:
             logger.error(f"Error getting base model for lora: {e}")
             return None

-    async def get_paginated_data(self, page: int, page_size: int, sort_by: str = 'date', search: str = None, filters: dict = None, search_options: dict = None):
+    async def get_paginated_data(self, page: int, page_size: int, sort_by: str = 'date', search: str = None, filters: dict = None, search_options: dict = None, lora_hash: str = None, bypass_filters: bool = True):
         """Get paginated and filtered recipe data

         Args:
@@ -340,69 +506,89 @@ class RecipeScanner:
             search: Search term
             filters: Dictionary of filters to apply
             search_options: Dictionary of search options to apply
+            lora_hash: Optional SHA256 hash of a LoRA to filter recipes by
+            bypass_filters: If True, ignore other filters when a lora_hash is provided
         """
         cache = await self.get_cached_data()

         # Get base dataset
         filtered_data = cache.sorted_by_date if sort_by == 'date' else cache.sorted_by_name

-        # Apply search filter
-        if search:
-            # Default search options if none provided
-            if not search_options:
-                search_options = {
-                    'title': True,
-                    'tags': True,
-                    'lora_name': True,
-                    'lora_model': True
-                }
+        # Special case: Filter by LoRA hash (takes precedence if bypass_filters is True)
+        if lora_hash:
+            # Filter recipes that contain this LoRA hash
+            filtered_data = [
+                item for item in filtered_data
+                if 'loras' in item and any(
+                    lora.get('hash', '').lower() == lora_hash.lower()
+                    for lora in item['loras']
+                )
+            ]

-            # Build the search predicate based on search options
-            def matches_search(item):
-                # Search in title if enabled
-                if search_options.get('title', True):
-                    if fuzzy_match(str(item.get('title', '')), search):
-                        return True
-
-                # Search in tags if enabled
-                if search_options.get('tags', True) and 'tags' in item:
-                    for tag in item['tags']:
-                        if fuzzy_match(tag, search):
-                            return True
-
-                # Search in lora file names if enabled
-                if search_options.get('lora_name', True) and 'loras' in item:
-                    for lora in item['loras']:
-                        if fuzzy_match(str(lora.get('file_name', '')), search):
-                            return True
-
-                # Search in lora model names if enabled
-                if search_options.get('lora_model', True) and 'loras' in item:
-                    for lora in item['loras']:
-                        if fuzzy_match(str(lora.get('modelName', '')), search):
-                            return True
-
-                # No match found
-                return False
-
-            # Filter the data using the search predicate
-            filtered_data = [item for item in filtered_data if matches_search(item)]
+            if bypass_filters:
+                # Skip other filters if bypass_filters is True
+                pass
+            # Otherwise continue with normal filtering after applying LoRA hash filter

-        # Apply additional filters
-        if filters:
-            # Filter by base model
-            if 'base_model' in filters and filters['base_model']:
-                filtered_data = [
-                    item for item in filtered_data
-                    if item.get('base_model', '') in filters['base_model']
-                ]
+        # Skip further filtering if we're only filtering by LoRA hash with bypass enabled
+        if not (lora_hash and bypass_filters):
+            # Apply search filter
+            if search:
+                # Default search options if none provided
+                if not search_options:
+                    search_options = {
+                        'title': True,
+                        'tags': True,
+                        'lora_name': True,
+                        'lora_model': True
+                    }
+
+                # Build the search predicate based on search options
+                def matches_search(item):
+                    # Search in title if enabled
+                    if search_options.get('title', True):
+                        if fuzzy_match(str(item.get('title', '')), search):
+                            return True
+
+                    # Search in tags if enabled
+                    if search_options.get('tags', True) and 'tags' in item:
+                        for tag in item['tags']:
+                            if fuzzy_match(tag, search):
+                                return True
+
+                    # Search in lora file names if enabled
+                    if search_options.get('lora_name', True) and 'loras' in item:
+                        for lora in item['loras']:
+                            if fuzzy_match(str(lora.get('file_name', '')), search):
+                                return True
+
+                    # Search in lora model names if enabled
+                    if search_options.get('lora_model', True) and 'loras' in item:
+                        for lora in item['loras']:
+                            if fuzzy_match(str(lora.get('modelName', '')), search):
+                                return True
+
+                    # No match found
+                    return False
+
+                # Filter the data using the search predicate
+                filtered_data = [item for item in filtered_data if matches_search(item)]

-            # Filter by tags
-            if 'tags' in filters and filters['tags']:
-                filtered_data = [
-                    item for item in filtered_data
-                    if any(tag in item.get('tags', []) for tag in filters['tags'])
-                ]
+            # Apply additional filters
+            if filters:
+                # Filter by base model
+                if 'base_model' in filters and filters['base_model']:
+                    filtered_data = [
+                        item for item in filtered_data
+                        if item.get('base_model', '') in filters['base_model']
+                    ]
+
+                # Filter by tags
+                if 'tags' in filters and filters['tags']:
+                    filtered_data = [
+                        item for item in filtered_data
+                        if any(tag in item.get('tags', []) for tag in filters['tags'])
+                    ]

         # Calculate pagination
         total_items = len(filtered_data)
@@ -417,9 +603,9 @@ class RecipeScanner:
             if 'loras' in item:
                 for lora in item['loras']:
                     if 'hash' in lora and lora['hash']:
-                        lora['inLibrary'] = self._lora_scanner.has_lora_hash(lora['hash'].lower())
+                        lora['inLibrary'] = self._lora_scanner.has_hash(lora['hash'].lower())
                         lora['preview_url'] = self._lora_scanner.get_preview_url_by_hash(lora['hash'].lower())
-                        lora['localPath'] = self._lora_scanner.get_lora_path_by_hash(lora['hash'].lower())
+                        lora['localPath'] = self._lora_scanner.get_path_by_hash(lora['hash'].lower())

         result = {
             'items': paginated_items,
@@ -430,6 +616,74 @@ class RecipeScanner:
         }

         return result

+    async def get_recipe_by_id(self, recipe_id: str) -> dict:
+        """Get a single recipe by ID with all metadata and formatted URLs
+
+        Args:
+            recipe_id: The ID of the recipe to retrieve
+
+        Returns:
+            Dict containing the recipe data or None if not found
+        """
+        if not recipe_id:
+            return None
+
+        # Get all recipes from cache
+        cache = await self.get_cached_data()
+
+        # Find the recipe with the specified ID
+        recipe = next((r for r in cache.raw_data if str(r.get('id', '')) == recipe_id), None)
+
+        if not recipe:
+            return None
+
+        # Format the recipe with all needed information
+        formatted_recipe = {**recipe}  # Copy all fields
+
+        # Format file path to URL
+        if 'file_path' in formatted_recipe:
+            formatted_recipe['file_url'] = self._format_file_url(formatted_recipe['file_path'])
+
+        # Format dates for display
+        for date_field in ['created_date', 'modified']:
+            if date_field in formatted_recipe:
+                formatted_recipe[f"{date_field}_formatted"] = self._format_timestamp(formatted_recipe[date_field])
+
+        # Add lora metadata
+        if 'loras' in formatted_recipe:
+            for lora in formatted_recipe['loras']:
+                if 'hash' in lora and lora['hash']:
+                    lora_hash = lora['hash'].lower()
+                    lora['inLibrary'] = self._lora_scanner.has_hash(lora_hash)
+                    lora['preview_url'] = self._lora_scanner.get_preview_url_by_hash(lora_hash)
+                    lora['localPath'] = self._lora_scanner.get_path_by_hash(lora_hash)
+
+        return formatted_recipe
+
+    def _format_file_url(self, file_path: str) -> str:
+        """Format file path as URL for serving in web UI"""
+        if not file_path:
+            return '/loras_static/images/no-preview.png'
+
+        try:
+            # Format file path as a URL that will work with static file serving
+            recipes_dir = os.path.join(config.loras_roots[0], "recipes").replace(os.sep, '/')
+            if file_path.replace(os.sep, '/').startswith(recipes_dir):
+                relative_path = os.path.relpath(file_path, config.loras_roots[0]).replace(os.sep, '/')
+                return f"/loras_static/root1/preview/{relative_path}"
+
+            # If not in recipes dir, try to create a valid URL from the file name
+            file_name = os.path.basename(file_path)
+            return f"/loras_static/root1/preview/recipes/{file_name}"
+        except Exception as e:
+            logger.error(f"Error formatting file URL: {e}")
+            return '/loras_static/images/no-preview.png'
+
+    def _format_timestamp(self, timestamp: float) -> str:
+        """Format timestamp for display"""
+        from datetime import datetime
+        return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')

     async def update_recipe_metadata(self, recipe_id: str, metadata: dict) -> bool:
         """Update recipe metadata (like title and tags) in both file system and cache
@@ -562,3 +816,60 @@ class RecipeScanner:
             logger.info(f"Resorted recipe cache after updating {cache_updated_count} items")

         return file_updated_count, cache_updated_count

+    async def find_recipes_by_fingerprint(self, fingerprint: str) -> list:
+        """Find recipes with a matching fingerprint
+
+        Args:
+            fingerprint: The recipe fingerprint to search for
+
+        Returns:
+            List of recipe details that match the fingerprint
+        """
+        if not fingerprint:
+            return []
+
+        # Get all recipes from cache
+        cache = await self.get_cached_data()
+
+        # Find recipes with matching fingerprint
+        matching_recipes = []
+        for recipe in cache.raw_data:
+            if recipe.get('fingerprint') == fingerprint:
+                recipe_details = {
+                    'id': recipe.get('id'),
+                    'title': recipe.get('title'),
+                    'file_url': self._format_file_url(recipe.get('file_path')),
+                    'modified': recipe.get('modified'),
+                    'created_date': recipe.get('created_date'),
+                    'lora_count': len(recipe.get('loras', []))
+                }
+                matching_recipes.append(recipe_details)
+
+        return matching_recipes
+
+    async def find_all_duplicate_recipes(self) -> dict:
+        """Find all recipe duplicates based on fingerprints
+
+        Returns:
+            Dictionary where keys are fingerprints and values are lists of recipe IDs
+        """
+        # Get all recipes from cache
+        cache = await self.get_cached_data()
+
+        # Group recipes by fingerprint
+        fingerprint_groups = {}
+        for recipe in cache.raw_data:
+            fingerprint = recipe.get('fingerprint')
+            if not fingerprint:
+                continue
+
+            if fingerprint not in fingerprint_groups:
+                fingerprint_groups[fingerprint] = []
+
+            fingerprint_groups[fingerprint].append(recipe.get('id'))
+
+        # Filter to only include groups with more than one recipe
+        duplicate_groups = {k: v for k, v in fingerprint_groups.items() if len(v) > 1}
+
+        return duplicate_groups
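A minimal sketch of driving the new fingerprint helpers from calling code; the import path and report format are illustrative, only the scanner methods come from the diff above:

```python
import asyncio

from py.services.service_registry import ServiceRegistry  # illustrative import path

async def report_duplicate_recipes():
    scanner = await ServiceRegistry.get_recipe_scanner()

    # fingerprint -> [recipe ids], only for groups with more than one recipe
    duplicate_groups = await scanner.find_all_duplicate_recipes()
    for fingerprint, recipe_ids in duplicate_groups.items():
        matches = await scanner.find_recipes_by_fingerprint(fingerprint)
        titles = [m['title'] for m in matches]
        print(f"{fingerprint[:12]}...: {len(recipe_ids)} recipes - {titles}")

asyncio.run(report_duplicate_recipes())
```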
215  py/services/service_registry.py  Normal file
@@ -0,0 +1,215 @@
import asyncio
import logging
from typing import Optional, Dict, Any, TypeVar, Type

logger = logging.getLogger(__name__)

T = TypeVar('T')  # Define a type variable for service types


class ServiceRegistry:
    """Central registry for managing singleton services"""

    _services: Dict[str, Any] = {}
    _locks: Dict[str, asyncio.Lock] = {}

    @classmethod
    async def register_service(cls, name: str, service: Any) -> None:
        """Register a service instance with the registry

        Args:
            name: Service name identifier
            service: Service instance to register
        """
        cls._services[name] = service
        logger.debug(f"Registered service: {name}")

    @classmethod
    async def get_service(cls, name: str) -> Optional[Any]:
        """Get a service instance by name

        Args:
            name: Service name identifier

        Returns:
            Service instance or None if not found
        """
        return cls._services.get(name)

    @classmethod
    def get_service_sync(cls, name: str) -> Optional[Any]:
        """Synchronously get a service instance by name

        Args:
            name: Service name identifier

        Returns:
            Service instance or None if not found
        """
        return cls._services.get(name)

    @classmethod
    def _get_lock(cls, name: str) -> asyncio.Lock:
        """Get or create a lock for a service

        Args:
            name: Service name identifier

        Returns:
            AsyncIO lock for the service
        """
        if name not in cls._locks:
            cls._locks[name] = asyncio.Lock()
        return cls._locks[name]

    @classmethod
    async def get_lora_scanner(cls):
        """Get or create LoRA scanner instance"""
        service_name = "lora_scanner"

        if service_name in cls._services:
            return cls._services[service_name]

        async with cls._get_lock(service_name):
            # Double-check after acquiring lock
            if service_name in cls._services:
                return cls._services[service_name]

            # Import here to avoid circular imports
            from .lora_scanner import LoraScanner

            scanner = await LoraScanner.get_instance()
            cls._services[service_name] = scanner
            logger.debug(f"Created and registered {service_name}")
            return scanner

    @classmethod
    async def get_checkpoint_scanner(cls):
        """Get or create Checkpoint scanner instance"""
        service_name = "checkpoint_scanner"

        if service_name in cls._services:
            return cls._services[service_name]

        async with cls._get_lock(service_name):
            # Double-check after acquiring lock
            if service_name in cls._services:
                return cls._services[service_name]

            # Import here to avoid circular imports
            from .checkpoint_scanner import CheckpointScanner

            scanner = await CheckpointScanner.get_instance()
            cls._services[service_name] = scanner
            logger.debug(f"Created and registered {service_name}")
            return scanner

    @classmethod
    async def get_recipe_scanner(cls):
        """Get or create Recipe scanner instance"""
        service_name = "recipe_scanner"

        if service_name in cls._services:
            return cls._services[service_name]

        async with cls._get_lock(service_name):
            # Double-check after acquiring lock
            if service_name in cls._services:
                return cls._services[service_name]

            # Import here to avoid circular imports
            from .recipe_scanner import RecipeScanner

            scanner = await RecipeScanner.get_instance()
            cls._services[service_name] = scanner
            logger.debug(f"Created and registered {service_name}")
            return scanner

    @classmethod
    async def get_civitai_client(cls):
        """Get or create CivitAI client instance"""
        service_name = "civitai_client"

        if service_name in cls._services:
            return cls._services[service_name]

        async with cls._get_lock(service_name):
            # Double-check after acquiring lock
            if service_name in cls._services:
                return cls._services[service_name]

            # Import here to avoid circular imports
            from .civitai_client import CivitaiClient

            client = await CivitaiClient.get_instance()
            cls._services[service_name] = client
            logger.debug(f"Created and registered {service_name}")
            return client

    @classmethod
    async def get_download_manager(cls):
        """Get or create Download manager instance"""
        service_name = "download_manager"

        if service_name in cls._services:
            return cls._services[service_name]

        async with cls._get_lock(service_name):
            # Double-check after acquiring lock
            if service_name in cls._services:
                return cls._services[service_name]

            # Import here to avoid circular imports
            from .download_manager import DownloadManager

            manager = DownloadManager()
            cls._services[service_name] = manager
            logger.debug(f"Created and registered {service_name}")
            return manager

    @classmethod
    async def get_websocket_manager(cls):
        """Get or create WebSocket manager instance"""
        service_name = "websocket_manager"

        if service_name in cls._services:
            return cls._services[service_name]

        async with cls._get_lock(service_name):
            # Double-check after acquiring lock
            if service_name in cls._services:
                return cls._services[service_name]

            # Import here to avoid circular imports
            from .websocket_manager import ws_manager

            cls._services[service_name] = ws_manager
            logger.debug(f"Registered {service_name}")
            return ws_manager

    @classmethod
    async def get_embedding_scanner(cls):
        """Get or create Embedding scanner instance"""
        service_name = "embedding_scanner"

        if service_name in cls._services:
            return cls._services[service_name]

        async with cls._get_lock(service_name):
            # Double-check after acquiring lock
            if service_name in cls._services:
                return cls._services[service_name]

            # Import here to avoid circular imports
            from .embedding_scanner import EmbeddingScanner

            scanner = await EmbeddingScanner.get_instance()
            cls._services[service_name] = scanner
            logger.debug(f"Created and registered {service_name}")
            return scanner

    @classmethod
    def clear_services(cls):
        """Clear all registered services - mainly for testing"""
        cls._services.clear()
        cls._locks.clear()
        logger.info("Cleared all registered services")
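Because every getter double-checks under its own lock, callers can await a getter from anywhere without worrying about construction races. A minimal sketch:

```python
import asyncio

async def main():
    # Both awaits resolve to the same singleton; the first call creates it
    scanner_a = await ServiceRegistry.get_lora_scanner()
    scanner_b = await ServiceRegistry.get_lora_scanner()
    assert scanner_a is scanner_b

    # Ad-hoc services can also be registered and looked up by name
    await ServiceRegistry.register_service('my_service', object())
    assert ServiceRegistry.get_service_sync('my_service') is not None

asyncio.run(main())
```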
@@ -9,6 +9,7 @@ class SettingsManager:
     def __init__(self):
         self.settings_file = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'settings.json')
         self.settings = self._load_settings()
+        self._auto_set_default_roots()
         self._check_environment_variables()

     def _load_settings(self) -> Dict[str, Any]:
@@ -21,6 +22,28 @@ class SettingsManager:
             logger.error(f"Error loading settings: {e}")
             return self._get_default_settings()

+    def _auto_set_default_roots(self):
+        """Auto set default root paths if only one folder is present and default is empty."""
+        folder_paths = self.settings.get('folder_paths', {})
+        updated = False
+        # loras
+        loras = folder_paths.get('loras', [])
+        if isinstance(loras, list) and len(loras) == 1 and not self.settings.get('default_lora_root'):
+            self.settings['default_lora_root'] = loras[0]
+            updated = True
+        # checkpoints
+        checkpoints = folder_paths.get('checkpoints', [])
+        if isinstance(checkpoints, list) and len(checkpoints) == 1 and not self.settings.get('default_checkpoint_root'):
+            self.settings['default_checkpoint_root'] = checkpoints[0]
+            updated = True
+        # embeddings
+        embeddings = folder_paths.get('embeddings', [])
+        if isinstance(embeddings, list) and len(embeddings) == 1 and not self.settings.get('default_embedding_root'):
+            self.settings['default_embedding_root'] = embeddings[0]
+            updated = True
+        if updated:
+            self._save_settings()

     def _check_environment_variables(self) -> None:
         """Check for environment variables and update settings if needed"""
         env_api_key = os.environ.get('CIVITAI_API_KEY')
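The effect of `_auto_set_default_roots` in isolation, sketched over a plain dict (the path is a placeholder):

```python
# Standalone rendition of the auto-default rule for one model type.
settings = {'folder_paths': {'loras': ['/data/loras']}}  # exactly one root, no default yet

loras = settings.get('folder_paths', {}).get('loras', [])
if isinstance(loras, list) and len(loras) == 1 and not settings.get('default_lora_root'):
    settings['default_lora_root'] = loras[0]

print(settings['default_lora_root'])  # /data/loras
```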
@@ -1,6 +1,9 @@
 import logging
 from aiohttp import web
 from typing import Set, Dict, Optional
+from uuid import uuid4
+import asyncio
+from datetime import datetime, timedelta

 logger = logging.getLogger(__name__)

@@ -9,6 +12,10 @@ class WebSocketManager:

     def __init__(self):
         self._websockets: Set[web.WebSocketResponse] = set()
+        self._init_websockets: Set[web.WebSocketResponse] = set()  # New set for initialization progress clients
+        self._download_websockets: Dict[str, web.WebSocketResponse] = {}  # New dict for download-specific clients
+        # Add progress tracking dictionary
+        self._download_progress: Dict[str, Dict] = {}

     async def handle_connection(self, request: web.Request) -> web.WebSocketResponse:
         """Handle new WebSocket connection"""
@@ -23,7 +30,62 @@ class WebSocketManager:
         finally:
             self._websockets.discard(ws)
             return ws

+    async def handle_init_connection(self, request: web.Request) -> web.WebSocketResponse:
+        """Handle new WebSocket connection for initialization progress"""
+        ws = web.WebSocketResponse()
+        await ws.prepare(request)
+        self._init_websockets.add(ws)
+
+        try:
+            async for msg in ws:
+                if msg.type == web.WSMsgType.ERROR:
+                    logger.error(f'Init WebSocket error: {ws.exception()}')
+        finally:
+            self._init_websockets.discard(ws)
+            return ws
+
+    async def handle_download_connection(self, request: web.Request) -> web.WebSocketResponse:
+        """Handle new WebSocket connection for download progress"""
+        ws = web.WebSocketResponse()
+        await ws.prepare(request)
+
+        # Get download_id from query parameters
+        download_id = request.query.get('id')
+
+        if not download_id:
+            # Generate a new download ID if not provided
+            download_id = str(uuid4())
+
+        # Store the websocket with its download ID
+        self._download_websockets[download_id] = ws
+
+        try:
+            # Send the download ID back to the client
+            await ws.send_json({
+                'type': 'download_id',
+                'download_id': download_id
+            })
+
+            async for msg in ws:
+                if msg.type == web.WSMsgType.ERROR:
+                    logger.error(f'Download WebSocket error: {ws.exception()}')
+        finally:
+            if download_id in self._download_websockets:
+                del self._download_websockets[download_id]
+
+            # Schedule cleanup of completed downloads after WebSocket disconnection
+            asyncio.create_task(self._delayed_cleanup(download_id))
+            return ws
+
+    async def _delayed_cleanup(self, download_id: str, delay_seconds: int = 300):
+        """Clean up download progress after a delay (5 minutes by default)"""
+        await asyncio.sleep(delay_seconds)
+        progress_data = self._download_progress.get(download_id)
+        if progress_data and progress_data.get('progress', 0) >= 100:
+            self.cleanup_download_progress(download_id)
+            logger.debug(f"Delayed cleanup completed for download {download_id}")

     async def broadcast(self, data: Dict):
         """Broadcast message to all connected clients"""
         if not self._websockets:
@@ -34,10 +96,80 @@ class WebSocketManager:
                 await ws.send_json(data)
             except Exception as e:
                 logger.error(f"Error sending progress: {e}")

+    async def broadcast_init_progress(self, data: Dict):
+        """Broadcast initialization progress to connected clients"""
+        if not self._init_websockets:
+            return
+
+        # Ensure data has all required fields
+        if 'stage' not in data:
+            data['stage'] = 'processing'
+        if 'progress' not in data:
+            data['progress'] = 0
+        if 'details' not in data:
+            data['details'] = 'Processing...'
+
+        for ws in self._init_websockets:
+            try:
+                await ws.send_json(data)
+            except Exception as e:
+                logger.error(f"Error sending initialization progress: {e}")
+
+    async def broadcast_download_progress(self, download_id: str, data: Dict):
+        """Send progress update to specific download client"""
+        # Store simplified progress data in memory (only progress percentage)
+        self._download_progress[download_id] = {
+            'progress': data.get('progress', 0),
+            'timestamp': datetime.now()
+        }
+
+        if download_id not in self._download_websockets:
+            logger.debug(f"No WebSocket found for download ID: {download_id}")
+            return
+
+        ws = self._download_websockets[download_id]
+        try:
+            await ws.send_json(data)
+        except Exception as e:
+            logger.error(f"Error sending download progress: {e}")
+
+    def get_download_progress(self, download_id: str) -> Optional[Dict]:
+        """Get progress information for a specific download"""
+        return self._download_progress.get(download_id)
+
+    def cleanup_download_progress(self, download_id: str):
+        """Remove progress info for a specific download"""
+        self._download_progress.pop(download_id, None)
+
+    def cleanup_old_downloads(self, max_age_hours: int = 24):
+        """Clean up old download progress entries"""
+        cutoff_time = datetime.now() - timedelta(hours=max_age_hours)
+        to_remove = []
+
+        for download_id, progress_data in self._download_progress.items():
+            if progress_data.get('timestamp', datetime.now()) < cutoff_time:
+                to_remove.append(download_id)
+
+        for download_id in to_remove:
+            self._download_progress.pop(download_id, None)
+            logger.debug(f"Cleaned up old download progress for {download_id}")
+
+    def get_connected_clients_count(self) -> int:
+        """Get number of connected clients"""
+        return len(self._websockets)
+
+    def get_init_clients_count(self) -> int:
+        """Get number of initialization progress clients"""
+        return len(self._init_websockets)
+
+    def get_download_clients_count(self) -> int:
+        """Get number of download progress clients"""
+        return len(self._download_websockets)
+
+    def generate_download_id(self) -> str:
+        """Generate a unique download ID"""
+        return str(uuid4())

 # Global instance
 ws_manager = WebSocketManager()
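From the server side, a download worker needs only the manager's public methods; a minimal sketch (the extra 'type' key in the payload is illustrative):

```python
async def report_progress_example():
    download_id = ws_manager.generate_download_id()

    # Pushes to the client registered for this id; logs a debug message otherwise
    await ws_manager.broadcast_download_progress(download_id, {
        'type': 'progress',
        'progress': 42,
    })

    # The last reported percentage stays queryable even without a live socket
    print(ws_manager.get_download_progress(download_id))  # {'progress': 42, 'timestamp': ...}
```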
@@ -5,4 +5,53 @@ NSFW_LEVELS = {
     "X": 8,
     "XXX": 16,
     "Blocked": 32,  # Probably not actually visible through the API without being logged in on model owner account?
 }
+
+# Node type constants
+NODE_TYPES = {
+    "Lora Loader (LoraManager)": 1,
+    "Lora Stacker (LoraManager)": 2,
+    "WanVideo Lora Select (LoraManager)": 3
+}
+
+# Default ComfyUI node color when bgcolor is null
+DEFAULT_NODE_COLOR = "#353535"
+
+# preview extensions
+PREVIEW_EXTENSIONS = [
+    '.webp',
+    '.preview.webp',
+    '.preview.png',
+    '.preview.jpeg',
+    '.preview.jpg',
+    '.preview.mp4',
+    '.png',
+    '.jpeg',
+    '.jpg',
+    '.mp4',
+    '.gif',
+    '.webm'
+]
+
+# Card preview image width
+CARD_PREVIEW_WIDTH = 480
+
+# Width for optimized example images
+EXAMPLE_IMAGE_WIDTH = 832
+
+# Supported media extensions for example downloads
+SUPPORTED_MEDIA_EXTENSIONS = {
+    'images': ['.jpg', '.jpeg', '.png', '.webp', '.gif'],
+    'videos': ['.mp4', '.webm']
+}
+
+# Valid Lora types
+VALID_LORA_TYPES = ['lora', 'locon', 'dora']
+
+# Civitai model tags in priority order for subfolder organization
+CIVITAI_MODEL_TAGS = [
+    'character', 'style', 'concept', 'clothing',
+    # 'base model', # exclude 'base model'
+    'poses', 'background', 'tool', 'vehicle', 'buildings',
+    'objects', 'assets', 'animal', 'action'
+]
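`PREVIEW_EXTENSIONS` is ordered from most to least preferred, so a resolver can simply return the first hit. A hypothetical helper sketch (`find_preview` does not exist in the codebase):

```python
import os

def find_preview(model_path: str):
    """Return the preferred existing preview file for a model, or None (illustrative)."""
    base = os.path.splitext(model_path)[0]
    for ext in PREVIEW_EXTENSIONS:  # checked in priority order
        candidate = base + ext
        if os.path.exists(candidate):
            return candidate
    return None
```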
408  py/utils/example_images_download_manager.py  Normal file
@@ -0,0 +1,408 @@
import logging
import os
import asyncio
import json
import time
import aiohttp
from aiohttp import web
from ..services.service_registry import ServiceRegistry
from .example_images_processor import ExampleImagesProcessor
from .example_images_metadata import MetadataUpdater

logger = logging.getLogger(__name__)

# Download status tracking
download_task = None
is_downloading = False
download_progress = {
    'total': 0,
    'completed': 0,
    'current_model': '',
    'status': 'idle',  # idle, running, paused, completed, error
    'errors': [],
    'last_error': None,
    'start_time': None,
    'end_time': None,
    'processed_models': set(),  # Track models that have been processed
    'refreshed_models': set()   # Track models that had metadata refreshed
}


class DownloadManager:
    """Manages downloading example images for models"""

    @staticmethod
    async def start_download(request):
        """
        Start downloading example images for models

        Expects a JSON body with:
        {
            "output_dir": "path/to/output",         # Base directory to save example images
            "optimize": true,                       # Whether to optimize images (default: true)
            "model_types": ["lora", "checkpoint"],  # Model types to process (default: both)
            "delay": 1.0                            # Delay between downloads to avoid rate limiting (default: 1.0)
        }
        """
        global download_task, is_downloading, download_progress

        if is_downloading:
            # Create a copy for JSON serialization
            response_progress = download_progress.copy()
            response_progress['processed_models'] = list(download_progress['processed_models'])
            response_progress['refreshed_models'] = list(download_progress['refreshed_models'])

            return web.json_response({
                'success': False,
                'error': 'Download already in progress',
                'status': response_progress
            }, status=400)

        try:
            # Parse the request body
            data = await request.json()
            output_dir = data.get('output_dir')
            optimize = data.get('optimize', True)
            model_types = data.get('model_types', ['lora', 'checkpoint'])
            delay = float(data.get('delay', 0.2))  # Default to 0.2 seconds

            if not output_dir:
                return web.json_response({
                    'success': False,
                    'error': 'Missing output_dir parameter'
                }, status=400)

            # Create the output directory
            os.makedirs(output_dir, exist_ok=True)

            # Initialize progress tracking
            download_progress['total'] = 0
            download_progress['completed'] = 0
            download_progress['current_model'] = ''
            download_progress['status'] = 'running'
            download_progress['errors'] = []
            download_progress['last_error'] = None
            download_progress['start_time'] = time.time()
            download_progress['end_time'] = None

            # Get the processed models list from a file if it exists
            progress_file = os.path.join(output_dir, '.download_progress.json')
            if os.path.exists(progress_file):
                try:
                    with open(progress_file, 'r', encoding='utf-8') as f:
                        saved_progress = json.load(f)
                    download_progress['processed_models'] = set(saved_progress.get('processed_models', []))
                    logger.debug(f"Loaded previous progress, {len(download_progress['processed_models'])} models already processed")
                except Exception as e:
                    logger.error(f"Failed to load progress file: {e}")
                    download_progress['processed_models'] = set()
            else:
                download_progress['processed_models'] = set()

            # Start the download task
            is_downloading = True
            download_task = asyncio.create_task(
                DownloadManager._download_all_example_images(
                    output_dir,
                    optimize,
                    model_types,
                    delay
                )
            )

            # Create a copy for JSON serialization
            response_progress = download_progress.copy()
            response_progress['processed_models'] = list(download_progress['processed_models'])
            response_progress['refreshed_models'] = list(download_progress['refreshed_models'])

            return web.json_response({
                'success': True,
                'message': 'Download started',
                'status': response_progress
            })

        except Exception as e:
            logger.error(f"Failed to start example images download: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    @staticmethod
    async def get_status(request):
        """Get the current status of example images download"""
        global download_progress

        # Create a copy of the progress dict with the set converted to a list for JSON serialization
        response_progress = download_progress.copy()
        response_progress['processed_models'] = list(download_progress['processed_models'])
        response_progress['refreshed_models'] = list(download_progress['refreshed_models'])

        return web.json_response({
            'success': True,
            'is_downloading': is_downloading,
            'status': response_progress
        })

    @staticmethod
    async def pause_download(request):
        """Pause the example images download"""
        global download_progress

        if not is_downloading:
            return web.json_response({
                'success': False,
                'error': 'No download in progress'
            }, status=400)

        download_progress['status'] = 'paused'

        return web.json_response({
            'success': True,
            'message': 'Download paused'
        })

    @staticmethod
    async def resume_download(request):
        """Resume the example images download"""
        global download_progress

        if not is_downloading:
            return web.json_response({
                'success': False,
                'error': 'No download in progress'
            }, status=400)

        if download_progress['status'] == 'paused':
            download_progress['status'] = 'running'

            return web.json_response({
                'success': True,
                'message': 'Download resumed'
            })
        else:
            return web.json_response({
                'success': False,
                'error': f"Download is in '{download_progress['status']}' state, cannot resume"
            }, status=400)

    @staticmethod
    async def _download_all_example_images(output_dir, optimize, model_types, delay):
        """Download example images for all models"""
        global is_downloading, download_progress

        # Create independent download session
        connector = aiohttp.TCPConnector(
            ssl=True,
            limit=3,
            force_close=False,
            enable_cleanup_closed=True
        )
        timeout = aiohttp.ClientTimeout(total=None, connect=60, sock_read=60)
        independent_session = aiohttp.ClientSession(
            connector=connector,
            trust_env=True,
            timeout=timeout
        )

        try:
            # Get scanners
            scanners = []
            if 'lora' in model_types:
                lora_scanner = await ServiceRegistry.get_lora_scanner()
                scanners.append(('lora', lora_scanner))

            if 'checkpoint' in model_types:
                checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
                scanners.append(('checkpoint', checkpoint_scanner))

            if 'embedding' in model_types:
                embedding_scanner = await ServiceRegistry.get_embedding_scanner()
                scanners.append(('embedding', embedding_scanner))

            # Get all models
            all_models = []
            for scanner_type, scanner in scanners:
                cache = await scanner.get_cached_data()
                if cache and cache.raw_data:
                    for model in cache.raw_data:
                        if model.get('sha256'):
                            all_models.append((scanner_type, model, scanner))

            # Update total count
            download_progress['total'] = len(all_models)
            logger.debug(f"Found {download_progress['total']} models to process")

            # Process each model
            for i, (scanner_type, model, scanner) in enumerate(all_models):
                # Main logic for processing model is here, but actual operations are delegated to other classes
                was_remote_download = await DownloadManager._process_model(
                    scanner_type, model, scanner,
                    output_dir, optimize, independent_session
                )

                # Update progress
                download_progress['completed'] += 1

                # Only add delay after remote download of models, and not after processing the last model
                if was_remote_download and i < len(all_models) - 1 and download_progress['status'] == 'running':
                    await asyncio.sleep(delay)

            # Mark as completed
            download_progress['status'] = 'completed'
            download_progress['end_time'] = time.time()
            logger.debug(f"Example images download completed: {download_progress['completed']}/{download_progress['total']} models processed")

        except Exception as e:
            error_msg = f"Error during example images download: {str(e)}"
            logger.error(error_msg, exc_info=True)
            download_progress['errors'].append(error_msg)
            download_progress['last_error'] = error_msg
            download_progress['status'] = 'error'
            download_progress['end_time'] = time.time()

        finally:
            # Close the independent session
            try:
                await independent_session.close()
            except Exception as e:
                logger.error(f"Error closing download session: {e}")

            # Save final progress to file
            try:
                DownloadManager._save_progress(output_dir)
            except Exception as e:
                logger.error(f"Failed to save progress file: {e}")

            # Set download status to not downloading
            is_downloading = False

    @staticmethod
    async def _process_model(scanner_type, model, scanner, output_dir, optimize, independent_session):
        """Process a single model download"""
        global download_progress

        # Check if download is paused
        while download_progress['status'] == 'paused':
            await asyncio.sleep(1)

        # Check if download should continue
        if download_progress['status'] != 'running':
            logger.info(f"Download stopped: {download_progress['status']}")
            return False  # Return False to indicate no remote download happened

        model_hash = model.get('sha256', '').lower()
        model_name = model.get('model_name', 'Unknown')
        model_file_path = model.get('file_path', '')
        model_file_name = model.get('file_name', '')

        try:
            # Update current model info
            download_progress['current_model'] = f"{model_name} ({model_hash[:8]})"

            # Skip if already processed AND directory exists with files
            if model_hash in download_progress['processed_models']:
                model_dir = os.path.join(output_dir, model_hash)
                has_files = os.path.exists(model_dir) and any(os.listdir(model_dir))
                if has_files:
                    logger.debug(f"Skipping already processed model: {model_name}")
                    return False
                else:
                    logger.debug(f"Model {model_name} marked as processed but folder empty or missing, reprocessing")

            # Create model directory
            model_dir = os.path.join(output_dir, model_hash)
            os.makedirs(model_dir, exist_ok=True)

            # First check for local example images - local processing doesn't need delay
            local_images_processed = await ExampleImagesProcessor.process_local_examples(
                model_file_path, model_file_name, model_name, model_dir, optimize
            )

            # If we processed local images, update metadata
            if local_images_processed:
                await MetadataUpdater.update_metadata_from_local_examples(
                    model_hash, model, scanner_type, scanner, model_dir
                )
                download_progress['processed_models'].add(model_hash)
                return False  # Return False to indicate no remote download happened

            # If no local images, try to download from remote
            elif model.get('civitai') and model.get('civitai', {}).get('images'):
                images = model.get('civitai', {}).get('images', [])

                success, is_stale = await ExampleImagesProcessor.download_model_images(
                    model_hash, model_name, images, model_dir, optimize, independent_session
                )

                # If metadata is stale, try to refresh it
                if is_stale and model_hash not in download_progress['refreshed_models']:
                    await MetadataUpdater.refresh_model_metadata(
                        model_hash, model_name, scanner_type, scanner
                    )

                    # Get the updated model data
                    updated_model = await MetadataUpdater.get_updated_model(
                        model_hash, scanner
                    )

                    if updated_model and updated_model.get('civitai', {}).get('images'):
                        # Retry download with updated metadata
                        updated_images = updated_model.get('civitai', {}).get('images', [])
                        success, _ = await ExampleImagesProcessor.download_model_images(
                            model_hash, model_name, updated_images, model_dir, optimize, independent_session
                        )

                # Only mark as processed if all images were downloaded successfully
                if success:
                    download_progress['processed_models'].add(model_hash)

                return True  # Return True to indicate a remote download happened
|
||||
|
||||
# Save progress periodically
|
||||
if download_progress['completed'] % 10 == 0 or download_progress['completed'] == download_progress['total'] - 1:
|
||||
DownloadManager._save_progress(output_dir)
|
||||
|
||||
return False # Default return if no conditions met
|
||||
|
||||
except Exception as e:
|
||||
error_msg = f"Error processing model {model.get('model_name')}: {str(e)}"
|
||||
logger.error(error_msg, exc_info=True)
|
||||
download_progress['errors'].append(error_msg)
|
||||
download_progress['last_error'] = error_msg
|
||||
return False # Return False on exception
|
||||
|
||||
@staticmethod
|
||||
def _save_progress(output_dir):
|
||||
"""Save download progress to file"""
|
||||
global download_progress
|
||||
try:
|
||||
progress_file = os.path.join(output_dir, '.download_progress.json')
|
||||
|
||||
# Read existing progress file if it exists
|
||||
existing_data = {}
|
||||
if os.path.exists(progress_file):
|
||||
try:
|
||||
with open(progress_file, 'r', encoding='utf-8') as f:
|
||||
existing_data = json.load(f)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to read existing progress file: {e}")
|
||||
|
||||
# Create new progress data
|
||||
progress_data = {
|
||||
'processed_models': list(download_progress['processed_models']),
|
||||
'refreshed_models': list(download_progress['refreshed_models']),
|
||||
'completed': download_progress['completed'],
|
||||
'total': download_progress['total'],
|
||||
'last_update': time.time()
|
||||
}
|
||||
|
||||
# Preserve existing fields (especially naming_version)
|
||||
for key, value in existing_data.items():
|
||||
if key not in progress_data:
|
||||
progress_data[key] = value
|
||||
|
||||
# Write updated progress data
|
||||
with open(progress_file, 'w', encoding='utf-8') as f:
|
||||
json.dump(progress_data, f, indent=2)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to save progress file: {e}")
|
||||
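For reference, a minimal sketch of what _save_progress leaves in <output_dir>/.download_progress.json; because of the key-merge loop above, fields written by other components, such as naming_version from the migration code later in this changeset, survive each save (all values below are illustrative):

# Illustrative shape of <output_dir>/.download_progress.json (not real data)
example_progress = {
    "processed_models": ["<sha256>", "<sha256>"],  # models whose examples are complete
    "refreshed_models": ["<sha256>"],              # models whose metadata was re-fetched
    "completed": 2,
    "total": 150,
    "last_update": 1714000000.0,                   # epoch seconds from time.time()
    "naming_version": 2,                           # preserved by the merge, written by the migration code
}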
209  py/utils/example_images_file_manager.py  Normal file
@@ -0,0 +1,209 @@
import logging
import os
import re
import sys
import subprocess
from aiohttp import web
from ..services.settings_manager import settings
from ..utils.constants import SUPPORTED_MEDIA_EXTENSIONS

logger = logging.getLogger(__name__)

class ExampleImagesFileManager:
    """Manages access and operations for example image files"""

    @staticmethod
    async def open_folder(request):
        """
        Open the example images folder for a specific model

        Expects a JSON request body with:
        {
            "model_hash": "sha256_hash"  # SHA256 hash of the model
        }
        """
        try:
            # Parse request body
            data = await request.json()
            model_hash = data.get('model_hash')

            if not model_hash:
                return web.json_response({
                    'success': False,
                    'error': 'Missing model_hash parameter'
                }, status=400)

            # Get example images path from settings
            example_images_path = settings.get('example_images_path')
            if not example_images_path:
                return web.json_response({
                    'success': False,
                    'error': 'No example images path configured. Please set it in the settings panel first.'
                }, status=400)

            # Construct folder path for this model
            model_folder = os.path.join(example_images_path, model_hash)
            model_folder = os.path.abspath(model_folder)  # Get absolute path

            # Path validation: ensure model_folder is under example_images_path
            if not model_folder.startswith(os.path.abspath(example_images_path)):
                return web.json_response({
                    'success': False,
                    'error': 'Invalid model folder path'
                }, status=400)

            # Check if folder exists
            if not os.path.exists(model_folder):
                return web.json_response({
                    'success': False,
                    'error': 'No example images found for this model. Download example images first.'
                }, status=404)

            # Open folder in file explorer
            if os.name == 'nt':  # Windows
                os.startfile(model_folder)
            elif os.name == 'posix':  # macOS and Linux
                if sys.platform == 'darwin':  # macOS
                    subprocess.Popen(['open', model_folder])
                else:  # Linux
                    subprocess.Popen(['xdg-open', model_folder])

            return web.json_response({
                'success': True,
                'message': f'Opened example images folder for model {model_hash}'
            })

        except Exception as e:
            logger.error(f"Failed to open example images folder: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    @staticmethod
    async def get_files(request):
        """
        Get the list of example image files for a specific model

        Expects:
        - model_hash in query parameters

        Returns:
        - List of image files and their paths
        """
        try:
            # Get model_hash from query parameters
            model_hash = request.query.get('model_hash')

            if not model_hash:
                return web.json_response({
                    'success': False,
                    'error': 'Missing model_hash parameter'
                }, status=400)

            # Get example images path from settings
            example_images_path = settings.get('example_images_path')
            if not example_images_path:
                return web.json_response({
                    'success': False,
                    'error': 'No example images path configured'
                }, status=400)

            # Construct folder path for this model
            model_folder = os.path.join(example_images_path, model_hash)

            # Check if folder exists
            if not os.path.exists(model_folder):
                return web.json_response({
                    'success': False,
                    'error': 'No example images found for this model',
                    'files': []
                }, status=404)

            # Get list of files in the folder
            files = []
            for file in os.listdir(model_folder):
                file_path = os.path.join(model_folder, file)
                if os.path.isfile(file_path):
                    # Check if file is a supported media file
                    file_ext = os.path.splitext(file)[1].lower()
                    if (file_ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or
                            file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']):
                        files.append({
                            'name': file,
                            'path': f'/example_images_static/{model_hash}/{file}',
                            'extension': file_ext,
                            'is_video': file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']
                        })

            return web.json_response({
                'success': True,
                'files': files
            })

        except Exception as e:
            logger.error(f"Failed to get example image files: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    @staticmethod
    async def has_images(request):
        """
        Check if the example images folder for a model exists and is not empty

        Expects:
        - model_hash in query parameters

        Returns:
        - Boolean indicating whether the folder exists and contains images/videos
        """
        try:
            # Get model_hash from query parameters
            model_hash = request.query.get('model_hash')

            if not model_hash:
                return web.json_response({
                    'success': False,
                    'error': 'Missing model_hash parameter'
                }, status=400)

            # Get example images path from settings
            example_images_path = settings.get('example_images_path')
            if not example_images_path:
                return web.json_response({
                    'has_images': False
                })

            # Construct folder path for this model
            model_folder = os.path.join(example_images_path, model_hash)

            # Check if folder exists
            if not os.path.exists(model_folder) or not os.path.isdir(model_folder):
                return web.json_response({
                    'has_images': False
                })

            # Check if folder contains any supported media files
            for file in os.listdir(model_folder):
                file_path = os.path.join(model_folder, file)
                if os.path.isfile(file_path):
                    file_ext = os.path.splitext(file)[1].lower()
                    if (file_ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or
                            file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']):
                        return web.json_response({
                            'has_images': True
                        })

            # If we reach here, the folder exists but has no supported media files
            return web.json_response({
                'has_images': False
            })

        except Exception as e:
            logger.error(f"Failed to check example images folder: {e}", exc_info=True)
            return web.json_response({
                'has_images': False,
                'error': str(e)
            })
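A hedged client-side sketch of consuming the get_files response above. The route path and port are assumptions for illustration only (the handler is defined here, but its route registration is not part of this diff); the response shape matches the handler exactly:

import asyncio
import aiohttp

async def list_example_files(model_hash):
    # Hypothetical route path; 8188 is ComfyUI's usual default port
    url = "http://127.0.0.1:8188/api/example-images/files"
    async with aiohttp.ClientSession() as session:
        async with session.get(url, params={"model_hash": model_hash}) as resp:
            data = await resp.json()
    if not data.get("success"):
        raise RuntimeError(data.get("error", "unknown error"))
    for f in data["files"]:
        # Each entry carries name, static path, extension, and an is_video flag
        kind = "video" if f["is_video"] else "image"
        print(f"{kind}: {f['name']} -> {f['path']}")

# asyncio.run(list_example_files("<64-char sha256>"))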
390  py/utils/example_images_metadata.py  Normal file
@@ -0,0 +1,390 @@
import logging
import os
import re
from ..utils.metadata_manager import MetadataManager
from ..utils.routes_common import ModelRouteUtils
from ..utils.constants import SUPPORTED_MEDIA_EXTENSIONS
from ..utils.exif_utils import ExifUtils
from ..recipes.constants import GEN_PARAM_KEYS

logger = logging.getLogger(__name__)

class MetadataUpdater:
    """Handles updating model metadata related to example images"""

    @staticmethod
    async def refresh_model_metadata(model_hash, model_name, scanner_type, scanner):
        """Refresh model metadata from CivitAI

        Args:
            model_hash: SHA256 hash of the model
            model_name: Model name (for logging)
            scanner_type: Scanner type ('lora' or 'checkpoint')
            scanner: Scanner instance for this model type

        Returns:
            bool: True if metadata was successfully refreshed, False otherwise
        """
        from ..utils.example_images_download_manager import download_progress

        try:
            # Find the model in the scanner cache
            cache = await scanner.get_cached_data()
            model_data = None

            for item in cache.raw_data:
                if item.get('sha256') == model_hash:
                    model_data = item
                    break

            if not model_data:
                logger.warning(f"Model {model_name} with hash {model_hash} not found in cache")
                return False

            file_path = model_data.get('file_path')
            if not file_path:
                logger.warning(f"Model {model_name} has no file path")
                return False

            # Track that we're refreshing this model
            download_progress['refreshed_models'].add(model_hash)

            # Use ModelRouteUtils to refresh metadata
            async def update_cache_func(old_path, new_path, metadata):
                return await scanner.update_single_model_cache(old_path, new_path, metadata)

            success = await ModelRouteUtils.fetch_and_update_model(
                model_hash,
                file_path,
                model_data,
                update_cache_func
            )

            if success:
                logger.info(f"Successfully refreshed metadata for {model_name}")
                return True
            else:
                logger.warning(f"Failed to refresh metadata for {model_name}")
                return False

        except Exception as e:
            error_msg = f"Error refreshing metadata for {model_name}: {str(e)}"
            logger.error(error_msg, exc_info=True)
            download_progress['errors'].append(error_msg)
            download_progress['last_error'] = error_msg
            return False

    @staticmethod
    async def get_updated_model(model_hash, scanner):
        """Get updated model data

        Args:
            model_hash: SHA256 hash of the model
            scanner: Scanner instance

        Returns:
            dict: Updated model data or None if not found
        """
        cache = await scanner.get_cached_data()
        for item in cache.raw_data:
            if item.get('sha256') == model_hash:
                return item
        return None

    @staticmethod
    async def update_metadata_from_local_examples(model_hash, model, scanner_type, scanner, model_dir):
        """Update model metadata with local example image information

        Args:
            model_hash: SHA256 hash of the model
            model: Model data dictionary
            scanner_type: Scanner type ('lora' or 'checkpoint')
            scanner: Scanner instance for this model type
            model_dir: Model images directory

        Returns:
            bool: True if metadata was successfully updated, False otherwise
        """
        try:
            # Collect local image paths
            local_images_paths = []
            if os.path.exists(model_dir):
                for file in os.listdir(model_dir):
                    file_path = os.path.join(model_dir, file)
                    if os.path.isfile(file_path):
                        file_ext = os.path.splitext(file)[1].lower()
                        is_supported = (file_ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or
                                        file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos'])
                        if is_supported:
                            local_images_paths.append(file_path)

            # Check if a metadata update is needed (no civitai field or empty images)
            needs_update = not model.get('civitai') or not model.get('civitai', {}).get('images')

            if needs_update and local_images_paths:
                logger.debug(f"Found {len(local_images_paths)} local example images for {model.get('model_name')}, updating metadata")

                # Create or get civitai field
                if not model.get('civitai'):
                    model['civitai'] = {}

                # Create images array
                images = []

                # Generate metadata for each local image/video
                for path in local_images_paths:
                    # Determine if video or image
                    file_ext = os.path.splitext(path)[1].lower()
                    is_video = file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']

                    # Create image metadata entry
                    image_entry = {
                        "url": "",  # Empty URL as required
                        "nsfwLevel": 0,
                        "width": 720,  # Default dimensions
                        "height": 1280,
                        "type": "video" if is_video else "image",
                        "meta": None,
                        "hasMeta": False,
                        "hasPositivePrompt": False
                    }

                    # If it's an image, try to get actual dimensions (optional enhancement)
                    try:
                        from PIL import Image
                        if not is_video and os.path.exists(path):
                            with Image.open(path) as img:
                                image_entry["width"], image_entry["height"] = img.size
                    except Exception:
                        # If PIL fails or is unavailable, use default dimensions
                        pass

                    images.append(image_entry)

                # Update the model's civitai.images field
                model['civitai']['images'] = images

                # Save metadata to .metadata.json file
                file_path = model.get('file_path')
                try:
                    # Create a copy of model data without the 'folder' field
                    model_copy = model.copy()
                    model_copy.pop('folder', None)

                    # Write metadata to file
                    await MetadataManager.save_metadata(file_path, model_copy)
                    logger.info(f"Saved metadata for {model.get('model_name')}")
                except Exception as e:
                    logger.error(f"Failed to save metadata for {model.get('model_name')}: {str(e)}")

                # Save updated metadata to scanner cache
                success = await scanner.update_single_model_cache(file_path, file_path, model)
                if success:
                    logger.info(f"Successfully updated metadata for {model.get('model_name')} with {len(images)} local examples")
                    return True
                else:
                    logger.warning(f"Failed to update metadata for {model.get('model_name')}")

            return False
        except Exception as e:
            logger.error(f"Error updating metadata from local examples: {str(e)}", exc_info=True)
            return False

    @staticmethod
    async def update_metadata_after_import(model_hash, model_data, scanner, newly_imported_paths):
        """Update model metadata after importing example images

        Args:
            model_hash: SHA256 hash of the model
            model_data: Model data dictionary
            scanner: Scanner instance (lora or checkpoint)
            newly_imported_paths: List of (path, short_id) tuples for newly imported files

        Returns:
            tuple: (regular_images, custom_images) - Both image arrays
        """
        try:
            # Ensure civitai field exists in model_data
            if not model_data.get('civitai'):
                model_data['civitai'] = {}

            # Ensure customImages array exists
            if not model_data['civitai'].get('customImages'):
                model_data['civitai']['customImages'] = []

            # Get current customImages array
            custom_images = model_data['civitai']['customImages']

            # Add a new image entry for each imported file
            for path_tuple in newly_imported_paths:
                path, short_id = path_tuple

                # Determine if video or image
                file_ext = os.path.splitext(path)[1].lower()
                is_video = file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']

                # Create image metadata entry
                image_entry = {
                    "url": "",  # Empty URL as requested
                    "id": short_id,
                    "nsfwLevel": 0,
                    "width": 720,  # Default dimensions
                    "height": 1280,
                    "type": "video" if is_video else "image",
                    "meta": None,
                    "hasMeta": False,
                    "hasPositivePrompt": False
                }

                # Extract and parse metadata if this is an image
                if not is_video:
                    try:
                        # Extract metadata from image
                        extracted_metadata = ExifUtils.extract_image_metadata(path)

                        if extracted_metadata:
                            # Parse the extracted metadata to get generation parameters
                            parsed_meta = MetadataUpdater._parse_image_metadata(extracted_metadata)

                            if parsed_meta:
                                image_entry["meta"] = parsed_meta
                                image_entry["hasMeta"] = True
                                image_entry["hasPositivePrompt"] = bool(parsed_meta.get("prompt", ""))
                                logger.debug(f"Extracted metadata from {os.path.basename(path)}")
                    except Exception as e:
                        logger.warning(f"Failed to extract metadata from {os.path.basename(path)}: {e}")

                # If it's an image, try to get actual dimensions
                try:
                    from PIL import Image
                    if not is_video and os.path.exists(path):
                        with Image.open(path) as img:
                            image_entry["width"], image_entry["height"] = img.size
                except Exception:
                    # If PIL fails or is unavailable, use default dimensions
                    pass

                # Append to the existing customImages array
                custom_images.append(image_entry)

            # Save metadata to .metadata.json file
            file_path = model_data.get('file_path')
            if file_path:
                try:
                    # Create a copy of model data without the 'folder' field
                    model_copy = model_data.copy()
                    model_copy.pop('folder', None)

                    # Write metadata to file
                    await MetadataManager.save_metadata(file_path, model_copy)
                    logger.info(f"Saved metadata for {model_data.get('model_name')}")
                except Exception as e:
                    logger.error(f"Failed to save metadata: {str(e)}")

            # Save updated metadata to scanner cache
            if file_path:
                await scanner.update_single_model_cache(file_path, file_path, model_data)

            # Get regular images array (might be None)
            regular_images = model_data['civitai'].get('images', [])

            # Return both image arrays
            return regular_images, custom_images

        except Exception as e:
            logger.error(f"Failed to update metadata after import: {e}", exc_info=True)
            return [], []

    @staticmethod
    def _parse_image_metadata(user_comment):
        """Parse metadata from an image to extract generation parameters

        Args:
            user_comment: Metadata string extracted from the image

        Returns:
            dict: Parsed metadata with generation parameters, or None
        """
        if not user_comment:
            return None

        try:
            # Initialize metadata dictionary
            metadata = {}

            # Split on "Negative prompt:" if it exists
            if "Negative prompt:" in user_comment:
                parts = user_comment.split('Negative prompt:', 1)
                prompt = parts[0].strip()
                negative_and_params = parts[1] if len(parts) > 1 else ""
            else:
                # No negative prompt section
                param_start = re.search(r'Steps: \d+', user_comment)
                if param_start:
                    prompt = user_comment[:param_start.start()].strip()
                    negative_and_params = user_comment[param_start.start():]
                else:
                    prompt = user_comment.strip()
                    negative_and_params = ""

            # Add prompt if it's in GEN_PARAM_KEYS
            if 'prompt' in GEN_PARAM_KEYS:
                metadata['prompt'] = prompt

            # Extract negative prompt and parameters
            if negative_and_params:
                # If we split on "Negative prompt:", check for a params section
                if "Negative prompt:" in user_comment:
                    param_start = re.search(r'Steps: ', negative_and_params)
                    if param_start:
                        neg_prompt = negative_and_params[:param_start.start()].strip()
                        if 'negative_prompt' in GEN_PARAM_KEYS:
                            metadata['negative_prompt'] = neg_prompt
                        params_section = negative_and_params[param_start.start():]
                    else:
                        if 'negative_prompt' in GEN_PARAM_KEYS:
                            metadata['negative_prompt'] = negative_and_params.strip()
                        params_section = ""
                else:
                    # No negative prompt, the entire section is params
                    params_section = negative_and_params

                # Extract generation parameters
                if params_section:
                    # Extract basic parameters
                    param_pattern = r'([A-Za-z\s]+): ([^,]+)'
                    params = re.findall(param_pattern, params_section)

                    for key, value in params:
                        clean_key = key.strip().lower().replace(' ', '_')

                        # Skip if not in the recognized gen param keys
                        if clean_key not in GEN_PARAM_KEYS:
                            continue

                        # Convert numeric values
                        if clean_key in ['steps', 'seed']:
                            try:
                                metadata[clean_key] = int(value.strip())
                            except ValueError:
                                metadata[clean_key] = value.strip()
                        elif clean_key in ['cfg_scale']:
                            try:
                                metadata[clean_key] = float(value.strip())
                            except ValueError:
                                metadata[clean_key] = value.strip()
                        else:
                            metadata[clean_key] = value.strip()

                    # Extract size if available and add it if it's a recognized key
                    size_match = re.search(r'Size: (\d+)x(\d+)', params_section)
                    if size_match and 'size' in GEN_PARAM_KEYS:
                        width, height = size_match.groups()
                        metadata['size'] = f"{width}x{height}"

            # Return metadata if we have any entries
            return metadata if metadata else None

        except Exception as e:
            logger.error(f"Error parsing image metadata: {e}", exc_info=True)
            return None
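To make _parse_image_metadata concrete, a small worked example with an A1111-style parameter string; the exact keys retained depend on what GEN_PARAM_KEYS contains, so the expected output below assumes all of these keys are recognized:

sample_comment = (
    "a cat wearing a space suit\n"
    "Negative prompt: blurry, low quality\n"
    "Steps: 30, Sampler: Euler a, CFG scale: 7.0, Seed: 1234, Size: 512x768"
)
meta = MetadataUpdater._parse_image_metadata(sample_comment)
# Assuming all of these keys are in GEN_PARAM_KEYS, meta comes out roughly as:
# {'prompt': 'a cat wearing a space suit',
#  'negative_prompt': 'blurry, low quality',
#  'steps': 30, 'sampler': 'Euler a', 'cfg_scale': 7.0,
#  'seed': 1234, 'size': '512x768'}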
318  py/utils/example_images_migration.py  Normal file
@@ -0,0 +1,318 @@
import asyncio
import logging
import os
import re
import json
from ..services.settings_manager import settings
from ..services.service_registry import ServiceRegistry
from ..utils.metadata_manager import MetadataManager
from ..utils.example_images_processor import ExampleImagesProcessor
from ..utils.constants import SUPPORTED_MEDIA_EXTENSIONS

logger = logging.getLogger(__name__)

CURRENT_NAMING_VERSION = 2  # Increment this when naming conventions change

class ExampleImagesMigration:
    """Handles migrations for example images naming conventions"""

    @staticmethod
    async def check_and_run_migrations():
        """Check if migrations are needed and run them in background"""
        example_images_path = settings.get('example_images_path')
        if not example_images_path or not os.path.exists(example_images_path):
            logger.debug("No example images path configured or path doesn't exist, skipping migrations")
            return

        # Check current version from progress file
        current_version = 0
        progress_file = os.path.join(example_images_path, '.download_progress.json')
        if os.path.exists(progress_file):
            try:
                with open(progress_file, 'r', encoding='utf-8') as f:
                    progress_data = json.load(f)
                    current_version = progress_data.get('naming_version', 0)
            except Exception as e:
                logger.error(f"Failed to load progress file for migration check: {e}")

        # If current version is less than target version, start migration
        if current_version < CURRENT_NAMING_VERSION:
            logger.info(f"Starting example images naming migration from v{current_version} to v{CURRENT_NAMING_VERSION}")
            # Start migration in background task
            asyncio.create_task(
                ExampleImagesMigration.run_migrations(example_images_path, current_version, CURRENT_NAMING_VERSION)
            )

    @staticmethod
    async def run_migrations(example_images_path, from_version, to_version):
        """Run necessary migrations based on version difference"""
        try:
            # Get all model folders
            model_folders = []
            for item in os.listdir(example_images_path):
                item_path = os.path.join(example_images_path, item)
                if os.path.isdir(item_path) and len(item) == 64:  # SHA256 hash is 64 chars
                    model_folders.append(item_path)

            logger.info(f"Found {len(model_folders)} model folders to check for migration")

            # Apply migrations sequentially
            if from_version < 1 and to_version >= 1:
                await ExampleImagesMigration._migrate_to_v1(model_folders)

            if from_version < 2 and to_version >= 2:
                await ExampleImagesMigration._migrate_to_v2(model_folders)

            # Update version in progress file
            progress_file = os.path.join(example_images_path, '.download_progress.json')
            try:
                progress_data = {}
                if os.path.exists(progress_file):
                    with open(progress_file, 'r', encoding='utf-8') as f:
                        progress_data = json.load(f)

                progress_data['naming_version'] = to_version

                with open(progress_file, 'w', encoding='utf-8') as f:
                    json.dump(progress_data, f, indent=2)

                logger.info(f"Example images naming migration to v{to_version} completed")

            except Exception as e:
                logger.error(f"Failed to update version in progress file: {e}")

        except Exception as e:
            logger.error(f"Error during migration: {e}", exc_info=True)

    @staticmethod
    async def _migrate_to_v1(model_folders):
        """Migrate from 1-based to 0-based indexing"""
        count = 0
        for folder in model_folders:
            has_one_based = False
            has_zero_based = False
            files_to_rename = []

            # Check naming pattern in this folder
            for file in os.listdir(folder):
                if re.match(r'image_1\.\w+$', file):
                    has_one_based = True
                if re.match(r'image_0\.\w+$', file):
                    has_zero_based = True

            # Only migrate folders with 1-based indexing and no 0-based
            if has_one_based and not has_zero_based:
                # Create rename mapping
                for file in os.listdir(folder):
                    match = re.match(r'image_(\d+)\.(\w+)$', file)
                    if match:
                        index = int(match.group(1))
                        ext = match.group(2)
                        if index > 0:  # Only rename if index is positive
                            files_to_rename.append((
                                file,
                                f"image_{index-1}.{ext}"
                            ))

                # Use temporary names to avoid conflicts
                for old_name, new_name in files_to_rename:
                    old_path = os.path.join(folder, old_name)
                    temp_path = os.path.join(folder, f"temp_{old_name}")
                    try:
                        os.rename(old_path, temp_path)
                    except Exception as e:
                        logger.error(f"Failed to rename {old_path} to {temp_path}: {e}")

                # Rename from temporary names to final names
                for old_name, new_name in files_to_rename:
                    temp_path = os.path.join(folder, f"temp_{old_name}")
                    new_path = os.path.join(folder, new_name)
                    try:
                        os.rename(temp_path, new_path)
                        logger.debug(f"Renamed {old_name} to {new_name} in {folder}")
                    except Exception as e:
                        logger.error(f"Failed to rename {temp_path} to {new_path}: {e}")

                count += 1

            # Give other tasks a chance to run
            if count % 10 == 0:
                await asyncio.sleep(0)

        logger.info(f"Migrated {count} folders from 1-based to 0-based indexing")

    @staticmethod
    async def _migrate_to_v2(model_folders):
        """
        Migrate to v2 naming scheme:
        - Move custom examples from images array to customImages array
        - Rename files from image_<index>.<ext> to custom_<short_id>.<ext>
        - Add id field to each custom image entry
        """
        count = 0
        updated_models = 0
        migration_errors = 0

        # Get scanner instances
        lora_scanner = await ServiceRegistry.get_lora_scanner()
        checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()

        # Wait until scanners are initialized
        scanners = [lora_scanner, checkpoint_scanner]
        for scanner in scanners:
            if scanner.is_initializing():
                logger.info("Waiting for scanners to complete initialization before starting migration...")
                initialized = False
                retry_count = 0
                while not initialized and retry_count < 120:  # Wait up to 120 seconds
                    await asyncio.sleep(1)
                    initialized = not scanner.is_initializing()
                    retry_count += 1

                if not initialized:
                    logger.warning("Scanner initialization timeout - proceeding with migration anyway")

        logger.info(f"Starting migration to v2 naming scheme for {len(model_folders)} model folders")

        for folder in model_folders:
            try:
                # Extract model hash from folder name
                model_hash = os.path.basename(folder)
                if not model_hash or len(model_hash) != 64:
                    continue

                # Find the model in scanner cache
                model_data = None
                scanner = None

                for scan_obj in scanners:
                    if scan_obj.has_hash(model_hash):
                        cache = await scan_obj.get_cached_data()
                        for item in cache.raw_data:
                            if item.get('sha256') == model_hash:
                                model_data = item
                                scanner = scan_obj
                                break
                    if model_data:
                        break

                if not model_data or not scanner:
                    logger.debug(f"Model with hash {model_hash} not found in cache, skipping migration")
                    continue

                # Clone model data to avoid modifying the cache directly
                model_metadata = model_data.copy()

                # Check if model has civitai metadata
                if not model_metadata.get('civitai'):
                    continue

                # Get images array
                images = model_metadata.get('civitai', {}).get('images', [])
                if not images:
                    continue

                # Initialize customImages array if it doesn't exist
                if not model_metadata['civitai'].get('customImages'):
                    model_metadata['civitai']['customImages'] = []

                # Find custom examples (entries with empty url)
                custom_indices = []
                for i, image in enumerate(images):
                    if image.get('url') == "":
                        custom_indices.append(i)

                if not custom_indices:
                    continue

                logger.debug(f"Found {len(custom_indices)} custom examples in {model_hash}")

                # Process each custom example
                for index in custom_indices:
                    try:
                        image_entry = images[index]

                        # Determine media type based on the entry type
                        media_type = 'videos' if image_entry.get('type') == 'video' else 'images'
                        extensions_to_try = SUPPORTED_MEDIA_EXTENSIONS[media_type]

                        # Find the image file by trying possible extensions
                        old_path = None
                        old_filename = None
                        found = False

                        for ext in extensions_to_try:
                            test_path = os.path.join(folder, f"image_{index}{ext}")
                            if os.path.exists(test_path):
                                old_path = test_path
                                old_filename = f"image_{index}{ext}"
                                found = True
                                break

                        if not found:
                            logger.warning(f"Could not find file for index {index} in {model_hash}, skipping")
                            continue

                        # Generate short ID for the custom example
                        short_id = ExampleImagesProcessor.generate_short_id()

                        # Get file extension
                        file_ext = os.path.splitext(old_path)[1]

                        # Create new filename
                        new_filename = f"custom_{short_id}{file_ext}"
                        new_path = os.path.join(folder, new_filename)

                        # Rename the file
                        try:
                            os.rename(old_path, new_path)
                            logger.debug(f"Renamed {old_filename} to {new_filename} in {folder}")
                        except Exception as e:
                            logger.error(f"Failed to rename {old_path} to {new_path}: {e}")
                            continue

                        # Create a copy of the image entry with the id field
                        custom_entry = image_entry.copy()
                        custom_entry['id'] = short_id

                        # Add to customImages array
                        model_metadata['civitai']['customImages'].append(custom_entry)

                        count += 1

                    except Exception as e:
                        logger.error(f"Error migrating custom example at index {index} for {model_hash}: {e}")

                # Remove custom examples from the original images array
                model_metadata['civitai']['images'] = [
                    img for i, img in enumerate(images) if i not in custom_indices
                ]

                # Save the updated metadata
                file_path = model_data.get('file_path')
                if file_path:
                    try:
                        # Create a copy of model data without 'folder' field
                        model_copy = model_metadata.copy()
                        model_copy.pop('folder', None)

                        # Save metadata to file
                        await MetadataManager.save_metadata(file_path, model_copy)

                        # Update scanner cache
                        await scanner.update_single_model_cache(file_path, file_path, model_metadata)

                        updated_models += 1
                    except Exception as e:
                        logger.error(f"Failed to save metadata for {model_hash}: {e}")
                        migration_errors += 1

                # Give other tasks a chance to run
                if count % 10 == 0:
                    await asyncio.sleep(0)

            except Exception as e:
                logger.error(f"Error migrating folder {folder}: {e}")
                migration_errors += 1

        logger.info(f"Migration to v2 complete: migrated {count} custom examples across {updated_models} models with {migration_errors} errors")
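The v1 migration shifts every index down by one, and since os.listdir gives no ordering guarantee, a direct rename of image_2 to image_1 could clobber a not-yet-moved image_1. Routing every file through a temporary name first avoids that. The same pattern in isolation, as a standalone sketch:

import os

def shift_indices_down(folder, renames):
    """Collision-safe renames: move via temp names so no target is overwritten.

    renames is a list of (old_name, new_name) tuples, e.g.
    [("image_1.webp", "image_0.webp"), ("image_2.webp", "image_1.webp")].
    """
    # Phase 1: move every source out of the way
    for old_name, _ in renames:
        os.rename(os.path.join(folder, old_name),
                  os.path.join(folder, f"temp_{old_name}"))
    # Phase 2: move into the final positions
    for old_name, new_name in renames:
        os.rename(os.path.join(folder, f"temp_{old_name}"),
                  os.path.join(folder, new_name))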
496  py/utils/example_images_processor.py  Normal file
@@ -0,0 +1,496 @@
import logging
import os
import re
import tempfile
import random
import string
from aiohttp import web
from ..utils.constants import SUPPORTED_MEDIA_EXTENSIONS
from ..services.service_registry import ServiceRegistry
from ..services.settings_manager import settings
from .example_images_metadata import MetadataUpdater
from ..utils.metadata_manager import MetadataManager

logger = logging.getLogger(__name__)

class ExampleImagesProcessor:
    """Processes and manipulates example images"""

    @staticmethod
    def generate_short_id(length=8):
        """Generate a short random alphanumeric identifier"""
        chars = string.ascii_lowercase + string.digits
        return ''.join(random.choice(chars) for _ in range(length))

    @staticmethod
    def get_civitai_optimized_url(image_url):
        """Convert a Civitai image URL to its optimized WebP version"""
        base_pattern = r'(https://image\.civitai\.com/[^/]+/[^/]+)'
        match = re.match(base_pattern, image_url)

        if match:
            base_url = match.group(1)
            return f"{base_url}/optimized=true/image.webp"

        return image_url

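A quick worked example of the rewrite above: the regex captures the scheme, host, and first two path segments of the Civitai CDN URL, and everything after them is replaced with the pre-optimized WebP variant (the URL below is illustrative, not a real image):

url = ("https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/"
       "0b8b2c48-0000-0000-0000-000000000000/width=1024/12345.jpeg")
print(ExampleImagesProcessor.get_civitai_optimized_url(url))
# -> https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/0b8b2c48-0000-0000-0000-000000000000/optimized=true/image.webp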
    @staticmethod
    async def download_model_images(model_hash, model_name, model_images, model_dir, optimize, independent_session):
        """Download images for a single model

        Returns:
            tuple: (success, is_stale_metadata) - whether the download was successful, and whether the metadata is stale
        """
        model_success = True

        for i, image in enumerate(model_images):
            image_url = image.get('url')
            if not image_url:
                continue

            # Get the image filename from the URL
            image_filename = os.path.basename(image_url.split('?')[0])
            image_ext = os.path.splitext(image_filename)[1].lower()

            # Handle images and videos
            is_image = image_ext in SUPPORTED_MEDIA_EXTENSIONS['images']
            is_video = image_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']

            if not (is_image or is_video):
                logger.debug(f"Skipping unsupported file type: {image_filename}")
                continue

            # Use 0-based indexing instead of 1-based indexing
            save_filename = f"image_{i}{image_ext}"

            # If optimizing images and this is a Civitai image, use their pre-optimized WebP version
            if is_image and optimize and 'civitai.com' in image_url:
                image_url = ExampleImagesProcessor.get_civitai_optimized_url(image_url)
                save_filename = f"image_{i}.webp"

            # Check if already downloaded
            save_path = os.path.join(model_dir, save_filename)
            if os.path.exists(save_path):
                logger.debug(f"File already exists: {save_path}")
                continue

            # Download the file
            try:
                logger.debug(f"Downloading {save_filename} for {model_name}")

                # Download directly using the independent session
                async with independent_session.get(image_url, timeout=60) as response:
                    if response.status == 200:
                        with open(save_path, 'wb') as f:
                            async for chunk in response.content.iter_chunked(8192):
                                if chunk:
                                    f.write(chunk)
                    elif response.status == 404:
                        error_msg = f"Failed to download file: {image_url}, status code: 404 - Model metadata might be stale"
                        logger.warning(error_msg)
                        model_success = False  # Mark the model as failed due to the 404 error
                        # Return early to trigger a metadata refresh attempt
                        return False, True  # (success, is_metadata_stale)
                    else:
                        error_msg = f"Failed to download file: {image_url}, status code: {response.status}"
                        logger.warning(error_msg)
                        model_success = False  # Mark the model as failed
            except Exception as e:
                error_msg = f"Error downloading file {image_url}: {str(e)}"
                logger.error(error_msg)
                model_success = False  # Mark the model as failed

        return model_success, False  # (success, is_metadata_stale)

    @staticmethod
    async def process_local_examples(model_file_path, model_file_name, model_name, model_dir, optimize):
        """Process local example images

        Returns:
            bool: True if local images were processed successfully, False otherwise
        """
        try:
            if not model_file_path or not os.path.exists(os.path.dirname(model_file_path)):
                return False

            model_dir_path = os.path.dirname(model_file_path)
            local_images = []

            # Look for files with the pattern: filename.example.*.ext
            if model_file_name:
                example_prefix = f"{model_file_name}.example."

                if os.path.exists(model_dir_path):
                    for file in os.listdir(model_dir_path):
                        file_lower = file.lower()
                        if file_lower.startswith(example_prefix.lower()):
                            file_ext = os.path.splitext(file_lower)[1]
                            is_supported = (file_ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or
                                            file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos'])

                            if is_supported:
                                local_images.append(os.path.join(model_dir_path, file))

            # Process local images if found
            if local_images:
                logger.info(f"Found {len(local_images)} local example images for {model_name}")

                for local_image_path in local_images:
                    # Extract the index from the filename
                    file_name = os.path.basename(local_image_path)
                    example_prefix = f"{model_file_name}.example."

                    try:
                        # Extract the part between '.example.' and the file extension
                        index_part = file_name[len(example_prefix):].split('.')[0]
                        # Try to parse it as an integer
                        index = int(index_part)
                        local_ext = os.path.splitext(local_image_path)[1].lower()
                        save_filename = f"image_{index}{local_ext}"
                    except (ValueError, IndexError):
                        # If we can't parse the index, fall back to sequential numbering
                        logger.warning(f"Could not extract index from {file_name}, using sequential numbering")
                        local_ext = os.path.splitext(local_image_path)[1].lower()
                        save_filename = f"image_{len(local_images)}{local_ext}"

                    save_path = os.path.join(model_dir, save_filename)

                    # Skip if it already exists in the output directory
                    if os.path.exists(save_path):
                        logger.debug(f"File already exists in output: {save_path}")
                        continue

                    # Copy the file
                    with open(local_image_path, 'rb') as src_file:
                        with open(save_path, 'wb') as dst_file:
                            dst_file.write(src_file.read())

                return True
            return False
        except Exception as e:
            logger.error(f"Error processing local examples for {model_name}: {str(e)}")
            return False

    @staticmethod
    async def import_images(request):
        """
        Import local example images

        Accepts:
        - a multipart/form-data form with model_hash and files fields
          or
        - a JSON request with model_hash and file_paths

        Returns:
        - Success status and a list of imported files
        """
        try:
            model_hash = None
            files_to_import = []
            temp_files_to_cleanup = []

            # Check if it's a multipart form-data request (direct file upload)
            if request.content_type and 'multipart/form-data' in request.content_type:
                reader = await request.multipart()

                # First get model_hash
                field = await reader.next()
                if field.name == 'model_hash':
                    model_hash = await field.text()

                # Then process all files
                while True:
                    field = await reader.next()
                    if field is None:
                        break

                    if field.name == 'files':
                        # Create a temporary file with an appropriate suffix for type detection
                        file_name = field.filename
                        file_ext = os.path.splitext(file_name)[1].lower()

                        with tempfile.NamedTemporaryFile(suffix=file_ext, delete=False) as tmp_file:
                            temp_path = tmp_file.name
                            temp_files_to_cleanup.append(temp_path)  # Track for cleanup

                            # Write chunks to the temporary file
                            while True:
                                chunk = await field.read_chunk()
                                if not chunk:
                                    break
                                tmp_file.write(chunk)

                        # Add to the list of files to process
                        files_to_import.append(temp_path)
            else:
                # Parse a JSON request (legacy method using file paths)
                data = await request.json()
                model_hash = data.get('model_hash')
                files_to_import = data.get('file_paths', [])

            if not model_hash:
                return web.json_response({
                    'success': False,
                    'error': 'Missing model_hash parameter'
                }, status=400)

            if not files_to_import:
                return web.json_response({
                    'success': False,
                    'error': 'No files provided to import'
                }, status=400)

            # Get the example images path
            example_images_path = settings.get('example_images_path')
            if not example_images_path:
                return web.json_response({
                    'success': False,
                    'error': 'No example images path configured'
                }, status=400)

            # Find the model and get its current metadata
            lora_scanner = await ServiceRegistry.get_lora_scanner()
            checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
            embedding_scanner = await ServiceRegistry.get_embedding_scanner()

            model_data = None
            scanner = None

            # Check each scanner to find the model
            for scan_obj in [lora_scanner, checkpoint_scanner, embedding_scanner]:
                cache = await scan_obj.get_cached_data()
                for item in cache.raw_data:
                    if item.get('sha256') == model_hash:
                        model_data = item
                        scanner = scan_obj
                        break
                if model_data:
                    break

            if not model_data:
                return web.json_response({
                    'success': False,
                    'error': f"Model with hash {model_hash} not found in cache"
                }, status=404)

            # Create the model folder
            model_folder = os.path.join(example_images_path, model_hash)
            os.makedirs(model_folder, exist_ok=True)

            imported_files = []
            errors = []
            newly_imported_paths = []

            # Process each file path
            for file_path in files_to_import:
                try:
                    # Ensure the file exists
                    if not os.path.isfile(file_path):
                        errors.append(f"File not found: {file_path}")
                        continue

                    # Check if the file type is supported
                    file_ext = os.path.splitext(file_path)[1].lower()
                    if not (file_ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or
                            file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']):
                        errors.append(f"Unsupported file type: {file_path}")
                        continue

                    # Generate a new filename using a short ID instead of a UUID
                    short_id = ExampleImagesProcessor.generate_short_id()
                    new_filename = f"custom_{short_id}{file_ext}"

                    dest_path = os.path.join(model_folder, new_filename)

                    # Copy the file
                    import shutil
                    shutil.copy2(file_path, dest_path)
                    # Store both the dest_path and the short_id
                    newly_imported_paths.append((dest_path, short_id))

                    # Add to the imported files list
                    imported_files.append({
                        'name': new_filename,
                        'path': f'/example_images_static/{model_hash}/{new_filename}',
                        'extension': file_ext,
                        'is_video': file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']
                    })
                except Exception as e:
                    errors.append(f"Error importing {file_path}: {str(e)}")

            # Update metadata with the new example images
            regular_images, custom_images = await MetadataUpdater.update_metadata_after_import(
                model_hash,
                model_data,
                scanner,
                newly_imported_paths
            )

            return web.json_response({
                'success': len(imported_files) > 0,
                'message': f'Successfully imported {len(imported_files)} files' +
                           (f' with {len(errors)} errors' if errors else ''),
                'files': imported_files,
                'errors': errors,
                'regular_images': regular_images,
                'custom_images': custom_images,
                'model_file_path': model_data.get('file_path', ''),
            })

        except Exception as e:
            logger.error(f"Failed to import example images: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)
        finally:
            # Clean up temporary files
            for temp_file in temp_files_to_cleanup:
                try:
                    os.remove(temp_file)
                except Exception as e:
                    logger.error(f"Failed to remove temporary file {temp_file}: {e}")

    @staticmethod
    async def delete_custom_image(request):
        """
        Delete a custom example image for a model

        Accepts:
        - a JSON request with model_hash and short_id

        Returns:
        - Success status and the updated image lists
        """
        try:
            # Parse the request data
            data = await request.json()
            model_hash = data.get('model_hash')
            short_id = data.get('short_id')

            if not model_hash or not short_id:
                return web.json_response({
                    'success': False,
                    'error': 'Missing required parameters: model_hash and short_id'
                }, status=400)

            # Get the example images path
            example_images_path = settings.get('example_images_path')
            if not example_images_path:
                return web.json_response({
                    'success': False,
                    'error': 'No example images path configured'
                }, status=400)

            # Find the model and get its current metadata
            lora_scanner = await ServiceRegistry.get_lora_scanner()
            checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
            embedding_scanner = await ServiceRegistry.get_embedding_scanner()

            model_data = None
            scanner = None

            # Check each scanner to find the model
            for scan_obj in [lora_scanner, checkpoint_scanner, embedding_scanner]:
                if scan_obj.has_hash(model_hash):
                    cache = await scan_obj.get_cached_data()
                    for item in cache.raw_data:
                        if item.get('sha256') == model_hash:
                            model_data = item
                            scanner = scan_obj
                            break
                if model_data:
                    break

            if not model_data:
                return web.json_response({
                    'success': False,
                    'error': f"Model with hash {model_hash} not found in cache"
                }, status=404)

            # Check if the model has custom images
            if not model_data.get('civitai', {}).get('customImages'):
                return web.json_response({
                    'success': False,
                    'error': "Model has no custom images"
                }, status=404)

            # Find the custom image with the matching short_id
            custom_images = model_data['civitai']['customImages']
            matching_image = None
            new_custom_images = []

            for image in custom_images:
                if image.get('id') == short_id:
                    matching_image = image
                else:
                    new_custom_images.append(image)

            if not matching_image:
                return web.json_response({
                    'success': False,
                    'error': f"Custom image with id {short_id} not found"
                }, status=404)

            # Find and delete the actual file
            model_folder = os.path.join(example_images_path, model_hash)
            file_deleted = False

            if os.path.exists(model_folder):
                for filename in os.listdir(model_folder):
                    if f"custom_{short_id}" in filename:
                        file_path = os.path.join(model_folder, filename)
                        try:
                            os.remove(file_path)
                            file_deleted = True
                            logger.info(f"Deleted custom example file: {file_path}")
                            break
                        except Exception as e:
                            return web.json_response({
                                'success': False,
                                'error': f"Failed to delete file: {str(e)}"
                            }, status=500)

            if not file_deleted:
                logger.warning(f"File for custom example with id {short_id} not found, but metadata will still be updated")

            # Update metadata
            model_data['civitai']['customImages'] = new_custom_images

            # Save the updated metadata to file
            file_path = model_data.get('file_path')
            if file_path:
                try:
                    # Create a copy of model data without the 'folder' field
                    model_copy = model_data.copy()
                    model_copy.pop('folder', None)

                    # Write metadata to file
                    await MetadataManager.save_metadata(file_path, model_copy)
                    logger.debug(f"Saved updated metadata for {model_data.get('model_name')}")
                except Exception as e:
                    logger.error(f"Failed to save metadata: {str(e)}")
                    return web.json_response({
                        'success': False,
                        'error': f"Failed to save metadata: {str(e)}"
                    }, status=500)

            # Update the cache
            await scanner.update_single_model_cache(file_path, file_path, model_data)

            # Get the regular images array (might be None)
            regular_images = model_data['civitai'].get('images', [])

            return web.json_response({
                'success': True,
                'regular_images': regular_images,
                'custom_images': new_custom_images,
                'model_file_path': model_data.get('file_path', '')
            })

        except Exception as e:
            logger.error(f"Failed to delete custom example image: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)
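A hedged sketch of driving import_images with a multipart upload. The route path is an assumption (route registration is not part of this diff), while the field names and their ordering follow the reader loop above, which expects model_hash as the first field:

import asyncio
import os
import aiohttp

async def import_examples(model_hash, paths):
    # Hypothetical route path; field names match the handler above
    url = "http://127.0.0.1:8188/api/example-images/import"
    form = aiohttp.FormData()
    form.add_field("model_hash", model_hash)  # must come first: the handler reads it before the files
    for p in paths:
        form.add_field("files", open(p, "rb"), filename=os.path.basename(p))
    async with aiohttp.ClientSession() as session:
        async with session.post(url, data=form) as resp:
            return await resp.json()

# asyncio.run(import_examples("<64-char sha256>", ["cat.png", "clip.mp4"]))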
@@ -31,7 +31,7 @@ class ExifUtils:
        # Method 2: Check EXIF UserComment field
        if img.format not in ['JPEG', 'TIFF', 'WEBP']:
            # For non-JPEG/TIFF/WEBP images, try to get EXIF through PIL
            exif = img._getexif()
            exif = img.getexif()
            if exif and piexif.ExifIFD.UserComment in exif:
                user_comment = exif[piexif.ExifIFD.UserComment]
                if isinstance(user_comment, bytes):
@@ -147,7 +147,7 @@ class ExifUtils:
            "file_name": lora.get("file_name", ""),
            "hash": lora.get("hash", "").lower() if lora.get("hash") else "",
            "strength": float(lora.get("strength", 1.0)),
            "modelVersionId": lora.get("modelVersionId", ""),
            "modelVersionId": lora.get("modelVersionId", 0),
            "modelName": lora.get("modelName", ""),
            "modelVersionName": lora.get("modelVersionName", ""),
        }
@@ -203,7 +203,7 @@ class ExifUtils:
        return user_comment[:recipe_marker_index] + user_comment[next_line_index:]

    @staticmethod
    def optimize_image(image_data, target_width=250, format='webp', quality=85, preserve_metadata=True):
    def optimize_image(image_data, target_width=250, format='webp', quality=85, preserve_metadata=False):
        """
        Optimize an image by resizing and converting to WebP format
@@ -218,98 +218,144 @@ class ExifUtils:
            Tuple of (optimized_image_data, extension)
        """
        try:
            # Extract metadata if needed
            # First validate the image data is usable
            img = None
            if isinstance(image_data, str) and os.path.exists(image_data):
                # It's a file path - validate file
                try:
                    with Image.open(image_data) as test_img:
                        # Verify the image can be fully loaded by accessing its size
                        width, height = test_img.size
                    # If we got here, the image is valid
                    img = Image.open(image_data)
                except (IOError, OSError) as e:
                    logger.error(f"Invalid or corrupt image file: {image_data}: {e}")
                    raise ValueError(f"Cannot process corrupt image: {e}")
            else:
                # It's binary data - validate data
                try:
                    with BytesIO(image_data) as temp_buf:
                        test_img = Image.open(temp_buf)
                        # Verify the image can be fully loaded
                        width, height = test_img.size
                    # If successful, reopen for processing
                    img = Image.open(BytesIO(image_data))
                except Exception as e:
                    logger.error(f"Invalid binary image data: {e}")
                    raise ValueError(f"Cannot process corrupt image data: {e}")

            # Extract metadata if needed and valid
            metadata = None
            if preserve_metadata:
                if isinstance(image_data, str) and os.path.exists(image_data):
                    # It's a file path
                    metadata = ExifUtils.extract_image_metadata(image_data)
                    img = Image.open(image_data)
                else:
                    # It's binary data
                    temp_img = BytesIO(image_data)
                    img = Image.open(temp_img)
                    # Save to a temporary file to extract metadata
                    import tempfile
                    with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as temp_file:
                        temp_path = temp_file.name
                        temp_file.write(image_data)
                    metadata = ExifUtils.extract_image_metadata(temp_path)
                    os.unlink(temp_path)
            else:
                # Just open the image without extracting metadata
                if isinstance(image_data, str) and os.path.exists(image_data):
                    img = Image.open(image_data)
                else:
                    img = Image.open(BytesIO(image_data))

                try:
                    if isinstance(image_data, str) and os.path.exists(image_data):
                        # For file path, extract directly
                        metadata = ExifUtils.extract_image_metadata(image_data)
                    else:
                        # For binary data, save to temp file first
                        import tempfile
                        with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as temp_file:
                            temp_path = temp_file.name
                            temp_file.write(image_data)
                        try:
                            metadata = ExifUtils.extract_image_metadata(temp_path)
                        except Exception as e:
                            logger.warning(f"Failed to extract metadata from temp file: {e}")
                        finally:
                            # Clean up temp file
                            try:
                                os.unlink(temp_path)
                            except Exception:
                                pass
                except Exception as e:
                    logger.warning(f"Failed to extract metadata, continuing without it: {e}")
                    # Continue without metadata

            # Calculate new height to maintain aspect ratio
            width, height = img.size
            new_height = int(height * (target_width / width))

            # Resize the image
            resized_img = img.resize((target_width, new_height), Image.LANCZOS)
            # Resize the image with error handling
            try:
                resized_img = img.resize((target_width, new_height), Image.LANCZOS)
            except Exception as e:
                logger.error(f"Failed to resize image: {e}")
                # Return original image if resize fails
                return image_data, '.jpg' if not isinstance(image_data, str) else os.path.splitext(image_data)[1]

            # Save to BytesIO in the specified format
            output = BytesIO()

            # WebP format
            # Set format and extension
            if format.lower() == 'webp':
                resized_img.save(output, format='WEBP', quality=quality)
                extension = '.webp'
            # JPEG format
                save_format, extension = 'WEBP', '.webp'
            elif format.lower() in ('jpg', 'jpeg'):
                resized_img.save(output, format='JPEG', quality=quality)
                extension = '.jpg'
            # PNG format
                save_format, extension = 'JPEG', '.jpg'
            elif format.lower() == 'png':
                resized_img.save(output, format='PNG', optimize=True)
                extension = '.png'
                save_format, extension = 'PNG', '.png'
            else:
                # Default to WebP
                resized_img.save(output, format='WEBP', quality=quality)
                extension = '.webp'
                save_format, extension = 'WEBP', '.webp'

            # Save with error handling
            try:
                if save_format == 'PNG':
                    resized_img.save(output, format=save_format, optimize=True)
                else:
                    resized_img.save(output, format=save_format, quality=quality)
            except Exception as e:
                logger.error(f"Failed to save optimized image: {e}")
                # Return original image if save fails
                return image_data, '.jpg' if not isinstance(image_data, str) else os.path.splitext(image_data)[1]

            # Get the optimized image data
            optimized_data = output.getvalue()

            # If we need to preserve metadata, write it to a temporary file
            # Handle metadata preservation if requested and available
            if preserve_metadata and metadata:
                # For WebP format, we'll directly save with metadata
                if format.lower() == 'webp':
                    # Create a new BytesIO with metadata
                    output_with_metadata = BytesIO()

                    # Create EXIF data with user comment
                    exif_dict = {'Exif': {piexif.ExifIFD.UserComment: b'UNICODE\0' + metadata.encode('utf-16be')}}
                    exif_bytes = piexif.dump(exif_dict)

                    # Save with metadata
                    resized_img.save(output_with_metadata, format='WEBP', exif=exif_bytes, quality=quality)
                    optimized_data = output_with_metadata.getvalue()
                else:
                    # For other formats, use the temporary file approach
                    import tempfile
                    with tempfile.NamedTemporaryFile(suffix=extension, delete=False) as temp_file:
                        temp_path = temp_file.name
                        temp_file.write(optimized_data)

                    # Add the metadata back
                    ExifUtils.update_image_metadata(temp_path, metadata)

                    # Read the file with metadata
                    with open(temp_path, 'rb') as f:
                        optimized_data = f.read()

                    # Clean up
                    os.unlink(temp_path)
                try:
                    if save_format == 'WEBP':
                        # For WebP format, directly save with metadata
                        try:
                            output_with_metadata = BytesIO()
                            exif_dict = {'Exif': {piexif.ExifIFD.UserComment: b'UNICODE\0' + metadata.encode('utf-16be')}}
                            exif_bytes = piexif.dump(exif_dict)
                            resized_img.save(output_with_metadata, format='WEBP', exif=exif_bytes, quality=quality)
                            optimized_data = output_with_metadata.getvalue()
                        except Exception as e:
                            logger.warning(f"Failed to add metadata to WebP, continuing without it: {e}")
                    else:
                        # For other formats, use temporary file
                        import tempfile
                        with tempfile.NamedTemporaryFile(suffix=extension, delete=False) as temp_file:
                            temp_path = temp_file.name
                            temp_file.write(optimized_data)

                        try:
                            # Add metadata
                            ExifUtils.update_image_metadata(temp_path, metadata)
                            # Read back the file
                            with open(temp_path, 'rb') as f:
                                optimized_data = f.read()
                        except Exception as e:
                            logger.warning(f"Failed to add metadata to image, continuing without it: {e}")
                        finally:
                            # Clean up temp file
                            try:
                                os.unlink(temp_path)
                            except Exception:
                                pass
                except Exception as e:
                    logger.warning(f"Failed to preserve metadata: {e}, continuing with unmodified output")

            return optimized_data, extension

        except Exception as e:
            logger.error(f"Error optimizing image: {e}", exc_info=True)
            # Return original data if optimization fails
            # Return original data if optimization completely fails
            if isinstance(image_data, str) and os.path.exists(image_data):
                with open(image_data, 'rb') as f:
                    return f.read(), os.path.splitext(image_data)[1]
                try:
                    with open(image_data, 'rb') as f:
                        return f.read(), os.path.splitext(image_data)[1]
                except Exception:
                    return image_data, '.jpg'  # Last resort fallback
            return image_data, '.jpg'
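For orientation, a minimal usage sketch of optimize_image as it stands after this hunk (hedged: the ExifUtils import and the file names are illustrative, not taken from the diff):

# Hypothetical caller; 'input.png' and the output name are placeholders.
with open('input.png', 'rb') as f:
    raw = f.read()

# Returns (bytes, extension); metadata is dropped by default now that
# preserve_metadata defaults to False.
data, ext = ExifUtils.optimize_image(raw, target_width=250, format='webp', quality=85)
with open(f'preview{ext}', 'wb') as f:
    f.write(data)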
@@ -1,13 +1,9 @@
import logging
import os
import hashlib
import json
from typing import Dict, Optional

from .model_utils import determine_base_model

from .lora_metadata import extract_lora_metadata
from .models import LoraMetadata
from .constants import PREVIEW_EXTENSIONS, CARD_PREVIEW_WIDTH
from .exif_utils import ExifUtils

logger = logging.getLogger(__name__)

@@ -15,166 +11,55 @@ async def calculate_sha256(file_path: str) -> str:
    """Calculate SHA256 hash of a file"""
    sha256_hash = hashlib.sha256()
    with open(file_path, "rb") as f:
        for byte_block in iter(lambda: f.read(4096), b""):
        for byte_block in iter(lambda: f.read(128 * 1024), b""):
            sha256_hash.update(byte_block)
    return sha256_hash.hexdigest()
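The hunk above raises the hash read size from 4 KiB to 128 KiB, which mainly cuts Python loop overhead on multi-gigabyte model files. A self-contained sketch of the same chunked-digest pattern (the file name is a placeholder):

import hashlib

def sha256_of(path: str, chunk: int = 128 * 1024) -> str:
    # Read fixed-size blocks so memory stays flat regardless of file size.
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for block in iter(lambda: f.read(chunk), b''):
            h.update(block)
    return h.hexdigest()

# digest = sha256_of('model.safetensors')  # illustrative path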
def find_preview_file(base_name: str, dir_path: str) -> str:
    """Find preview file for given base name in directory"""
    preview_patterns = [
        f"{base_name}.preview.png",
        f"{base_name}.preview.jpg",
        f"{base_name}.preview.jpeg",
        f"{base_name}.preview.mp4",
        f"{base_name}.png",
        f"{base_name}.jpg",
        f"{base_name}.jpeg",
        f"{base_name}.mp4"
    ]

    for pattern in preview_patterns:
        full_pattern = os.path.join(dir_path, pattern)
    temp_extensions = PREVIEW_EXTENSIONS.copy()
    # Add example extension for compatibility
    # https://github.com/willmiao/ComfyUI-Lora-Manager/issues/225
    # The preview image will be optimized to lora-name.webp, so it won't affect other logic
    temp_extensions.append(".example.0.jpeg")
    for ext in temp_extensions:
        full_pattern = os.path.join(dir_path, f"{base_name}{ext}")
        if os.path.exists(full_pattern):
            # Check if this is an image and not already webp
            if ext.lower().endswith(('.jpg', '.jpeg', '.png')) and not ext.lower().endswith('.webp'):
                try:
                    # Optimize the image to webp format
                    webp_path = os.path.join(dir_path, f"{base_name}.webp")

                    # Use ExifUtils to optimize the image
                    with open(full_pattern, 'rb') as f:
                        image_data = f.read()

                    optimized_data, _ = ExifUtils.optimize_image(
                        image_data=image_data,
                        target_width=CARD_PREVIEW_WIDTH,
                        format='webp',
                        quality=85,
                        preserve_metadata=False
                    )

                    # Save the optimized webp file
                    with open(webp_path, 'wb') as f:
                        f.write(optimized_data)

                    logger.debug(f"Optimized preview image from {full_pattern} to {webp_path}")
                    return webp_path.replace(os.sep, "/")
                except Exception as e:
                    logger.error(f"Error optimizing preview image {full_pattern}: {e}")
                    # Fall back to original file if optimization fails
                    return full_pattern.replace(os.sep, "/")

            # Return the original path for webp images or non-image files
            return full_pattern.replace(os.sep, "/")

    return ""

def normalize_path(path: str) -> str:
    """Normalize file path to use forward slashes"""
    return path.replace(os.sep, "/") if path else path

async def get_file_info(file_path: str) -> Optional[LoraMetadata]:
    """Get basic file information as LoraMetadata object"""
    # First check if file actually exists and resolve symlinks
    try:
        real_path = os.path.realpath(file_path)
        if not os.path.exists(real_path):
            return None
    except Exception as e:
        logger.error(f"Error checking file existence for {file_path}: {e}")
        return None

    base_name = os.path.splitext(os.path.basename(file_path))[0]
    dir_path = os.path.dirname(file_path)

    preview_url = find_preview_file(base_name, dir_path)

    # Check if a .json file exists with SHA256 hash to avoid recalculation
    json_path = f"{os.path.splitext(file_path)[0]}.json"
    sha256 = None
    if os.path.exists(json_path):
        try:
            with open(json_path, 'r', encoding='utf-8') as f:
                json_data = json.load(f)
                if 'sha256' in json_data:
                    sha256 = json_data['sha256'].lower()
                    logger.debug(f"Using SHA256 from .json file for {file_path}")
        except Exception as e:
            logger.error(f"Error reading .json file for {file_path}: {e}")

    try:
        # If we didn't get SHA256 from the .json file, calculate it
        if not sha256:
            sha256 = await calculate_sha256(real_path)

        metadata = LoraMetadata(
            file_name=base_name,
            model_name=base_name,
            file_path=normalize_path(file_path),
            size=os.path.getsize(real_path),
            modified=os.path.getmtime(real_path),
            sha256=sha256,
            base_model="Unknown",  # Will be updated later
            usage_tips="",
            notes="",
            from_civitai=True,
            preview_url=normalize_path(preview_url),
            tags=[],
            modelDescription=""
        )

        # create metadata file
        base_model_info = await extract_lora_metadata(real_path)
        metadata.base_model = base_model_info['base_model']
        await save_metadata(file_path, metadata)

        return metadata
    except Exception as e:
        logger.error(f"Error getting file info for {file_path}: {e}")
        return None

async def save_metadata(file_path: str, metadata: LoraMetadata) -> None:
    """Save metadata to .metadata.json file"""
    metadata_path = f"{os.path.splitext(file_path)[0]}.metadata.json"
    try:
        metadata_dict = metadata.to_dict()
        metadata_dict['file_path'] = normalize_path(metadata_dict['file_path'])
        metadata_dict['preview_url'] = normalize_path(metadata_dict['preview_url'])

        with open(metadata_path, 'w', encoding='utf-8') as f:
            json.dump(metadata_dict, f, indent=2, ensure_ascii=False)
    except Exception as e:
        print(f"Error saving metadata to {metadata_path}: {str(e)}")

async def load_metadata(file_path: str) -> Optional[LoraMetadata]:
    """Load metadata from .metadata.json file"""
    metadata_path = f"{os.path.splitext(file_path)[0]}.metadata.json"
    try:
        if os.path.exists(metadata_path):
            with open(metadata_path, 'r', encoding='utf-8') as f:
                data = json.load(f)

            needs_update = False

            # Check and normalize base model name
            normalized_base_model = determine_base_model(data['base_model'])
            if data['base_model'] != normalized_base_model:
                data['base_model'] = normalized_base_model
                needs_update = True

            # Compare paths without extensions
            stored_path_base = os.path.splitext(data['file_path'])[0]
            current_path_base = os.path.splitext(normalize_path(file_path))[0]
            if stored_path_base != current_path_base:
                data['file_path'] = normalize_path(file_path)
                needs_update = True

            preview_url = data.get('preview_url', '')
            if not preview_url or not os.path.exists(preview_url):
                base_name = os.path.splitext(os.path.basename(file_path))[0]
                dir_path = os.path.dirname(file_path)
                new_preview_url = normalize_path(find_preview_file(base_name, dir_path))
                if new_preview_url != preview_url:
                    data['preview_url'] = new_preview_url
                    needs_update = True
            else:
                # Compare preview paths without extensions
                stored_preview_base = os.path.splitext(preview_url)[0]
                current_preview_base = os.path.splitext(normalize_path(preview_url))[0]
                if stored_preview_base != current_preview_base:
                    data['preview_url'] = normalize_path(preview_url)
                    needs_update = True

            # Ensure all fields are present
            if 'tags' not in data:
                data['tags'] = []
                needs_update = True

            if 'modelDescription' not in data:
                data['modelDescription'] = ""
                needs_update = True

            if needs_update:
                with open(metadata_path, 'w', encoding='utf-8') as f:
                    json.dump(data, f, indent=2, ensure_ascii=False)

            return LoraMetadata.from_dict(data)

    except Exception as e:
        print(f"Error loading metadata from {metadata_path}: {str(e)}")
        return None
async def update_civitai_metadata(file_path: str, civitai_data: Dict) -> None:
    """Update metadata file with Civitai data"""
    metadata = await load_metadata(file_path)
    if metadata is None:
        return
    metadata.civitai = civitai_data
    await save_metadata(file_path, metadata)
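For reference, the sidecar convention these helpers assume looks roughly like this (names are illustrative):

# loras/
#   myLora.safetensors      <- the model file
#   myLora.metadata.json    <- sidecar written by save_metadata
#   myLora.webp             <- optimized preview produced by find_preview_file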
@@ -1,6 +1,11 @@
from safetensors import safe_open
from typing import Dict
from typing import Dict, List, Tuple
from .model_utils import determine_base_model
import os
import logging
import json

logger = logging.getLogger(__name__)

async def extract_lora_metadata(file_path: str) -> Dict:
    """Extract essential metadata from safetensors file"""
@@ -13,4 +18,116 @@ async def extract_lora_metadata(file_path: str) -> Dict:
                return {"base_model": base_model}
    except Exception as e:
        print(f"Error reading metadata from {file_path}: {str(e)}")
        return {"base_model": "Unknown"}
    return {"base_model": "Unknown"}

async def extract_checkpoint_metadata(file_path: str) -> dict:
    """Extract metadata from a checkpoint file to determine model type and base model"""
    try:
        # Analyze filename for clues about the model
        filename = os.path.basename(file_path).lower()

        model_info = {
            'base_model': 'Unknown',
            'model_type': 'checkpoint'
        }

        # Detect base model from filename
        if 'xl' in filename or 'sdxl' in filename:
            model_info['base_model'] = 'SDXL'
        elif 'sd3' in filename:
            model_info['base_model'] = 'SD3'
        elif 'sd2' in filename or 'v2' in filename:
            model_info['base_model'] = 'SD2.x'
        elif 'sd1' in filename or 'v1' in filename:
            model_info['base_model'] = 'SD1.5'

        # Detect model type from filename
        if 'inpaint' in filename:
            model_info['model_type'] = 'inpainting'
        elif 'anime' in filename:
            model_info['model_type'] = 'anime'
        elif 'realistic' in filename:
            model_info['model_type'] = 'realistic'

        # Try to peek at the safetensors file structure if available
        if file_path.endswith('.safetensors'):
            import json
            import struct

            with open(file_path, 'rb') as f:
                header_size = struct.unpack('<Q', f.read(8))[0]
                header_json = f.read(header_size)
                header = json.loads(header_json)

                # Look for specific keys to identify model type
                metadata = header.get('__metadata__', {})
                if metadata:
                    # Try to determine if it's SDXL
                    if any(key.startswith('conditioner.embedders.1') for key in header):
                        model_info['base_model'] = 'SDXL'

                    # Look for model type info
                    if metadata.get('modelspec.architecture') == 'SD-XL':
                        model_info['base_model'] = 'SDXL'
                    elif metadata.get('modelspec.architecture') == 'SD-3':
                        model_info['base_model'] = 'SD3'

                    # Check for specific use case
                    if metadata.get('modelspec.purpose') == 'inpainting':
                        model_info['model_type'] = 'inpainting'

        return model_info

    except Exception as e:
        logger.error(f"Error extracting checkpoint metadata for {file_path}: {e}")
        # Return default values
        return {'base_model': 'Unknown', 'model_type': 'checkpoint'}
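The header peek above leans on the safetensors container layout: an 8-byte little-endian length prefix followed by that many bytes of JSON. A standalone sketch of the same read (the path is a placeholder):

import json
import struct

def read_safetensors_header(path: str) -> dict:
    # safetensors layout: u64 little-endian header size, then a JSON header
    # describing tensors plus an optional '__metadata__' dict.
    with open(path, 'rb') as f:
        header_size = struct.unpack('<Q', f.read(8))[0]
        return json.loads(f.read(header_size))

# header = read_safetensors_header('model.safetensors')  # illustrative path
# print(header.get('__metadata__', {}))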
async def extract_trained_words(file_path: str) -> Tuple[List[Tuple[str, int]], str]:
    """Extract trained words from a safetensors file and sort by frequency

    Args:
        file_path: Path to the safetensors file

    Returns:
        Tuple of:
        - List of (word, frequency) tuples sorted by frequency (highest first)
        - class_tokens value (or None if not found)
    """
    class_tokens = None

    try:
        with safe_open(file_path, framework="pt", device="cpu") as f:
            metadata = f.metadata()

            # Extract class_tokens from ss_datasets if present
            if metadata and "ss_datasets" in metadata:
                try:
                    datasets_data = json.loads(metadata["ss_datasets"])
                    # Look for class_tokens in the first subset
                    if datasets_data and isinstance(datasets_data, list) and datasets_data[0].get("subsets"):
                        subsets = datasets_data[0].get("subsets", [])
                        if subsets and isinstance(subsets, list) and len(subsets) > 0:
                            class_tokens = subsets[0].get("class_tokens")
                except Exception as e:
                    logger.error(f"Error parsing ss_datasets for class_tokens: {str(e)}")

            # Extract tag frequency as before
            if metadata and "ss_tag_frequency" in metadata:
                # Parse the JSON string into a dictionary
                tag_data = json.loads(metadata["ss_tag_frequency"])

                # The structure may have an outer key (like "image_dir" or "img")
                # We need to get the inner dictionary with the actual word frequencies
                if tag_data:
                    # Get the first key (usually "image_dir" or "img")
                    first_key = list(tag_data.keys())[0]
                    words_dict = tag_data[first_key]

                    # Sort words by frequency (highest first)
                    sorted_words = sorted(words_dict.items(), key=lambda x: x[1], reverse=True)
                    return sorted_words, class_tokens
    except Exception as e:
        logger.error(f"Error extracting trained words from {file_path}: {str(e)}")

    return [], class_tokens
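To make the parsing above concrete: ss_tag_frequency is a JSON string keyed by dataset folder, mapping tags to counts. An illustration with made-up values:

# Example ss_tag_frequency payload (tags and counts invented):
#   {"img": {"1girl": 120, "solo": 98, "my_trigger": 95}}
# extract_trained_words would then return:
#   ([('1girl', 120), ('solo', 98), ('my_trigger', 95)], class_tokens)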
py/utils/metadata_manager.py (new file, 313 lines)
@@ -0,0 +1,313 @@
from datetime import datetime
import os
import json
import shutil
import logging
from typing import Dict, Optional, Type, Union

from .models import BaseModelMetadata, LoraMetadata
from .file_utils import normalize_path, find_preview_file, calculate_sha256
from .lora_metadata import extract_lora_metadata, extract_checkpoint_metadata

logger = logging.getLogger(__name__)

class MetadataManager:
    """
    Centralized manager for all metadata operations.

    This class is responsible for:
    1. Loading metadata safely with fallback mechanisms
    2. Saving metadata with atomic operations and backups
    3. Creating default metadata for models
    4. Handling unknown fields gracefully
    """

    @staticmethod
    async def load_metadata(file_path: str, model_class: Type[BaseModelMetadata] = LoraMetadata) -> Optional[BaseModelMetadata]:
        """
        Load metadata with robust error handling and data preservation.

        Args:
            file_path: Path to the model file
            model_class: Class to instantiate (LoraMetadata, CheckpointMetadata, etc.)

        Returns:
            BaseModelMetadata instance or None if file doesn't exist
        """
        metadata_path = f"{os.path.splitext(file_path)[0]}.metadata.json"
        backup_path = f"{metadata_path}.bak"

        # Try loading the main metadata file
        if os.path.exists(metadata_path):
            try:
                with open(metadata_path, 'r', encoding='utf-8') as f:
                    data = json.load(f)

                # Create model instance
                metadata = model_class.from_dict(data)

                # Normalize paths
                await MetadataManager._normalize_metadata_paths(metadata, file_path)

                return metadata

            except json.JSONDecodeError:
                # JSON parsing error - try to restore from backup
                logger.warning(f"Invalid JSON in metadata file: {metadata_path}")
                return await MetadataManager._restore_from_backup(backup_path, file_path, model_class)

            except Exception as e:
                # Other errors might be due to unknown fields or schema changes
                logger.error(f"Error loading metadata from {metadata_path}: {str(e)}")
                return await MetadataManager._restore_from_backup(backup_path, file_path, model_class)

        return None
    @staticmethod
    async def _restore_from_backup(backup_path: str, file_path: str, model_class: Type[BaseModelMetadata]) -> Optional[BaseModelMetadata]:
        """
        Try to restore metadata from backup file

        Args:
            backup_path: Path to backup file
            file_path: Path to the original model file
            model_class: Class to instantiate

        Returns:
            BaseModelMetadata instance or None if restoration fails
        """
        if os.path.exists(backup_path):
            try:
                logger.info(f"Attempting to restore metadata from backup: {backup_path}")
                with open(backup_path, 'r', encoding='utf-8') as f:
                    data = json.load(f)

                # Process data similarly to normal loading
                metadata = model_class.from_dict(data)
                await MetadataManager._normalize_metadata_paths(metadata, file_path)
                return metadata
            except Exception as e:
                logger.error(f"Failed to restore from backup: {str(e)}")

        return None
    @staticmethod
    async def save_metadata(path: str, metadata: Union[BaseModelMetadata, Dict], create_backup: bool = False) -> bool:
        """
        Save metadata with atomic write operations and backup creation.

        Args:
            path: Path to the model file or directly to the metadata file
            metadata: Metadata to save (either BaseModelMetadata object or dict)
            create_backup: Whether to create a new backup of existing file if a backup doesn't already exist

        Returns:
            bool: Success or failure
        """
        # Determine if the input is a metadata path or a model file path
        if path.endswith('.metadata.json'):
            metadata_path = path
        else:
            # Use existing logic for model file paths
            file_path = path
            metadata_path = f"{os.path.splitext(file_path)[0]}.metadata.json"
        temp_path = f"{metadata_path}.tmp"
        backup_path = f"{metadata_path}.bak"

        try:
            # Create backup if file exists and either:
            # 1. create_backup is True, OR
            # 2. backup file doesn't already exist
            if os.path.exists(metadata_path) and (create_backup or not os.path.exists(backup_path)):
                try:
                    shutil.copy2(metadata_path, backup_path)
                    logger.debug(f"Created metadata backup at: {backup_path}")
                except Exception as e:
                    logger.warning(f"Failed to create metadata backup: {str(e)}")

            # Convert to dict if needed
            if isinstance(metadata, BaseModelMetadata):
                metadata_dict = metadata.to_dict()
                # Preserve unknown fields if present
                if hasattr(metadata, '_unknown_fields'):
                    metadata_dict.update(metadata._unknown_fields)
            else:
                metadata_dict = metadata.copy()

            # Normalize paths
            if 'file_path' in metadata_dict:
                metadata_dict['file_path'] = normalize_path(metadata_dict['file_path'])
            if 'preview_url' in metadata_dict:
                metadata_dict['preview_url'] = normalize_path(metadata_dict['preview_url'])

            # Write to temporary file first
            with open(temp_path, 'w', encoding='utf-8') as f:
                json.dump(metadata_dict, f, indent=2, ensure_ascii=False)

            # Atomic rename operation
            os.replace(temp_path, metadata_path)
            return True

        except Exception as e:
            logger.error(f"Error saving metadata to {metadata_path}: {str(e)}")
            # Clean up temporary file if it exists
            if os.path.exists(temp_path):
                try:
                    os.remove(temp_path)
                except:
                    pass
            return False
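The write path above is the usual temp-file-plus-os.replace idiom: os.replace is atomic on the same filesystem, so readers never observe a half-written .metadata.json. A hedged usage sketch (the path is a placeholder):

# ok = await MetadataManager.save_metadata(
#     'loras/myLora.safetensors',
#     {'file_path': 'loras/myLora.safetensors', 'model_name': 'myLora'},
#     create_backup=True,
# )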
    @staticmethod
    async def create_default_metadata(file_path: str, model_class: Type[BaseModelMetadata] = LoraMetadata) -> Optional[BaseModelMetadata]:
        """
        Create basic metadata structure for a model file.
        This replaces the old get_file_info function with a more appropriately named method.

        Args:
            file_path: Path to the model file
            model_class: Class to instantiate

        Returns:
            BaseModelMetadata instance or None if file doesn't exist
        """
        # First check if file actually exists and resolve symlinks
        try:
            real_path = os.path.realpath(file_path)
            if not os.path.exists(real_path):
                return None
        except Exception as e:
            logger.error(f"Error checking file existence for {file_path}: {e}")
            return None

        try:
            base_name = os.path.splitext(os.path.basename(file_path))[0]
            dir_path = os.path.dirname(file_path)

            # Find preview image
            preview_url = find_preview_file(base_name, dir_path)

            # Calculate file hash
            sha256 = await calculate_sha256(real_path)

            # Create instance based on model type
            if model_class.__name__ == "CheckpointMetadata":
                metadata = model_class(
                    file_name=base_name,
                    model_name=base_name,
                    file_path=normalize_path(file_path),
                    size=os.path.getsize(real_path),
                    modified=datetime.now().timestamp(),
                    sha256=sha256,
                    base_model="Unknown",
                    preview_url=normalize_path(preview_url),
                    tags=[],
                    modelDescription="",
                    model_type="checkpoint",
                    from_civitai=True
                )
            elif model_class.__name__ == "EmbeddingMetadata":
                metadata = model_class(
                    file_name=base_name,
                    model_name=base_name,
                    file_path=normalize_path(file_path),
                    size=os.path.getsize(real_path),
                    modified=datetime.now().timestamp(),
                    sha256=sha256,
                    base_model="Unknown",
                    preview_url=normalize_path(preview_url),
                    tags=[],
                    modelDescription="",
                    from_civitai=True
                )
            else:  # Default to LoraMetadata
                metadata = model_class(
                    file_name=base_name,
                    model_name=base_name,
                    file_path=normalize_path(file_path),
                    size=os.path.getsize(real_path),
                    modified=datetime.now().timestamp(),
                    sha256=sha256,
                    base_model="Unknown",
                    preview_url=normalize_path(preview_url),
                    tags=[],
                    modelDescription="",
                    from_civitai=True,
                    usage_tips="{}"
                )

            # Try to extract model-specific metadata
            # await MetadataManager._enrich_metadata(metadata, real_path)

            # Save the created metadata
            await MetadataManager.save_metadata(file_path, metadata, create_backup=False)

            return metadata

        except Exception as e:
            logger.error(f"Error creating default metadata for {file_path}: {e}")
            return None
    @staticmethod
    async def _enrich_metadata(metadata: BaseModelMetadata, file_path: str) -> None:
        """
        Enrich metadata with model-specific information

        Args:
            metadata: Metadata to enrich
            file_path: Path to the model file
        """
        try:
            if metadata.__class__.__name__ == "LoraMetadata":
                model_info = await extract_lora_metadata(file_path)
                metadata.base_model = model_info['base_model']

            # elif metadata.__class__.__name__ == "CheckpointMetadata":
            #     model_info = await extract_checkpoint_metadata(file_path)
            #     metadata.base_model = model_info['base_model']
            #     if 'model_type' in model_info:
            #         metadata.model_type = model_info['model_type']
        except Exception as e:
            logger.error(f"Error enriching metadata: {str(e)}")

    @staticmethod
    async def _normalize_metadata_paths(metadata: BaseModelMetadata, file_path: str) -> None:
        """
        Normalize paths in metadata object

        Args:
            metadata: Metadata object to update
            file_path: Current file path for the model
        """
        need_update = False

        # Check if file_name matches the actual file name
        base_name = os.path.splitext(os.path.basename(file_path))[0]
        if metadata.file_name != base_name:
            metadata.file_name = base_name
            need_update = True

        # Check if file path is different from what's in metadata
        if normalize_path(file_path) != metadata.file_path:
            metadata.file_path = normalize_path(file_path)
            need_update = True

        # Check if preview exists at the current location
        preview_url = metadata.preview_url
        if preview_url:
            # Get directory parts of both paths
            file_dir = os.path.dirname(file_path)
            preview_dir = os.path.dirname(preview_url)

            # Update preview if it doesn't exist OR if model and preview are in different directories
            if not os.path.exists(preview_url) or file_dir != preview_dir:
                base_name = os.path.splitext(os.path.basename(file_path))[0]
                dir_path = os.path.dirname(file_path)
                new_preview_url = find_preview_file(base_name, dir_path)
                if new_preview_url:
                    metadata.preview_url = normalize_path(new_preview_url)
                    need_update = True

        # If path attributes were changed, save the metadata back to disk
        if need_update:
            await MetadataManager.save_metadata(file_path, metadata, create_backup=False)
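Putting the pieces together, a hedged sketch of the intended call flow for the class above (path illustrative, inside an async context):

# meta = await MetadataManager.load_metadata('loras/myLora.safetensors')
# if meta is None:  # no sidecar yet, or both main file and backup were unreadable
#     meta = await MetadataManager.create_default_metadata('loras/myLora.safetensors')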
@@ -1,27 +1,30 @@
from dataclasses import dataclass, asdict
from typing import Dict, Optional, List
from dataclasses import dataclass, asdict, field
from typing import Dict, Optional, List, Any
from datetime import datetime
import os
from .model_utils import determine_base_model

@dataclass
class LoraMetadata:
    """Represents the metadata structure for a Lora model"""
    file_name: str  # The filename without extension of the lora
    model_name: str  # The lora's name defined by the creator, initially same as file_name
    file_path: str  # Full path to the safetensors file
class BaseModelMetadata:
    """Base class for all model metadata structures"""
    file_name: str  # The filename without extension
    model_name: str  # The model's name defined by the creator
    file_path: str  # Full path to the model file
    size: int  # File size in bytes
    modified: float  # Last modified timestamp
    modified: float  # Timestamp when the model was added to the management system
    sha256: str  # SHA256 hash of the file
    base_model: str  # Base model (SD1.5/SD2.1/SDXL/etc.)
    base_model: str  # Base model type (SD1.5/SD2.1/SDXL/etc.)
    preview_url: str  # Preview image URL
    preview_nsfw_level: int = 0  # NSFW level of the preview image
    usage_tips: str = "{}"  # Usage tips for the model, json string
    notes: str = ""  # Additional notes
    from_civitai: bool = True  # Whether the lora is from Civitai
    from_civitai: bool = True  # Whether from Civitai
    civitai: Optional[Dict] = None  # Civitai API data if available
    tags: List[str] = None  # Model tags
    modelDescription: str = ""  # Full model description
    civitai_deleted: bool = False  # Whether deleted from Civitai
    favorite: bool = False  # Whether the model is a favorite
    exclude: bool = False  # Whether to exclude this model from the cache
    _unknown_fields: Dict[str, Any] = field(default_factory=dict, repr=False, compare=False)  # Store unknown fields

    def __post_init__(self):
        # Initialize empty lists to avoid mutable default parameter issue
@@ -29,40 +32,46 @@ class LoraMetadata:
            self.tags = []

    @classmethod
    def from_dict(cls, data: Dict) -> 'LoraMetadata':
        """Create LoraMetadata instance from dictionary"""
        # Create a copy of the data to avoid modifying the input
    def from_dict(cls, data: Dict) -> 'BaseModelMetadata':
        """Create instance from dictionary"""
        data_copy = data.copy()
        return cls(**data_copy)

    @classmethod
    def from_civitai_info(cls, version_info: Dict, file_info: Dict, save_path: str) -> 'LoraMetadata':
        """Create LoraMetadata instance from Civitai version info"""
        file_name = file_info['name']
        base_model = determine_base_model(version_info.get('baseModel', ''))

        return cls(
            file_name=os.path.splitext(file_name)[0],
            model_name=version_info.get('model').get('name', os.path.splitext(file_name)[0]),
            file_path=save_path.replace(os.sep, '/'),
            size=file_info.get('sizeKB', 0) * 1024,
            modified=datetime.now().timestamp(),
            sha256=file_info['hashes'].get('SHA256', '').lower(),
            base_model=base_model,
            preview_url=None,  # Will be updated after preview download
            preview_nsfw_level=0,  # Will be updated after preview download, it is decided by the nsfw level of the preview image
            from_civitai=True,
            civitai=version_info
        )
        # Use cached fields if available, otherwise compute them
        if not hasattr(cls, '_known_fields_cache'):
            known_fields = set()
            for c in cls.mro():
                if hasattr(c, '__annotations__'):
                    known_fields.update(c.__annotations__.keys())
            cls._known_fields_cache = known_fields

        known_fields = cls._known_fields_cache

        # Extract fields that match our class attributes
        fields_to_use = {k: v for k, v in data_copy.items() if k in known_fields}

        # Store unknown fields separately
        unknown_fields = {k: v for k, v in data_copy.items() if k not in known_fields and not k.startswith('_')}

        # Create instance with known fields
        instance = cls(**fields_to_use)

        # Add unknown fields as a separate attribute
        instance._unknown_fields = unknown_fields

        return instance

    def to_dict(self) -> Dict:
        """Convert to dictionary for JSON serialization"""
        return asdict(self)

    @property
    def modified_datetime(self) -> datetime:
        """Convert modified timestamp to datetime object"""
        return datetime.fromtimestamp(self.modified)
        result = asdict(self)

        # Remove private fields
        result = {k: v for k, v in result.items() if not k.startswith('_')}

        # Add back unknown fields if they exist
        if hasattr(self, '_unknown_fields'):
            result.update(self._unknown_fields)

        return result

    def update_civitai_info(self, civitai_data: Dict) -> None:
        """Update Civitai information"""
@@ -75,3 +84,115 @@ class LoraMetadata:
        self.modified = os.path.getmtime(file_path)
        self.file_path = file_path.replace(os.sep, '/')

@dataclass
class LoraMetadata(BaseModelMetadata):
    """Represents the metadata structure for a Lora model"""
    usage_tips: str = "{}"  # Usage tips for the model, json string

    @classmethod
    def from_civitai_info(cls, version_info: Dict, file_info: Dict, save_path: str) -> 'LoraMetadata':
        """Create LoraMetadata instance from Civitai version info"""
        file_name = file_info['name']
        base_model = determine_base_model(version_info.get('baseModel', ''))

        # Extract tags and description if available
        tags = []
        description = ""
        if 'model' in version_info:
            if 'tags' in version_info['model']:
                tags = version_info['model']['tags']
            if 'description' in version_info['model']:
                description = version_info['model']['description']

        return cls(
            file_name=os.path.splitext(file_name)[0],
            model_name=version_info.get('model').get('name', os.path.splitext(file_name)[0]),
            file_path=save_path.replace(os.sep, '/'),
            size=file_info.get('sizeKB', 0) * 1024,
            modified=datetime.now().timestamp(),
            sha256=file_info['hashes'].get('SHA256', '').lower(),
            base_model=base_model,
            preview_url=None,  # Will be updated after preview download
            preview_nsfw_level=0,  # Will be updated after preview download
            from_civitai=True,
            civitai=version_info,
            tags=tags,
            modelDescription=description
        )

@dataclass
class CheckpointMetadata(BaseModelMetadata):
    """Represents the metadata structure for a Checkpoint model"""
    model_type: str = "checkpoint"  # Model type (checkpoint, diffusion_model, etc.)

    @classmethod
    def from_civitai_info(cls, version_info: Dict, file_info: Dict, save_path: str) -> 'CheckpointMetadata':
        """Create CheckpointMetadata instance from Civitai version info"""
        file_name = file_info['name']
        base_model = determine_base_model(version_info.get('baseModel', ''))
        model_type = version_info.get('type', 'checkpoint')

        # Extract tags and description if available
        tags = []
        description = ""
        if 'model' in version_info:
            if 'tags' in version_info['model']:
                tags = version_info['model']['tags']
            if 'description' in version_info['model']:
                description = version_info['model']['description']

        return cls(
            file_name=os.path.splitext(file_name)[0],
            model_name=version_info.get('model').get('name', os.path.splitext(file_name)[0]),
            file_path=save_path.replace(os.sep, '/'),
            size=file_info.get('sizeKB', 0) * 1024,
            modified=datetime.now().timestamp(),
            sha256=file_info['hashes'].get('SHA256', '').lower(),
            base_model=base_model,
            preview_url=None,  # Will be updated after preview download
            preview_nsfw_level=0,
            from_civitai=True,
            civitai=version_info,
            model_type=model_type,
            tags=tags,
            modelDescription=description
        )

@dataclass
class EmbeddingMetadata(BaseModelMetadata):
    """Represents the metadata structure for an Embedding model"""
    model_type: str = "embedding"  # Model type (embedding, textual_inversion, etc.)

    @classmethod
    def from_civitai_info(cls, version_info: Dict, file_info: Dict, save_path: str) -> 'EmbeddingMetadata':
        """Create EmbeddingMetadata instance from Civitai version info"""
        file_name = file_info['name']
        base_model = determine_base_model(version_info.get('baseModel', ''))
        model_type = version_info.get('type', 'embedding')

        # Extract tags and description if available
        tags = []
        description = ""
        if 'model' in version_info:
            if 'tags' in version_info['model']:
                tags = version_info['model']['tags']
            if 'description' in version_info['model']:
                description = version_info['model']['description']

        return cls(
            file_name=os.path.splitext(file_name)[0],
            model_name=version_info.get('model').get('name', os.path.splitext(file_name)[0]),
            file_path=save_path.replace(os.sep, '/'),
            size=file_info.get('sizeKB', 0) * 1024,
            modified=datetime.now().timestamp(),
            sha256=file_info['hashes'].get('SHA256', '').lower(),
            base_model=base_model,
            preview_url=None,  # Will be updated after preview download
            preview_nsfw_level=0,
            from_civitai=True,
            civitai=version_info,
            model_type=model_type,
            tags=tags,
            modelDescription=description
        )
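To make the unknown-field handling concrete, a round-trip sketch against the classes above (all field values are made up):

# A field this version doesn't know ('newFlag') survives
# from_dict -> to_dict because it is parked in _unknown_fields.
data = {
    'file_name': 'myLora', 'model_name': 'myLora',
    'file_path': 'loras/myLora.safetensors', 'size': 1, 'modified': 0.0,
    'sha256': 'abc', 'base_model': 'SDXL', 'preview_url': '',
    'newFlag': True,  # hypothetical future-schema field
}
meta = LoraMetadata.from_dict(data)
assert meta.to_dict()['newFlag'] is True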
py/utils/routes_common.py (new file, 1100 lines; diff suppressed because it is too large)

py/utils/usage_stats.py (new file, 376 lines)
@@ -0,0 +1,376 @@
import os
import json
import sys
import time
import asyncio
import logging
import datetime
import shutil
from typing import Dict, Set

from ..config import config
from ..services.service_registry import ServiceRegistry

# Check if running in standalone mode
standalone_mode = 'nodes' not in sys.modules

if not standalone_mode:
    from ..metadata_collector.metadata_registry import MetadataRegistry
    from ..metadata_collector.constants import MODELS, LORAS

logger = logging.getLogger(__name__)

class UsageStats:
    """Track usage statistics for models and save to JSON"""

    _instance = None
    _lock = asyncio.Lock()  # For thread safety

    # Default stats file name
    STATS_FILENAME = "lora_manager_stats.json"
    BACKUP_SUFFIX = ".backup"

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        if self._initialized:
            return

        # Initialize stats storage
        self.stats = {
            "checkpoints": {},  # sha256 -> { total: count, history: { date: count } }
            "loras": {},  # sha256 -> { total: count, history: { date: count } }
            "total_executions": 0,
            "last_save_time": 0
        }

        # Queue for prompt_ids to process
        self.pending_prompt_ids = set()

        # Load existing stats if available
        self._stats_file_path = self._get_stats_file_path()
        self._load_stats()

        # Save interval in seconds
        self.save_interval = 90  # 1.5 minutes

        # Start background task to process queued prompt_ids
        self._bg_task = asyncio.create_task(self._background_processor())

        self._initialized = True
        logger.info("Usage statistics tracker initialized")
    def _get_stats_file_path(self) -> str:
        """Get the path to the stats JSON file"""
        if not config.loras_roots or len(config.loras_roots) == 0:
            # Fallback to temporary directory if no lora roots
            return os.path.join(config.temp_directory, self.STATS_FILENAME)

        # Use the first lora root
        return os.path.join(config.loras_roots[0], self.STATS_FILENAME)

    def _backup_old_stats(self):
        """Backup the old stats file before conversion"""
        if os.path.exists(self._stats_file_path):
            backup_path = f"{self._stats_file_path}{self.BACKUP_SUFFIX}"
            try:
                shutil.copy2(self._stats_file_path, backup_path)
                logger.info(f"Backed up old stats file to {backup_path}")
                return True
            except Exception as e:
                logger.error(f"Failed to backup stats file: {e}")
        return False
    def _convert_old_format(self, old_stats):
        """Convert old stats format to new format with history"""
        new_stats = {
            "checkpoints": {},
            "loras": {},
            "total_executions": old_stats.get("total_executions", 0),
            "last_save_time": old_stats.get("last_save_time", time.time())
        }

        # Get today's date in YYYY-MM-DD format
        today = datetime.datetime.now().strftime("%Y-%m-%d")

        # Convert checkpoint stats
        if "checkpoints" in old_stats and isinstance(old_stats["checkpoints"], dict):
            for hash_id, count in old_stats["checkpoints"].items():
                new_stats["checkpoints"][hash_id] = {
                    "total": count,
                    "history": {
                        today: count
                    }
                }

        # Convert lora stats
        if "loras" in old_stats and isinstance(old_stats["loras"], dict):
            for hash_id, count in old_stats["loras"].items():
                new_stats["loras"][hash_id] = {
                    "total": count,
                    "history": {
                        today: count
                    }
                }

        logger.info("Successfully converted stats from old format to new format with history")
        return new_stats

    def _is_old_format(self, stats):
        """Check if the stats are in the old format (direct count values)"""
        # Check if any lora or checkpoint entry is a direct number instead of an object
        if "loras" in stats and isinstance(stats["loras"], dict):
            for hash_id, data in stats["loras"].items():
                if isinstance(data, (int, float)):
                    return True

        if "checkpoints" in stats and isinstance(stats["checkpoints"], dict):
            for hash_id, data in stats["checkpoints"].items():
                if isinstance(data, (int, float)):
                    return True

        return False
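A before/after illustration of the conversion above (hash and counts invented, conversion assumed to run on 2025-01-01):

# Old format: bare counts per hash.
#   {"loras": {"abc123": 7}, "total_executions": 7}
# New format produced by _convert_old_format:
#   {"loras": {"abc123": {"total": 7, "history": {"2025-01-01": 7}}},
#    "checkpoints": {}, "total_executions": 7, "last_save_time": ...}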
    def _load_stats(self):
        """Load existing statistics from file"""
        try:
            if os.path.exists(self._stats_file_path):
                with open(self._stats_file_path, 'r', encoding='utf-8') as f:
                    loaded_stats = json.load(f)

                # Check if old format and needs conversion
                if self._is_old_format(loaded_stats):
                    logger.info("Detected old stats format, performing conversion")
                    self._backup_old_stats()
                    self.stats = self._convert_old_format(loaded_stats)
                else:
                    # Update our stats with loaded data (already in new format)
                    if isinstance(loaded_stats, dict):
                        # Update individual sections to maintain structure
                        if "checkpoints" in loaded_stats and isinstance(loaded_stats["checkpoints"], dict):
                            self.stats["checkpoints"] = loaded_stats["checkpoints"]

                        if "loras" in loaded_stats and isinstance(loaded_stats["loras"], dict):
                            self.stats["loras"] = loaded_stats["loras"]

                        if "total_executions" in loaded_stats:
                            self.stats["total_executions"] = loaded_stats["total_executions"]

                        if "last_save_time" in loaded_stats:
                            self.stats["last_save_time"] = loaded_stats["last_save_time"]

                logger.info(f"Loaded usage statistics from {self._stats_file_path}")
        except Exception as e:
            logger.error(f"Error loading usage statistics: {e}")

    async def save_stats(self, force=False):
        """Save statistics to file"""
        try:
            # Only save if it's been at least save_interval since last save or force is True
            current_time = time.time()
            if not force and (current_time - self.stats.get("last_save_time", 0)) < self.save_interval:
                return False

            # Use a lock to prevent concurrent writes
            async with self._lock:
                # Update last save time
                self.stats["last_save_time"] = current_time

                # Create directory if it doesn't exist
                os.makedirs(os.path.dirname(self._stats_file_path), exist_ok=True)

                # Write to a temporary file first, then move it to avoid corruption
                temp_path = f"{self._stats_file_path}.tmp"
                with open(temp_path, 'w', encoding='utf-8') as f:
                    json.dump(self.stats, f, indent=2, ensure_ascii=False)

                # Replace the old file with the new one
                os.replace(temp_path, self._stats_file_path)

                logger.debug(f"Saved usage statistics to {self._stats_file_path}")
                return True
        except Exception as e:
            logger.error(f"Error saving usage statistics: {e}", exc_info=True)
            return False
    def register_execution(self, prompt_id):
        """Register a completed execution by prompt_id for later processing"""
        if prompt_id:
            self.pending_prompt_ids.add(prompt_id)

    async def _background_processor(self):
        """Background task to process queued prompt_ids"""
        try:
            while True:
                # Wait a short interval before checking for new prompt_ids
                await asyncio.sleep(5)  # Check every 5 seconds

                # Process any pending prompt_ids
                if self.pending_prompt_ids:
                    async with self._lock:
                        # Get a copy of the set and clear original
                        prompt_ids = self.pending_prompt_ids.copy()
                        self.pending_prompt_ids.clear()

                    # Process each prompt_id
                    registry = MetadataRegistry()
                    for prompt_id in prompt_ids:
                        try:
                            metadata = registry.get_metadata(prompt_id)
                            await self._process_metadata(metadata)
                        except Exception as e:
                            logger.error(f"Error processing prompt_id {prompt_id}: {e}")

                    # Periodically save stats
                    await self.save_stats()
        except asyncio.CancelledError:
            # Task was cancelled, clean up
            await self.save_stats(force=True)
        except Exception as e:
            logger.error(f"Error in background processing task: {e}", exc_info=True)
            # Restart the task after a delay if it fails
            asyncio.create_task(self._restart_background_task())

    async def _restart_background_task(self):
        """Restart the background task after a delay"""
        await asyncio.sleep(30)  # Wait 30 seconds before restarting
        self._bg_task = asyncio.create_task(self._background_processor())
async def _process_metadata(self, metadata):
|
||||
"""Process metadata from an execution"""
|
||||
if not metadata or not isinstance(metadata, dict):
|
||||
return
|
||||
|
||||
# Increment total executions count
|
||||
self.stats["total_executions"] += 1
|
||||
|
||||
# Get today's date in YYYY-MM-DD format
|
||||
today = datetime.datetime.now().strftime("%Y-%m-%d")
|
||||
|
||||
# Process checkpoints
|
||||
if MODELS in metadata and isinstance(metadata[MODELS], dict):
|
||||
await self._process_checkpoints(metadata[MODELS], today)
|
||||
|
||||
# Process loras
|
||||
if LORAS in metadata and isinstance(metadata[LORAS], dict):
|
||||
await self._process_loras(metadata[LORAS], today)
|
||||
|
||||
async def _process_checkpoints(self, models_data, today_date):
|
||||
"""Process checkpoint models from metadata"""
|
||||
try:
|
||||
# Get checkpoint scanner service
|
||||
checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
|
||||
if not checkpoint_scanner:
|
||||
logger.warning("Checkpoint scanner not available for usage tracking")
|
||||
return
|
||||
|
||||
for node_id, model_info in models_data.items():
|
||||
if not isinstance(model_info, dict):
|
||||
continue
|
||||
|
||||
# Check if this is a checkpoint model
|
||||
model_type = model_info.get("type")
|
||||
if model_type == "checkpoint":
|
||||
model_name = model_info.get("name")
|
||||
if not model_name:
|
||||
continue
|
||||
|
||||
# Clean up filename (remove extension if present)
|
||||
model_filename = os.path.splitext(os.path.basename(model_name))[0]
|
||||
|
||||
# Get hash for this checkpoint
|
||||
model_hash = checkpoint_scanner.get_hash_by_filename(model_filename)
|
||||
if model_hash:
|
||||
# Update stats for this checkpoint with date tracking
|
||||
if model_hash not in self.stats["checkpoints"]:
|
||||
self.stats["checkpoints"][model_hash] = {
|
||||
"total": 0,
|
||||
"history": {}
|
||||
}
|
||||
|
||||
# Increment total count
|
||||
self.stats["checkpoints"][model_hash]["total"] += 1
|
||||
|
||||
# Increment today's count
|
||||
if today_date not in self.stats["checkpoints"][model_hash]["history"]:
|
||||
self.stats["checkpoints"][model_hash]["history"][today_date] = 0
|
||||
self.stats["checkpoints"][model_hash]["history"][today_date] += 1
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing checkpoint usage: {e}", exc_info=True)
|
||||
|
||||
async def _process_loras(self, loras_data, today_date):
|
||||
"""Process LoRA models from metadata"""
|
||||
try:
|
||||
# Get LoRA scanner service
|
||||
lora_scanner = await ServiceRegistry.get_lora_scanner()
|
||||
if not lora_scanner:
|
||||
logger.warning("LoRA scanner not available for usage tracking")
|
||||
return
|
||||
|
||||
for node_id, lora_info in loras_data.items():
|
||||
if not isinstance(lora_info, dict):
|
||||
continue
|
||||
|
||||
# Get the list of LoRAs from standardized format
|
||||
lora_list = lora_info.get("lora_list", [])
|
||||
for lora in lora_list:
|
||||
if not isinstance(lora, dict):
|
||||
continue
|
||||
|
||||
lora_name = lora.get("name")
|
||||
if not lora_name:
|
||||
continue
|
||||
|
||||
# Get hash for this LoRA
|
||||
lora_hash = lora_scanner.get_hash_by_filename(lora_name)
|
||||
if lora_hash:
|
||||
# Update stats for this LoRA with date tracking
|
||||
if lora_hash not in self.stats["loras"]:
|
||||
self.stats["loras"][lora_hash] = {
|
||||
"total": 0,
|
||||
"history": {}
|
||||
}
|
||||
|
||||
# Increment total count
|
||||
self.stats["loras"][lora_hash]["total"] += 1
|
||||
|
||||
# Increment today's count
|
||||
if today_date not in self.stats["loras"][lora_hash]["history"]:
|
||||
self.stats["loras"][lora_hash]["history"][today_date] = 0
|
||||
self.stats["loras"][lora_hash]["history"][today_date] += 1
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing LoRA usage: {e}", exc_info=True)
|
||||
|
||||
async def get_stats(self):
|
||||
"""Get current usage statistics"""
|
||||
return self.stats
|
||||
|
||||
async def get_model_usage_count(self, model_type, sha256):
|
||||
"""Get usage count for a specific model by hash"""
|
||||
if model_type == "checkpoint":
|
||||
if sha256 in self.stats["checkpoints"]:
|
||||
return self.stats["checkpoints"][sha256]["total"]
|
||||
elif model_type == "lora":
|
||||
if sha256 in self.stats["loras"]:
|
||||
return self.stats["loras"][sha256]["total"]
|
||||
return 0
|
||||
|
||||
async def process_execution(self, prompt_id):
|
||||
"""Process a prompt execution immediately (synchronous approach)"""
|
||||
if not prompt_id:
|
||||
return
|
||||
|
||||
try:
|
||||
# Process metadata for this prompt_id
|
||||
registry = MetadataRegistry()
|
||||
metadata = registry.get_metadata(prompt_id)
|
||||
if metadata:
|
||||
await self._process_metadata(metadata)
|
||||
# Save stats if needed
|
||||
await self.save_stats()
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing prompt_id {prompt_id}: {e}", exc_info=True)
|
||||
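# The stats structure above maps each model hash to {"total": N, "history": {date: N}}.
# A minimal sketch of how a caller could derive a rolling usage count from such a
# snapshot; `usage_in_last_days` is a hypothetical helper, not part of the file above:
import datetime

def usage_in_last_days(stats: dict, model_type: str, sha256: str, days: int = 7) -> int:
    """Sum per-day history counts over the last `days` days (assumed stats layout)."""
    bucket = "checkpoints" if model_type == "checkpoint" else "loras"
    entry = stats.get(bucket, {}).get(sha256)
    if not entry:
        return 0
    today = datetime.date.today()
    window = {(today - datetime.timedelta(days=i)).strftime("%Y-%m-%d") for i in range(days)}
    return sum(count for day, count in entry.get("history", {}).items() if day in window)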
@@ -1,85 +1,53 @@
from difflib import SequenceMatcher
import os
from ..services.service_registry import ServiceRegistry
from ..config import config
import asyncio

def get_lora_info(lora_name):
    """Get the lora path and trigger words from cache"""
    async def _get_lora_info_async():
        scanner = await ServiceRegistry.get_lora_scanner()
        cache = await scanner.get_cached_data()

        for item in cache.raw_data:
            if item.get('file_name') == lora_name:
                file_path = item.get('file_path')
                if file_path:
                    for root in config.loras_roots:
                        root = root.replace(os.sep, '/')
                        if file_path.startswith(root):
                            relative_path = os.path.relpath(file_path, root).replace(os.sep, '/')
                            # Get trigger words from civitai metadata
                            civitai = item.get('civitai', {})
                            trigger_words = civitai.get('trainedWords', []) if civitai else []
                            return relative_path, trigger_words
        return lora_name, []

    try:
        # Check if we're already in an event loop
        loop = asyncio.get_running_loop()
        # If we're in a running loop, we need to use a different approach:
        # create a new thread to run the async code
        import concurrent.futures

        def run_in_thread():
            new_loop = asyncio.new_event_loop()
            asyncio.set_event_loop(new_loop)
            try:
                return new_loop.run_until_complete(_get_lora_info_async())
            finally:
                new_loop.close()

        with concurrent.futures.ThreadPoolExecutor() as executor:
            future = executor.submit(run_in_thread)
            return future.result()
    except RuntimeError:
        # No event loop is running, we can use asyncio.run()
        return asyncio.run(_get_lora_info_async())

# Removed in this revision:
import requests
import tempfile
import re
from bs4 import BeautifulSoup

def download_twitter_image(url):
    """Download image from a URL containing twitter:image meta tag

    Args:
        url (str): The URL to download image from

    Returns:
        str: Path to downloaded temporary image file
    """
    try:
        # Download page content
        response = requests.get(url)
        response.raise_for_status()

        # Parse HTML
        soup = BeautifulSoup(response.text, 'html.parser')

        # Find twitter:image meta tag
        meta_tag = soup.find('meta', attrs={'property': 'twitter:image'})
        if not meta_tag:
            return None

        image_url = meta_tag['content']

        # Download image
        image_response = requests.get(image_url)
        image_response.raise_for_status()

        # Save to temp file
        with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as temp_file:
            temp_file.write(image_response.content)
            return temp_file.name

    except Exception as e:
        print(f"Error downloading twitter image: {e}")
        return None

def download_civitai_image(url):
    """Download image from a URL containing avatar image with specific class and style attributes

    Args:
        url (str): The URL to download image from

    Returns:
        str: Path to downloaded temporary image file
    """
    try:
        # Download page content
        response = requests.get(url)
        response.raise_for_status()

        # Parse HTML
        soup = BeautifulSoup(response.text, 'html.parser')

        # Find image with specific class and style attributes
        image = soup.select_one('img.EdgeImage_image__iH4_q.max-h-full.w-auto.max-w-full')

        if not image or 'src' not in image.attrs:
            return None

        image_url = image['src']

        # Download image
        image_response = requests.get(image_url)
        image_response.raise_for_status()

        # Save to temp file
        with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as temp_file:
            temp_file.write(image_response.content)
            return temp_file.name

    except Exception as e:
        print(f"Error downloading civitai avatar: {e}")
        return None

def fuzzy_match(text: str, pattern: str, threshold: float = 0.85) -> bool:
    """
    Check if text matches pattern using fuzzy matching.
    Returns True if similarity ratio is above threshold.
@@ -114,3 +82,49 @@ def fuzzy_match(text: str, pattern: str, threshold: float = 0.7) -> bool:

    # All words found either as substrings or fuzzy matches
    return True

def calculate_recipe_fingerprint(loras):
    """
    Calculate a unique fingerprint for a recipe based on its LoRAs.

    The fingerprint is created by sorting LoRA hashes, filtering invalid entries,
    normalizing strength values to 2 decimal places, and joining in format:
    hash1:strength1|hash2:strength2|...

    Args:
        loras (list): List of LoRA dictionaries with hash and strength values

    Returns:
        str: The calculated fingerprint
    """
    if not loras:
        return ""

    # Filter valid entries and extract hash and strength
    valid_loras = []
    for lora in loras:
        # Skip excluded loras
        if lora.get("exclude", False):
            continue

        # Get the hash - use modelVersionId as fallback if hash is empty
        hash_value = lora.get("hash", "").lower()
        if not hash_value and lora.get("isDeleted", False) and lora.get("modelVersionId"):
            hash_value = str(lora.get("modelVersionId"))

        # Skip entries without a valid hash
        if not hash_value:
            continue

        # Normalize strength to 2 decimal places (check both strength and weight fields)
        strength = round(float(lora.get("strength", lora.get("weight", 1.0))), 2)

        valid_loras.append((hash_value, strength))

    # Sort by hash
    valid_loras.sort()

    # Join in format hash1:strength1|hash2:strength2|...
    fingerprint = "|".join([f"{hash_value}:{strength}" for hash_value, strength in valid_loras])

    return fingerprint
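# A minimal usage sketch for calculate_recipe_fingerprint, with made-up hashes and
# strengths; it illustrates the hash-sorted "hash:strength|..." format described above:
loras = [
    {"hash": "BBB111", "strength": 0.8},
    {"hash": "aaa222", "weight": 1.0},          # falls back to the "weight" field
    {"hash": "", "isDeleted": True, "modelVersionId": 726676, "strength": 0.5},
    {"hash": "ccc333", "exclude": True},        # excluded entries are skipped
]
print(calculate_recipe_fingerprint(loras))
# -> "726676:0.5|aaa222:1.0|bbb111:0.8"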
@@ -1,3 +0,0 @@
"""
ComfyUI workflow parsing module to extract generation parameters
"""
@@ -1,58 +0,0 @@
"""
Command-line interface for the ComfyUI workflow parser
"""
import argparse
import json
import os
import logging
import sys
from .parser import parse_workflow

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler()]
)
logger = logging.getLogger(__name__)

def main():
    """Entry point for the CLI"""
    parser = argparse.ArgumentParser(description='Parse ComfyUI workflow files')
    parser.add_argument('input', help='Input workflow JSON file path')
    parser.add_argument('-o', '--output', help='Output JSON file path')
    parser.add_argument('-p', '--pretty', action='store_true', help='Pretty print JSON output')
    parser.add_argument('--debug', action='store_true', help='Enable debug logging')

    args = parser.parse_args()

    # Set logging level
    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    # Validate input file
    if not os.path.isfile(args.input):
        logger.error(f"Input file not found: {args.input}")
        sys.exit(1)

    # Parse workflow
    try:
        result = parse_workflow(args.input, args.output)

        # Print result to console if output file not specified
        if not args.output:
            if args.pretty:
                print(json.dumps(result, indent=4))
            else:
                print(json.dumps(result))
        else:
            logger.info(f"Output saved to: {args.output}")

    except Exception as e:
        logger.error(f"Error parsing workflow: {e}")
        if args.debug:
            import traceback
            traceback.print_exc()
        sys.exit(1)

if __name__ == "__main__":
    main()
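# How this CLI would be invoked, assuming the package layout implied by the relative
# import above (the exact module path is an assumption, not confirmed by the source):
#
#   python -m <package>.workflow.cli my_workflow.json -o parsed.json --pretty --debug
#
# With no -o flag the extracted parameters are printed to stdout as JSON instead.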
@@ -1,3 +0,0 @@
"""
Extension directory for custom node mappers
"""
@@ -1,285 +0,0 @@
"""
ComfyUI Core nodes mappers extension for workflow parsing
"""
import logging
from typing import Dict, Any, List

logger = logging.getLogger(__name__)

# =============================================================================
# Transform Functions
# =============================================================================

def transform_random_noise(inputs: Dict) -> Dict:
    """Transform function for RandomNoise node"""
    return {"seed": str(inputs.get("noise_seed", ""))}

def transform_ksampler_select(inputs: Dict) -> Dict:
    """Transform function for KSamplerSelect node"""
    return {"sampler": inputs.get("sampler_name", "")}

def transform_basic_scheduler(inputs: Dict) -> Dict:
    """Transform function for BasicScheduler node"""
    result = {
        "scheduler": inputs.get("scheduler", ""),
        "denoise": str(inputs.get("denoise", "1.0"))
    }

    # Get steps from inputs or steps input
    if "steps" in inputs:
        if isinstance(inputs["steps"], str):
            result["steps"] = inputs["steps"]
        elif isinstance(inputs["steps"], dict) and "value" in inputs["steps"]:
            result["steps"] = str(inputs["steps"]["value"])
        else:
            result["steps"] = str(inputs["steps"])

    return result

def transform_basic_guider(inputs: Dict) -> Dict:
    """Transform function for BasicGuider node"""
    result = {}

    # Process conditioning
    if "conditioning" in inputs:
        if isinstance(inputs["conditioning"], str):
            result["prompt"] = inputs["conditioning"]
        elif isinstance(inputs["conditioning"], dict):
            result["conditioning"] = inputs["conditioning"]

    # Get model information if needed
    if "model" in inputs and isinstance(inputs["model"], dict):
        result["model"] = inputs["model"]

    return result

def transform_model_sampling_flux(inputs: Dict) -> Dict:
    """Transform function for ModelSamplingFlux - mostly a pass-through node"""
    # This node is primarily used for routing, so we mostly pass through values
    return inputs.get("model", {})

def transform_sampler_custom_advanced(inputs: Dict) -> Dict:
    """Transform function for SamplerCustomAdvanced node"""
    result = {}

    # Extract seed from noise
    if "noise" in inputs and isinstance(inputs["noise"], dict):
        result["seed"] = str(inputs["noise"].get("seed", ""))

    # Extract sampler info
    if "sampler" in inputs and isinstance(inputs["sampler"], dict):
        sampler = inputs["sampler"].get("sampler", "")
        if sampler:
            result["sampler"] = sampler

    # Extract scheduler, steps, denoise from sigmas
    if "sigmas" in inputs and isinstance(inputs["sigmas"], dict):
        sigmas = inputs["sigmas"]
        result["scheduler"] = sigmas.get("scheduler", "")
        result["steps"] = str(sigmas.get("steps", ""))
        result["denoise"] = str(sigmas.get("denoise", "1.0"))

    # Extract prompt and guidance from guider
    if "guider" in inputs and isinstance(inputs["guider"], dict):
        guider = inputs["guider"]

        # Get prompt from conditioning
        if "conditioning" in guider and isinstance(guider["conditioning"], str):
            result["prompt"] = guider["conditioning"]
        elif "conditioning" in guider and isinstance(guider["conditioning"], dict):
            result["guidance"] = guider["conditioning"].get("guidance", "")
            result["prompt"] = guider["conditioning"].get("prompt", "")

        if "model" in guider and isinstance(guider["model"], dict):
            result["checkpoint"] = guider["model"].get("checkpoint", "")
            result["loras"] = guider["model"].get("loras", "")
            result["clip_skip"] = str(int(guider["model"].get("clip_skip", "-1")) * -1)

    # Extract dimensions from latent_image
    if "latent_image" in inputs and isinstance(inputs["latent_image"], dict):
        latent = inputs["latent_image"]
        width = latent.get("width", 0)
        height = latent.get("height", 0)
        if width and height:
            result["width"] = width
            result["height"] = height
            result["size"] = f"{width}x{height}"

    return result

def transform_ksampler(inputs: Dict) -> Dict:
    """Transform function for KSampler nodes"""
    result = {
        "seed": str(inputs.get("seed", "")),
        "steps": str(inputs.get("steps", "")),
        "cfg": str(inputs.get("cfg", "")),
        "sampler": inputs.get("sampler_name", ""),
        "scheduler": inputs.get("scheduler", ""),
    }

    # Process positive prompt
    if "positive" in inputs:
        result["prompt"] = inputs["positive"]

    # Process negative prompt
    if "negative" in inputs:
        result["negative_prompt"] = inputs["negative"]

    # Get dimensions from latent image
    if "latent_image" in inputs and isinstance(inputs["latent_image"], dict):
        width = inputs["latent_image"].get("width", 0)
        height = inputs["latent_image"].get("height", 0)
        if width and height:
            result["size"] = f"{width}x{height}"

    # Add clip_skip if present
    if "clip_skip" in inputs:
        result["clip_skip"] = str(inputs.get("clip_skip", ""))

    # Add guidance if present
    if "guidance" in inputs:
        result["guidance"] = str(inputs.get("guidance", ""))

    # Add model if present
    if "model" in inputs:
        result["checkpoint"] = inputs.get("model", {}).get("checkpoint", "")
        result["loras"] = inputs.get("model", {}).get("loras", "")
        result["clip_skip"] = str(int(inputs.get("model", {}).get("clip_skip", -1)) * -1)

    return result

def transform_empty_latent(inputs: Dict) -> Dict:
    """Transform function for EmptyLatentImage nodes"""
    width = inputs.get("width", 0)
    height = inputs.get("height", 0)
    return {"width": width, "height": height, "size": f"{width}x{height}"}

def transform_clip_text(inputs: Dict) -> Any:
    """Transform function for CLIPTextEncode nodes"""
    return inputs.get("text", "")

def transform_flux_guidance(inputs: Dict) -> Dict:
    """Transform function for FluxGuidance nodes"""
    result = {}

    if "guidance" in inputs:
        result["guidance"] = inputs["guidance"]

    if "conditioning" in inputs:
        conditioning = inputs["conditioning"]
        if isinstance(conditioning, str):
            result["prompt"] = conditioning
        else:
            result["prompt"] = "Unknown prompt"

    return result

def transform_unet_loader(inputs: Dict) -> Dict:
    """Transform function for UNETLoader node"""
    unet_name = inputs.get("unet_name", "")
    return {"checkpoint": unet_name} if unet_name else {}

def transform_checkpoint_loader(inputs: Dict) -> Dict:
    """Transform function for CheckpointLoaderSimple node"""
    ckpt_name = inputs.get("ckpt_name", "")
    return {"checkpoint": ckpt_name} if ckpt_name else {}

def transform_latent_upscale_by(inputs: Dict) -> Dict:
    """Transform function for LatentUpscaleBy node"""
    result = {}

    width = inputs["samples"].get("width", 0) * inputs["scale_by"]
    height = inputs["samples"].get("height", 0) * inputs["scale_by"]
    result["width"] = width
    result["height"] = height
    result["size"] = f"{width}x{height}"

    return result

def transform_clip_set_last_layer(inputs: Dict) -> Dict:
    """Transform function for CLIPSetLastLayer node"""
    result = {}

    if "stop_at_clip_layer" in inputs:
        result["clip_skip"] = inputs["stop_at_clip_layer"]

    return result

# =============================================================================
# Node Mapper Definitions
# =============================================================================

# Define the mappers for ComfyUI core nodes not in main mapper
NODE_MAPPERS_EXT = {
    # KSamplers
    "SamplerCustomAdvanced": {
        "inputs_to_track": ["noise", "guider", "sampler", "sigmas", "latent_image"],
        "transform_func": transform_sampler_custom_advanced
    },
    "KSampler": {
        "inputs_to_track": [
            "seed", "steps", "cfg", "sampler_name", "scheduler",
            "denoise", "positive", "negative", "latent_image",
            "model", "clip_skip"
        ],
        "transform_func": transform_ksampler
    },
    # ComfyUI core nodes
    "EmptyLatentImage": {
        "inputs_to_track": ["width", "height", "batch_size"],
        "transform_func": transform_empty_latent
    },
    "EmptySD3LatentImage": {
        "inputs_to_track": ["width", "height", "batch_size"],
        "transform_func": transform_empty_latent
    },
    "CLIPTextEncode": {
        "inputs_to_track": ["text", "clip"],
        "transform_func": transform_clip_text
    },
    "FluxGuidance": {
        "inputs_to_track": ["guidance", "conditioning"],
        "transform_func": transform_flux_guidance
    },
    "RandomNoise": {
        "inputs_to_track": ["noise_seed"],
        "transform_func": transform_random_noise
    },
    "KSamplerSelect": {
        "inputs_to_track": ["sampler_name"],
        "transform_func": transform_ksampler_select
    },
    "BasicScheduler": {
        "inputs_to_track": ["scheduler", "steps", "denoise", "model"],
        "transform_func": transform_basic_scheduler
    },
    "BasicGuider": {
        "inputs_to_track": ["model", "conditioning"],
        "transform_func": transform_basic_guider
    },
    "ModelSamplingFlux": {
        "inputs_to_track": ["max_shift", "base_shift", "width", "height", "model"],
        "transform_func": transform_model_sampling_flux
    },
    "UNETLoader": {
        "inputs_to_track": ["unet_name"],
        "transform_func": transform_unet_loader
    },
    "CheckpointLoaderSimple": {
        "inputs_to_track": ["ckpt_name"],
        "transform_func": transform_checkpoint_loader
    },
    "LatentUpscale": {
        "inputs_to_track": ["width", "height"],
        "transform_func": transform_empty_latent
    },
    "LatentUpscaleBy": {
        "inputs_to_track": ["samples", "scale_by"],
        "transform_func": transform_latent_upscale_by
    },
    "CLIPSetLastLayer": {
        "inputs_to_track": ["clip", "stop_at_clip_layer"],
        "transform_func": transform_clip_set_last_layer
    }
}
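# A quick illustration of how one of these transform functions behaves in isolation.
# The inputs dict below is hand-made for this sketch; in real parsing it is assembled
# by process_node() from the tracked inputs of a KSampler node:
sample_inputs = {
    "seed": 241, "steps": 20, "cfg": 8.0,
    "sampler_name": "euler_ancestral", "scheduler": "normal",
    "positive": "1girl, masterpiece", "negative": "bad quality",
    "latent_image": {"width": 832, "height": 1216},
}
print(transform_ksampler(sample_inputs))
# -> {'seed': '241', 'steps': '20', 'cfg': '8.0', 'sampler': 'euler_ancestral',
#     'scheduler': 'normal', 'prompt': '1girl, masterpiece',
#     'negative_prompt': 'bad quality', 'size': '832x1216'}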
@@ -1,74 +0,0 @@
"""
KJNodes mappers extension for ComfyUI workflow parsing
"""
import logging
import re
from typing import Dict, Any

logger = logging.getLogger(__name__)

# =============================================================================
# Transform Functions
# =============================================================================

def transform_join_strings(inputs: Dict) -> str:
    """Transform function for JoinStrings nodes"""
    string1 = inputs.get("string1", "")
    string2 = inputs.get("string2", "")
    delimiter = inputs.get("delimiter", "")
    return f"{string1}{delimiter}{string2}"

def transform_string_constant(inputs: Dict) -> str:
    """Transform function for StringConstant nodes"""
    return inputs.get("string", "")

def transform_empty_latent_presets(inputs: Dict) -> Dict:
    """Transform function for EmptyLatentImagePresets nodes"""
    dimensions = inputs.get("dimensions", "")
    invert = inputs.get("invert", False)

    # Extract width and height from dimensions string
    # Expected format: "width x height (ratio)" or similar
    width = 0
    height = 0

    if dimensions:
        # Try to extract dimensions using regex
        match = re.search(r'(\d+)\s*x\s*(\d+)', dimensions)
        if match:
            width = int(match.group(1))
            height = int(match.group(2))

    # If invert is True, swap width and height
    if invert and width and height:
        width, height = height, width

    return {"width": width, "height": height, "size": f"{width}x{height}"}

def transform_int_constant(inputs: Dict) -> int:
    """Transform function for INTConstant nodes"""
    return inputs.get("value", 0)

# =============================================================================
# Node Mapper Definitions
# =============================================================================

# Define the mappers for KJNodes
NODE_MAPPERS_EXT = {
    "JoinStrings": {
        "inputs_to_track": ["string1", "string2", "delimiter"],
        "transform_func": transform_join_strings
    },
    "StringConstantMultiline": {
        "inputs_to_track": ["string"],
        "transform_func": transform_string_constant
    },
    "EmptyLatentImagePresets": {
        "inputs_to_track": ["dimensions", "invert", "batch_size"],
        "transform_func": transform_empty_latent_presets
    },
    "INTConstant": {
        "inputs_to_track": ["value"],
        "transform_func": transform_int_constant
    }
}
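# transform_empty_latent_presets pulls dimensions out of KJNodes' preset strings via
# regex; a small sketch with a made-up preset label:
print(transform_empty_latent_presets({"dimensions": "832 x 1216 (2:3)", "invert": False}))
# -> {'width': 832, 'height': 1216, 'size': '832x1216'}
print(transform_empty_latent_presets({"dimensions": "832 x 1216 (2:3)", "invert": True}))
# -> {'width': 1216, 'height': 832, 'size': '1216x832'}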
@@ -1,37 +0,0 @@
"""
Main entry point for the workflow parser module
"""
import os
import sys
import logging
from typing import Dict, Optional, Union

# Add the parent directory to sys.path to enable imports
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, '..', '..'))
sys.path.insert(0, os.path.dirname(SCRIPT_DIR))

from .parser import parse_workflow

logger = logging.getLogger(__name__)

def parse_comfyui_workflow(
    workflow_path: str,
    output_path: Optional[str] = None
) -> Dict:
    """
    Parse a ComfyUI workflow file and extract generation parameters

    Args:
        workflow_path: Path to the workflow JSON file
        output_path: Optional path to save the output JSON

    Returns:
        Dictionary containing extracted parameters
    """
    return parse_workflow(workflow_path, output_path)

if __name__ == "__main__":
    # If run directly, use the CLI
    from .cli import main
    main()
@@ -1,282 +0,0 @@
"""
Node mappers for ComfyUI workflow parsing
"""
import logging
import os
import importlib.util
import inspect
from typing import Dict, List, Any, Optional, Union, Type, Callable, Tuple

logger = logging.getLogger(__name__)

# Global mapper registry
_MAPPER_REGISTRY: Dict[str, Dict] = {}

# =============================================================================
# Mapper Definition Functions
# =============================================================================

def create_mapper(
    node_type: str,
    inputs_to_track: List[str],
    transform_func: Callable[[Dict], Any] = None
) -> Dict:
    """Create a mapper definition for a node type"""
    mapper = {
        "node_type": node_type,
        "inputs_to_track": inputs_to_track,
        "transform": transform_func or (lambda inputs: inputs)
    }
    return mapper

def register_mapper(mapper: Dict) -> None:
    """Register a node mapper in the global registry"""
    _MAPPER_REGISTRY[mapper["node_type"]] = mapper
    logger.debug(f"Registered mapper for node type: {mapper['node_type']}")

def get_mapper(node_type: str) -> Optional[Dict]:
    """Get a mapper for the specified node type"""
    return _MAPPER_REGISTRY.get(node_type)

def get_all_mappers() -> Dict[str, Dict]:
    """Get all registered mappers"""
    return _MAPPER_REGISTRY.copy()

# =============================================================================
# Node Processing Function
# =============================================================================

def process_node(node_id: str, node_data: Dict, workflow: Dict, parser: 'WorkflowParser') -> Any:  # type: ignore
    """Process a node using its mapper and extract relevant information"""
    node_type = node_data.get("class_type")
    mapper = get_mapper(node_type)

    if not mapper:
        logger.warning(f"No mapper found for node type: {node_type}")
        return None

    result = {}

    # Extract inputs based on the mapper's tracked inputs
    for input_name in mapper["inputs_to_track"]:
        if input_name in node_data.get("inputs", {}):
            input_value = node_data["inputs"][input_name]

            # Check if input is a reference to another node's output
            if isinstance(input_value, list) and len(input_value) == 2:
                try:
                    # Format is [node_id, output_slot]
                    ref_node_id, output_slot = input_value
                    # Convert node_id to string if it's an integer
                    if isinstance(ref_node_id, int):
                        ref_node_id = str(ref_node_id)

                    # Recursively process the referenced node
                    ref_value = parser.process_node(ref_node_id, workflow)

                    if ref_value is not None:
                        result[input_name] = ref_value
                    else:
                        # If we couldn't get a value from the reference, store the raw value
                        result[input_name] = input_value
                except Exception as e:
                    logger.error(f"Error processing reference in node {node_id}, input {input_name}: {e}")
                    result[input_name] = input_value
            else:
                # Direct value
                result[input_name] = input_value

    # Apply the transform function
    try:
        return mapper["transform"](result)
    except Exception as e:
        logger.error(f"Error in transform function for node {node_id} of type {node_type}: {e}")
        return result
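# Putting the registry pieces together — a hedged sketch that registers a toy mapper
# and resolves it, without the recursive reference handling (which needs a parser):
mapper = create_mapper(
    node_type="EmptyLatentImage",
    inputs_to_track=["width", "height"],
    transform_func=lambda inputs: {"size": f"{inputs.get('width', 0)}x{inputs.get('height', 0)}"},
)
register_mapper(mapper)
assert get_mapper("EmptyLatentImage")["transform"]({"width": 512, "height": 768}) == {"size": "512x768"}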
# =============================================================================
# Transform Functions
# =============================================================================

def transform_lora_loader(inputs: Dict) -> Dict:
    """Transform function for LoraLoader nodes"""
    loras_data = inputs.get("loras", [])
    lora_stack = inputs.get("lora_stack", {}).get("lora_stack", [])

    lora_texts = []

    # Process loras array
    if isinstance(loras_data, dict) and "__value__" in loras_data:
        loras_list = loras_data["__value__"]
    elif isinstance(loras_data, list):
        loras_list = loras_data
    else:
        loras_list = []

    # Process each active lora entry
    for lora in loras_list:
        if isinstance(lora, dict) and lora.get("active", False):
            lora_name = lora.get("name", "")
            strength = lora.get("strength", 1.0)
            lora_texts.append(f"<lora:{lora_name}:{strength}>")

    # Process lora_stack if valid
    if lora_stack and isinstance(lora_stack, list):
        if not (len(lora_stack) == 2 and isinstance(lora_stack[0], (str, int)) and isinstance(lora_stack[1], int)):
            for stack_entry in lora_stack:
                lora_name = stack_entry[0]
                strength = stack_entry[1]
                lora_texts.append(f"<lora:{lora_name}:{strength}>")

    result = {
        "checkpoint": inputs.get("model", {}).get("checkpoint", ""),
        "loras": " ".join(lora_texts)
    }

    if "clip" in inputs:
        result["clip_skip"] = inputs["clip"].get("clip_skip", "-1")

    return result

def transform_lora_stacker(inputs: Dict) -> Dict:
    """Transform function for LoraStacker nodes"""
    loras_data = inputs.get("loras", [])
    result_stack = []

    # Handle existing stack entries
    existing_stack = []
    lora_stack_input = inputs.get("lora_stack", [])

    if isinstance(lora_stack_input, dict) and "lora_stack" in lora_stack_input:
        existing_stack = lora_stack_input["lora_stack"]
    elif isinstance(lora_stack_input, list):
        if not (len(lora_stack_input) == 2 and isinstance(lora_stack_input[0], (str, int)) and
                isinstance(lora_stack_input[1], int)):
            existing_stack = lora_stack_input

    # Add existing entries
    if existing_stack:
        result_stack.extend(existing_stack)

    # Process new loras
    if isinstance(loras_data, dict) and "__value__" in loras_data:
        loras_list = loras_data["__value__"]
    elif isinstance(loras_data, list):
        loras_list = loras_data
    else:
        loras_list = []

    for lora in loras_list:
        if isinstance(lora, dict) and lora.get("active", False):
            lora_name = lora.get("name", "")
            strength = float(lora.get("strength", 1.0))
            result_stack.append((lora_name, strength))

    return {"lora_stack": result_stack}

def transform_trigger_word_toggle(inputs: Dict) -> str:
    """Transform function for TriggerWordToggle nodes"""
    toggle_data = inputs.get("toggle_trigger_words", [])

    if isinstance(toggle_data, dict) and "__value__" in toggle_data:
        toggle_words = toggle_data["__value__"]
    elif isinstance(toggle_data, list):
        toggle_words = toggle_data
    else:
        toggle_words = []

    # Filter active trigger words
    active_words = []
    for item in toggle_words:
        if isinstance(item, dict) and item.get("active", False):
            word = item.get("text", "")
            if word and not word.startswith("__dummy"):
                active_words.append(word)

    return ", ".join(active_words)

# =============================================================================
# Node Mapper Definitions
# =============================================================================

# Central definition of all supported node types and their configurations
NODE_MAPPERS = {
    # LoraManager nodes
    "Lora Loader (LoraManager)": {
        "inputs_to_track": ["model", "clip", "loras", "lora_stack"],
        "transform_func": transform_lora_loader
    },
    "Lora Stacker (LoraManager)": {
        "inputs_to_track": ["loras", "lora_stack"],
        "transform_func": transform_lora_stacker
    },
    "TriggerWord Toggle (LoraManager)": {
        "inputs_to_track": ["toggle_trigger_words"],
        "transform_func": transform_trigger_word_toggle
    }
}

def register_all_mappers() -> None:
    """Register all mappers from the NODE_MAPPERS dictionary"""
    for node_type, config in NODE_MAPPERS.items():
        mapper = create_mapper(
            node_type=node_type,
            inputs_to_track=config["inputs_to_track"],
            transform_func=config["transform_func"]
        )
        register_mapper(mapper)
    logger.info(f"Registered {len(NODE_MAPPERS)} node mappers")

# =============================================================================
# Extension Loading
# =============================================================================

def load_extensions(ext_dir: str = None) -> None:
    """
    Load mapper extensions from the specified directory

    Extension files should define a NODE_MAPPERS_EXT dictionary containing mapper configurations.
    These will be added to the global NODE_MAPPERS dictionary and registered automatically.
    """
    # Use default path if none provided
    if ext_dir is None:
        # Get the directory of this file
        current_dir = os.path.dirname(os.path.abspath(__file__))
        ext_dir = os.path.join(current_dir, 'ext')

    # Ensure the extension directory exists
    if not os.path.exists(ext_dir):
        os.makedirs(ext_dir, exist_ok=True)
        logger.info(f"Created extension directory: {ext_dir}")
        return

    # Load each Python file in the extension directory
    for filename in os.listdir(ext_dir):
        if filename.endswith('.py') and not filename.startswith('_'):
            module_path = os.path.join(ext_dir, filename)
            module_name = f"workflow.ext.{filename[:-3]}"  # Remove .py

            try:
                # Load the module
                spec = importlib.util.spec_from_file_location(module_name, module_path)
                if spec and spec.loader:
                    module = importlib.util.module_from_spec(spec)
                    spec.loader.exec_module(module)

                    # Check if the module defines NODE_MAPPERS_EXT
                    if hasattr(module, 'NODE_MAPPERS_EXT'):
                        # Add the extension mappers to the global NODE_MAPPERS dictionary
                        NODE_MAPPERS.update(module.NODE_MAPPERS_EXT)
                        logger.info(f"Added {len(module.NODE_MAPPERS_EXT)} mappers from extension: {filename}")
                    else:
                        logger.warning(f"Extension {filename} does not define NODE_MAPPERS_EXT dictionary")
            except Exception as e:
                logger.warning(f"Error loading extension {filename}: {e}")

    # Re-register all mappers after loading extensions
    register_all_mappers()

# Initialize the registry with default mappers
# register_default_mappers()
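# Shape of an extension file that load_extensions() would pick up from the ext/
# directory — a hypothetical example, not one of the real extensions above:
#
# ext/mynodes.py
# NODE_MAPPERS_EXT = {
#     "MyCustomSampler": {
#         "inputs_to_track": ["seed", "steps"],
#         "transform_func": lambda inputs: {"seed": str(inputs.get("seed", ""))},
#     },
# }
#
# Any .py file in ext/ that defines NODE_MAPPERS_EXT is merged into NODE_MAPPERS
# and re-registered automatically.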
@@ -1,181 +0,0 @@
"""
Main workflow parser implementation for ComfyUI
"""
import json
import logging
from typing import Dict, List, Any, Optional, Union, Set
from .mappers import get_mapper, get_all_mappers, load_extensions, process_node
from .utils import (
    load_workflow, save_output, find_node_by_type,
    trace_model_path
)

logger = logging.getLogger(__name__)

class WorkflowParser:
    """Parser for ComfyUI workflows"""

    def __init__(self):
        """Initialize the parser with mappers"""
        self.processed_nodes: Set[str] = set()  # Track processed nodes to avoid cycles
        self.node_results_cache: Dict[str, Any] = {}  # Cache for processed node results

        # Load extensions
        load_extensions()

    def process_node(self, node_id: str, workflow: Dict) -> Any:
        """Process a single node and extract relevant information"""
        # Return cached result if available
        if node_id in self.node_results_cache:
            return self.node_results_cache[node_id]

        # Check if we're in a cycle
        if node_id in self.processed_nodes:
            return None

        # Mark this node as being processed (to detect cycles)
        self.processed_nodes.add(node_id)

        if node_id not in workflow:
            self.processed_nodes.remove(node_id)
            return None

        node_data = workflow[node_id]
        node_type = node_data.get("class_type")

        result = None
        if get_mapper(node_type):
            try:
                result = process_node(node_id, node_data, workflow, self)
                # Cache the result
                self.node_results_cache[node_id] = result
            except Exception as e:
                logger.error(f"Error processing node {node_id} of type {node_type}: {e}", exc_info=True)
                # Return a partial result or None depending on how we want to handle errors
                result = {}

        # Remove node from processed set to allow it to be processed again in a different context
        self.processed_nodes.remove(node_id)
        return result

    def find_primary_sampler_node(self, workflow: Dict) -> Optional[str]:
        """
        Find the primary sampler node in the workflow.

        Priority:
        1. First try to find a SamplerCustomAdvanced node
        2. If not found, look for KSampler nodes with denoise=1.0
        3. If still not found, use the first KSampler node

        Args:
            workflow: The workflow data as a dictionary

        Returns:
            The node ID of the primary sampler node, or None if not found
        """
        # First check for SamplerCustomAdvanced nodes
        sampler_advanced_nodes = []
        ksampler_nodes = []

        # Scan workflow for sampler nodes
        for node_id, node_data in workflow.items():
            node_type = node_data.get("class_type")

            if node_type == "SamplerCustomAdvanced":
                sampler_advanced_nodes.append(node_id)
            elif node_type == "KSampler":
                ksampler_nodes.append(node_id)

        # If we found SamplerCustomAdvanced nodes, return the first one
        if sampler_advanced_nodes:
            logger.debug(f"Found SamplerCustomAdvanced node: {sampler_advanced_nodes[0]}")
            return sampler_advanced_nodes[0]

        # If we have KSampler nodes, look for one with denoise=1.0
        if ksampler_nodes:
            for node_id in ksampler_nodes:
                node_data = workflow[node_id]
                inputs = node_data.get("inputs", {})
                denoise = inputs.get("denoise", 0)

                # Check if denoise is 1.0 (allowing for small floating point differences)
                if abs(float(denoise) - 1.0) < 0.001:
                    logger.debug(f"Found KSampler node with denoise=1.0: {node_id}")
                    return node_id

            # If no KSampler with denoise=1.0 found, use the first one
            logger.debug(f"No KSampler with denoise=1.0 found, using first KSampler: {ksampler_nodes[0]}")
            return ksampler_nodes[0]

        # No sampler nodes found
        logger.warning("No sampler nodes found in workflow")
        return None

    def parse_workflow(self, workflow_data: Union[str, Dict], output_path: Optional[str] = None) -> Dict:
        """
        Parse the workflow and extract generation parameters

        Args:
            workflow_data: The workflow data as a dictionary or a file path
            output_path: Optional path to save the output JSON

        Returns:
            Dictionary containing extracted parameters
        """
        # Load workflow from file if needed
        if isinstance(workflow_data, str):
            workflow = load_workflow(workflow_data)
        else:
            workflow = workflow_data

        # Reset the processed nodes tracker and cache
        self.processed_nodes = set()
        self.node_results_cache = {}

        # Find the primary sampler node
        sampler_node_id = self.find_primary_sampler_node(workflow)
        if not sampler_node_id:
            logger.warning("No suitable sampler node found in workflow")
            return {}

        # Process sampler node to extract parameters
        sampler_result = self.process_node(sampler_node_id, workflow)
        if not sampler_result:
            return {}

        # Return the sampler result directly - it's already in the format we need.
        # This simplifies the structure and makes it easier to use in recipe_routes.py

        # Handle standard ComfyUI names vs our output format
        if "cfg" in sampler_result:
            sampler_result["cfg_scale"] = sampler_result.pop("cfg")

        # Add clip_skip = 1 to match reference output if not already present
        if "clip_skip" not in sampler_result:
            sampler_result["clip_skip"] = "1"

        # Ensure the prompt is a string and not a nested dictionary
        if "prompt" in sampler_result and isinstance(sampler_result["prompt"], dict):
            if "prompt" in sampler_result["prompt"]:
                sampler_result["prompt"] = sampler_result["prompt"]["prompt"]

        # Save the result if requested
        if output_path:
            save_output(sampler_result, output_path)

        return sampler_result


def parse_workflow(workflow_path: str, output_path: Optional[str] = None) -> Dict:
    """
    Parse a ComfyUI workflow file and extract generation parameters

    Args:
        workflow_path: Path to the workflow JSON file
        output_path: Optional path to save the output JSON

    Returns:
        Dictionary containing extracted parameters
    """
    parser = WorkflowParser()
    return parser.parse_workflow(workflow_path, output_path)
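# End-to-end usage on an in-memory workflow (API/prompt format) — parse_workflow also
# accepts a dict via WorkflowParser.parse_workflow. Node IDs and values below are made
# up, and the sketch assumes the core-node mappers extension above is on the ext path:
tiny_workflow = {
    "1": {"class_type": "CheckpointLoaderSimple", "inputs": {"ckpt_name": "sdxl.safetensors"}},
    "2": {"class_type": "CLIPTextEncode", "inputs": {"text": "a castle", "clip": ["1", 1]}},
    "3": {"class_type": "CLIPTextEncode", "inputs": {"text": "blurry", "clip": ["1", 1]}},
    "4": {"class_type": "EmptyLatentImage", "inputs": {"width": 832, "height": 1216}},
    "5": {"class_type": "KSampler", "inputs": {
        "seed": 241, "steps": 20, "cfg": 8.0, "sampler_name": "euler_ancestral",
        "scheduler": "normal", "denoise": 1.0,
        "model": ["1", 0], "positive": ["2", 0], "negative": ["3", 0], "latent_image": ["4", 0],
    }},
}
params = WorkflowParser().parse_workflow(tiny_workflow)
# params now holds e.g. prompt, negative_prompt, steps, cfg_scale, seed, size, checkpoint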
@@ -1,63 +0,0 @@
"""
Test script for the ComfyUI workflow parser
"""
import os
import json
import logging
from .parser import parse_workflow

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler()]
)
logger = logging.getLogger(__name__)

# Configure paths
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, '..', '..'))
REFS_DIR = os.path.join(ROOT_DIR, 'refs')
OUTPUT_DIR = os.path.join(ROOT_DIR, 'output')

def test_parse_flux_workflow():
    """Test parsing the flux example workflow"""
    # Ensure output directory exists
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    # Define input and output paths
    input_path = os.path.join(REFS_DIR, 'flux_prompt.json')
    output_path = os.path.join(OUTPUT_DIR, 'parsed_flux_output.json')

    # Parse workflow
    logger.info(f"Parsing workflow: {input_path}")
    result = parse_workflow(input_path, output_path)

    # Print result summary
    logger.info(f"Output saved to: {output_path}")
    logger.info("Parsing completed. Result summary:")
    logger.info(f"  LoRAs: {result.get('loras', '')}")

    gen_params = result.get('gen_params', {})
    logger.info(f"  Prompt: {gen_params.get('prompt', '')[:50]}...")
    logger.info(f"  Steps: {gen_params.get('steps', '')}")
    logger.info(f"  Sampler: {gen_params.get('sampler', '')}")
    logger.info(f"  Size: {gen_params.get('size', '')}")

    # Compare with reference output
    ref_output_path = os.path.join(REFS_DIR, 'flux_output.json')
    try:
        with open(ref_output_path, 'r') as f:
            ref_output = json.load(f)

        # Simple validation
        loras_match = result.get('loras', '') == ref_output.get('loras', '')
        prompt_match = gen_params.get('prompt', '') == ref_output.get('gen_params', {}).get('prompt', '')

        logger.info("Validation against reference:")
        logger.info(f"  LoRAs match: {loras_match}")
        logger.info(f"  Prompt match: {prompt_match}")
    except Exception as e:
        logger.warning(f"Failed to compare with reference output: {e}")

if __name__ == "__main__":
    test_parse_flux_workflow()
@@ -1,120 +0,0 @@
"""
Utility functions for ComfyUI workflow parsing
"""
import json
import os
import logging
from typing import Dict, List, Any, Optional, Union, Set, Tuple

logger = logging.getLogger(__name__)

def load_workflow(workflow_path: str) -> Dict:
    """Load a workflow from a JSON file"""
    try:
        with open(workflow_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except Exception as e:
        logger.error(f"Error loading workflow from {workflow_path}: {e}")
        raise

def save_output(output: Dict, output_path: str) -> None:
    """Save the parsed output to a JSON file"""
    os.makedirs(os.path.dirname(os.path.abspath(output_path)), exist_ok=True)
    try:
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(output, f, indent=4)
    except Exception as e:
        logger.error(f"Error saving output to {output_path}: {e}")
        raise

def find_node_by_type(workflow: Dict, node_type: str) -> Optional[str]:
    """Find a node of the specified type in the workflow"""
    for node_id, node_data in workflow.items():
        if node_data.get("class_type") == node_type:
            return node_id
    return None

def find_nodes_by_type(workflow: Dict, node_type: str) -> List[str]:
    """Find all nodes of the specified type in the workflow"""
    return [node_id for node_id, node_data in workflow.items()
            if node_data.get("class_type") == node_type]

def get_input_node_ids(workflow: Dict, node_id: str) -> Dict[str, Tuple[str, int]]:
    """
    Get the node IDs for all inputs of the given node

    Returns a dictionary mapping input names to (node_id, output_slot) tuples
    """
    result = {}
    if node_id not in workflow:
        return result

    node_data = workflow[node_id]
    for input_name, input_value in node_data.get("inputs", {}).items():
        # Check if this input is connected to another node
        if isinstance(input_value, list) and len(input_value) == 2:
            # Input is connected to another node's output
            # Format: [node_id, output_slot]
            ref_node_id, output_slot = input_value
            result[input_name] = (str(ref_node_id), output_slot)

    return result

def trace_model_path(workflow: Dict, start_node_id: str) -> List[str]:
    """
    Trace the model path backward from KSampler to find all LoRA nodes

    Args:
        workflow: The workflow data
        start_node_id: The starting node ID (usually KSampler)

    Returns:
        List of node IDs in the model path
    """
    model_path_nodes = []

    # Get the model input from the start node
    if start_node_id not in workflow:
        return model_path_nodes

    # Track visited nodes to avoid cycles
    visited = set()

    # Stack for depth-first search
    stack = []

    # Get model input reference if available
    start_node = workflow[start_node_id]
    if "inputs" in start_node and "model" in start_node["inputs"] and isinstance(start_node["inputs"]["model"], list):
        model_ref = start_node["inputs"]["model"]
        stack.append(str(model_ref[0]))

    # Perform depth-first search
    while stack:
        node_id = stack.pop()

        # Skip if already visited
        if node_id in visited:
            continue

        # Mark as visited
        visited.add(node_id)

        # Skip if node doesn't exist
        if node_id not in workflow:
            continue

        node = workflow[node_id]
        node_type = node.get("class_type", "")

        # Add current node to result list if it's a LoRA node
        if "Lora" in node_type:
            model_path_nodes.append(node_id)

        # Add all input nodes that have a "model" or "lora_stack" output to the stack
        if "inputs" in node:
            for input_name, input_value in node["inputs"].items():
                if input_name in ["model", "lora_stack"] and isinstance(input_value, list) and len(input_value) == 2:
                    stack.append(str(input_value[0]))

    return model_path_nodes
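# Sketch of trace_model_path on a minimal chain Checkpoint -> LoraLoader -> KSampler.
# The walk follows "model"/"lora_stack" inputs backwards and collects nodes whose
# class_type contains "Lora"; node IDs here are made up:
wf = {
    "1": {"class_type": "CheckpointLoaderSimple", "inputs": {"ckpt_name": "sdxl.safetensors"}},
    "2": {"class_type": "Lora Loader (LoraManager)", "inputs": {"model": ["1", 0], "loras": []}},
    "3": {"class_type": "KSampler", "inputs": {"model": ["2", 0], "denoise": 1.0}},
}
print(trace_model_path(wf, "3"))  # -> ['2']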
@@ -1,17 +1,18 @@
 [project]
 name = "comfyui-lora-manager"
-description = "LoRA Manager for ComfyUI - Access it at http://localhost:8188/loras for managing LoRA models with previews and metadata integration."
-version = "0.8.3"
+description = "Revolutionize your workflow with the ultimate LoRA companion for ComfyUI!"
+version = "0.8.26"
 license = {file = "LICENSE"}
 dependencies = [
     "aiohttp",
     "jinja2",
     "safetensors",
     "watchdog",
     "beautifulsoup4",
     "piexif",
     "Pillow",
-    "requests"
+    "requests",
+    "olefile", # for getting rid of warning message
+    "toml",
+    "natsort",
+    "GitPython"
 ]

 [project.urls]
@@ -21,4 +22,4 @@ Repository = "https://github.com/willmiao/ComfyUI-Lora-Manager"
 [tool.comfy]
 PublisherId = "willmiao"
 DisplayName = "ComfyUI-Lora-Manager"
-Icon = ""
+Icon = "https://github.com/willmiao/ComfyUI-Lora-Manager/blob/main/static/images/android-chrome-512x512.png?raw=true"
refs/output.json
@@ -1,11 +1,258 @@
Previous contents (parsed workflow output):
{
    "loras": "<lora:ck-neon-retrowave-IL-000012:0.8> <lora:aorunIllstrious:1> <lora:ck-shadow-circuit-IL-000012:0.78> <lora:MoriiMee_Gothic_Niji_Style_Illustrious_r1:0.45> <lora:ck-nc-cyberpunk-IL-000011:0.4>",
    "prompt": "in the style of ck-rw, aorun, scales, makeup, bare shoulders, pointy ears, dress, claws, in the style of cksc, artist:moriimee, in the style of cknc, masterpiece, best quality, good quality, very aesthetic, absurdres, newest, 8K, depth of field, focused subject, close up, stylized, in gold and neon shades, wabi sabi, 1girl, rainbow angel wings, looking at viewer, dynamic angle, from below, from side, relaxing",
    "negative_prompt": "bad quality, worst quality, worst detail, sketch ,signature, watermark, patreon logo, nsfw",
    "steps": "20",
    "sampler": "euler_ancestral",
    "cfg_scale": "8",
    "seed": "241",
    "size": "832x1216",
    "clip_skip": "2"
}

New contents (Civitai model metadata; truncated below):
{
    "id": 649516,
    "name": "Cynthia -シロナ - Pokemon Diamond and Pearl - PDXL LORA",
    "description": "<p><strong>Warning: Without Adetailer eyes are fucked (rainbow color and artefact)</strong></p><p><span style=\"color:rgb(193, 194, 197)\">Trained on </span><a target=\"_blank\" rel=\"ugc\" href=\"https://civitai.com/models/257749/horsefucker-diffusion-v6-xl\"><strong>Pony Diffusion V6 XL</strong></a> with 63 pictures.<br />Best result with weight between : 0.8-1.</p><p><span style=\"color:rgb(193, 194, 197)\">Basic prompts : </span><code>1girl, cynthia \\(pokemon\\), blonde hair, hair over one eye, very long hair, grey eyes, eyelashes, hair ornament</code> <br /><span style=\"color:rgb(193, 194, 197)\">Outfit prompts : </span><code>fur collar, black coat, fur-trimmed coat, long sleeves, black pants, black shirt, high heels</code></p><p>Reviews are really appreciated, i love to see the community use my work, that's why I share it.<br />If you like my work, you can tip me <a target=\"_blank\" rel=\"ugc\" href=\"https://ko-fi.com/konan49773\"><strong>here.</strong></a></p><p>Got a specific request ? I'm open for commission on my <a target=\"_blank\" rel=\"ugc\" href=\"https://ko-fi.com/konan49773/commissions\"><strong>kofi</strong></a> or<strong> </strong><a target=\"_blank\" rel=\"ugc\" href=\"https://www.fiverr.com/konanai/create-lora-model-for-you\"><strong>fiverr gig</strong></a> *! If you provide enough data, OCs are accepted</p>",
    "allowNoCredit": true,
    "allowCommercialUse": [
        "Image",
        "RentCivit"
    ],
    "allowDerivatives": true,
    "allowDifferentLicense": true,
    "type": "LORA",
    "minor": false,
    "sfwOnly": false,
    "poi": false,
    "nsfw": false,
    "nsfwLevel": 29,
    "availability": "Public",
    "cosmetic": null,
    "supportsGeneration": true,
    "stats": {
        "downloadCount": 811,
        "favoriteCount": 0,
        "thumbsUpCount": 175,
        "thumbsDownCount": 0,
        "commentCount": 4,
        "ratingCount": 0,
        "rating": 0,
        "tippedAmountCount": 10
    },
    "creator": {
        "username": "Konan",
        "image": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/7cd552a1-60fe-4baf-a0e4-f7d5d5381711/width=96/Konan.jpeg"
    },
    "tags": [
        "anime",
        "character",
        "cynthia",
        "woman",
        "pokemon",
        "pokegirl"
    ],
    "modelVersions": [
        {
            "id": 726676,
            "index": 0,
            "name": "v1.0",
            "baseModel": "Pony",
            "createdAt": "2024-08-16T01:13:16.099Z",
            "publishedAt": "2024-08-16T01:14:44.984Z",
            "status": "Published",
            "availability": "Public",
            "nsfwLevel": 29,
            "trainedWords": [
                "1girl, cynthia \\(pokemon\\), blonde hair, hair over one eye, very long hair, grey eyes, eyelashes, hair ornament",
                "fur collar, black coat, fur-trimmed coat, long sleeves, black pants, black shirt, high heels"
            ],
            "covered": true,
            "stats": {
                "downloadCount": 811,
                "ratingCount": 0,
                "rating": 0,
                "thumbsUpCount": 175,
                "thumbsDownCount": 0
            },
            "files": [
                {
                    "id": 641092,
                    "sizeKB": 56079.65234375,
                    "name": "CynthiaXL.safetensors",
                    "type": "Model",
                    "pickleScanResult": "Success",
                    "pickleScanMessage": "No Pickle imports",
                    "virusScanResult": "Success",
                    "virusScanMessage": null,
                    "scannedAt": "2024-08-16T01:17:19.087Z",
                    "metadata": {
                        "format": "SafeTensor"
                    },
                    "hashes": {},
                    "downloadUrl": "https://civitai.com/api/download/models/726676",
                    "primary": true
                }
            ],
            "images": [
                {
                    "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/b346d757-2b59-4aeb-9f09-3bee2724519d/width=1248/24511993.jpeg",
                    "nsfwLevel": 1,
                    "width": 1248,
                    "height": 1824,
                    "hash": "UqNc==RP.9s+~pxvIst7kWWBWBjY%MWBt7WB",
                    "type": "image",
                    "minor": false,
                    "poi": false,
                    "hasMeta": true,
                    "hasPositivePrompt": true,
                    "onSite": false,
                    "remixOfId": null
                },
                {
                    "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/fc132ac0-cc1c-4b68-a1d7-5b97b0996ac2/width=1248/24511997.jpeg",
                    "nsfwLevel": 1,
                    "width": 1248,
                    "height": 1824,
                    "hash": "UMGSS+?tTw.60MIX9cbb~WxHRRR-NEtLRiR%",
                    "type": "image",
                    "minor": false,
                    "poi": false,
                    "hasMeta": true,
                    "hasPositivePrompt": true,
                    "onSite": false,
                    "remixOfId": null
                },
                {
                    "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/7b3237d1-e672-466a-85d0-cc5dd42ab130/width=1160/24512001.jpeg",
                    "nsfwLevel": 4,
                    "width": 1160,
                    "height": 1696,
                    "hash": "U9NA6f~o00%h00wvIYt74:ER-=D%5600DiE1",
                    "type": "image",
                    "minor": false,
                    "poi": false,
                    "hasMeta": true,
                    "hasPositivePrompt": true,
                    "onSite": false,
                    "remixOfId": null
                },
                {
                    "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/ccd7d11d-4fa9-4434-85a1-fb999312e60d/width=1248/24511991.jpeg",
                    "nsfwLevel": 1,
                    "width": 1248,
                    "height": 1824,
                    "hash": "UyNTg.j?~qxu?aoLRkj]%MfkM{jZaya}a#ax",
                    "type": "image",
                    "minor": false,
                    "poi": false,
                    "hasMeta": true,
                    "hasPositivePrompt": true,
                    "onSite": false,
                    "remixOfId": null
                },
                {
                    "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/1743be6d-7fe5-4b55-9f19-c931618fa259/width=1248/24511996.jpeg",
                    "nsfwLevel": 4,
                    "width": 1248,
                    "height": 1824,
                    "hash": "UGOC~n^+?w~6Tx_4oM^$yYEkMds74:9F#*xY",
                    "type": "image",
                    "minor": false,
                    "poi": false,
                    "hasMeta": true,
                    "hasPositivePrompt": true,
                    "onSite": false,
                    "remixOfId": null
                },
                {
                    "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/91693c98-d037-4489-882c-100eb26019a0/width=1160/24512010.jpeg",
                    "nsfwLevel": 4,
                    "width": 1160,
                    "height": 1696,
                    "hash": "UJI}kp^-Kl%hXAIX4;Nf^+M|9GRP0Mt8%L%2",
                    "type": "image",
                    "minor": false,
                    "poi": false,
                    "hasMeta": true,
                    "hasPositivePrompt": true,
                    "onSite": false,
                    "remixOfId": null
                },
                {
                    "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/49c7a294-ac5b-4832-98e5-2acd0f1a8782/width=1248/24512017.jpeg",
                    "nsfwLevel": 4,
                    "width": 1248,
                    "height": 1824,
                    "hash": "UML;8Qn|9G%3mnWA4nWFMf%N?Hae~qog-oNF",
                    "type": "image",
                    "minor": false,
                    "poi": false,
                    "hasMeta": true,
                    "hasPositivePrompt": true,
                    "onSite": false,
                    "remixOfId": null
                },
                {
                    "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/d7b442f2-6ead-4a7a-9578-54d9ec2ff148/width=1248/24512015.jpeg",
                    "nsfwLevel": 1,
                    "width": 1248,
                    "height": 1824,
                    "hash": "UPGR#kt8xw%M0LWC9bWC?wxtR*NLM^jrxWM|",
                    "type": "image",
                    "minor": false,
                    "poi": false,
                    "hasMeta": true,
                    "hasPositivePrompt": true,
                    "onSite": false,
                    "remixOfId": null
                },
                {
                    "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/d840f1e9-3dd3-4531-b83a-1ba2c6b7feaa/width=1160/24512004.jpeg",
                    "nsfwLevel": 8,
                    "width": 1160,
                    "height": 1696,
                    "hash": "ULNm1i_39wi^*I%hDiM_tlo#xuV?^kNIxCs,",
                    "type": "image",
                    "minor": false,
                    "poi": false,
                    "hasMeta": true,
                    "hasPositivePrompt": true,
                    "onSite": false,
                    "remixOfId": null
                },
                {
                    "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/520387ae-c176-43e3-92bd-5cd2a672475e/width=1248/24512012.jpeg",
                    "nsfwLevel": 4,
                    "width": 1248,
                    "height": 1824,
                    "hash": "URM%l.%M.9Ip~poIkExu_3V@M|xuD%oJM{D*",
                    "type": "image",
                    "minor": false,
                    "poi": false,
                    "hasMeta": true,
                    "hasPositivePrompt": true,
                    "onSite": false,
                    "remixOfId": null
                },
                {
                    "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/9ea28b94-f326-4776-83ff-851cc203c627/width=1248/24511988.jpeg",
                    "nsfwLevel": 1,
                    "width": 1248,
                    "height": 1824,
                    "hash": "U-PZloog_Nxut6j]WXWB-;j?IVa#ofaxj]j]",
                    "type": "image",
                    "minor": false,
                    "poi": false,
                    "hasMeta": true,
                    "hasPositivePrompt": true,
                    "onSite": false,
                    "remixOfId": null
                },
                {
                    "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/2e749dbb-7d5a-48f1-8e29-fea5022a5fe9/width=1248/24522268.jpeg",
|
||||
"nsfwLevel": 16,
|
||||
"width": 1248,
|
||||
"height": 1824,
|
||||
"hash": "UPLgtm9Z0z=|0yRRE2-A9rWAoNE1~DwOr=t7",
|
||||
"type": "image",
|
||||
"minor": false,
|
||||
"poi": false,
|
||||
"hasMeta": true,
|
||||
"hasPositivePrompt": true,
|
||||
"onSite": false,
|
||||
"remixOfId": null
|
||||
}
|
||||
],
|
||||
"downloadUrl": "https://civitai.com/api/download/models/726676"
|
||||
}
|
||||
]
|
||||
}
|
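For reference, records of the shape above are what Civitai's public v1 API returns. A minimal sketch of fetching one, assuming the documented GET /api/v1/model-versions/{id} endpoint and the requests package already listed in requirements.txt; the id 726676 is the version id from the fixture above:

import requests

def fetch_model_version(version_id: int) -> dict:
    # Returns the JSON document for one model version (files, images, stats).
    resp = requests.get(
        f"https://civitai.com/api/v1/model-versions/{version_id}", timeout=30
    )
    resp.raise_for_status()
    return resp.json()

if __name__ == "__main__":
    data = fetch_model_version(726676)
    print(data["name"], data["downloadUrl"])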
@@ -1,294 +0,0 @@
Loading workflow from D:\Workspace\ComfyUI\custom_nodes\ComfyUI-Lora-Manager\refs\prompt.json
Expected output from D:\Workspace\ComfyUI\custom_nodes\ComfyUI-Lora-Manager\refs\output.json

Expected output:
{
  "loras": "<lora:ck-neon-retrowave-IL-000012:0.8> <lora:aorunIllstrious:1> <lora:ck-shadow-circuit-IL-000012:0.78> <lora:MoriiMee_Gothic_Niji_Style_Illustrious_r1:0.45> <lora:ck-nc-cyberpunk-IL-000011:0.4>",
  "gen_params": {
    "prompt": "in the style of ck-rw, aorun, scales, makeup, bare shoulders, pointy ears, dress, claws, in the style of cksc, artist:moriimee, in the style of cknc, masterpiece, best quality, good quality, very aesthetic, absurdres, newest, 8K, depth of field, focused subject, close up, stylized, in gold and neon shades, wabi sabi, 1girl, rainbow angel wings, looking at viewer, dynamic angle, from below, from side, relaxing",
    "negative_prompt": "bad quality, worst quality, worst detail, sketch ,signature, watermark, patreon logo, nsfw",
    "steps": "20",
    "sampler": "euler_ancestral",
    "cfg_scale": "8",
    "seed": "241",
    "size": "832x1216",
    "clip_skip": "2"
  }
}

Sampler node:
{
  "inputs": {
    "seed": 241,
    "steps": 20,
    "cfg": 8,
    "sampler_name": "euler_ancestral",
    "scheduler": "karras",
    "denoise": 1,
    "model": [
      "56",
      0
    ],
    "positive": [
      "6",
      0
    ],
    "negative": [
      "7",
      0
    ],
    "latent_image": [
      "5",
      0
    ]
  },
  "class_type": "KSampler",
  "_meta": {
    "title": "KSampler"
  }
}

Extracted parameters:
seed: 241
steps: 20
cfg_scale: 8

Positive node (6):
{
  "inputs": {
    "text": [
      "22",
      0
    ],
    "clip": [
      "56",
      1
    ]
  },
  "class_type": "CLIPTextEncode",
  "_meta": {
    "title": "CLIP Text Encode (Prompt)"
  }
}

Text node (22):
{
  "inputs": {
    "string1": [
      "55",
      0
    ],
    "string2": [
      "21",
      0
    ],
    "delimiter": ", "
  },
  "class_type": "JoinStrings",
  "_meta": {
    "title": "Join Strings"
  }
}

String1 node (55):
{
  "inputs": {
    "group_mode": true,
    "toggle_trigger_words": [
      {
        "text": "in the style of ck-rw",
        "active": true
      },
      {
        "text": "aorun, scales, makeup, bare shoulders, pointy ears",
        "active": true
      },
      {
        "text": "dress",
        "active": true
      },
      {
        "text": "claws",
        "active": true
      },
      {
        "text": "in the style of cksc",
        "active": true
      },
      {
        "text": "artist:moriimee",
        "active": true
      },
      {
        "text": "in the style of cknc",
        "active": true
      },
      {
        "text": "__dummy_item__",
        "active": false,
        "_isDummy": true
      },
      {
        "text": "__dummy_item__",
        "active": false,
        "_isDummy": true
      }
    ],
    "orinalMessage": "in the style of ck-rw,, aorun, scales, makeup, bare shoulders, pointy ears,, dress,, claws,, in the style of cksc,, artist:moriimee,, in the style of cknc",
    "trigger_words": [
      "56",
      2
    ]
  },
  "class_type": "TriggerWord Toggle (LoraManager)",
  "_meta": {
    "title": "TriggerWord Toggle (LoraManager)"
  }
}

String2 node (21):
{
  "inputs": {
    "string": "masterpiece, best quality, good quality, very aesthetic, absurdres, newest, 8K, depth of field, focused subject, close up, stylized, in gold and neon shades, wabi sabi, 1girl, rainbow angel wings, looking at viewer, dynamic angle, from below, from side, relaxing",
    "strip_newlines": false
  },
  "class_type": "StringConstantMultiline",
  "_meta": {
    "title": "positive"
  }
}

Negative node (7):
{
  "inputs": {
    "text": "bad quality, worst quality, worst detail, sketch ,signature, watermark, patreon logo, nsfw",
    "clip": [
      "56",
      1
    ]
  },
  "class_type": "CLIPTextEncode",
  "_meta": {
    "title": "CLIP Text Encode (Prompt)"
  }
}

LoRA nodes (3):

LoRA node 56:
{
  "inputs": {
    "text": "<lora:ck-shadow-circuit-IL-000012:0.78> <lora:MoriiMee_Gothic_Niji_Style_Illustrious_r1:0.45> <lora:ck-nc-cyberpunk-IL-000011:0.4>",
    "loras": [
      {
        "name": "ck-shadow-circuit-IL-000012",
        "strength": 0.78,
        "active": true
      },
      {
        "name": "MoriiMee_Gothic_Niji_Style_Illustrious_r1",
        "strength": 0.45,
        "active": true
      },
      {
        "name": "ck-nc-cyberpunk-IL-000011",
        "strength": 0.4,
        "active": true
      },
      {
        "name": "__dummy_item1__",
        "strength": 0,
        "active": false,
        "_isDummy": true
      },
      {
        "name": "__dummy_item2__",
        "strength": 0,
        "active": false,
        "_isDummy": true
      }
    ],
    "model": [
      "4",
      0
    ],
    "clip": [
      "4",
      1
    ],
    "lora_stack": [
      "57",
      0
    ]
  },
  "class_type": "Lora Loader (LoraManager)",
  "_meta": {
    "title": "Lora Loader (LoraManager)"
  }
}

LoRA node 57:
{
  "inputs": {
    "text": "<lora:aorunIllstrious:1>",
    "loras": [
      {
        "name": "aorunIllstrious",
        "strength": "0.90",
        "active": true
      },
      {
        "name": "__dummy_item1__",
        "strength": 0,
        "active": false,
        "_isDummy": true
      },
      {
        "name": "__dummy_item2__",
        "strength": 0,
        "active": false,
        "_isDummy": true
      }
    ],
    "lora_stack": [
      "59",
      0
    ]
  },
  "class_type": "Lora Stacker (LoraManager)",
  "_meta": {
    "title": "Lora Stacker (LoraManager)"
  }
}

LoRA node 59:
{
  "inputs": {
    "text": "<lora:ck-neon-retrowave-IL-000012:0.8>",
    "loras": [
      {
        "name": "ck-neon-retrowave-IL-000012",
        "strength": 0.8,
        "active": true
      },
      {
        "name": "__dummy_item1__",
        "strength": 0,
        "active": false,
        "_isDummy": true
      },
      {
        "name": "__dummy_item2__",
        "strength": 0,
        "active": false,
        "_isDummy": true
      }
    ]
  },
  "class_type": "Lora Stacker (LoraManager)",
  "_meta": {
    "title": "Lora Stacker (LoraManager)"
  }
}

Test completed.
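The expected output above serializes active LoRAs in <lora:name:strength> form. The repo's real extractor walks workflow nodes, as the trace shows; purely as an illustration of the token format itself, a hypothetical standalone helper might look like this (parse_lora_tokens is not a function from this repo):

import re

# Matches <lora:NAME:STRENGTH> tokens, e.g. <lora:aorunIllstrious:1>.
LORA_PATTERN = re.compile(r"<lora:([^:>]+):([\d.]+)>")

def parse_lora_tokens(text: str) -> list[tuple[str, float]]:
    return [(name, float(strength)) for name, strength in LORA_PATTERN.findall(text)]

tokens = parse_lora_tokens("<lora:ck-neon-retrowave-IL-000012:0.8> <lora:aorunIllstrious:1>")
# -> [('ck-neon-retrowave-IL-000012', 0.8), ('aorunIllstrious', 1.0)]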
@@ -1,8 +1,10 @@
aiohttp
jinja2
safetensors
watchdog
beautifulsoup4
piexif
Pillow
requests
olefile
toml
numpy
natsort
GitPython
14  settings.json.example  Normal file
@@ -0,0 +1,14 @@
{
  "civitai_api_key": "your_civitai_api_key_here",
  "show_only_sfw": false,
  "folder_paths": {
    "loras": [
      "C:/path/to/your/loras_folder",
      "C:/path/to/another/loras_folder"
    ],
    "checkpoints": [
      "C:/path/to/your/checkpoints_folder",
      "C:/path/to/another/checkpoints_folder"
    ]
  }
}
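A minimal sketch of consuming this example settings file, mirroring the lookup that standalone.py's MockFolderPaths (below) performs: read settings.json, take the configured roots for one model type, and keep only directories that exist. The helper name load_folder_paths is illustrative, not part of the repo:

import json
import os

def load_folder_paths(folder_name: str, settings_path: str = "settings.json") -> list[str]:
    if not os.path.exists(settings_path):
        return []
    with open(settings_path, "r", encoding="utf-8") as f:
        settings = json.load(f)
    paths = settings.get("folder_paths", {}).get(folder_name, [])
    # Keep only directories that actually exist on disk.
    return [p for p in paths if os.path.exists(p)]

# e.g. load_folder_paths("loras") -> ["C:/path/to/your/loras_folder", ...]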
418  standalone.py  Normal file
@@ -0,0 +1,418 @@
from pathlib import Path
import os
import sys
import json

# Create mock modules for py/nodes directory - add this before any other imports
def mock_nodes_directory():
    """Create mock modules for all Python files in the py/nodes directory"""
    nodes_dir = os.path.join(os.path.dirname(__file__), 'py', 'nodes')
    if os.path.exists(nodes_dir):
        # Create a mock module for the nodes package itself
        sys.modules['py.nodes'] = type('MockNodesModule', (), {})

        # Create mock modules for all Python files in the nodes directory
        for file in os.listdir(nodes_dir):
            if file.endswith('.py') and file != '__init__.py':
                module_name = file[:-3]  # Remove .py extension
                full_module_name = f'py.nodes.{module_name}'
                # Create empty module object
                sys.modules[full_module_name] = type(f'Mock{module_name.capitalize()}Module', (), {})
                print(f"Created mock module for: {full_module_name}")

# Run the mocking function before any other imports
mock_nodes_directory()

# Create mock folder_paths module BEFORE any other imports
class MockFolderPaths:
    @staticmethod
    def get_folder_paths(folder_name):
        # Load paths from settings.json
        settings_path = os.path.join(os.path.dirname(__file__), 'settings.json')
        try:
            if os.path.exists(settings_path):
                with open(settings_path, 'r', encoding='utf-8') as f:
                    settings = json.load(f)

                # For diffusion_models, combine unet and diffusers paths
                if folder_name == "diffusion_models":
                    paths = []
                    if 'folder_paths' in settings:
                        if 'unet' in settings['folder_paths']:
                            paths.extend(settings['folder_paths']['unet'])
                        if 'diffusers' in settings['folder_paths']:
                            paths.extend(settings['folder_paths']['diffusers'])
                    # Filter out paths that don't exist
                    valid_paths = [p for p in paths if os.path.exists(p)]
                    if valid_paths:
                        return valid_paths
                    else:
                        print(f"Warning: No valid paths found for {folder_name}")
                # For other folder names, return their paths directly
                elif 'folder_paths' in settings and folder_name in settings['folder_paths']:
                    paths = settings['folder_paths'][folder_name]
                    valid_paths = [p for p in paths if os.path.exists(p)]
                    if valid_paths:
                        return valid_paths
                    else:
                        print(f"Warning: No valid paths found for {folder_name}")
        except Exception as e:
            print(f"Error loading folder paths from settings: {e}")

        # Fallback to empty list if no paths found
        return []

    @staticmethod
    def get_temp_directory():
        return os.path.join(os.path.dirname(__file__), 'temp')

    @staticmethod
    def set_temp_directory(path):
        os.makedirs(path, exist_ok=True)
        return path

# Create mock server module with PromptServer
class MockPromptServer:
    def __init__(self):
        self.app = None

    def send_sync(self, *args, **kwargs):
        pass

# Create mock metadata_collector module
class MockMetadataCollector:
    def init(self):
        pass

    def get_metadata(self, prompt_id=None):
        return {}

# Initialize basic mocks before any imports
sys.modules['folder_paths'] = MockFolderPaths()
sys.modules['server'] = type('server', (), {'PromptServer': MockPromptServer()})
sys.modules['py.metadata_collector'] = MockMetadataCollector()

# Now we can safely import modules that depend on folder_paths and server
import argparse
import asyncio
import logging
from aiohttp import web

# Setup logging
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger("lora-manager-standalone")

# Configure aiohttp access logger to be less verbose
logging.getLogger('aiohttp.access').setLevel(logging.WARNING)

# Add specific suppression for connection reset errors
class ConnectionResetFilter(logging.Filter):
    def filter(self, record):
        # Filter out connection reset errors that are not critical
        if "ConnectionResetError" in str(record.getMessage()):
            return False
        if "_call_connection_lost" in str(record.getMessage()):
            return False
        if "WinError 10054" in str(record.getMessage()):
            return False
        return True

# Apply the filter to asyncio logger
asyncio_logger = logging.getLogger("asyncio")
asyncio_logger.addFilter(ConnectionResetFilter())

# Now we can import the global config from our local modules
from py.config import config

class StandaloneServer:
    """Server implementation for standalone mode"""

    def __init__(self):
        self.app = web.Application(logger=logger)
        self.instance = self  # Make it compatible with PromptServer.instance pattern

        # Ensure the app's access logger is configured to reduce verbosity
        self.app._subapps = []  # Ensure this exists to avoid AttributeError

    async def setup(self):
        """Set up the standalone server"""
        # Create placeholders for compatibility with ComfyUI's implementation
        self.last_prompt_id = None
        self.last_node_id = None
        self.client_id = None

        # Set up routes
        self.setup_routes()

        # Add startup and shutdown handlers
        self.app.on_startup.append(self.on_startup)
        self.app.on_shutdown.append(self.on_shutdown)

    def setup_routes(self):
        """Set up basic routes"""
        # Add a simple status endpoint
        self.app.router.add_get('/', self.handle_status)

        # Add static route for example images if the path exists in settings
        settings_path = os.path.join(os.path.dirname(__file__), 'settings.json')
        if os.path.exists(settings_path):
            with open(settings_path, 'r', encoding='utf-8') as f:
                settings = json.load(f)
            example_images_path = settings.get('example_images_path')
            logger.info(f"Example images path: {example_images_path}")
            if example_images_path and os.path.exists(example_images_path):
                self.app.router.add_static('/example_images_static', example_images_path)
                logger.info(f"Added static route for example images: /example_images_static -> {example_images_path}")

    async def handle_status(self, request):
        """Handle status request by redirecting to loras page"""
        # Redirect to loras page instead of showing status
        raise web.HTTPFound('/loras')

        # Original JSON response (commented out)
        # return web.json_response({
        #     "status": "running",
        #     "mode": "standalone",
        #     "loras_roots": config.loras_roots,
        #     "checkpoints_roots": config.checkpoints_roots
        # })

    async def on_startup(self, app):
        """Startup handler"""
        logger.info("LoRA Manager standalone server starting...")

    async def on_shutdown(self, app):
        """Shutdown handler"""
        logger.info("LoRA Manager standalone server shutting down...")

    def send_sync(self, event_type, data, sid=None):
        """Stub for compatibility with PromptServer"""
        # In standalone mode, we don't have the same websocket system
        pass

    async def start(self, host='127.0.0.1', port=8188):
        """Start the server"""
        runner = web.AppRunner(self.app)
        await runner.setup()
        site = web.TCPSite(runner, host, port)
        await site.start()

        # Log the server address with a clickable localhost URL regardless of the actual binding
        logger.info(f"Server started at http://127.0.0.1:{port}")

        # Keep the server running
        while True:
            await asyncio.sleep(3600)  # Sleep for a long time

    async def publish_loop(self):
        """Stub for compatibility with PromptServer"""
        # This method exists in ComfyUI's server but we don't need it
        pass

# After all mocks are in place, import LoraManager
from py.lora_manager import LoraManager

class StandaloneLoraManager(LoraManager):
    """Extended LoraManager for standalone mode"""

    @classmethod
    def add_routes(cls, server_instance):
        """Initialize and register all routes for standalone mode"""
        app = server_instance.app

        # Store app in a global-like location for compatibility
        sys.modules['server'].PromptServer.instance = server_instance

        added_targets = set()  # Track already added target paths

        # Add static routes for each lora root
        for idx, root in enumerate(config.loras_roots, start=1):
            if not os.path.exists(root):
                logger.warning(f"Lora root path does not exist: {root}")
                continue

            preview_path = f'/loras_static/root{idx}/preview'

            # Check if this root is a link path in the mappings
            real_root = root
            for target, link in config._path_mappings.items():
                if os.path.normpath(link) == os.path.normpath(root):
                    # If so, route should point to the target (real path)
                    real_root = target
                    break

            # Normalize and standardize path display for consistency
            display_root = real_root.replace('\\', '/')

            # Add static route for original path - use the normalized path
            app.router.add_static(preview_path, real_root)
            logger.info(f"Added static route {preview_path} -> {display_root}")

            # Record route mapping with normalized path
            config.add_route_mapping(real_root, preview_path)
            added_targets.add(os.path.normpath(real_root))

        # Add static routes for each checkpoint root
        for idx, root in enumerate(config.base_models_roots, start=1):
            if not os.path.exists(root):
                logger.warning(f"Checkpoint root path does not exist: {root}")
                continue

            preview_path = f'/checkpoints_static/root{idx}/preview'

            # Check if this root is a link path in the mappings
            real_root = root
            for target, link in config._path_mappings.items():
                if os.path.normpath(link) == os.path.normpath(root):
                    # If so, route should point to the target (real path)
                    real_root = target
                    break

            # Normalize and standardize path display for consistency
            display_root = real_root.replace('\\', '/')

            # Add static route for original path
            app.router.add_static(preview_path, real_root)
            logger.info(f"Added static route {preview_path} -> {display_root}")

            # Record route mapping
            config.add_route_mapping(real_root, preview_path)
            added_targets.add(os.path.normpath(real_root))

        # Add static routes for each embedding root
        for idx, root in enumerate(getattr(config, "embeddings_roots", []), start=1):
            if not os.path.exists(root):
                logger.warning(f"Embedding root path does not exist: {root}")
                continue

            preview_path = f'/embeddings_static/root{idx}/preview'

            real_root = root
            for target, link in config._path_mappings.items():
                if os.path.normpath(link) == os.path.normpath(root):
                    real_root = target
                    break

            display_root = real_root.replace('\\', '/')
            app.router.add_static(preview_path, real_root)
            logger.info(f"Added static route {preview_path} -> {display_root}")

            config.add_route_mapping(real_root, preview_path)
            added_targets.add(os.path.normpath(real_root))

        # Add static routes for symlink target paths that aren't already covered
        link_idx = {
            'lora': 1,
            'checkpoint': 1,
            'embedding': 1
        }

        for target_path, link_path in config._path_mappings.items():
            norm_target = os.path.normpath(target_path)
            if norm_target not in added_targets:
                # Determine if this is a checkpoint, lora, or embedding link based on path
                is_checkpoint = any(os.path.normpath(cp_root) in os.path.normpath(link_path) for cp_root in config.base_models_roots)
                is_checkpoint = is_checkpoint or any(os.path.normpath(cp_root) in norm_target for cp_root in config.base_models_roots)
                is_embedding = any(os.path.normpath(emb_root) in os.path.normpath(link_path) for emb_root in getattr(config, "embeddings_roots", []))
                is_embedding = is_embedding or any(os.path.normpath(emb_root) in norm_target for emb_root in getattr(config, "embeddings_roots", []))

                if is_checkpoint:
                    route_path = f'/checkpoints_static/link_{link_idx["checkpoint"]}/preview'
                    link_idx["checkpoint"] += 1
                elif is_embedding:
                    route_path = f'/embeddings_static/link_{link_idx["embedding"]}/preview'
                    link_idx["embedding"] += 1
                else:
                    route_path = f'/loras_static/link_{link_idx["lora"]}/preview'
                    link_idx["lora"] += 1

                # Display path with forward slashes for consistency
                display_target = target_path.replace('\\', '/')

                try:
                    app.router.add_static(route_path, Path(target_path).resolve(strict=False))
                    logger.info(f"Added static route for link target {route_path} -> {display_target}")
                    config.add_route_mapping(target_path, route_path)
                    added_targets.add(norm_target)
                except Exception as e:
                    logger.warning(f"Failed to add static route on initialization for {target_path}: {e}")
                    continue

        # Add static route for plugin assets
        app.router.add_static('/loras_static', config.static_path)

        # Setup feature routes
        from py.services.model_service_factory import ModelServiceFactory, register_default_model_types
        from py.routes.recipe_routes import RecipeRoutes
        from py.routes.update_routes import UpdateRoutes
        from py.routes.misc_routes import MiscRoutes
        from py.routes.example_images_routes import ExampleImagesRoutes
        from py.routes.stats_routes import StatsRoutes
        from py.services.websocket_manager import ws_manager

        register_default_model_types()

        # Setup all model routes using the factory
        ModelServiceFactory.setup_all_routes(app)

        stats_routes = StatsRoutes()

        # Initialize routes
        stats_routes.setup_routes(app)
        RecipeRoutes.setup_routes(app)
        UpdateRoutes.setup_routes(app)
        MiscRoutes.setup_routes(app)
        ExampleImagesRoutes.setup_routes(app)

        # Setup WebSocket routes that are shared across all model types
        app.router.add_get('/ws/fetch-progress', ws_manager.handle_connection)
        app.router.add_get('/ws/download-progress', ws_manager.handle_download_connection)
        app.router.add_get('/ws/init-progress', ws_manager.handle_init_connection)

        # Schedule service initialization
        app.on_startup.append(lambda app: cls._initialize_services())

        # Add cleanup
        app.on_shutdown.append(cls._cleanup)

def parse_args():
    """Parse command line arguments"""
    parser = argparse.ArgumentParser(description="LoRA Manager Standalone Server")
    parser.add_argument("--host", type=str, default="0.0.0.0",
                        help="Host address to bind the server to (default: 0.0.0.0)")
    parser.add_argument("--port", type=int, default=8188,
                        help="Port to bind the server to (default: 8188, access via http://localhost:8188/loras)")
    # parser.add_argument("--loras", type=str, nargs="+",
    #                     help="Additional paths to LoRA model directories (optional if settings.json has paths)")
    # parser.add_argument("--checkpoints", type=str, nargs="+",
    #                     help="Additional paths to checkpoint model directories (optional if settings.json has paths)")
    parser.add_argument("--log-level", type=str, default="INFO",
                        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
                        help="Logging level")
    return parser.parse_args()

async def main():
    """Main entry point for standalone mode"""
    args = parse_args()

    # Set log level
    logging.getLogger().setLevel(getattr(logging, args.log_level))

    # Create the server instance
    server = StandaloneServer()

    # Initialize routes via the standalone lora manager
    StandaloneLoraManager.add_routes(server)

    # Set up and start the server
    await server.setup()
    await server.start(host=args.host, port=args.port)

if __name__ == "__main__":
    try:
        # Run the main function
        asyncio.run(main())
    except KeyboardInterrupt:
        logger.info("Server stopped by user")
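The central trick in standalone.py is pre-seeding sys.modules so that ComfyUI-only imports (folder_paths, server, py.metadata_collector, py.nodes.*) resolve to stubs outside of ComfyUI. A minimal, self-contained sketch of that technique; the stub body here is illustrative, not the repo's actual mocks:

import sys
import types

# Register a stub module named "folder_paths" before anything imports it.
stub = types.ModuleType("folder_paths")
stub.get_folder_paths = lambda folder_name: []  # stubbed lookup, returns no roots
sys.modules["folder_paths"] = stub

import folder_paths  # resolves to the stub registered above, not a real file
assert folder_paths.get_folder_paths("loras") == []

With the mocks in place, the server is launched directly, e.g. python standalone.py --port 8188, and browsed at http://localhost:8188/loras, per the argument help and the redirect in handle_status above.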