Compare commits

...

4 Commits
v1.0.2 ... main

Author SHA1 Message Date
Will Miao
e13d70248a fix(usage-stats): resolve pending checkpoint hashes 2026-04-08 09:40:20 +08:00
pixelpaws
1c4919a3e8 Merge pull request #887 from NubeBuster/feat/usage-extractors
feat(usage-stats): add extractors for rgthree Power LoRA Loader and TensorRT loaders
2026-04-08 09:32:08 +08:00
Will Miao
18ddadc9ec feat(autocomplete): auto-format textarea on blur (#884) 2026-04-08 07:57:28 +08:00
NubeBuster
b711ac468a feat(usage-stats): add extractors for rgthree Power LoRA Loader and TensorRT Loader
Fixes #394 — LoRAs loaded via rgthree Power Lora Loader were not
tracked in usage statistics because no extractor existed for that node.

New extractors:
- RgthreePowerLoraLoaderExtractor: parses LORA_* kwargs, respects
  the per-LoRA 'on' toggle
- TensorRTLoaderExtractor: parses engine filename (strips _$profile
  suffix) as best-effort for vanilla TRT. If the output MODEL has
  attachments["source_model"] (set by NubeBuster fork), overrides
  with the real checkpoint name.

TensorRTRefitLoader and TensorRTLoaderAuto take a MODEL input whose
upstream checkpoint loader is already tracked — no extractor needed.

Also adds a name:<filename> fallback and warning log in both
_process_checkpoints and _process_loras when hash lookup fails.
2026-04-05 16:45:21 +02:00
6 changed files with 357 additions and 38 deletions

View File

@@ -1,4 +1,6 @@
import json
import os
import re
from .constants import MODELS, PROMPTS, SAMPLING, LORAS, SIZE, IMAGES, IS_SAMPLER
@@ -427,6 +429,75 @@ class ImageSizeExtractor(NodeMetadataExtractor):
"node_id": node_id
}
class RgthreePowerLoraLoaderExtractor(NodeMetadataExtractor):
    """Extract LoRA metadata from rgthree Power Lora Loader.

    The node passes LoRAs as dynamic kwargs: LORA_1, LORA_2, ... each containing
    {'on': bool, 'lora': filename, 'strength': float, 'strengthTwo': float}.
    """

    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs:
            return
        # Keep only enabled LORA_* entries that actually name a file.
        lora_entries = [
            {
                "name": os.path.splitext(os.path.basename(cfg['lora']))[0],
                "strength": round(float(cfg.get('strength', 1.0)), 2),
            }
            for key, cfg in inputs.items()
            if key.upper().startswith('LORA_')
            and isinstance(cfg, dict)
            and cfg.get('on')
            and cfg.get('lora')
        ]
        if lora_entries:
            metadata[LORAS][node_id] = {
                "lora_list": lora_entries,
                "node_id": node_id,
            }
class TensorRTLoaderExtractor(NodeMetadataExtractor):
    """Extract checkpoint metadata from TensorRT Loader.

    extract() parses the engine filename from 'unet_name' as a best-effort
    fallback (strips the '_$profile' suffix and any trailing counter).
    update() checks whether the output MODEL carries attachments["source_model"]
    (set by the NubeBuster fork) and, if present, overrides with the real
    checkpoint name. Vanilla TRT doesn't set this, so the filename parse stands.
    """

    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs or "unet_name" not in inputs:
            return
        # Base filename without directories or extension.
        base = os.path.splitext(os.path.basename(inputs.get("unet_name")))[0]
        # Drop everything from the '_$' profile marker onward.
        marker = base.find("_$")
        if marker != -1:
            base = base[:marker]
        # Remove a trailing counter (e.g. _00001_) left by ComfyUI's save path.
        base = re.sub(r'_\d+_?$', '', base)
        _store_checkpoint_metadata(metadata, node_id, base)

    @staticmethod
    def update(node_id, outputs, metadata):
        if not isinstance(outputs, list) or not outputs:
            return
        head = outputs[0]
        if not (isinstance(head, tuple) and head):
            return
        model = head[0]
        # The NubeBuster fork records the originating checkpoint on the
        # ModelPatcher via attachments["source_model"].
        real_name = getattr(model, 'attachments', {}).get("source_model")
        if real_name:
            _store_checkpoint_metadata(metadata, node_id, real_name)
class LoraLoaderManagerExtractor(NodeMetadataExtractor):
@staticmethod
def extract(node_id, inputs, outputs, metadata):
@@ -577,8 +648,6 @@ class SamplerCustomAdvancedExtractor(BaseSamplerExtractor):
# Extract latent dimensions
BaseSamplerExtractor.extract_latent_dimensions(node_id, inputs, metadata)
import json
class CLIPTextEncodeFluxExtractor(NodeMetadataExtractor):
@staticmethod
def extract(node_id, inputs, outputs, metadata):
@@ -715,6 +784,8 @@ NODE_EXTRACTORS = {
"UnetLoaderGGUF": UNETLoaderExtractor, # Updated to use dedicated extractor
"LoraLoader": LoraLoaderExtractor,
"LoraLoaderLM": LoraLoaderManagerExtractor,
"RgthreePowerLoraLoader": RgthreePowerLoraLoaderExtractor,
"TensorRTLoader": TensorRTLoaderExtractor,
# Conditioning
"CLIPTextEncode": CLIPTextEncodeExtractor,
"PromptLM": CLIPTextEncodeExtractor,

View File

@@ -291,6 +291,80 @@ class UsageStats:
# Process loras
if LORAS in metadata and isinstance(metadata[LORAS], dict):
await self._process_loras(metadata[LORAS], today)
def _increment_usage_counter(self, category: str, stat_key: str, today_date: str) -> None:
"""Increment usage counters for a resolved stats key."""
if stat_key not in self.stats[category]:
self.stats[category][stat_key] = {
"total": 0,
"history": {}
}
self.stats[category][stat_key]["total"] += 1
if today_date not in self.stats[category][stat_key]["history"]:
self.stats[category][stat_key]["history"][today_date] = 0
self.stats[category][stat_key]["history"][today_date] += 1
def _normalize_model_lookup_name(self, model_name: str) -> str:
"""Normalize a model reference to its base filename without extension."""
return os.path.splitext(os.path.basename(model_name))[0]
async def _find_cached_checkpoint_entry(self, checkpoint_scanner, model_name: str):
    """Best-effort lookup for a checkpoint cache entry by filename/model name.

    Returns the first cache entry whose file_name, model_name, or file_path
    normalizes to the same stem as ``model_name``; None when the scanner has
    no cache API or nothing matches.
    """
    get_cached_data = getattr(checkpoint_scanner, "get_cached_data", None)
    if not callable(get_cached_data):
        return None
    raw_data = getattr(await get_cached_data(), "raw_data", None)
    if not isinstance(raw_data, list):
        return None
    wanted = self._normalize_model_lookup_name(model_name)
    for entry in raw_data:
        if not isinstance(entry, dict):
            continue
        candidates = (entry.get(key) for key in ("file_name", "model_name", "file_path"))
        if any(
            isinstance(value, str) and value
            and self._normalize_model_lookup_name(value) == wanted
            for value in candidates
        ):
            return entry
    return None
async def _resolve_checkpoint_hash(self, checkpoint_scanner, model_name: str):
    """Resolve a checkpoint hash, calculating pending hashes on demand when needed.

    Resolution order: the scanner's filename index, then the cached entry's
    stored sha256, then an on-demand calculation when the entry is marked
    'pending'. Returns None (after logging a warning) when no hash can be
    obtained.
    """
    model_filename = self._normalize_model_lookup_name(model_name)
    known_hash = checkpoint_scanner.get_hash_by_filename(model_filename)
    if known_hash:
        return known_hash
    entry = await self._find_cached_checkpoint_entry(checkpoint_scanner, model_name)
    if entry:
        stored_hash = entry.get("sha256")
        if stored_hash:
            return stored_hash
        if entry.get("hash_status") == "pending":
            # Scanner may expose an on-demand hasher; use it when possible.
            calculator = getattr(checkpoint_scanner, "calculate_hash_for_model", None)
            path = entry.get("file_path")
            if callable(calculator) and path:
                fresh_hash = await calculator(path)
                if fresh_hash:
                    return fresh_hash
            logger.warning(
                f"Failed to calculate pending hash for checkpoint '{model_filename}', skipping usage tracking"
            )
            return None
    logger.warning(f"No hash found for checkpoint '{model_filename}', skipping usage tracking")
    return None
async def _process_checkpoints(self, models_data, today_date):
"""Process checkpoint models from metadata"""
@@ -311,27 +385,12 @@ class UsageStats:
model_name = model_info.get("name")
if not model_name:
continue
# Clean up filename (remove extension if present)
model_filename = os.path.splitext(os.path.basename(model_name))[0]
# Get hash for this checkpoint
model_hash = checkpoint_scanner.get_hash_by_filename(model_filename)
if model_hash:
# Update stats for this checkpoint with date tracking
if model_hash not in self.stats["checkpoints"]:
self.stats["checkpoints"][model_hash] = {
"total": 0,
"history": {}
}
# Increment total count
self.stats["checkpoints"][model_hash]["total"] += 1
# Increment today's count
if today_date not in self.stats["checkpoints"][model_hash]["history"]:
self.stats["checkpoints"][model_hash]["history"][today_date] = 0
self.stats["checkpoints"][model_hash]["history"][today_date] += 1
model_hash = await self._resolve_checkpoint_hash(checkpoint_scanner, model_name)
if not model_hash:
continue
self._increment_usage_counter("checkpoints", model_hash, today_date)
except Exception as e:
logger.error(f"Error processing checkpoint usage: {e}", exc_info=True)
@@ -360,21 +419,11 @@ class UsageStats:
# Get hash for this LoRA
lora_hash = lora_scanner.get_hash_by_filename(lora_name)
if lora_hash:
# Update stats for this LoRA with date tracking
if lora_hash not in self.stats["loras"]:
self.stats["loras"][lora_hash] = {
"total": 0,
"history": {}
}
# Increment total count
self.stats["loras"][lora_hash]["total"] += 1
# Increment today's count
if today_date not in self.stats["loras"][lora_hash]["history"]:
self.stats["loras"][lora_hash]["history"][today_date] = 0
self.stats["loras"][lora_hash]["history"][today_date] += 1
if not lora_hash:
logger.warning(f"No hash found for LoRA '{lora_name}', skipping usage tracking")
continue
self._increment_usage_counter("loras", lora_hash, today_date)
except Exception as e:
logger.error(f"Error processing LoRA usage: {e}", exc_info=True)

View File

@@ -69,6 +69,9 @@ describe('AutoComplete widget interactions', () => {
if (key === 'loramanager.autocomplete_append_comma') {
return true;
}
if (key === 'loramanager.autocomplete_auto_format') {
return true;
}
if (key === 'loramanager.autocomplete_accept_key') {
return 'both';
}
@@ -188,6 +191,59 @@ describe('AutoComplete widget interactions', () => {
expect(insertSelectionSpy).toHaveBeenCalledWith('example_completion');
});
it('formats duplicate commas and extra spaces when the textarea loses focus', async () => {
// Textarea seeded with duplicate commas and irregular spacing.
const input = document.createElement('textarea');
input.value = 'foo bar, , baz ,, qux';
document.body.append(input);
// Spy on 'input' so we can assert the formatter notifies listeners.
const inputListener = vi.fn();
input.addEventListener('input', inputListener);
// NOTE(review): dynamic import — presumably so the module loads with the
// mocked settings in effect; confirm against the module's load behavior.
const { AutoComplete } = await import(AUTOCOMPLETE_MODULE);
new AutoComplete(input,'prompt', { showPreview: false });
input.dispatchEvent(new Event('blur', { bubbles: true }));
// Blur formatting collapses the duplicates and fires exactly one 'input'
// event for the single value change.
expect(input.value).toBe('foo bar, baz, qux');
expect(inputListener).toHaveBeenCalledTimes(1);
});
it('skips blur formatting when autocomplete auto format is disabled', async () => {
// Override the settings mock so only auto-format reads as disabled; the
// other autocomplete settings keep their usual test values.
settingGetMock.mockImplementation((key) => {
if (key === 'loramanager.autocomplete_append_comma') {
return true;
}
if (key === 'loramanager.autocomplete_auto_format') {
return false;
}
if (key === 'loramanager.autocomplete_accept_key') {
return 'both';
}
if (key === 'loramanager.prompt_tag_autocomplete') {
return true;
}
if (key === 'loramanager.tag_space_replacement') {
return false;
}
return undefined;
});
const input = document.createElement('textarea');
input.value = 'foo bar, , baz ,, qux';
document.body.append(input);
const inputListener = vi.fn();
input.addEventListener('input', inputListener);
const { AutoComplete } = await import(AUTOCOMPLETE_MODULE);
new AutoComplete(input,'prompt', { showPreview: false });
input.dispatchEvent(new Event('blur', { bubbles: true }));
// With auto-format off, blur leaves the value untouched and emits no
// synthetic 'input' event.
expect(input.value).toBe('foo bar, , baz ,, qux');
expect(inputListener).not.toHaveBeenCalled();
});
it('accepts the selected suggestion with Enter', async () => {
caretHelperInstance.getBeforeCursor.mockReturnValue('example');
@@ -275,6 +331,9 @@ describe('AutoComplete widget interactions', () => {
if (key === 'loramanager.autocomplete_append_comma') {
return true;
}
if (key === 'loramanager.autocomplete_auto_format') {
return true;
}
if (key === 'loramanager.autocomplete_accept_key') {
return 'tab_only';
}
@@ -322,6 +381,9 @@ describe('AutoComplete widget interactions', () => {
if (key === 'loramanager.autocomplete_append_comma') {
return true;
}
if (key === 'loramanager.autocomplete_auto_format') {
return true;
}
if (key === 'loramanager.autocomplete_accept_key') {
return 'enter_only';
}

View File

@@ -152,3 +152,67 @@ async def test_usage_stats_background_processor_handles_pending_prompts(tmp_path
assert stats.stats["loras"]["lora-hash"]["history"][today] == 1
await _finalize_usage_stats(tasks)
async def test_usage_stats_calculates_pending_checkpoint_hash_on_demand(tmp_path, monkeypatch):
    """A cached checkpoint with hash_status 'pending' gets hashed on demand
    and the resulting hash is used for usage tracking."""
    stats, tasks, _ = _prepare_usage_stats(tmp_path, monkeypatch)

    # Cache entry with an empty sha256 but a pending status and a file path.
    pending_entry = {
        "file_name": "pending_model",
        "model_name": "pending_model",
        "file_path": "/models/pending_model.safetensors",
        "sha256": "",
        "hash_status": "pending",
    }
    checkpoint_scanner = SimpleNamespace(
        get_hash_by_filename=lambda name: None,
        get_cached_data=AsyncMock(return_value=SimpleNamespace(raw_data=[pending_entry])),
        calculate_hash_for_model=AsyncMock(return_value="resolved-hash"),
    )
    lora_scanner = SimpleNamespace(get_hash_by_filename=lambda name: None)
    monkeypatch.setattr(ServiceRegistry, "get_checkpoint_scanner", AsyncMock(return_value=checkpoint_scanner))
    monkeypatch.setattr(ServiceRegistry, "get_lora_scanner", AsyncMock(return_value=lora_scanner))

    metadata_payload = {
        "models": {
            "1": {"type": "checkpoint", "name": "pending_model.safetensors"},
        },
        "loras": {},
    }
    await stats._process_metadata(metadata_payload)

    # The hash was calculated exactly once for the pending file...
    checkpoint_scanner.calculate_hash_for_model.assert_awaited_once_with("/models/pending_model.safetensors")
    # ...and today's usage was recorded under the resolved hash.
    today = datetime.now().strftime("%Y-%m-%d")
    assert stats.stats["checkpoints"]["resolved-hash"]["history"][today] == 1

    await _finalize_usage_stats(tasks)
async def test_usage_stats_skips_name_fallback_for_missing_lora_hash(tmp_path, monkeypatch):
    """A LoRA whose hash cannot be resolved must not be tracked at all.

    In particular no 'name:<filename>' fallback key may be stored. Asserting
    the stats dict is exactly empty covers both conditions, so the previous
    ``not any(key.startswith("name:") ...)`` check — which iterated a dict
    already proven empty and could never fail — has been removed as vacuous.
    """
    stats, tasks, _ = _prepare_usage_stats(tmp_path, monkeypatch)
    metadata_payload = {
        "models": {},
        "loras": {
            "2": {"lora_list": [{"name": "missing_lora"}]},
        },
    }
    # Neither scanner can resolve any hash.
    checkpoint_scanner = SimpleNamespace(get_hash_by_filename=lambda name: None)
    lora_scanner = SimpleNamespace(get_hash_by_filename=lambda name: None)
    monkeypatch.setattr(ServiceRegistry, "get_checkpoint_scanner", AsyncMock(return_value=checkpoint_scanner))
    monkeypatch.setattr(ServiceRegistry, "get_lora_scanner", AsyncMock(return_value=lora_scanner))

    await stats._process_metadata(metadata_payload)

    # Empty dict proves the LoRA was skipped entirely — no hash key and no
    # 'name:' fallback key.
    assert stats.stats["loras"] == {}

    await _finalize_usage_stats(tasks)

View File

@@ -3,6 +3,7 @@ import { app } from "../../scripts/app.js";
import { TextAreaCaretHelper } from "./textarea_caret_helper.js";
import {
getAutocompleteAppendCommaPreference,
getAutocompleteAutoFormatPreference,
getAutocompleteAcceptKeyPreference,
getPromptTagAutocompletePreference,
getTagSpaceReplacementPreference,
@@ -122,6 +123,32 @@ function formatAutocompleteInsertion(text = '') {
return getAutocompleteAppendCommaPreference() ? `${trimmed},` : `${trimmed} `;
}
function normalizeAutocompleteSegment(segment = '') {
return segment.replace(/\s+/g, ' ').trim();
}
export function formatAutocompleteTextOnBlur(text = '') {
    // Non-strings (null, numbers, etc.) normalize to an empty string.
    if (typeof text !== 'string') {
        return '';
    }
    const formattedLines = text.split('\n').map((line) => {
        // Whitespace-only lines become empty lines.
        if (!line.trim()) {
            return '';
        }
        // Normalize each comma-separated segment, dropping the empty ones
        // produced by duplicate or trailing commas.
        const segments = [];
        for (const piece of line.split(',')) {
            const cleaned = normalizeAutocompleteSegment(piece);
            if (cleaned) {
                segments.push(cleaned);
            }
        }
        return segments.join(', ');
    });
    return formattedLines.join('\n');
}
function shouldAcceptAutocompleteKey(key) {
const mode = getAutocompleteAcceptKeyPreference();
@@ -481,6 +508,14 @@ class AutoComplete {
// Handle focus out to hide dropdown
this.onBlur = () => {
if (getAutocompleteAutoFormatPreference()) {
const formattedValue = formatAutocompleteTextOnBlur(this.inputElement.value);
if (formattedValue !== this.inputElement.value) {
this.inputElement.value = formattedValue;
this.inputElement.dispatchEvent(new Event('input', { bubbles: true }));
}
}
// Delay hiding to allow for clicks on dropdown items
setTimeout(() => {
this.hide();

View File

@@ -16,6 +16,9 @@ const PROMPT_TAG_AUTOCOMPLETE_DEFAULT = true;
const AUTOCOMPLETE_APPEND_COMMA_SETTING_ID = "loramanager.autocomplete_append_comma";
const AUTOCOMPLETE_APPEND_COMMA_DEFAULT = true;
// Setting ID and default for the blur-time auto-format feature; enabled
// unless the user turns it off in the LoRA Manager settings.
const AUTOCOMPLETE_AUTO_FORMAT_SETTING_ID = "loramanager.autocomplete_auto_format";
const AUTOCOMPLETE_AUTO_FORMAT_DEFAULT = true;
const AUTOCOMPLETE_ACCEPT_KEY_SETTING_ID = "loramanager.autocomplete_accept_key";
const AUTOCOMPLETE_ACCEPT_KEY_DEFAULT = "both";
const AUTOCOMPLETE_ACCEPT_KEY_OPTION_BOTH = "Tab or Enter";
@@ -192,6 +195,32 @@ const getAutocompleteAppendCommaPreference = (() => {
};
})();
const getAutocompleteAutoFormatPreference = (() => {
    // Warn at most once per session when settings are unreadable.
    let warnedOnce = false;
    return () => {
        const settingManager = app?.extensionManager?.setting;
        const canRead = settingManager && typeof settingManager.get === "function";
        if (!canRead) {
            if (!warnedOnce) {
                console.warn("LoRA Manager: settings API unavailable, using default autocomplete auto format setting.");
                warnedOnce = true;
            }
            return AUTOCOMPLETE_AUTO_FORMAT_DEFAULT;
        }
        try {
            const stored = settingManager.get(AUTOCOMPLETE_AUTO_FORMAT_SETTING_ID);
            // null/undefined fall back to the default; false is respected.
            return stored ?? AUTOCOMPLETE_AUTO_FORMAT_DEFAULT;
        } catch (error) {
            if (!warnedOnce) {
                console.warn("LoRA Manager: unable to read autocomplete auto format setting, using default.", error);
                warnedOnce = true;
            }
            return AUTOCOMPLETE_AUTO_FORMAT_DEFAULT;
        }
    };
})();
const getAutocompleteAcceptKeyPreference = (() => {
let settingsUnavailableLogged = false;
@@ -375,6 +404,14 @@ app.registerExtension({
tooltip: "When enabled, accepted autocomplete suggestions append ', ' to the inserted text.",
category: ["LoRA Manager", "Autocomplete", "Append comma"],
},
{
id: AUTOCOMPLETE_AUTO_FORMAT_SETTING_ID,
name: "Auto format autocomplete text on blur",
type: "boolean",
defaultValue: AUTOCOMPLETE_AUTO_FORMAT_DEFAULT,
tooltip: "When enabled, leaving an autocomplete textarea removes duplicate commas and collapses unnecessary spaces.",
category: ["LoRA Manager", "Autocomplete", "Auto Format"],
},
{
id: AUTOCOMPLETE_ACCEPT_KEY_SETTING_ID,
name: "Autocomplete accept key",
@@ -505,6 +542,7 @@ export {
getWheelSensitivity,
getAutoPathCorrectionPreference,
getAutocompleteAppendCommaPreference,
getAutocompleteAutoFormatPreference,
getAutocompleteAcceptKeyPreference,
getPromptTagAutocompletePreference,
getTagSpaceReplacementPreference,