feat(lora_randomizer): implement dual seed mechanism for batch queue synchronization, fixes #773

- Add execution_seed and next_seed parameters to support deterministic randomization across batch executions
- Separate UI display generation from execution stack generation to maintain consistency in batch queues
- Update LoraService to accept optional seed parameter for reproducible randomization
- Ensure each execution with a different seed produces unique results without affecting global random state
This commit is contained in:
Will Miao
2026-01-21 00:52:08 +08:00
parent 50c012ae33
commit fd06086a05
7 changed files with 134 additions and 19 deletions

View File

@@ -74,21 +74,38 @@ class LoraRandomizerNode:
roll_mode = randomizer_config.get("roll_mode", "always")
logger.debug(f"[LoraRandomizerNode] roll_mode: {roll_mode}")
# Dual seed mechanism for batch queue synchronization
# execution_seed: seed for generating execution_stack (= previous next_seed)
# next_seed: seed for generating ui_loras (= what will be displayed after execution)
execution_seed = randomizer_config.get("execution_seed", None)
next_seed = randomizer_config.get("next_seed", None)
if roll_mode == "fixed":
ui_loras = loras
execution_loras = loras
else:
scanner = await ServiceRegistry.get_lora_scanner()
# Generate execution_loras from execution_seed (if available)
if execution_seed is not None:
# Use execution_seed to regenerate the same loras that were shown to user
execution_loras = await self._generate_random_loras_for_ui(
scanner, randomizer_config, loras, pool_config, seed=execution_seed
)
else:
# First execution: use loras input (what user sees in the widget)
execution_loras = loras
# Generate ui_loras from next_seed (for display after execution)
ui_loras = await self._generate_random_loras_for_ui(
scanner, randomizer_config, loras, pool_config
scanner, randomizer_config, loras, pool_config, seed=next_seed
)
print("pool config", pool_config)
execution_stack = self._build_execution_stack_from_input(loras)
execution_stack = self._build_execution_stack_from_input(execution_loras)
return {
"result": (execution_stack,),
"ui": {"loras": ui_loras, "last_used": loras},
"ui": {"loras": ui_loras, "last_used": execution_loras},
}
def _build_execution_stack_from_input(self, loras):
@@ -126,7 +143,7 @@ class LoraRandomizerNode:
return lora_stack
async def _generate_random_loras_for_ui(
self, scanner, randomizer_config, input_loras, pool_config=None
self, scanner, randomizer_config, input_loras, pool_config=None, seed=None
):
"""
Generate new random loras for UI display.
@@ -136,6 +153,7 @@ class LoraRandomizerNode:
randomizer_config: Dict with randomizer settings
input_loras: Current input loras (for extracting locked loras)
pool_config: Optional pool filters
seed: Optional seed for deterministic randomization
Returns:
List of LoRA dicts for UI display
@@ -182,6 +200,7 @@ class LoraRandomizerNode:
use_recommended_strength=use_recommended_strength,
recommended_strength_scale_min=recommended_strength_scale_min,
recommended_strength_scale_max=recommended_strength_scale_max,
seed=seed,
)
return result_loras

View File

@@ -231,6 +231,7 @@ class LoraService(BaseModelService):
use_recommended_strength: bool = False,
recommended_strength_scale_min: float = 0.5,
recommended_strength_scale_max: float = 1.0,
seed: Optional[int] = None,
) -> List[Dict]:
"""
Get random LoRAs with specified strength ranges.
@@ -250,6 +251,7 @@ class LoraService(BaseModelService):
use_recommended_strength: Whether to use recommended strength from usage_tips
recommended_strength_scale_min: Minimum scale factor for recommended strength
recommended_strength_scale_max: Maximum scale factor for recommended strength
seed: Optional random seed for reproducible/unique randomization per execution
Returns:
List of LoRA dicts with randomized strengths
@@ -257,6 +259,10 @@ class LoraService(BaseModelService):
import random
import json
# Use a local Random instance to avoid affecting global random state
# This ensures each execution with a different seed produces different results
rng = random.Random(seed)
def get_recommended_strength(lora_data: Dict) -> Optional[float]:
"""Parse usage_tips JSON and extract recommended strength"""
try:
@@ -286,7 +292,7 @@ class LoraService(BaseModelService):
if count_mode == "fixed":
target_count = count
else:
target_count = random.randint(count_min, count_max)
target_count = rng.randint(count_min, count_max)
# Get available loras from cache
cache = await self.scanner.get_cached_data(force_refresh=False)
@@ -320,7 +326,7 @@ class LoraService(BaseModelService):
# Random sample
selected = []
if slots_needed > 0:
selected = random.sample(available_pool, slots_needed)
selected = rng.sample(available_pool, slots_needed)
# Generate random strengths for selected LoRAs
result_loras = []
@@ -328,17 +334,17 @@ class LoraService(BaseModelService):
if use_recommended_strength:
recommended_strength = get_recommended_strength(lora)
if recommended_strength is not None:
scale = random.uniform(
scale = rng.uniform(
recommended_strength_scale_min, recommended_strength_scale_max
)
model_str = round(recommended_strength * scale, 2)
else:
model_str = round(
random.uniform(model_strength_min, model_strength_max), 2
rng.uniform(model_strength_min, model_strength_max), 2
)
else:
model_str = round(
random.uniform(model_strength_min, model_strength_max), 2
rng.uniform(model_strength_min, model_strength_max), 2
)
if use_same_clip_strength:
@@ -346,17 +352,17 @@ class LoraService(BaseModelService):
elif use_recommended_strength:
recommended_clip_strength = get_recommended_clip_strength(lora)
if recommended_clip_strength is not None:
scale = random.uniform(
scale = rng.uniform(
recommended_strength_scale_min, recommended_strength_scale_max
)
clip_str = round(recommended_clip_strength * scale, 2)
else:
clip_str = round(
random.uniform(clip_strength_min, clip_strength_max), 2
rng.uniform(clip_strength_min, clip_strength_max), 2
)
else:
clip_str = round(
random.uniform(clip_strength_min, clip_strength_max), 2
rng.uniform(clip_strength_min, clip_strength_max), 2
)
result_loras.append(

View File

@@ -54,6 +54,9 @@ const props = defineProps<{
// State management
const state = useLoraRandomizerState(props.widget)
// Symbol to track if the widget has been executed at least once
const HAS_EXECUTED = Symbol('HAS_EXECUTED')
// Track current loras from the loras widget
const currentLoras = ref<LoraEntry[]>([])
@@ -190,6 +193,31 @@ onMounted(async () => {
state.restoreFromConfig(props.widget.value as RandomizerConfig)
}
// Add beforeQueued hook to handle seed shifting for batch queue synchronization
// This ensures each execution uses the loras that were displayed before that execution
;(props.widget as any).beforeQueued = () => {
// Only process when roll_mode is 'always' (randomize on each execution)
if (state.rollMode.value === 'always') {
if ((props.widget as any)[HAS_EXECUTED]) {
// After first execution: shift seeds (previous next_seed becomes execution_seed)
state.generateNewSeed()
} else {
// First execution: just initialize next_seed (execution_seed stays null)
// This means first execution uses loras from widget input
state.initializeNextSeed()
;(props.widget as any)[HAS_EXECUTED] = true
}
// Update the widget value so the seeds are included in the serialized config
const config = state.buildConfig()
if ((props.widget as any).updateConfig) {
;(props.widget as any).updateConfig(config)
} else {
props.widget.value = config
}
}
}
// Override onExecuted to handle backend UI updates
const originalOnExecuted = (props.node as any).onExecuted?.bind(props.node)

View File

@@ -69,6 +69,8 @@ export interface RandomizerConfig {
use_recommended_strength: boolean
recommended_strength_scale_min: number
recommended_strength_scale_max: number
execution_seed?: number | null // Seed for execution_stack (previous next_seed)
next_seed?: number | null // Seed for ui_loras (current)
}
export interface LoraEntry {

View File

@@ -21,6 +21,12 @@ export function useLoraRandomizerState(widget: ComponentWidget) {
// Track last used combination (for backend roll mode)
const lastUsed = ref<LoraEntry[] | null>(null)
// Dual seed mechanism for batch queue synchronization
// execution_seed: seed for generating execution_stack (= previous next_seed)
// next_seed: seed for generating ui_loras (= what will be displayed after execution)
const executionSeed = ref<number | null>(null)
const nextSeed = ref<number | null>(null)
// Build config object from current state
const buildConfig = (): RandomizerConfig => ({
count_mode: countMode.value,
@@ -37,8 +43,24 @@ export function useLoraRandomizerState(widget: ComponentWidget) {
use_recommended_strength: useRecommendedStrength.value,
recommended_strength_scale_min: recommendedStrengthScaleMin.value,
recommended_strength_scale_max: recommendedStrengthScaleMax.value,
execution_seed: executionSeed.value,
next_seed: nextSeed.value,
})
// Shift seeds for batch queue synchronization
// Previous next_seed becomes current execution_seed, and generate a new next_seed
// so the backend can deterministically regenerate what the user last saw.
const generateNewSeed = () => {
executionSeed.value = nextSeed.value // Previous next becomes current execution
// 2147483647 = 2^31 - 1: keeps the seed inside a signed 32-bit integer range
// (safe to serialize and pass to Python's random.Random on the backend)
nextSeed.value = Math.floor(Math.random() * 2147483647)
}
// Initialize next_seed for first execution (execution_seed stays null)
// NOTE(review): a next_seed restored from a saved config is deliberately kept,
// so reloading a workflow does not reshuffle the pending preview — confirm intended
const initializeNextSeed = () => {
if (nextSeed.value === null) {
nextSeed.value = Math.floor(Math.random() * 2147483647)
}
}
// Restore state from config object
const restoreFromConfig = (config: RandomizerConfig) => {
countMode.value = config.count_mode || 'range'
@@ -185,6 +207,8 @@ export function useLoraRandomizerState(widget: ComponentWidget) {
useRecommendedStrength,
recommendedStrengthScaleMin,
recommendedStrengthScaleMax,
executionSeed,
nextSeed,
// Computed
isClipStrengthDisabled,
@@ -195,5 +219,7 @@ export function useLoraRandomizerState(widget: ComponentWidget) {
restoreFromConfig,
rollLoras,
useLastUsed,
generateNewSeed,
initializeNextSeed,
}
}

View File

@@ -1451,7 +1451,7 @@ to { transform: rotate(360deg);
transform: translateY(4px);
}
.lora-randomizer-widget[data-v-3a525a5c] {
.lora-randomizer-widget[data-v-45df1002] {
padding: 6px;
background: rgba(40, 44, 52, 0.6);
border-radius: 6px;
@@ -12086,6 +12086,8 @@ function useLoraRandomizerState(widget) {
const recommendedStrengthScaleMin = ref(0.5);
const recommendedStrengthScaleMax = ref(1);
const lastUsed = ref(null);
const executionSeed = ref(null);
const nextSeed = ref(null);
const buildConfig = () => ({
count_mode: countMode.value,
count_fixed: countFixed.value,
@@ -12100,8 +12102,19 @@ function useLoraRandomizerState(widget) {
last_used: lastUsed.value,
use_recommended_strength: useRecommendedStrength.value,
recommended_strength_scale_min: recommendedStrengthScaleMin.value,
recommended_strength_scale_max: recommendedStrengthScaleMax.value
recommended_strength_scale_max: recommendedStrengthScaleMax.value,
execution_seed: executionSeed.value,
next_seed: nextSeed.value
});
// Shift seeds: the previously previewed next_seed becomes the seed used for
// this execution, then draw a fresh next_seed for the upcoming preview.
const generateNewSeed = () => {
executionSeed.value = nextSeed.value;
// 2147483647 = 2^31 - 1, keeps the seed within signed 32-bit range
nextSeed.value = Math.floor(Math.random() * 2147483647);
};
// Lazily seed next_seed on first use; execution_seed intentionally stays null
// so the first run uses the loras already shown in the widget.
const initializeNextSeed = () => {
if (nextSeed.value === null) {
nextSeed.value = Math.floor(Math.random() * 2147483647);
}
};
const restoreFromConfig = (config) => {
countMode.value = config.count_mode || "range";
countFixed.value = config.count_fixed || 3;
@@ -12221,6 +12234,8 @@ function useLoraRandomizerState(widget) {
useRecommendedStrength,
recommendedStrengthScaleMin,
recommendedStrengthScaleMax,
executionSeed,
nextSeed,
// Computed
isClipStrengthDisabled,
isRecommendedStrengthEnabled,
@@ -12228,7 +12243,9 @@ function useLoraRandomizerState(widget) {
buildConfig,
restoreFromConfig,
rollLoras,
useLastUsed
useLastUsed,
generateNewSeed,
initializeNextSeed
};
}
const _hoisted_1$1 = { class: "lora-randomizer-widget" };
@@ -12241,6 +12258,7 @@ const _sfc_main$1 = /* @__PURE__ */ defineComponent({
setup(__props) {
const props = __props;
const state = useLoraRandomizerState(props.widget);
const HAS_EXECUTED = Symbol("HAS_EXECUTED");
const currentLoras = ref([]);
const isMounted = ref(false);
const canReuseLast = computed(() => {
@@ -12332,6 +12350,22 @@ const _sfc_main$1 = /* @__PURE__ */ defineComponent({
if (props.widget.value) {
state.restoreFromConfig(props.widget.value);
}
props.widget.beforeQueued = () => {
if (state.rollMode.value === "always") {
if (props.widget[HAS_EXECUTED]) {
state.generateNewSeed();
} else {
state.initializeNextSeed();
props.widget[HAS_EXECUTED] = true;
}
const config = state.buildConfig();
if (props.widget.updateConfig) {
props.widget.updateConfig(config);
} else {
props.widget.value = config;
}
}
};
const originalOnExecuted = (_b = props.node.onExecuted) == null ? void 0 : _b.bind(props.node);
props.node.onExecuted = function(output) {
var _a2;
@@ -12393,7 +12427,7 @@ const _sfc_main$1 = /* @__PURE__ */ defineComponent({
};
}
});
const LoraRandomizerWidget = /* @__PURE__ */ _export_sfc(_sfc_main$1, [["__scopeId", "data-v-3a525a5c"]]);
const LoraRandomizerWidget = /* @__PURE__ */ _export_sfc(_sfc_main$1, [["__scopeId", "data-v-45df1002"]]);
const _hoisted_1 = { class: "json-display-widget" };
const _hoisted_2 = {
class: "json-content",

File diff suppressed because one or more lines are too long