Mirror of https://github.com/willmiao/ComfyUI-Lora-Manager.git (synced 2026-03-21 21:22:11 -03:00)
feat(lora_randomizer): implement dual seed mechanism for batch queue synchronization, fixes #773
- Add execution_seed and next_seed parameters to support deterministic randomization across batch executions
- Separate UI display generation from execution stack generation to maintain consistency in batch queues
- Update LoraService to accept optional seed parameter for reproducible randomization
- Ensure each execution with a different seed produces unique results without affecting global random state
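The mechanism described above amounts to a seed hand-off between consecutive batch executions: the seed used to render the UI preview of one run becomes the execution seed of the next run, so what executes always matches what was shown. A minimal sketch of that flow, using hypothetical helpers (pick_loras and run_once are illustrative stand-ins, not functions from this repository):

# Illustrative sketch only -- not the node's actual code.
import random
from typing import List, Optional, Tuple

def pick_loras(pool: List[str], count: int, seed: Optional[int]) -> List[str]:
    # A local Random instance makes the pick reproducible for a given seed
    # without touching Python's global random state.
    rng = random.Random(seed)
    return rng.sample(pool, count)

def run_once(pool: List[str], execution_seed: Optional[int],
             next_seed: int) -> Tuple[List[str], List[str]]:
    # execution_loras: what actually executes -- regenerated from the seed
    # that produced the selection the user last saw in the widget.
    execution_loras = pick_loras(pool, 2, execution_seed)
    # ui_loras: what the widget will display for the next queued execution.
    ui_loras = pick_loras(pool, 2, next_seed)
    return execution_loras, ui_loras

pool = ["style_a", "style_b", "detail_c", "light_d"]
_, previewed = run_once(pool, execution_seed=101, next_seed=202)
# On the next run in the batch queue, the previous next_seed is fed back
# as execution_seed, so the executed stack matches what was previewed.
executed, _ = run_once(pool, execution_seed=202, next_seed=303)
assert executed == previewed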
@@ -74,21 +74,38 @@ class LoraRandomizerNode:
         roll_mode = randomizer_config.get("roll_mode", "always")
         logger.debug(f"[LoraRandomizerNode] roll_mode: {roll_mode}")

+        # Dual seed mechanism for batch queue synchronization
+        # execution_seed: seed for generating execution_stack (= previous next_seed)
+        # next_seed: seed for generating ui_loras (= what will be displayed after execution)
+        execution_seed = randomizer_config.get("execution_seed", None)
+        next_seed = randomizer_config.get("next_seed", None)
+
         if roll_mode == "fixed":
             ui_loras = loras
+            execution_loras = loras
         else:
             scanner = await ServiceRegistry.get_lora_scanner()
+
+            # Generate execution_loras from execution_seed (if available)
+            if execution_seed is not None:
+                # Use execution_seed to regenerate the same loras that were shown to user
+                execution_loras = await self._generate_random_loras_for_ui(
+                    scanner, randomizer_config, loras, pool_config, seed=execution_seed
+                )
+            else:
+                # First execution: use loras input (what user sees in the widget)
+                execution_loras = loras
+
+            # Generate ui_loras from next_seed (for display after execution)
             ui_loras = await self._generate_random_loras_for_ui(
-                scanner, randomizer_config, loras, pool_config
+                scanner, randomizer_config, loras, pool_config, seed=next_seed
             )

         print("pool config", pool_config)

-        execution_stack = self._build_execution_stack_from_input(loras)
+        execution_stack = self._build_execution_stack_from_input(execution_loras)

         return {
             "result": (execution_stack,),
-            "ui": {"loras": ui_loras, "last_used": loras},
+            "ui": {"loras": ui_loras, "last_used": execution_loras},
         }

     def _build_execution_stack_from_input(self, loras):
@@ -126,7 +143,7 @@ class LoraRandomizerNode:
         return lora_stack

     async def _generate_random_loras_for_ui(
-        self, scanner, randomizer_config, input_loras, pool_config=None
+        self, scanner, randomizer_config, input_loras, pool_config=None, seed=None
     ):
         """
         Generate new random loras for UI display.
@@ -136,6 +153,7 @@ class LoraRandomizerNode:
             randomizer_config: Dict with randomizer settings
             input_loras: Current input loras (for extracting locked loras)
             pool_config: Optional pool filters
+            seed: Optional seed for deterministic randomization

         Returns:
             List of LoRA dicts for UI display
@@ -182,6 +200,7 @@ class LoraRandomizerNode:
             use_recommended_strength=use_recommended_strength,
             recommended_strength_scale_min=recommended_strength_scale_min,
             recommended_strength_scale_max=recommended_strength_scale_max,
+            seed=seed,
         )

         return result_loras
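The LoraService hunks that follow route every draw through a local random.Random(seed) instance rather than the module-level random functions, which is what makes a seeded execution reproducible without disturbing global random state. A standalone sketch of that pattern (randomize_strengths is a hypothetical example, not the service method):

import random
from typing import Dict, List, Optional

def randomize_strengths(names: List[str], strength_min: float, strength_max: float,
                        seed: Optional[int] = None) -> Dict[str, float]:
    # Private generator: a fixed seed replays the exact same strengths, and
    # random.seed()/random.random() elsewhere in the process are unaffected.
    rng = random.Random(seed)
    return {name: round(rng.uniform(strength_min, strength_max), 2) for name in names}

state_before = random.getstate()
first = randomize_strengths(["lora_a", "lora_b"], 0.4, 1.0, seed=42)
second = randomize_strengths(["lora_a", "lora_b"], 0.4, 1.0, seed=42)
assert first == second                      # same seed -> identical strengths
assert random.getstate() == state_before    # global RNG left untouched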
@@ -231,6 +231,7 @@ class LoraService(BaseModelService):
         use_recommended_strength: bool = False,
         recommended_strength_scale_min: float = 0.5,
         recommended_strength_scale_max: float = 1.0,
+        seed: Optional[int] = None,
     ) -> List[Dict]:
         """
         Get random LoRAs with specified strength ranges.
@@ -250,6 +251,7 @@ class LoraService(BaseModelService):
             use_recommended_strength: Whether to use recommended strength from usage_tips
             recommended_strength_scale_min: Minimum scale factor for recommended strength
             recommended_strength_scale_max: Maximum scale factor for recommended strength
+            seed: Optional random seed for reproducible/unique randomization per execution

         Returns:
             List of LoRA dicts with randomized strengths
@@ -257,6 +259,10 @@ class LoraService(BaseModelService):
         import random
         import json

+        # Use a local Random instance to avoid affecting global random state
+        # This ensures each execution with a different seed produces different results
+        rng = random.Random(seed)
+
         def get_recommended_strength(lora_data: Dict) -> Optional[float]:
             """Parse usage_tips JSON and extract recommended strength"""
             try:
@@ -286,7 +292,7 @@ class LoraService(BaseModelService):
         if count_mode == "fixed":
             target_count = count
         else:
-            target_count = random.randint(count_min, count_max)
+            target_count = rng.randint(count_min, count_max)

         # Get available loras from cache
         cache = await self.scanner.get_cached_data(force_refresh=False)
@@ -320,7 +326,7 @@ class LoraService(BaseModelService):
         # Random sample
         selected = []
         if slots_needed > 0:
-            selected = random.sample(available_pool, slots_needed)
+            selected = rng.sample(available_pool, slots_needed)

         # Generate random strengths for selected LoRAs
         result_loras = []
@@ -328,17 +334,17 @@ class LoraService(BaseModelService):
             if use_recommended_strength:
                 recommended_strength = get_recommended_strength(lora)
                 if recommended_strength is not None:
-                    scale = random.uniform(
+                    scale = rng.uniform(
                         recommended_strength_scale_min, recommended_strength_scale_max
                     )
                     model_str = round(recommended_strength * scale, 2)
                 else:
                     model_str = round(
-                        random.uniform(model_strength_min, model_strength_max), 2
+                        rng.uniform(model_strength_min, model_strength_max), 2
                     )
             else:
                 model_str = round(
-                    random.uniform(model_strength_min, model_strength_max), 2
+                    rng.uniform(model_strength_min, model_strength_max), 2
                 )

             if use_same_clip_strength:
@@ -346,17 +352,17 @@ class LoraService(BaseModelService):
             elif use_recommended_strength:
                 recommended_clip_strength = get_recommended_clip_strength(lora)
                 if recommended_clip_strength is not None:
-                    scale = random.uniform(
+                    scale = rng.uniform(
                         recommended_strength_scale_min, recommended_strength_scale_max
                     )
                     clip_str = round(recommended_clip_strength * scale, 2)
                 else:
                     clip_str = round(
-                        random.uniform(clip_strength_min, clip_strength_max), 2
+                        rng.uniform(clip_strength_min, clip_strength_max), 2
                     )
             else:
                 clip_str = round(
-                    random.uniform(clip_strength_min, clip_strength_max), 2
+                    rng.uniform(clip_strength_min, clip_strength_max), 2
                 )

             result_loras.append(