feat(testing): enhance test configuration and add Vue component tests

- Update package.json test script to run both JS and Vue tests
- Simplify LoraCyclerLM output by removing redundant lora name fallback
- Extend Vitest config to include TypeScript test files
- Add Vue testing dependencies and setup for component testing
- Implement comprehensive test suite for BatchQueueSimulator component
- Add test setup file with global mocks for ComfyUI modules
This commit is contained in:
Will Miao
2026-02-01 00:59:50 +08:00
parent ffcfe5ea3e
commit e17d6c8ebf
20 changed files with 4931 additions and 159 deletions

View File

@@ -4,7 +4,9 @@
"private": true,
"type": "module",
"scripts": {
"test": "vitest run",
"test": "npm run test:js && npm run test:vue",
"test:js": "vitest run",
"test:vue": "cd vue-widgets && npx vitest run",
"test:watch": "vitest",
"test:coverage": "node scripts/run_frontend_coverage.js"
},

View File

@@ -126,9 +126,7 @@ class LoraCyclerLM:
"current_index": [clamped_index],
"next_index": [next_index],
"total_count": [total_count],
"current_lora_name": [
current_lora.get("model_name", current_lora["file_name"])
],
"current_lora_name": [current_lora["file_name"]],
"current_lora_filename": [current_lora["file_name"]],
"next_lora_name": [next_display_name],
"next_lora_filename": [next_lora["file_name"]],

View File

@@ -6,7 +6,8 @@ export default defineConfig({
globals: true,
setupFiles: ['tests/frontend/setup.js'],
include: [
'tests/frontend/**/*.test.js'
'tests/frontend/**/*.test.js',
'tests/frontend/**/*.test.ts'
],
coverage: {
enabled: process.env.VITEST_COVERAGE === 'true',

File diff suppressed because it is too large Load Diff

View File

@@ -12,9 +12,13 @@
"@comfyorg/comfyui-frontend-types": "^1.35.4",
"@types/node": "^22.10.1",
"@vitejs/plugin-vue": "^5.2.3",
"@vitest/coverage-v8": "^3.2.4",
"@vue/test-utils": "^2.4.6",
"jsdom": "^26.0.0",
"typescript": "^5.7.2",
"vite": "^6.3.5",
"vite-plugin-css-injected-by-js": "^3.5.2",
"vitest": "^3.0.0",
"vue-tsc": "^2.1.10"
},
"scripts": {
@@ -24,6 +28,9 @@
"typecheck": "vue-tsc --noEmit",
"clean": "rm -rf ../web/comfyui/vue-widgets",
"rebuild": "npm run clean && npm run build",
"prepare": "npm run build"
"prepare": "npm run build",
"test": "vitest run",
"test:watch": "vitest",
"test:coverage": "vitest run --coverage"
}
}

View File

@@ -10,10 +10,18 @@
:use-custom-clip-range="state.useCustomClipRange.value"
:is-clip-strength-disabled="state.isClipStrengthDisabled.value"
:is-loading="state.isLoading.value"
:repeat-count="state.repeatCount.value"
:repeat-used="state.displayRepeatUsed.value"
:is-paused="state.isPaused.value"
:is-workflow-executing="state.isWorkflowExecuting.value"
:executing-repeat-step="state.executingRepeatStep.value"
@update:current-index="handleIndexUpdate"
@update:model-strength="state.modelStrength.value = $event"
@update:clip-strength="state.clipStrength.value = $event"
@update:use-custom-clip-range="handleUseCustomClipRangeChange"
@update:repeat-count="handleRepeatCountChange"
@toggle-pause="handleTogglePause"
@reset-index="handleResetIndex"
@refresh="handleRefresh"
/>
</div>
@@ -31,6 +39,7 @@ type CyclerWidget = ComponentWidget<CyclerConfig>
const props = defineProps<{
widget: CyclerWidget
node: { id: number; inputs?: any[]; widgets?: any[]; graph?: any }
api?: any // ComfyUI API for execution events
}>()
// State management
@@ -39,6 +48,35 @@ const state = useLoraCyclerState(props.widget)
// Symbol to track if the widget has been executed at least once
const HAS_EXECUTED = Symbol('HAS_EXECUTED')
// Execution context queue for batch queue synchronization
// In batch queue mode, all beforeQueued calls happen BEFORE any onExecuted calls,
// so we need to snapshot the state at queue time and replay it during execution
interface ExecutionContext {
isPaused: boolean
repeatUsed: number
repeatCount: number
shouldAdvanceDisplay: boolean
displayRepeatUsed: number // Value to show in UI after completion
}
const executionQueue: ExecutionContext[] = []
// Track pending executions for batch queue support (deferred UI updates)
// Uses FIFO order since executions are processed in the order they were queued
interface PendingExecution {
repeatUsed: number
repeatCount: number
shouldAdvanceDisplay: boolean
displayRepeatUsed: number // Value to show in UI after completion
output?: {
nextIndex: number
nextLoraName: string
nextLoraFilename: string
currentLoraName: string
currentLoraFilename: string
}
}
const pendingExecutions: PendingExecution[] = []
// Track last known pool config hash
const lastPoolConfigHash = ref('')
@@ -62,6 +100,9 @@ const handleIndexUpdate = async (newIndex: number) => {
state.executionIndex.value = null
state.nextIndex.value = null
// Clear execution queue since user is manually changing state
executionQueue.length = 0
state.setIndex(newIndex)
// Refresh list to update current LoRA display
@@ -100,6 +141,79 @@ const handleRefresh = async () => {
}
}
// Handle repeat count change
const handleRepeatCountChange = (newValue: number) => {
state.repeatCount.value = newValue
// Reset repeatUsed when changing repeat count
state.repeatUsed.value = 0
state.displayRepeatUsed.value = 0
}
// Clear all pending items from server queue
const clearPendingQueue = async () => {
try {
// Clear local execution queue
executionQueue.length = 0
// Clear server queue (pending items only)
await fetch('/queue', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ clear: true })
})
console.log('[LoraCyclerWidget] Cleared pending queue on pause')
} catch (error) {
console.error('[LoraCyclerWidget] Error clearing queue:', error)
}
}
// Handle pause toggle
const handleTogglePause = async () => {
const wasPaused = state.isPaused.value
state.togglePause()
// When transitioning to paused state, clear pending queue
if (!wasPaused && state.isPaused.value) {
// Reset execution state so subsequent manual queues start fresh
;(props.widget as any)[HAS_EXECUTED] = false
state.executionIndex.value = null
state.nextIndex.value = null
await clearPendingQueue()
}
}
// Handle reset index
const handleResetIndex = async () => {
// Reset execution state
;(props.widget as any)[HAS_EXECUTED] = false
state.executionIndex.value = null
state.nextIndex.value = null
// Clear execution queue since user is resetting state
executionQueue.length = 0
// Reset index and repeat state
state.resetIndex()
// Refresh list to update current LoRA display
try {
const poolConfig = getPoolConfig()
const loraList = await state.fetchCyclerList(poolConfig)
if (loraList.length > 0) {
const currentLora = loraList[0]
if (currentLora) {
state.currentLoraName.value = currentLora.file_name
state.currentLoraFilename.value = currentLora.file_name
}
}
} catch (error) {
console.error('[LoraCyclerWidget] Error resetting index:', error)
}
}
// Check for pool config changes
const checkPoolConfigChanges = async () => {
if (!isMounted.value) return
@@ -135,17 +249,66 @@ onMounted(async () => {
// Add beforeQueued hook to handle index shifting for batch queue synchronization
// This ensures each execution uses a different LoRA in the cycle
// Now with support for repeat count and pause features
//
// IMPORTANT: In batch queue mode, ALL beforeQueued calls happen BEFORE any execution.
// We push an "execution context" snapshot to a queue so that onExecuted can use the
// correct state values that were captured at queue time (not the live state).
;(props.widget as any).beforeQueued = () => {
if (state.isPaused.value) {
// When paused: use current index, don't advance, don't count toward repeat limit
// Push context indicating this execution should NOT advance display
executionQueue.push({
isPaused: true,
repeatUsed: state.repeatUsed.value,
repeatCount: state.repeatCount.value,
shouldAdvanceDisplay: false,
displayRepeatUsed: state.displayRepeatUsed.value // Keep current display value when paused
})
// CRITICAL: Clear execution_index when paused to force backend to use current_index
// This ensures paused executions use the same LoRA regardless of any
// execution_index set by previous non-paused beforeQueued calls
const pausedConfig = state.buildConfig()
pausedConfig.execution_index = null
props.widget.value = pausedConfig
return
}
if ((props.widget as any)[HAS_EXECUTED]) {
// After first execution: shift indices (previous next_index becomes execution_index)
state.generateNextIndex()
// After first execution: check repeat logic
if (state.repeatUsed.value < state.repeatCount.value) {
// Still repeating: increment repeatUsed, use same index
state.repeatUsed.value++
} else {
// Repeat complete: reset repeatUsed to 1, advance to next index
state.repeatUsed.value = 1
state.generateNextIndex()
}
} else {
// First execution: just initialize next_index (execution_index stays null)
// This means first execution uses current_index from widget
// First execution: initialize
state.repeatUsed.value = 1
state.initializeNextIndex()
;(props.widget as any)[HAS_EXECUTED] = true
}
// Determine if this execution should advance the display
// (only when repeat cycle is complete for this queued item)
const shouldAdvanceDisplay = state.repeatUsed.value >= state.repeatCount.value
// Calculate the display value to show after this execution completes
// When advancing to a new LoRA: reset to 0 (fresh start for new LoRA)
// When repeating same LoRA: show current repeat step
const displayRepeatUsed = shouldAdvanceDisplay ? 0 : state.repeatUsed.value
// Push execution context snapshot to queue
executionQueue.push({
isPaused: false,
repeatUsed: state.repeatUsed.value,
repeatCount: state.repeatCount.value,
shouldAdvanceDisplay,
displayRepeatUsed
})
// Update the widget value so the indices are included in the serialized config
props.widget.value = state.buildConfig()
}
@@ -163,35 +326,62 @@ onMounted(async () => {
}
// Override onExecuted to handle backend UI updates
// This defers the UI update until workflow completes (via API events)
const originalOnExecuted = (props.node as any).onExecuted?.bind(props.node)
;(props.node as any).onExecuted = function(output: any) {
console.log("[LoraCyclerWidget] Node executed with output:", output)
// Update state from backend response (values are wrapped in arrays)
if (output?.next_index !== undefined) {
const val = Array.isArray(output.next_index) ? output.next_index[0] : output.next_index
state.currentIndex.value = val
}
// Pop execution context from queue (FIFO order)
const context = executionQueue.shift()
// Determine if we should advance the display index
const shouldAdvanceDisplay = context
? context.shouldAdvanceDisplay
: (!state.isPaused.value && state.repeatUsed.value >= state.repeatCount.value)
// Extract output values
const nextIndex = output?.next_index !== undefined
? (Array.isArray(output.next_index) ? output.next_index[0] : output.next_index)
: state.currentIndex.value
const nextLoraName = output?.next_lora_name !== undefined
? (Array.isArray(output.next_lora_name) ? output.next_lora_name[0] : output.next_lora_name)
: ''
const nextLoraFilename = output?.next_lora_filename !== undefined
? (Array.isArray(output.next_lora_filename) ? output.next_lora_filename[0] : output.next_lora_filename)
: ''
const currentLoraName = output?.current_lora_name !== undefined
? (Array.isArray(output.current_lora_name) ? output.current_lora_name[0] : output.current_lora_name)
: ''
const currentLoraFilename = output?.current_lora_filename !== undefined
? (Array.isArray(output.current_lora_filename) ? output.current_lora_filename[0] : output.current_lora_filename)
: ''
// Update total count immediately (doesn't need to wait for workflow completion)
if (output?.total_count !== undefined) {
const val = Array.isArray(output.total_count) ? output.total_count[0] : output.total_count
state.totalCount.value = val
}
if (output?.current_lora_name !== undefined) {
const val = Array.isArray(output.current_lora_name) ? output.current_lora_name[0] : output.current_lora_name
state.currentLoraName.value = val
}
if (output?.current_lora_filename !== undefined) {
const val = Array.isArray(output.current_lora_filename) ? output.current_lora_filename[0] : output.current_lora_filename
state.currentLoraFilename.value = val
}
if (output?.next_lora_name !== undefined) {
const val = Array.isArray(output.next_lora_name) ? output.next_lora_name[0] : output.next_lora_name
state.currentLoraName.value = val
}
if (output?.next_lora_filename !== undefined) {
const val = Array.isArray(output.next_lora_filename) ? output.next_lora_filename[0] : output.next_lora_filename
state.currentLoraFilename.value = val
// Store pending update (will be applied on workflow completion)
if (context) {
pendingExecutions.push({
repeatUsed: context.repeatUsed,
repeatCount: context.repeatCount,
shouldAdvanceDisplay,
displayRepeatUsed: context.displayRepeatUsed,
output: {
nextIndex,
nextLoraName,
nextLoraFilename,
currentLoraName,
currentLoraFilename
}
})
// Update visual feedback state (don't update displayRepeatUsed yet - wait for workflow completion)
state.executingRepeatStep.value = context.repeatUsed
state.isWorkflowExecuting.value = true
}
// Call original onExecuted if it exists
@@ -200,11 +390,69 @@ onMounted(async () => {
}
}
// Set up execution tracking via API events
if (props.api) {
// Handle workflow completion events using FIFO order
// Note: The 'executing' event doesn't contain prompt_id (only node ID as string),
// so we use FIFO order instead of prompt_id matching since executions are processed
// in the order they were queued
const handleExecutionComplete = () => {
// Process the first pending execution (FIFO order)
if (pendingExecutions.length === 0) {
return
}
const pending = pendingExecutions.shift()!
// Apply UI update now that workflow is complete
// Update repeat display (deferred like index updates)
state.displayRepeatUsed.value = pending.displayRepeatUsed
if (pending.output) {
if (pending.shouldAdvanceDisplay) {
state.currentIndex.value = pending.output.nextIndex
state.currentLoraName.value = pending.output.nextLoraName
state.currentLoraFilename.value = pending.output.nextLoraFilename
} else {
// When not advancing, show current LoRA info
state.currentLoraName.value = pending.output.currentLoraName
state.currentLoraFilename.value = pending.output.currentLoraFilename
}
}
// Reset visual feedback if no more pending
if (pendingExecutions.length === 0) {
state.isWorkflowExecuting.value = false
state.executingRepeatStep.value = 0
}
}
props.api.addEventListener('execution_success', handleExecutionComplete)
props.api.addEventListener('execution_error', handleExecutionComplete)
props.api.addEventListener('execution_interrupted', handleExecutionComplete)
// Store cleanup function for API listeners
const apiCleanup = () => {
props.api.removeEventListener('execution_success', handleExecutionComplete)
props.api.removeEventListener('execution_error', handleExecutionComplete)
props.api.removeEventListener('execution_interrupted', handleExecutionComplete)
}
// Extend existing cleanup
const existingCleanup = (props.widget as any).onRemoveCleanup
;(props.widget as any).onRemoveCleanup = () => {
existingCleanup?.()
apiCleanup()
}
}
// Watch for connection changes by polling (since ComfyUI doesn't provide connection events)
const checkInterval = setInterval(checkPoolConfigChanges, 1000)
// Cleanup on unmount (handled by Vue's effect scope)
const existingCleanupForInterval = (props.widget as any).onRemoveCleanup
;(props.widget as any).onRemoveCleanup = () => {
existingCleanupForInterval?.()
clearInterval(checkInterval)
}
})

View File

@@ -6,15 +6,22 @@
<!-- Progress Display -->
<div class="setting-section progress-section">
<div class="progress-display">
<div class="progress-display" :class="{ executing: isWorkflowExecuting }">
<div class="progress-info">
<span class="progress-label">Next LoRA:</span>
<span class="progress-label">{{ isWorkflowExecuting ? 'Using LoRA:' : 'Next LoRA:' }}</span>
<span class="progress-name" :title="currentLoraFilename">{{ currentLoraName || 'None' }}</span>
</div>
<div class="progress-counter">
<span class="progress-index">{{ currentIndex }}</span>
<span class="progress-separator">/</span>
<span class="progress-total">{{ totalCount }}</span>
<!-- Repeat indicator (only shown when repeatCount > 1) -->
<div v-if="repeatCount > 1" class="repeat-badge">
<span class="repeat-badge-label">Rep</span>
<span class="repeat-badge-value">{{ repeatUsed }}/{{ repeatCount }}</span>
</div>
<button
class="refresh-button"
:disabled="isLoading"
@@ -39,10 +46,11 @@
</div>
</div>
<!-- Starting Index -->
<!-- Starting Index with Advanced Controls -->
<div class="setting-section">
<label class="setting-label">Starting Index</label>
<div class="index-input-container">
<div class="index-controls-row">
<!-- Index input -->
<input
type="number"
class="index-input"
@@ -57,6 +65,47 @@
@pointerup.stop
/>
<span class="index-hint">1 - {{ totalCount || 1 }}</span>
<!-- Repeat control -->
<span class="repeat-label">x</span>
<input
type="number"
class="repeat-input"
min="1"
max="99"
:value="repeatCount"
@input="onRepeatInput"
@blur="onRepeatBlur"
@pointerdown.stop
@pointermove.stop
@pointerup.stop
title="Repeat each LoRA this many times"
/>
<span class="repeat-hint">times</span>
<!-- Control buttons -->
<button
class="control-btn"
:class="{ active: isPaused }"
@click="$emit('toggle-pause')"
:title="isPaused ? 'Continue iteration' : 'Pause iteration'"
>
<svg v-if="isPaused" viewBox="0 0 24 24" fill="currentColor" class="control-icon">
<path d="M8 5v14l11-7z"/>
</svg>
<svg v-else viewBox="0 0 24 24" fill="currentColor" class="control-icon">
<path d="M6 4h4v16H6zm8 0h4v16h-4z"/>
</svg>
</button>
<button
class="control-btn"
@click="$emit('reset-index')"
title="Reset to index 1"
>
<svg viewBox="0 0 24 24" fill="currentColor" class="control-icon">
<path d="M12 5V1L7 6l5 5V7c3.31 0 6 2.69 6 6s-2.69 6-6 6-6-2.69-6-6H4c0 4.42 3.58 8 8 8s8-3.58 8-8-3.58-8-8-8z"/>
</svg>
</button>
</div>
</div>
@@ -123,6 +172,11 @@ const props = defineProps<{
useCustomClipRange: boolean
isClipStrengthDisabled: boolean
isLoading: boolean
repeatCount: number
repeatUsed: number
isPaused: boolean
isWorkflowExecuting: boolean
executingRepeatStep: number
}>()
const emit = defineEmits<{
@@ -130,11 +184,15 @@ const emit = defineEmits<{
'update:modelStrength': [value: number]
'update:clipStrength': [value: number]
'update:useCustomClipRange': [value: boolean]
'update:repeatCount': [value: number]
'toggle-pause': []
'reset-index': []
'refresh': []
}>()
// Temporary value for input while typing
const tempIndex = ref<string>('')
const tempRepeat = ref<string>('')
const onIndexInput = (event: Event) => {
const input = event.target as HTMLInputElement
@@ -154,6 +212,25 @@ const onIndexBlur = (event: Event) => {
}
tempIndex.value = ''
}
const onRepeatInput = (event: Event) => {
const input = event.target as HTMLInputElement
tempRepeat.value = input.value
}
const onRepeatBlur = (event: Event) => {
const input = event.target as HTMLInputElement
const value = parseInt(input.value, 10)
if (!isNaN(value)) {
const clampedValue = Math.max(1, Math.min(value, 99))
emit('update:repeatCount', clampedValue)
input.value = clampedValue.toString()
} else {
input.value = props.repeatCount.toString()
}
tempRepeat.value = ''
}
</script>
<style scoped>
@@ -203,6 +280,17 @@ const onIndexBlur = (event: Event) => {
display: flex;
justify-content: space-between;
align-items: center;
transition: border-color 0.3s ease;
}
.progress-display.executing {
border-color: rgba(66, 153, 225, 0.5);
animation: pulse 2s ease-in-out infinite;
}
@keyframes pulse {
0%, 100% { border-color: rgba(66, 153, 225, 0.3); }
50% { border-color: rgba(66, 153, 225, 0.7); }
}
.progress-info {
@@ -243,6 +331,9 @@ const onIndexBlur = (event: Event) => {
font-weight: 600;
color: rgba(66, 153, 225, 1);
font-family: 'SF Mono', 'Roboto Mono', monospace;
min-width: 4ch;
text-align: right;
font-variant-numeric: tabular-nums;
}
.progress-separator {
@@ -256,6 +347,9 @@ const onIndexBlur = (event: Event) => {
font-weight: 500;
color: rgba(226, 232, 240, 0.6);
font-family: 'SF Mono', 'Roboto Mono', monospace;
min-width: 4ch;
text-align: left;
font-variant-numeric: tabular-nums;
}
.refresh-button {
@@ -303,16 +397,43 @@ const onIndexBlur = (event: Event) => {
}
}
/* Index Input */
.index-input-container {
/* Repeat Badge */
.repeat-badge {
display: flex;
align-items: center;
gap: 4px;
margin-left: 8px;
padding: 2px 6px;
background: rgba(245, 158, 11, 0.15);
border: 1px solid rgba(245, 158, 11, 0.3);
border-radius: 4px;
}
.repeat-badge-label {
font-size: 10px;
color: rgba(253, 230, 138, 0.7);
text-transform: uppercase;
}
.repeat-badge-value {
font-size: 12px;
font-family: 'SF Mono', 'Roboto Mono', monospace;
color: rgba(253, 230, 138, 1);
min-width: 3ch;
font-variant-numeric: tabular-nums;
}
/* Index Controls Row */
.index-controls-row {
display: flex;
align-items: center;
gap: 8px;
flex-wrap: wrap;
}
.index-input {
width: 80px;
padding: 6px 10px;
width: 60px;
padding: 6px 8px;
background: rgba(26, 32, 44, 0.9);
border: 1px solid rgba(226, 232, 240, 0.2);
border-radius: 6px;
@@ -334,6 +455,75 @@ const onIndexBlur = (event: Event) => {
.index-hint {
font-size: 11px;
color: rgba(226, 232, 240, 0.4);
min-width: 7ch;
font-variant-numeric: tabular-nums;
}
/* Repeat Controls */
.repeat-label {
font-size: 13px;
color: rgba(226, 232, 240, 0.6);
margin-left: 4px;
}
.repeat-input {
width: 44px;
padding: 6px 6px;
background: rgba(26, 32, 44, 0.9);
border: 1px solid rgba(226, 232, 240, 0.2);
border-radius: 6px;
color: #e4e4e7;
font-size: 13px;
font-family: 'SF Mono', 'Roboto Mono', monospace;
text-align: center;
}
.repeat-input:focus {
outline: none;
border-color: rgba(66, 153, 225, 0.6);
}
.repeat-hint {
font-size: 11px;
color: rgba(226, 232, 240, 0.4);
}
/* Control Buttons */
.control-btn {
display: flex;
align-items: center;
justify-content: center;
width: 24px;
height: 24px;
padding: 0;
background: transparent;
border: 1px solid rgba(255, 255, 255, 0.1);
border-radius: 4px;
color: rgba(226, 232, 240, 0.6);
cursor: pointer;
transition: all 0.2s;
}
.control-btn:hover {
background: rgba(66, 153, 225, 0.2);
border-color: rgba(66, 153, 225, 0.4);
color: rgba(191, 219, 254, 1);
}
.control-btn.active {
background: rgba(245, 158, 11, 0.2);
border-color: rgba(245, 158, 11, 0.5);
color: rgba(253, 230, 138, 1);
}
.control-btn.active:hover {
background: rgba(245, 158, 11, 0.3);
border-color: rgba(245, 158, 11, 0.6);
}
.control-icon {
width: 14px;
height: 14px;
}
/* Slider Container */

View File

@@ -80,6 +80,10 @@ export interface CyclerConfig {
// Dual-index mechanism for batch queue synchronization
execution_index?: number | null // Index to use for current execution
next_index?: number | null // Index for display after execution
// Advanced index control features
repeat_count: number // How many times each LoRA should repeat (default: 1)
repeat_used: number // How many times current index has been used
is_paused: boolean // Whether iteration is paused
}
// Widget config union type

View File

@@ -29,6 +29,16 @@ export function useLoraCyclerState(widget: ComponentWidget<CyclerConfig>) {
const executionIndex = ref<number | null>(null)
const nextIndex = ref<number | null>(null)
// Advanced index control features
const repeatCount = ref(1) // How many times each LoRA should repeat
const repeatUsed = ref(0) // How many times current index has been used (internal tracking)
const displayRepeatUsed = ref(0) // For UI display, deferred updates like currentIndex
const isPaused = ref(false) // Whether iteration is paused
// Execution progress tracking (visual feedback)
const isWorkflowExecuting = ref(false) // Workflow is currently running
const executingRepeatStep = ref(0) // Which repeat step (1-based, 0 = not executing)
// Build config object from current state
const buildConfig = (): CyclerConfig => {
// Skip updating widget.value during restoration to prevent infinite loops
@@ -45,6 +55,9 @@ export function useLoraCyclerState(widget: ComponentWidget<CyclerConfig>) {
current_lora_filename: currentLoraFilename.value,
execution_index: executionIndex.value,
next_index: nextIndex.value,
repeat_count: repeatCount.value,
repeat_used: repeatUsed.value,
is_paused: isPaused.value,
}
}
return {
@@ -59,6 +72,9 @@ export function useLoraCyclerState(widget: ComponentWidget<CyclerConfig>) {
current_lora_filename: currentLoraFilename.value,
execution_index: executionIndex.value,
next_index: nextIndex.value,
repeat_count: repeatCount.value,
repeat_used: repeatUsed.value,
is_paused: isPaused.value,
}
}
@@ -77,6 +93,10 @@ export function useLoraCyclerState(widget: ComponentWidget<CyclerConfig>) {
sortBy.value = config.sort_by || 'filename'
currentLoraName.value = config.current_lora_name || ''
currentLoraFilename.value = config.current_lora_filename || ''
// Advanced index control features
repeatCount.value = config.repeat_count ?? 1
repeatUsed.value = config.repeat_used ?? 0
isPaused.value = config.is_paused ?? false
// Note: execution_index and next_index are not restored from config
// as they are transient values used only during batch execution
} finally {
@@ -215,6 +235,19 @@ export function useLoraCyclerState(widget: ComponentWidget<CyclerConfig>) {
}
}
// Reset index to 1 and clear repeat state
const resetIndex = () => {
currentIndex.value = 1
repeatUsed.value = 0
displayRepeatUsed.value = 0
// Note: isPaused is intentionally not reset - user may want to stay paused after reset
}
// Toggle pause state
const togglePause = () => {
isPaused.value = !isPaused.value
}
// Computed property to check if clip strength is disabled
const isClipStrengthDisabled = computed(() => !useCustomClipRange.value)
@@ -236,6 +269,9 @@ export function useLoraCyclerState(widget: ComponentWidget<CyclerConfig>) {
sortBy,
currentLoraName,
currentLoraFilename,
repeatCount,
repeatUsed,
isPaused,
], () => {
widget.value = buildConfig()
}, { deep: true })
@@ -254,6 +290,12 @@ export function useLoraCyclerState(widget: ComponentWidget<CyclerConfig>) {
isLoading,
executionIndex,
nextIndex,
repeatCount,
repeatUsed,
displayRepeatUsed,
isPaused,
isWorkflowExecuting,
executingRepeatStep,
// Computed
isClipStrengthDisabled,
@@ -267,5 +309,7 @@ export function useLoraCyclerState(widget: ComponentWidget<CyclerConfig>) {
setIndex,
generateNextIndex,
initializeNextIndex,
resetIndex,
togglePause,
}
}

View File

@@ -27,6 +27,8 @@ const AUTOCOMPLETE_TEXT_WIDGET_MAX_HEIGHT = 100
// @ts-ignore - ComfyUI external module
import { app } from '../../../scripts/app.js'
// @ts-ignore - ComfyUI external module
import { api } from '../../../scripts/api.js'
// @ts-ignore
import { getPoolConfigFromConnectedNode, getActiveLorasFromNode, updateConnectedTriggerWords, updateDownstreamLoaders } from '../../web/comfyui/utils.js'
@@ -255,7 +257,8 @@ function createLoraCyclerWidget(node) {
const vueApp = createApp(LoraCyclerWidget, {
widget,
node
node,
api
})
vueApp.use(PrimeVue, {

View File

@@ -0,0 +1,634 @@
/**
* Unit tests for useLoraCyclerState composable
*
* Tests pure state transitions and index calculations in isolation.
*/
import { describe, it, expect, beforeEach, vi } from 'vitest'
import { useLoraCyclerState } from '@/composables/useLoraCyclerState'
import {
createMockWidget,
createMockCyclerConfig,
createMockPoolConfig
} from '../fixtures/mockConfigs'
import { setupFetchMock, resetFetchMock } from '../setup'
describe('useLoraCyclerState', () => {
beforeEach(() => {
resetFetchMock()
})
describe('Initial State', () => {
it('should initialize with default values', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
expect(state.currentIndex.value).toBe(1)
expect(state.totalCount.value).toBe(0)
expect(state.poolConfigHash.value).toBe('')
expect(state.modelStrength.value).toBe(1.0)
expect(state.clipStrength.value).toBe(1.0)
expect(state.useCustomClipRange.value).toBe(false)
expect(state.sortBy.value).toBe('filename')
expect(state.executionIndex.value).toBeNull()
expect(state.nextIndex.value).toBeNull()
expect(state.repeatCount.value).toBe(1)
expect(state.repeatUsed.value).toBe(0)
expect(state.displayRepeatUsed.value).toBe(0)
expect(state.isPaused.value).toBe(false)
})
})
describe('restoreFromConfig', () => {
it('should restore state from config object', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
const config = createMockCyclerConfig({
current_index: 3,
total_count: 10,
model_strength: 0.8,
clip_strength: 0.6,
use_same_clip_strength: false,
repeat_count: 2,
repeat_used: 1,
is_paused: true
})
state.restoreFromConfig(config)
expect(state.currentIndex.value).toBe(3)
expect(state.totalCount.value).toBe(10)
expect(state.modelStrength.value).toBe(0.8)
expect(state.clipStrength.value).toBe(0.6)
expect(state.useCustomClipRange.value).toBe(true) // inverted from use_same_clip_strength
expect(state.repeatCount.value).toBe(2)
expect(state.repeatUsed.value).toBe(1)
expect(state.isPaused.value).toBe(true)
})
it('should handle missing optional fields with defaults', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
// Minimal config
state.restoreFromConfig({
current_index: 5,
total_count: 10,
pool_config_hash: '',
model_strength: 1.0,
clip_strength: 1.0,
use_same_clip_strength: true,
sort_by: 'filename',
current_lora_name: '',
current_lora_filename: '',
repeat_count: 1,
repeat_used: 0,
is_paused: false
})
expect(state.currentIndex.value).toBe(5)
expect(state.repeatCount.value).toBe(1)
expect(state.isPaused.value).toBe(false)
})
it('should not restore execution_index and next_index (transient values)', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
// Set execution indices
state.executionIndex.value = 2
state.nextIndex.value = 3
// Restore from config (these fields in config should be ignored)
state.restoreFromConfig(createMockCyclerConfig({
execution_index: 5,
next_index: 6
}))
// Execution indices should remain unchanged
expect(state.executionIndex.value).toBe(2)
expect(state.nextIndex.value).toBe(3)
})
})
describe('buildConfig', () => {
it('should build config object from current state', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.currentIndex.value = 3
state.totalCount.value = 10
state.modelStrength.value = 0.8
state.repeatCount.value = 2
state.repeatUsed.value = 1
state.isPaused.value = true
const config = state.buildConfig()
expect(config.current_index).toBe(3)
expect(config.total_count).toBe(10)
expect(config.model_strength).toBe(0.8)
expect(config.repeat_count).toBe(2)
expect(config.repeat_used).toBe(1)
expect(config.is_paused).toBe(true)
})
})
describe('setIndex', () => {
it('should set index within valid range', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.totalCount.value = 10
state.setIndex(5)
expect(state.currentIndex.value).toBe(5)
state.setIndex(1)
expect(state.currentIndex.value).toBe(1)
state.setIndex(10)
expect(state.currentIndex.value).toBe(10)
})
it('should not set index outside valid range', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.totalCount.value = 10
state.currentIndex.value = 5
state.setIndex(0)
expect(state.currentIndex.value).toBe(5) // unchanged
state.setIndex(11)
expect(state.currentIndex.value).toBe(5) // unchanged
state.setIndex(-1)
expect(state.currentIndex.value).toBe(5) // unchanged
})
})
describe('resetIndex', () => {
it('should reset index to 1 and clear repeatUsed and displayRepeatUsed', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.currentIndex.value = 5
state.repeatUsed.value = 2
state.displayRepeatUsed.value = 2
state.isPaused.value = true
state.resetIndex()
expect(state.currentIndex.value).toBe(1)
expect(state.repeatUsed.value).toBe(0)
expect(state.displayRepeatUsed.value).toBe(0)
expect(state.isPaused.value).toBe(true) // isPaused should NOT be reset
})
})
describe('togglePause', () => {
it('should toggle pause state', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
expect(state.isPaused.value).toBe(false)
state.togglePause()
expect(state.isPaused.value).toBe(true)
state.togglePause()
expect(state.isPaused.value).toBe(false)
})
})
describe('generateNextIndex', () => {
it('should shift indices correctly', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.totalCount.value = 5
state.currentIndex.value = 1
state.nextIndex.value = 2
// First call: executionIndex becomes 2 (previous nextIndex), nextIndex becomes 3
state.generateNextIndex()
expect(state.executionIndex.value).toBe(2)
expect(state.nextIndex.value).toBe(3)
// Second call: executionIndex becomes 3, nextIndex becomes 4
state.generateNextIndex()
expect(state.executionIndex.value).toBe(3)
expect(state.nextIndex.value).toBe(4)
})
it('should wrap index from totalCount to 1', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.totalCount.value = 5
state.nextIndex.value = 5 // At the last index
state.generateNextIndex()
expect(state.executionIndex.value).toBe(5)
expect(state.nextIndex.value).toBe(1) // Wrapped to 1
})
it('should use currentIndex when nextIndex is null', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.totalCount.value = 5
state.currentIndex.value = 3
state.nextIndex.value = null
state.generateNextIndex()
// executionIndex becomes previous nextIndex (null)
expect(state.executionIndex.value).toBeNull()
// nextIndex is calculated from currentIndex (3) -> 4
expect(state.nextIndex.value).toBe(4)
})
})
describe('initializeNextIndex', () => {
it('should initialize nextIndex to currentIndex + 1 when null', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.totalCount.value = 5
state.currentIndex.value = 1
state.nextIndex.value = null
state.initializeNextIndex()
expect(state.nextIndex.value).toBe(2)
})
it('should wrap nextIndex when currentIndex is at totalCount', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.totalCount.value = 5
state.currentIndex.value = 5
state.nextIndex.value = null
state.initializeNextIndex()
expect(state.nextIndex.value).toBe(1) // Wrapped
})
it('should not change nextIndex if already set', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.totalCount.value = 5
state.currentIndex.value = 1
state.nextIndex.value = 4
state.initializeNextIndex()
expect(state.nextIndex.value).toBe(4) // Unchanged
})
})
describe('Index Wrapping Edge Cases', () => {
it('should handle single item pool', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.totalCount.value = 1
state.currentIndex.value = 1
state.nextIndex.value = null
state.initializeNextIndex()
expect(state.nextIndex.value).toBe(1) // Wraps back to 1
})
it('should handle zero total count gracefully', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.totalCount.value = 0
state.currentIndex.value = 1
state.nextIndex.value = null
state.initializeNextIndex()
// Should still calculate, even if totalCount is 0
expect(state.nextIndex.value).toBe(2) // No wrapping since totalCount <= 0
})
})
describe('hashPoolConfig', () => {
it('should generate consistent hash for same config', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
const config1 = createMockPoolConfig()
const config2 = createMockPoolConfig()
const hash1 = state.hashPoolConfig(config1)
const hash2 = state.hashPoolConfig(config2)
expect(hash1).toBe(hash2)
})
it('should generate different hash for different configs', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
const config1 = createMockPoolConfig({
filters: {
baseModels: ['SD 1.5'],
tags: { include: [], exclude: [] },
folders: { include: [], exclude: [] },
license: { noCreditRequired: false, allowSelling: false }
}
})
const config2 = createMockPoolConfig({
filters: {
baseModels: ['SDXL'],
tags: { include: [], exclude: [] },
folders: { include: [], exclude: [] },
license: { noCreditRequired: false, allowSelling: false }
}
})
const hash1 = state.hashPoolConfig(config1)
const hash2 = state.hashPoolConfig(config2)
expect(hash1).not.toBe(hash2)
})
it('should return empty string for null config', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
expect(state.hashPoolConfig(null)).toBe('')
})
it('should return empty string for config without filters', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
const config = { version: 1, preview: { matchCount: 0, lastUpdated: 0 } } as any
expect(state.hashPoolConfig(config)).toBe('')
})
})
describe('Clip Strength Synchronization', () => {
it('should sync clipStrength with modelStrength when useCustomClipRange is false', async () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.useCustomClipRange.value = false
state.modelStrength.value = 0.5
// Wait for Vue reactivity
await vi.waitFor(() => {
expect(state.clipStrength.value).toBe(0.5)
})
})
it('should not sync clipStrength when useCustomClipRange is true', async () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.useCustomClipRange.value = true
state.clipStrength.value = 0.7
state.modelStrength.value = 0.5
// clipStrength should remain unchanged
await vi.waitFor(() => {
expect(state.clipStrength.value).toBe(0.7)
})
})
})
describe('Widget Value Synchronization', () => {
it('should update widget.value when state changes', async () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.currentIndex.value = 3
state.repeatCount.value = 2
// Wait for Vue reactivity
await vi.waitFor(() => {
expect(widget.value?.current_index).toBe(3)
expect(widget.value?.repeat_count).toBe(2)
})
})
})
describe('Repeat Logic State', () => {
it('should track repeatUsed correctly', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.repeatCount.value = 3
expect(state.repeatUsed.value).toBe(0)
state.repeatUsed.value = 1
expect(state.repeatUsed.value).toBe(1)
state.repeatUsed.value = 3
expect(state.repeatUsed.value).toBe(3)
})
})
describe('fetchCyclerList', () => {
it('should call API and return lora list', async () => {
const mockLoras = [
{ file_name: 'lora1.safetensors', model_name: 'LoRA 1' },
{ file_name: 'lora2.safetensors', model_name: 'LoRA 2' }
]
setupFetchMock({ success: true, loras: mockLoras })
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
const result = await state.fetchCyclerList(null)
expect(result).toEqual(mockLoras)
expect(state.isLoading.value).toBe(false)
})
it('should include pool config filters in request', async () => {
const mockFetch = setupFetchMock({ success: true, loras: [] })
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
const poolConfig = createMockPoolConfig()
await state.fetchCyclerList(poolConfig)
expect(mockFetch).toHaveBeenCalledWith(
'/api/lm/loras/cycler-list',
expect.objectContaining({
method: 'POST',
body: expect.stringContaining('pool_config')
})
)
})
it('should set isLoading during fetch', async () => {
let resolvePromise: (value: unknown) => void
const pendingPromise = new Promise(resolve => {
resolvePromise = resolve
})
// Use mockFetch from setup instead of overriding global
const { mockFetch } = await import('../setup')
mockFetch.mockReset()
mockFetch.mockReturnValue(pendingPromise)
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
const fetchPromise = state.fetchCyclerList(null)
expect(state.isLoading.value).toBe(true)
// Resolve the fetch
resolvePromise!({
ok: true,
json: () => Promise.resolve({ success: true, loras: [] })
})
await fetchPromise
expect(state.isLoading.value).toBe(false)
})
})
describe('refreshList', () => {
it('should update totalCount from API response', async () => {
const mockLoras = [
{ file_name: 'lora1.safetensors', model_name: 'LoRA 1' },
{ file_name: 'lora2.safetensors', model_name: 'LoRA 2' },
{ file_name: 'lora3.safetensors', model_name: 'LoRA 3' }
]
// Reset and setup fresh mock
resetFetchMock()
setupFetchMock({ success: true, loras: mockLoras })
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
await state.refreshList(null)
expect(state.totalCount.value).toBe(3)
})
it('should reset index to 1 when pool config hash changes', async () => {
resetFetchMock()
setupFetchMock({ success: true, loras: [{ file_name: 'lora1.safetensors', model_name: 'LoRA 1' }] })
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
// Set initial state
state.currentIndex.value = 5
state.poolConfigHash.value = 'old-hash'
// Refresh with new config (different hash)
const newConfig = createMockPoolConfig({
filters: {
baseModels: ['SDXL'],
tags: { include: [], exclude: [] },
folders: { include: [], exclude: [] },
license: { noCreditRequired: false, allowSelling: false }
}
})
await state.refreshList(newConfig)
expect(state.currentIndex.value).toBe(1)
})
it('should clamp index when totalCount decreases', async () => {
// Setup mock first, then create state
resetFetchMock()
setupFetchMock({
success: true,
loras: [
{ file_name: 'lora1.safetensors', model_name: 'LoRA 1' },
{ file_name: 'lora2.safetensors', model_name: 'LoRA 2' },
{ file_name: 'lora3.safetensors', model_name: 'LoRA 3' }
]
})
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
// Set initial state with high index
state.currentIndex.value = 10
state.totalCount.value = 10
await state.refreshList(null)
expect(state.totalCount.value).toBe(3)
expect(state.currentIndex.value).toBe(3) // Clamped to max
})
it('should update currentLoraName and currentLoraFilename', async () => {
resetFetchMock()
setupFetchMock({
success: true,
loras: [
{ file_name: 'lora1.safetensors', model_name: 'LoRA 1' },
{ file_name: 'lora2.safetensors', model_name: 'LoRA 2' }
]
})
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
// Set totalCount first so setIndex works, then set index
state.totalCount.value = 2
state.currentIndex.value = 2
await state.refreshList(null)
expect(state.currentLoraFilename.value).toBe('lora2.safetensors')
})
it('should handle empty list gracefully', async () => {
resetFetchMock()
setupFetchMock({ success: true, loras: [] })
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.currentIndex.value = 5
state.totalCount.value = 5
await state.refreshList(null)
expect(state.totalCount.value).toBe(0)
// When totalCount is 0, Math.max(1, 0) = 1, but if currentIndex > totalCount it gets clamped to max(1, totalCount)
// Looking at the actual code: Math.max(1, totalCount) where totalCount=0 gives 1
expect(state.currentIndex.value).toBe(1)
expect(state.currentLoraName.value).toBe('')
expect(state.currentLoraFilename.value).toBe('')
})
})
describe('isClipStrengthDisabled computed', () => {
it('should return true when useCustomClipRange is false', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.useCustomClipRange.value = false
expect(state.isClipStrengthDisabled.value).toBe(true)
})
it('should return false when useCustomClipRange is true', () => {
const widget = createMockWidget()
const state = useLoraCyclerState(widget)
state.useCustomClipRange.value = true
expect(state.isClipStrengthDisabled.value).toBe(false)
})
})
})

View File

@@ -0,0 +1,175 @@
/**
* Test fixtures for LoRA Cycler testing
*/
import type { CyclerConfig, LoraPoolConfig } from '@/composables/types'
import type { CyclerLoraItem } from '@/composables/useLoraCyclerState'
/**
* Creates a default CyclerConfig for testing
*/
export function createMockCyclerConfig(overrides: Partial<CyclerConfig> = {}): CyclerConfig {
return {
current_index: 1,
total_count: 5,
pool_config_hash: '',
model_strength: 1.0,
clip_strength: 1.0,
use_same_clip_strength: true,
sort_by: 'filename',
current_lora_name: 'lora1.safetensors',
current_lora_filename: 'lora1.safetensors',
execution_index: null,
next_index: null,
repeat_count: 1,
repeat_used: 0,
is_paused: false,
...overrides
}
}
/**
* Creates a mock LoraPoolConfig for testing
*/
export function createMockPoolConfig(overrides: Partial<LoraPoolConfig> = {}): LoraPoolConfig {
return {
version: 1,
filters: {
baseModels: ['SD 1.5'],
tags: { include: [], exclude: [] },
folders: { include: [], exclude: [] },
license: {
noCreditRequired: false,
allowSelling: false
}
},
preview: { matchCount: 10, lastUpdated: Date.now() },
...overrides
}
}
/**
* Creates a list of mock LoRA items for testing
*/
export function createMockLoraList(count: number = 5): CyclerLoraItem[] {
return Array.from({ length: count }, (_, i) => ({
file_name: `lora${i + 1}.safetensors`,
model_name: `LoRA Model ${i + 1}`
}))
}
/**
* Creates a mock widget object for testing useLoraCyclerState
*/
export function createMockWidget(initialValue?: CyclerConfig) {
return {
value: initialValue,
callback: undefined as ((v: CyclerConfig) => void) | undefined
}
}
/**
* Creates a mock node object for testing component integration
*/
export function createMockNode(options: {
id?: number
poolConfig?: LoraPoolConfig | null
} = {}) {
const { id = 1, poolConfig = null } = options
return {
id,
inputs: [],
widgets: [],
graph: null,
getPoolConfig: () => poolConfig,
onExecuted: undefined as ((output: unknown) => void) | undefined
}
}
/**
* Creates mock execution output from the backend
*/
export function createMockExecutionOutput(options: {
nextIndex?: number
totalCount?: number
nextLoraName?: string
nextLoraFilename?: string
currentLoraName?: string
currentLoraFilename?: string
} = {}) {
const {
nextIndex = 2,
totalCount = 5,
nextLoraName = 'lora2.safetensors',
nextLoraFilename = 'lora2.safetensors',
currentLoraName = 'lora1.safetensors',
currentLoraFilename = 'lora1.safetensors'
} = options
return {
next_index: [nextIndex],
total_count: [totalCount],
next_lora_name: [nextLoraName],
next_lora_filename: [nextLoraFilename],
current_lora_name: [currentLoraName],
current_lora_filename: [currentLoraFilename]
}
}
/**
* Sample LoRA lists for specific test scenarios
*/
export const SAMPLE_LORA_LISTS = {
// 3 LoRAs for simple cycling tests
small: createMockLoraList(3),
// 5 LoRAs for standard tests
medium: createMockLoraList(5),
// 10 LoRAs for larger tests
large: createMockLoraList(10),
// Empty list for edge case testing
empty: [] as CyclerLoraItem[],
// Single LoRA for edge case testing
single: createMockLoraList(1)
}
/**
* Sample pool configs for testing
*/
export const SAMPLE_POOL_CONFIGS = {
// Default SD 1.5 filter
sd15: createMockPoolConfig({
filters: {
baseModels: ['SD 1.5'],
tags: { include: [], exclude: [] },
folders: { include: [], exclude: [] },
license: { noCreditRequired: false, allowSelling: false }
}
}),
// SDXL filter
sdxl: createMockPoolConfig({
filters: {
baseModels: ['SDXL'],
tags: { include: [], exclude: [] },
folders: { include: [], exclude: [] },
license: { noCreditRequired: false, allowSelling: false }
}
}),
// Filter with tags
withTags: createMockPoolConfig({
filters: {
baseModels: ['SD 1.5'],
tags: { include: ['anime', 'style'], exclude: ['realistic'] },
folders: { include: [], exclude: [] },
license: { noCreditRequired: false, allowSelling: false }
}
}),
// Empty/null config
empty: null as LoraPoolConfig | null
}

View File

@@ -0,0 +1,910 @@
/**
* Integration tests for batch queue execution scenarios
*
* These tests simulate ComfyUI's execution modes to verify correct LoRA cycling behavior.
*/
import { describe, it, expect, beforeEach, vi } from 'vitest'
import { useLoraCyclerState } from '@/composables/useLoraCyclerState'
import type { CyclerConfig } from '@/composables/types'
import {
createMockWidget,
createMockCyclerConfig,
createMockLoraList,
createMockPoolConfig
} from '../fixtures/mockConfigs'
import { setupFetchMock, resetFetchMock } from '../setup'
import { BatchQueueSimulator, IndexTracker } from '../utils/BatchQueueSimulator'
/**
* Creates a test harness that mimics the LoraCyclerWidget's behavior
*/
function createTestHarness(options: {
totalCount?: number
initialIndex?: number
repeatCount?: number
isPaused?: boolean
} = {}) {
const {
totalCount = 5,
initialIndex = 1,
repeatCount = 1,
isPaused = false
} = options
const widget = createMockWidget() as any
const state = useLoraCyclerState(widget)
// Initialize state
state.totalCount.value = totalCount
state.currentIndex.value = initialIndex
state.repeatCount.value = repeatCount
state.isPaused.value = isPaused
// Track if first execution
const HAS_EXECUTED = Symbol('HAS_EXECUTED')
widget[HAS_EXECUTED] = false
// Execution queue for batch synchronization
interface ExecutionContext {
isPaused: boolean
repeatUsed: number
repeatCount: number
shouldAdvanceDisplay: boolean
displayRepeatUsed: number // Value to show in UI after completion
}
const executionQueue: ExecutionContext[] = []
// beforeQueued hook (mirrors LoraCyclerWidget.vue logic)
widget.beforeQueued = () => {
if (state.isPaused.value) {
executionQueue.push({
isPaused: true,
repeatUsed: state.repeatUsed.value,
repeatCount: state.repeatCount.value,
shouldAdvanceDisplay: false,
displayRepeatUsed: state.displayRepeatUsed.value // Keep current display value when paused
})
// CRITICAL: Clear execution_index when paused to force backend to use current_index
const pausedConfig = state.buildConfig()
pausedConfig.execution_index = null
widget.value = pausedConfig
return
}
if (widget[HAS_EXECUTED]) {
if (state.repeatUsed.value < state.repeatCount.value) {
state.repeatUsed.value++
} else {
state.repeatUsed.value = 1
state.generateNextIndex()
}
} else {
state.repeatUsed.value = 1
state.initializeNextIndex()
widget[HAS_EXECUTED] = true
}
const shouldAdvanceDisplay = state.repeatUsed.value >= state.repeatCount.value
// Calculate the display value to show after this execution completes
// When advancing to a new LoRA: reset to 0 (fresh start for new LoRA)
// When repeating same LoRA: show current repeat step
const displayRepeatUsed = shouldAdvanceDisplay ? 0 : state.repeatUsed.value
executionQueue.push({
isPaused: false,
repeatUsed: state.repeatUsed.value,
repeatCount: state.repeatCount.value,
shouldAdvanceDisplay,
displayRepeatUsed
})
widget.value = state.buildConfig()
}
// Mock node with onExecuted
const node = {
id: 1,
onExecuted: (output: any) => {
const context = executionQueue.shift()
const shouldAdvanceDisplay = context
? context.shouldAdvanceDisplay
: (!state.isPaused.value && state.repeatUsed.value >= state.repeatCount.value)
// Update displayRepeatUsed (deferred like index updates)
if (context) {
state.displayRepeatUsed.value = context.displayRepeatUsed
}
if (shouldAdvanceDisplay && output?.next_index !== undefined) {
const val = Array.isArray(output.next_index) ? output.next_index[0] : output.next_index
state.currentIndex.value = val
}
if (output?.total_count !== undefined) {
const val = Array.isArray(output.total_count) ? output.total_count[0] : output.total_count
state.totalCount.value = val
}
if (shouldAdvanceDisplay) {
if (output?.next_lora_name !== undefined) {
const val = Array.isArray(output.next_lora_name) ? output.next_lora_name[0] : output.next_lora_name
state.currentLoraName.value = val
}
if (output?.next_lora_filename !== undefined) {
const val = Array.isArray(output.next_lora_filename) ? output.next_lora_filename[0] : output.next_lora_filename
state.currentLoraFilename.value = val
}
}
}
}
// Reset execution state (mimics manual index change)
const resetExecutionState = () => {
widget[HAS_EXECUTED] = false
state.executionIndex.value = null
state.nextIndex.value = null
executionQueue.length = 0
}
return {
widget,
state,
node,
executionQueue,
resetExecutionState,
getConfig: () => state.buildConfig(),
HAS_EXECUTED
}
}
describe('Batch Queue Integration Tests', () => {
beforeEach(() => {
resetFetchMock()
})
describe('Basic Cycling', () => {
it('should cycle through N LoRAs in batch of N (batch queue mode)', async () => {
const harness = createTestHarness({ totalCount: 3 })
const simulator = new BatchQueueSimulator({ totalCount: 3 })
// Simulate batch queue of 3 prompts
await simulator.runBatchQueue(
3,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// After cycling through all 3, currentIndex should wrap back to 1
// First execution: index 1, next becomes 2
// Second execution: index 2, next becomes 3
// Third execution: index 3, next becomes 1
expect(harness.state.currentIndex.value).toBe(1)
})
it('should cycle through N LoRAs in batch of N (sequential mode)', async () => {
const harness = createTestHarness({ totalCount: 3 })
const simulator = new BatchQueueSimulator({ totalCount: 3 })
// Simulate sequential execution of 3 prompts
await simulator.runSequential(
3,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// Same result as batch mode
expect(harness.state.currentIndex.value).toBe(1)
})
it('should handle partial cycle (batch of 2 in pool of 5)', async () => {
const harness = createTestHarness({ totalCount: 5, initialIndex: 1 })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
await simulator.runBatchQueue(
2,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// After 2 executions starting from 1: 1 -> 2 -> 3
expect(harness.state.currentIndex.value).toBe(3)
})
})
describe('Repeat Functionality', () => {
it('should repeat each LoRA repeatCount times', async () => {
const harness = createTestHarness({ totalCount: 3, repeatCount: 2 })
const simulator = new BatchQueueSimulator({ totalCount: 3 })
// With repeatCount=2, need 6 executions to cycle through 3 LoRAs
await simulator.runBatchQueue(
6,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// Should have cycled back to beginning
expect(harness.state.currentIndex.value).toBe(1)
})
it('should track repeatUsed correctly during batch', async () => {
const harness = createTestHarness({ totalCount: 3, repeatCount: 3 })
// First beforeQueued: repeatUsed = 1
harness.widget.beforeQueued()
expect(harness.state.repeatUsed.value).toBe(1)
// Second beforeQueued: repeatUsed = 2
harness.widget.beforeQueued()
expect(harness.state.repeatUsed.value).toBe(2)
// Third beforeQueued: repeatUsed = 3 (will advance on next)
harness.widget.beforeQueued()
expect(harness.state.repeatUsed.value).toBe(3)
// Fourth beforeQueued: repeatUsed resets to 1, index advances
harness.widget.beforeQueued()
expect(harness.state.repeatUsed.value).toBe(1)
expect(harness.state.nextIndex.value).toBe(3) // Advanced from 2 to 3
})
it('should not advance display until repeat cycle completes', async () => {
const harness = createTestHarness({ totalCount: 5, repeatCount: 2 })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
// First execution: repeatUsed=1 < repeatCount=2, shouldAdvanceDisplay=false
// Second execution: repeatUsed=2 >= repeatCount=2, shouldAdvanceDisplay=true
const indexHistory: number[] = []
// Override onExecuted to track index changes
const originalOnExecuted = harness.node.onExecuted
harness.node.onExecuted = (output: any) => {
originalOnExecuted(output)
indexHistory.push(harness.state.currentIndex.value)
}
await simulator.runBatchQueue(
4,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// Index should only change on 2nd and 4th execution
// Starting at 1: stay 1, advance to 2, stay 2, advance to 3
expect(indexHistory).toEqual([1, 2, 2, 3])
})
it('should defer displayRepeatUsed updates until workflow completion', async () => {
const harness = createTestHarness({ totalCount: 3, repeatCount: 3 })
// Initial state
expect(harness.state.displayRepeatUsed.value).toBe(0)
// Queue 3 executions in batch mode (all beforeQueued before any onExecuted)
harness.widget.beforeQueued() // repeatUsed = 1
harness.widget.beforeQueued() // repeatUsed = 2
harness.widget.beforeQueued() // repeatUsed = 3
// displayRepeatUsed should NOT have changed yet (still 0)
// because no onExecuted has been called
expect(harness.state.displayRepeatUsed.value).toBe(0)
// Now simulate workflow completions
harness.node.onExecuted({ next_index: 1 })
expect(harness.state.displayRepeatUsed.value).toBe(1)
harness.node.onExecuted({ next_index: 1 })
expect(harness.state.displayRepeatUsed.value).toBe(2)
harness.node.onExecuted({ next_index: 2 })
// After completing repeat cycle, displayRepeatUsed resets to 0
expect(harness.state.displayRepeatUsed.value).toBe(0)
})
it('should reset displayRepeatUsed to 0 when advancing to new LoRA', async () => {
const harness = createTestHarness({ totalCount: 3, repeatCount: 2 })
const simulator = new BatchQueueSimulator({ totalCount: 3 })
const displayHistory: number[] = []
const originalOnExecuted = harness.node.onExecuted
harness.node.onExecuted = (output: any) => {
originalOnExecuted(output)
displayHistory.push(harness.state.displayRepeatUsed.value)
}
// Run 4 executions: 2 repeats of LoRA 1, 2 repeats of LoRA 2
await simulator.runBatchQueue(
4,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// displayRepeatUsed should show:
// 1st exec: 1 (first repeat of LoRA 1)
// 2nd exec: 0 (complete, reset for next LoRA)
// 3rd exec: 1 (first repeat of LoRA 2)
// 4th exec: 0 (complete, reset for next LoRA)
expect(displayHistory).toEqual([1, 0, 1, 0])
})
it('should show current repeat step when not advancing', async () => {
const harness = createTestHarness({ totalCount: 3, repeatCount: 4 })
const simulator = new BatchQueueSimulator({ totalCount: 3 })
const displayHistory: number[] = []
const originalOnExecuted = harness.node.onExecuted
harness.node.onExecuted = (output: any) => {
originalOnExecuted(output)
displayHistory.push(harness.state.displayRepeatUsed.value)
}
// Run 4 executions: all 4 repeats of the same LoRA
await simulator.runBatchQueue(
4,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// displayRepeatUsed should show:
// 1st exec: 1 (repeat 1/4, not advancing)
// 2nd exec: 2 (repeat 2/4, not advancing)
// 3rd exec: 3 (repeat 3/4, not advancing)
// 4th exec: 0 (repeat 4/4, complete, reset for next LoRA)
expect(displayHistory).toEqual([1, 2, 3, 0])
})
})
describe('Pause Functionality', () => {
it('should maintain index when paused', async () => {
const harness = createTestHarness({ totalCount: 5, isPaused: true })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
await simulator.runBatchQueue(
3,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// Index should not advance when paused
expect(harness.state.currentIndex.value).toBe(1)
})
it('should not count paused executions toward repeat limit', async () => {
const harness = createTestHarness({ totalCount: 5, repeatCount: 2 })
// Run 2 executions while paused
harness.state.isPaused.value = true
harness.widget.beforeQueued()
harness.widget.beforeQueued()
// repeatUsed should still be 0 (paused executions don't count)
expect(harness.state.repeatUsed.value).toBe(0)
// Unpause and run
harness.state.isPaused.value = false
harness.widget.beforeQueued()
expect(harness.state.repeatUsed.value).toBe(1)
})
it('should preserve displayRepeatUsed when paused', async () => {
const harness = createTestHarness({ totalCount: 5, repeatCount: 3 })
// Run one execution to set displayRepeatUsed
harness.widget.beforeQueued()
harness.node.onExecuted({ next_index: 1 })
expect(harness.state.displayRepeatUsed.value).toBe(1)
// Pause
harness.state.isPaused.value = true
// Queue and execute while paused
harness.widget.beforeQueued()
harness.node.onExecuted({ next_index: 1 })
// displayRepeatUsed should remain at 1 (paused executions don't change it)
expect(harness.state.displayRepeatUsed.value).toBe(1)
// Queue another paused execution
harness.widget.beforeQueued()
harness.node.onExecuted({ next_index: 1 })
// Still should be 1
expect(harness.state.displayRepeatUsed.value).toBe(1)
})
it('should use same LoRA when pause is toggled mid-batch', async () => {
// This tests the critical bug scenario:
// 1. User queues multiple prompts (not paused)
// 2. All beforeQueued calls complete, each advancing execution_index
// 3. User clicks pause
// 4. onExecuted starts firing - paused executions should use current_index, not execution_index
const harness = createTestHarness({ totalCount: 5 })
// Queue first prompt (not paused) - this sets up execution_index
harness.widget.beforeQueued()
const config1 = harness.getConfig()
expect(config1.execution_index).toBeNull() // First execution uses current_index
// User clicks pause mid-batch
harness.state.isPaused.value = true
// Queue subsequent prompts while paused
harness.widget.beforeQueued()
const config2 = harness.getConfig()
// CRITICAL: execution_index should be null when paused to force backend to use current_index
expect(config2.execution_index).toBeNull()
harness.widget.beforeQueued()
const config3 = harness.getConfig()
expect(config3.execution_index).toBeNull()
// Verify execution queue has correct context
expect(harness.executionQueue.length).toBe(3)
expect(harness.executionQueue[0].isPaused).toBe(false)
expect(harness.executionQueue[1].isPaused).toBe(true)
expect(harness.executionQueue[2].isPaused).toBe(true)
})
it('should have null execution_index in widget.value when paused even after non-paused queues', async () => {
// More detailed test for the execution_index clearing behavior
// This tests that widget.value (what backend receives) has null execution_index
const harness = createTestHarness({ totalCount: 5 })
// Queue 3 prompts while not paused
harness.widget.beforeQueued()
harness.widget.beforeQueued()
harness.widget.beforeQueued()
// Verify execution_index was set by non-paused queues in widget.value
expect(harness.widget.value.execution_index).not.toBeNull()
// User pauses
harness.state.isPaused.value = true
// Queue while paused - should clear execution_index in widget.value
// This is the value that gets sent to the backend
harness.widget.beforeQueued()
expect(harness.widget.value.execution_index).toBeNull()
// State's executionIndex may still have the old value (that's fine)
// What matters is widget.value which is what the backend uses
})
it('should clear server queue when pausing mid-batch', async () => {
// This tests the fix for the batch queue pause bug:
// When user presses pause during batch execution, pending queue items should be cleared
const harness = createTestHarness({ totalCount: 5 })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
// Mock fetch to track calls to /queue
const fetchCalls: { url: string; body: any }[] = []
const originalFetch = global.fetch
global.fetch = vi.fn().mockImplementation((url: string, options?: RequestInit) => {
if (url === '/queue') {
fetchCalls.push({ url, body: options?.body ? JSON.parse(options.body as string) : null })
return Promise.resolve({ ok: true, json: () => Promise.resolve({}) })
}
// Call through for other URLs (like cycler-list API)
return originalFetch(url, options)
}) as any
try {
// Queue 4 prompts while not paused
harness.widget.beforeQueued()
harness.widget.beforeQueued()
harness.widget.beforeQueued()
harness.widget.beforeQueued()
// Verify 4 contexts were queued
expect(harness.executionQueue.length).toBe(4)
// Simulate pressing pause (this is what handleTogglePause does in the component)
const wasPaused = harness.state.isPaused.value
harness.state.togglePause()
// When transitioning to paused, the component should:
// 1. Reset execution state
// 2. Clear execution queue
// 3. Call fetch('/queue', { clear: true })
if (!wasPaused && harness.state.isPaused.value) {
// Reset execution state (mimics component behavior)
harness.resetExecutionState()
// Clear server queue (mimics component behavior)
await fetch('/queue', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ clear: true })
})
}
// Verify execution queue was cleared
expect(harness.executionQueue.length).toBe(0)
// Verify fetch was called with correct parameters
expect(fetchCalls.length).toBe(1)
expect(fetchCalls[0].url).toBe('/queue')
expect(fetchCalls[0].body).toEqual({ clear: true })
} finally {
global.fetch = originalFetch
}
})
it('should resume cycling after unpause', async () => {
const harness = createTestHarness({ totalCount: 3, initialIndex: 2 })
const simulator = new BatchQueueSimulator({ totalCount: 3 })
// Execute once while not paused
await simulator.runSingle(
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// Pause
harness.state.isPaused.value = true
// Execute twice while paused
await simulator.runBatchQueue(
2,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// Unpause and execute
harness.state.isPaused.value = false
await simulator.runSingle(
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// Should continue from where it left off (index 3 -> 1)
expect(harness.state.currentIndex.value).toBe(1)
})
})
describe('Manual Index Change', () => {
it('should reset execution state on manual index change', async () => {
const harness = createTestHarness({ totalCount: 5 })
// Execute a few times
harness.widget.beforeQueued()
harness.widget.beforeQueued()
expect(harness.widget[harness.HAS_EXECUTED]).toBe(true)
expect(harness.executionQueue.length).toBe(2)
// User manually changes index (mimics handleIndexUpdate)
harness.resetExecutionState()
harness.state.setIndex(4)
expect(harness.widget[harness.HAS_EXECUTED]).toBe(false)
expect(harness.state.executionIndex.value).toBeNull()
expect(harness.state.nextIndex.value).toBeNull()
expect(harness.executionQueue.length).toBe(0)
})
it('should start fresh cycle from manual index', async () => {
const harness = createTestHarness({ totalCount: 5 })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
// Execute twice starting from 1
await simulator.runBatchQueue(
2,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
expect(harness.state.currentIndex.value).toBe(3)
// User manually sets index to 1
harness.resetExecutionState()
harness.state.setIndex(1)
// Execute again - should start fresh from 1
await simulator.runBatchQueue(
2,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
expect(harness.state.currentIndex.value).toBe(3)
})
})
describe('Execution Queue Mismatch', () => {
it('should handle interrupted execution (queue > executed)', async () => {
const harness = createTestHarness({ totalCount: 5 })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
// Queue 5 but only execute 2 (simulates cancel)
await simulator.runInterrupted(
5, // queued
2, // executed
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// 3 contexts remain in queue
expect(harness.executionQueue.length).toBe(3)
// Index should reflect only the 2 executions that completed
expect(harness.state.currentIndex.value).toBe(3)
})
it('should recover from mismatch on next manual index change', async () => {
const harness = createTestHarness({ totalCount: 5 })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
// Create mismatch
await simulator.runInterrupted(
5,
2,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
expect(harness.executionQueue.length).toBe(3)
// Manual index change clears queue
harness.resetExecutionState()
harness.state.setIndex(1)
expect(harness.executionQueue.length).toBe(0)
// Can execute normally again
await simulator.runSingle(
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
expect(harness.state.currentIndex.value).toBe(2)
})
})
describe('Edge Cases', () => {
it('should handle single item pool', async () => {
const harness = createTestHarness({ totalCount: 1 })
const simulator = new BatchQueueSimulator({ totalCount: 1 })
await simulator.runBatchQueue(
3,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// Should always stay at index 1
expect(harness.state.currentIndex.value).toBe(1)
})
it('should handle empty pool gracefully', async () => {
const harness = createTestHarness({ totalCount: 0 })
// beforeQueued should still work without errors
expect(() => harness.widget.beforeQueued()).not.toThrow()
})
it('should handle rapid sequential executions', async () => {
const harness = createTestHarness({ totalCount: 5 })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
// Run 20 sequential executions
await simulator.runSequential(
20,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// 20 % 5 = 0, so should wrap back to 1
// But first execution uses index 1, so after 20 executions we're at 21 % 5 = 1
expect(harness.state.currentIndex.value).toBe(1)
})
it('should preserve state consistency across many cycles', async () => {
const harness = createTestHarness({ totalCount: 3, repeatCount: 2 })
const simulator = new BatchQueueSimulator({ totalCount: 3 })
// Run 100 executions in batches
for (let batch = 0; batch < 10; batch++) {
await simulator.runBatchQueue(
10,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
}
// Verify state is still valid
expect(harness.state.currentIndex.value).toBeGreaterThanOrEqual(1)
expect(harness.state.currentIndex.value).toBeLessThanOrEqual(3)
expect(harness.state.repeatUsed.value).toBeGreaterThanOrEqual(1)
expect(harness.state.repeatUsed.value).toBeLessThanOrEqual(2)
expect(harness.executionQueue.length).toBe(0)
})
})
describe('Invariant Assertions', () => {
it('should always have valid index (1 <= currentIndex <= totalCount)', async () => {
const harness = createTestHarness({ totalCount: 5 })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
const checkInvariant = () => {
const { currentIndex, totalCount } = harness.state
if (totalCount.value > 0) {
expect(currentIndex.value).toBeGreaterThanOrEqual(1)
expect(currentIndex.value).toBeLessThanOrEqual(totalCount.value)
}
}
// Override onExecuted to check invariant after each execution
const originalOnExecuted = harness.node.onExecuted
harness.node.onExecuted = (output: any) => {
originalOnExecuted(output)
checkInvariant()
}
await simulator.runBatchQueue(
20,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
})
it('should always have repeatUsed <= repeatCount', async () => {
const harness = createTestHarness({ totalCount: 5, repeatCount: 3 })
const checkInvariant = () => {
expect(harness.state.repeatUsed.value).toBeLessThanOrEqual(harness.state.repeatCount.value)
}
// Check after each beforeQueued
for (let i = 0; i < 20; i++) {
harness.widget.beforeQueued()
checkInvariant()
}
})
it('should consume all execution contexts (queue empty after matching executions)', async () => {
const harness = createTestHarness({ totalCount: 5 })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
await simulator.runBatchQueue(
7,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
expect(harness.executionQueue.length).toBe(0)
})
})
describe('Batch vs Sequential Mode Equivalence', () => {
it('should produce same final state in both modes (basic cycle)', async () => {
// Create two identical harnesses
const batchHarness = createTestHarness({ totalCount: 5 })
const seqHarness = createTestHarness({ totalCount: 5 })
const batchSimulator = new BatchQueueSimulator({ totalCount: 5 })
const seqSimulator = new BatchQueueSimulator({ totalCount: 5 })
// Run same number of executions in different modes
await batchSimulator.runBatchQueue(
7,
{
beforeQueued: () => batchHarness.widget.beforeQueued(),
onExecuted: (output) => batchHarness.node.onExecuted(output)
},
() => batchHarness.getConfig()
)
await seqSimulator.runSequential(
7,
{
beforeQueued: () => seqHarness.widget.beforeQueued(),
onExecuted: (output) => seqHarness.node.onExecuted(output)
},
() => seqHarness.getConfig()
)
// Final state should be identical
expect(batchHarness.state.currentIndex.value).toBe(seqHarness.state.currentIndex.value)
expect(batchHarness.state.repeatUsed.value).toBe(seqHarness.state.repeatUsed.value)
expect(batchHarness.state.displayRepeatUsed.value).toBe(seqHarness.state.displayRepeatUsed.value)
})
it('should produce same final state in both modes (with repeat)', async () => {
const batchHarness = createTestHarness({ totalCount: 3, repeatCount: 2 })
const seqHarness = createTestHarness({ totalCount: 3, repeatCount: 2 })
const batchSimulator = new BatchQueueSimulator({ totalCount: 3 })
const seqSimulator = new BatchQueueSimulator({ totalCount: 3 })
await batchSimulator.runBatchQueue(
10,
{
beforeQueued: () => batchHarness.widget.beforeQueued(),
onExecuted: (output) => batchHarness.node.onExecuted(output)
},
() => batchHarness.getConfig()
)
await seqSimulator.runSequential(
10,
{
beforeQueued: () => seqHarness.widget.beforeQueued(),
onExecuted: (output) => seqHarness.node.onExecuted(output)
},
() => seqHarness.getConfig()
)
expect(batchHarness.state.currentIndex.value).toBe(seqHarness.state.currentIndex.value)
expect(batchHarness.state.repeatUsed.value).toBe(seqHarness.state.repeatUsed.value)
expect(batchHarness.state.displayRepeatUsed.value).toBe(seqHarness.state.displayRepeatUsed.value)
})
})
})

View File

@@ -0,0 +1,75 @@
/**
* Vitest test setup file
* Configures global mocks for ComfyUI modules and browser APIs
*/
import { vi } from 'vitest'
// Mock ComfyUI app module
vi.mock('../../../scripts/app.js', () => ({
app: {
graph: {
_nodes: []
},
registerExtension: vi.fn()
}
}))
// Mock ComfyUI loras_widget module
vi.mock('../loras_widget.js', () => ({
addLoraCard: vi.fn(),
removeLoraCard: vi.fn()
}))
// Mock ComfyUI autocomplete module
vi.mock('../autocomplete.js', () => ({
setupAutocomplete: vi.fn()
}))
// Global fetch mock - exported so tests can access it directly
export const mockFetch = vi.fn()
vi.stubGlobal('fetch', mockFetch)
// Helper to reset fetch mock between tests
export function resetFetchMock() {
mockFetch.mockReset()
// Re-stub global to ensure it's the same mock
vi.stubGlobal('fetch', mockFetch)
}
// Helper to setup fetch mock with default success response
export function setupFetchMock(response: unknown = { success: true, loras: [] }) {
// Ensure we're using the same mock
mockFetch.mockReset()
mockFetch.mockResolvedValue({
ok: true,
json: () => Promise.resolve(response)
})
vi.stubGlobal('fetch', mockFetch)
return mockFetch
}
// Helper to setup fetch mock with error response
export function setupFetchErrorMock(error: string = 'Network error') {
mockFetch.mockReset()
mockFetch.mockRejectedValue(new Error(error))
vi.stubGlobal('fetch', mockFetch)
return mockFetch
}
// Mock btoa for hashing (jsdom should have this, but just in case)
if (typeof global.btoa === 'undefined') {
vi.stubGlobal('btoa', (str: string) => Buffer.from(str).toString('base64'))
}
// Mock console methods to reduce noise in tests
vi.spyOn(console, 'log').mockImplementation(() => {})
vi.spyOn(console, 'error').mockImplementation(() => {})
vi.spyOn(console, 'warn').mockImplementation(() => {})
// Re-enable console for debugging when needed
export function enableConsole() {
vi.spyOn(console, 'log').mockRestore()
vi.spyOn(console, 'error').mockRestore()
vi.spyOn(console, 'warn').mockRestore()
}

View File

@@ -0,0 +1,230 @@
/**
* BatchQueueSimulator - Simulates ComfyUI's two execution modes
*
* ComfyUI has two distinct execution patterns:
* 1. Batch Queue Mode: ALL beforeQueued calls happen BEFORE any onExecuted calls
* 2. Sequential Mode: beforeQueued and onExecuted interleave for each prompt
*
* This simulator helps test how the widget behaves in both modes.
*/
import type { CyclerConfig } from '@/composables/types'
export interface ExecutionHooks {
/** Called when a prompt is queued (before execution) */
beforeQueued: () => void
/** Called when execution completes with output */
onExecuted: (output: unknown) => void
}
export interface SimulatorOptions {
/** Total number of LoRAs in the pool */
totalCount: number
/** Function to generate output for each execution */
generateOutput?: (executionIndex: number, config: CyclerConfig) => unknown
}
/**
* Creates execution output based on the current state
*/
function defaultGenerateOutput(executionIndex: number, config: CyclerConfig) {
// Calculate what the next index would be after this execution
let nextIdx = (config.execution_index ?? config.current_index) + 1
if (nextIdx > config.total_count) {
nextIdx = 1
}
return {
next_index: [nextIdx],
total_count: [config.total_count],
next_lora_name: [`lora${nextIdx}.safetensors`],
next_lora_filename: [`lora${nextIdx}.safetensors`],
current_lora_name: [`lora${config.execution_index ?? config.current_index}.safetensors`],
current_lora_filename: [`lora${config.execution_index ?? config.current_index}.safetensors`]
}
}
export class BatchQueueSimulator {
private executionCount = 0
private options: Required<SimulatorOptions>
constructor(options: SimulatorOptions) {
this.options = {
totalCount: options.totalCount,
generateOutput: options.generateOutput ?? defaultGenerateOutput
}
}
/**
* Reset the simulator state
*/
reset() {
this.executionCount = 0
}
/**
* Simulates Batch Queue Mode execution
*
* In this mode, ComfyUI queues multiple prompts at once:
* - ALL beforeQueued() calls happen first (for all prompts in the batch)
* - THEN all onExecuted() calls happen (as each prompt completes)
*
* This is the mode used when queueing multiple prompts from the UI.
*
* @param count Number of prompts to simulate
* @param hooks The widget's execution hooks
* @param getConfig Function to get current widget config state
*/
async runBatchQueue(
count: number,
hooks: ExecutionHooks,
getConfig: () => CyclerConfig
): Promise<void> {
// Phase 1: All beforeQueued calls (snapshot configs)
const snapshotConfigs: CyclerConfig[] = []
for (let i = 0; i < count; i++) {
hooks.beforeQueued()
// Snapshot the config after beforeQueued updates it
snapshotConfigs.push({ ...getConfig() })
}
// Phase 2: All onExecuted calls (in order)
for (let i = 0; i < count; i++) {
const config = snapshotConfigs[i]
const output = this.options.generateOutput(this.executionCount, config)
hooks.onExecuted(output)
this.executionCount++
}
}
/**
* Simulates Sequential Mode execution
*
* In this mode, execution is one-at-a-time:
* - beforeQueued() is called
* - onExecuted() is called
* - Then the next prompt's beforeQueued() is called
* - And so on...
*
* This is the mode used in API-driven execution or single prompt queuing.
*
* @param count Number of prompts to simulate
* @param hooks The widget's execution hooks
* @param getConfig Function to get current widget config state
*/
async runSequential(
count: number,
hooks: ExecutionHooks,
getConfig: () => CyclerConfig
): Promise<void> {
for (let i = 0; i < count; i++) {
// Queue the prompt
hooks.beforeQueued()
const config = { ...getConfig() }
// Execute it immediately
const output = this.options.generateOutput(this.executionCount, config)
hooks.onExecuted(output)
this.executionCount++
}
}
/**
* Simulates a single execution (queue + execute)
*/
async runSingle(
hooks: ExecutionHooks,
getConfig: () => CyclerConfig
): Promise<void> {
return this.runSequential(1, hooks, getConfig)
}
/**
* Simulates interrupted execution (some beforeQueued calls without matching onExecuted)
*
* This can happen if the user cancels execution mid-batch.
*
* @param queuedCount Number of prompts queued (beforeQueued called)
* @param executedCount Number of prompts that actually executed
* @param hooks The widget's execution hooks
* @param getConfig Function to get current widget config state
*/
async runInterrupted(
queuedCount: number,
executedCount: number,
hooks: ExecutionHooks,
getConfig: () => CyclerConfig
): Promise<void> {
if (executedCount > queuedCount) {
throw new Error('executedCount cannot be greater than queuedCount')
}
// Phase 1: All beforeQueued calls
const snapshotConfigs: CyclerConfig[] = []
for (let i = 0; i < queuedCount; i++) {
hooks.beforeQueued()
snapshotConfigs.push({ ...getConfig() })
}
// Phase 2: Only some onExecuted calls
for (let i = 0; i < executedCount; i++) {
const config = snapshotConfigs[i]
const output = this.options.generateOutput(this.executionCount, config)
hooks.onExecuted(output)
this.executionCount++
}
}
}
/**
* Helper to create execution hooks from a widget-like object
*/
export function createHooksFromWidget(widget: {
beforeQueued?: () => void
}, node: {
onExecuted?: (output: unknown) => void
}): ExecutionHooks {
return {
beforeQueued: () => widget.beforeQueued?.(),
onExecuted: (output) => node.onExecuted?.(output)
}
}
/**
* Tracks index history during simulation for assertions
*/
export class IndexTracker {
public indexHistory: number[] = []
public repeatHistory: number[] = []
public pauseHistory: boolean[] = []
reset() {
this.indexHistory = []
this.repeatHistory = []
this.pauseHistory = []
}
record(config: CyclerConfig) {
this.indexHistory.push(config.current_index)
this.repeatHistory.push(config.repeat_used)
this.pauseHistory.push(config.is_paused)
}
/**
* Get the sequence of indices that were actually used for execution
*/
getExecutionIndices(): number[] {
return this.indexHistory
}
/**
* Verify that indices cycle correctly through totalCount
*/
verifyCyclePattern(expectedPattern: number[]): boolean {
if (this.indexHistory.length !== expectedPattern.length) {
return false
}
return this.indexHistory.every((idx, i) => idx === expectedPattern[i])
}
}

View File

@@ -19,6 +19,6 @@
"@/*": ["./src/*"]
}
},
"include": ["src/**/*.ts", "src/**/*.tsx", "src/**/*.vue"],
"include": ["src/**/*.ts", "src/**/*.tsx", "src/**/*.vue", "tests/**/*.ts"],
"references": [{ "path": "./tsconfig.node.json" }]
}

View File

@@ -22,6 +22,7 @@ export default defineConfig({
rollupOptions: {
external: [
'../../../scripts/app.js',
'../../../scripts/api.js',
'../loras_widget.js',
'../autocomplete.js'
],

View File

@@ -0,0 +1,25 @@
import { defineConfig } from 'vitest/config'
import vue from '@vitejs/plugin-vue'
import { resolve } from 'path'
export default defineConfig({
plugins: [vue()],
resolve: {
alias: {
'@': resolve(__dirname, './src')
}
},
test: {
globals: true,
environment: 'jsdom',
setupFiles: ['./tests/setup.ts'],
include: ['tests/**/*.test.ts'],
coverage: {
provider: 'v8',
reporter: ['text', 'html', 'json'],
reportsDirectory: './coverage',
include: ['src/**/*.ts', 'src/**/*.vue'],
exclude: ['src/main.ts', 'src/vite-env.d.ts']
}
}
})

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long