feat(testing): enhance test configuration and add Vue component tests

- Update package.json test script to run both JS and Vue tests
- Simplify LoraCyclerLM output by removing redundant lora name fallback
- Extend Vitest config to include TypeScript test files
- Add Vue testing dependencies and setup for component testing
- Implement comprehensive test suite for BatchQueueSimulator component
- Add test setup file with global mocks for ComfyUI modules
This commit is contained in:
Will Miao
2026-02-01 00:59:50 +08:00
parent ffcfe5ea3e
commit e17d6c8ebf
20 changed files with 4931 additions and 159 deletions

View File

@@ -0,0 +1,634 @@
/**
* Unit tests for useLoraCyclerState composable
*
* Tests pure state transitions and index calculations in isolation.
*/
import { describe, it, expect, beforeEach, vi } from 'vitest'
import { useLoraCyclerState } from '@/composables/useLoraCyclerState'
import {
createMockWidget,
createMockCyclerConfig,
createMockPoolConfig
} from '../fixtures/mockConfigs'
import { setupFetchMock, resetFetchMock } from '../setup'
// Exercises the composable's pure state transitions, index math, and
// API-backed refresh logic against a mock widget (see ../fixtures/mockConfigs).
describe('useLoraCyclerState', () => {
  beforeEach(() => {
    // Fresh fetch mock per test so queued responses/call counts don't leak.
    resetFetchMock()
  })
  describe('Initial State', () => {
    it('should initialize with default values', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      expect(state.currentIndex.value).toBe(1)
      expect(state.totalCount.value).toBe(0)
      expect(state.poolConfigHash.value).toBe('')
      expect(state.modelStrength.value).toBe(1.0)
      expect(state.clipStrength.value).toBe(1.0)
      expect(state.useCustomClipRange.value).toBe(false)
      expect(state.sortBy.value).toBe('filename')
      expect(state.executionIndex.value).toBeNull()
      expect(state.nextIndex.value).toBeNull()
      expect(state.repeatCount.value).toBe(1)
      expect(state.repeatUsed.value).toBe(0)
      expect(state.displayRepeatUsed.value).toBe(0)
      expect(state.isPaused.value).toBe(false)
    })
  })
  describe('restoreFromConfig', () => {
    it('should restore state from config object', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      const config = createMockCyclerConfig({
        current_index: 3,
        total_count: 10,
        model_strength: 0.8,
        clip_strength: 0.6,
        use_same_clip_strength: false,
        repeat_count: 2,
        repeat_used: 1,
        is_paused: true
      })
      state.restoreFromConfig(config)
      expect(state.currentIndex.value).toBe(3)
      expect(state.totalCount.value).toBe(10)
      expect(state.modelStrength.value).toBe(0.8)
      expect(state.clipStrength.value).toBe(0.6)
      expect(state.useCustomClipRange.value).toBe(true) // inverted from use_same_clip_strength
      expect(state.repeatCount.value).toBe(2)
      expect(state.repeatUsed.value).toBe(1)
      expect(state.isPaused.value).toBe(true)
    })
    it('should handle missing optional fields with defaults', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      // Minimal config: only the required fields, no execution_index/next_index.
      state.restoreFromConfig({
        current_index: 5,
        total_count: 10,
        pool_config_hash: '',
        model_strength: 1.0,
        clip_strength: 1.0,
        use_same_clip_strength: true,
        sort_by: 'filename',
        current_lora_name: '',
        current_lora_filename: '',
        repeat_count: 1,
        repeat_used: 0,
        is_paused: false
      })
      expect(state.currentIndex.value).toBe(5)
      expect(state.repeatCount.value).toBe(1)
      expect(state.isPaused.value).toBe(false)
    })
    it('should not restore execution_index and next_index (transient values)', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      // Set execution indices
      state.executionIndex.value = 2
      state.nextIndex.value = 3
      // Restore from config (these fields in config should be ignored)
      state.restoreFromConfig(createMockCyclerConfig({
        execution_index: 5,
        next_index: 6
      }))
      // Execution indices should remain unchanged
      expect(state.executionIndex.value).toBe(2)
      expect(state.nextIndex.value).toBe(3)
    })
  })
  describe('buildConfig', () => {
    it('should build config object from current state', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.currentIndex.value = 3
      state.totalCount.value = 10
      state.modelStrength.value = 0.8
      state.repeatCount.value = 2
      state.repeatUsed.value = 1
      state.isPaused.value = true
      const config = state.buildConfig()
      expect(config.current_index).toBe(3)
      expect(config.total_count).toBe(10)
      expect(config.model_strength).toBe(0.8)
      expect(config.repeat_count).toBe(2)
      expect(config.repeat_used).toBe(1)
      expect(config.is_paused).toBe(true)
    })
  })
  describe('setIndex', () => {
    it('should set index within valid range', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.totalCount.value = 10
      state.setIndex(5)
      expect(state.currentIndex.value).toBe(5)
      // Boundary values 1 and totalCount are both valid.
      state.setIndex(1)
      expect(state.currentIndex.value).toBe(1)
      state.setIndex(10)
      expect(state.currentIndex.value).toBe(10)
    })
    it('should not set index outside valid range', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.totalCount.value = 10
      state.currentIndex.value = 5
      state.setIndex(0)
      expect(state.currentIndex.value).toBe(5) // unchanged
      state.setIndex(11)
      expect(state.currentIndex.value).toBe(5) // unchanged
      state.setIndex(-1)
      expect(state.currentIndex.value).toBe(5) // unchanged
    })
  })
  describe('resetIndex', () => {
    it('should reset index to 1 and clear repeatUsed and displayRepeatUsed', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.currentIndex.value = 5
      state.repeatUsed.value = 2
      state.displayRepeatUsed.value = 2
      state.isPaused.value = true
      state.resetIndex()
      expect(state.currentIndex.value).toBe(1)
      expect(state.repeatUsed.value).toBe(0)
      expect(state.displayRepeatUsed.value).toBe(0)
      expect(state.isPaused.value).toBe(true) // isPaused should NOT be reset
    })
  })
  describe('togglePause', () => {
    it('should toggle pause state', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      expect(state.isPaused.value).toBe(false)
      state.togglePause()
      expect(state.isPaused.value).toBe(true)
      state.togglePause()
      expect(state.isPaused.value).toBe(false)
    })
  })
  describe('generateNextIndex', () => {
    it('should shift indices correctly', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.totalCount.value = 5
      state.currentIndex.value = 1
      state.nextIndex.value = 2
      // First call: executionIndex becomes 2 (previous nextIndex), nextIndex becomes 3
      state.generateNextIndex()
      expect(state.executionIndex.value).toBe(2)
      expect(state.nextIndex.value).toBe(3)
      // Second call: executionIndex becomes 3, nextIndex becomes 4
      state.generateNextIndex()
      expect(state.executionIndex.value).toBe(3)
      expect(state.nextIndex.value).toBe(4)
    })
    it('should wrap index from totalCount to 1', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.totalCount.value = 5
      state.nextIndex.value = 5 // At the last index
      state.generateNextIndex()
      expect(state.executionIndex.value).toBe(5)
      expect(state.nextIndex.value).toBe(1) // Wrapped to 1
    })
    it('should use currentIndex when nextIndex is null', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.totalCount.value = 5
      state.currentIndex.value = 3
      state.nextIndex.value = null
      state.generateNextIndex()
      // executionIndex becomes previous nextIndex (null)
      expect(state.executionIndex.value).toBeNull()
      // nextIndex is calculated from currentIndex (3) -> 4
      expect(state.nextIndex.value).toBe(4)
    })
  })
  describe('initializeNextIndex', () => {
    it('should initialize nextIndex to currentIndex + 1 when null', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.totalCount.value = 5
      state.currentIndex.value = 1
      state.nextIndex.value = null
      state.initializeNextIndex()
      expect(state.nextIndex.value).toBe(2)
    })
    it('should wrap nextIndex when currentIndex is at totalCount', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.totalCount.value = 5
      state.currentIndex.value = 5
      state.nextIndex.value = null
      state.initializeNextIndex()
      expect(state.nextIndex.value).toBe(1) // Wrapped
    })
    it('should not change nextIndex if already set', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.totalCount.value = 5
      state.currentIndex.value = 1
      state.nextIndex.value = 4
      state.initializeNextIndex()
      expect(state.nextIndex.value).toBe(4) // Unchanged
    })
  })
  describe('Index Wrapping Edge Cases', () => {
    it('should handle single item pool', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.totalCount.value = 1
      state.currentIndex.value = 1
      state.nextIndex.value = null
      state.initializeNextIndex()
      expect(state.nextIndex.value).toBe(1) // Wraps back to 1
    })
    it('should handle zero total count gracefully', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.totalCount.value = 0
      state.currentIndex.value = 1
      state.nextIndex.value = null
      state.initializeNextIndex()
      // With totalCount <= 0 the wrap check never triggers, so the
      // increment proceeds unclamped: 1 + 1 = 2.
      expect(state.nextIndex.value).toBe(2)
    })
  })
  describe('hashPoolConfig', () => {
    it('should generate consistent hash for same config', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      const config1 = createMockPoolConfig()
      const config2 = createMockPoolConfig()
      const hash1 = state.hashPoolConfig(config1)
      const hash2 = state.hashPoolConfig(config2)
      expect(hash1).toBe(hash2)
    })
    it('should generate different hash for different configs', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      const config1 = createMockPoolConfig({
        filters: {
          baseModels: ['SD 1.5'],
          tags: { include: [], exclude: [] },
          folders: { include: [], exclude: [] },
          license: { noCreditRequired: false, allowSelling: false }
        }
      })
      const config2 = createMockPoolConfig({
        filters: {
          baseModels: ['SDXL'],
          tags: { include: [], exclude: [] },
          folders: { include: [], exclude: [] },
          license: { noCreditRequired: false, allowSelling: false }
        }
      })
      const hash1 = state.hashPoolConfig(config1)
      const hash2 = state.hashPoolConfig(config2)
      expect(hash1).not.toBe(hash2)
    })
    it('should return empty string for null config', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      expect(state.hashPoolConfig(null)).toBe('')
    })
    it('should return empty string for config without filters', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      const config = { version: 1, preview: { matchCount: 0, lastUpdated: 0 } } as any
      expect(state.hashPoolConfig(config)).toBe('')
    })
  })
  describe('Clip Strength Synchronization', () => {
    it('should sync clipStrength with modelStrength when useCustomClipRange is false', async () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.useCustomClipRange.value = false
      state.modelStrength.value = 0.5
      // Wait for Vue reactivity
      await vi.waitFor(() => {
        expect(state.clipStrength.value).toBe(0.5)
      })
    })
    it('should not sync clipStrength when useCustomClipRange is true', async () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.useCustomClipRange.value = true
      state.clipStrength.value = 0.7
      state.modelStrength.value = 0.5
      // clipStrength should remain unchanged
      await vi.waitFor(() => {
        expect(state.clipStrength.value).toBe(0.7)
      })
    })
  })
  describe('Widget Value Synchronization', () => {
    it('should update widget.value when state changes', async () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.currentIndex.value = 3
      state.repeatCount.value = 2
      // Wait for Vue reactivity
      await vi.waitFor(() => {
        expect(widget.value?.current_index).toBe(3)
        expect(widget.value?.repeat_count).toBe(2)
      })
    })
  })
  describe('Repeat Logic State', () => {
    it('should track repeatUsed correctly', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.repeatCount.value = 3
      expect(state.repeatUsed.value).toBe(0)
      state.repeatUsed.value = 1
      expect(state.repeatUsed.value).toBe(1)
      state.repeatUsed.value = 3
      expect(state.repeatUsed.value).toBe(3)
    })
  })
  describe('fetchCyclerList', () => {
    it('should call API and return lora list', async () => {
      const mockLoras = [
        { file_name: 'lora1.safetensors', model_name: 'LoRA 1' },
        { file_name: 'lora2.safetensors', model_name: 'LoRA 2' }
      ]
      setupFetchMock({ success: true, loras: mockLoras })
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      const result = await state.fetchCyclerList(null)
      expect(result).toEqual(mockLoras)
      expect(state.isLoading.value).toBe(false)
    })
    it('should include pool config filters in request', async () => {
      const mockFetch = setupFetchMock({ success: true, loras: [] })
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      const poolConfig = createMockPoolConfig()
      await state.fetchCyclerList(poolConfig)
      expect(mockFetch).toHaveBeenCalledWith(
        '/api/lm/loras/cycler-list',
        expect.objectContaining({
          method: 'POST',
          body: expect.stringContaining('pool_config')
        })
      )
    })
    it('should set isLoading during fetch', async () => {
      // Hold the fetch open so we can observe the in-flight loading flag.
      let resolvePromise: (value: unknown) => void
      const pendingPromise = new Promise(resolve => {
        resolvePromise = resolve
      })
      // Use mockFetch from setup instead of overriding global
      const { mockFetch } = await import('../setup')
      mockFetch.mockReset()
      mockFetch.mockReturnValue(pendingPromise)
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      const fetchPromise = state.fetchCyclerList(null)
      expect(state.isLoading.value).toBe(true)
      // Resolve the fetch
      resolvePromise!({
        ok: true,
        json: () => Promise.resolve({ success: true, loras: [] })
      })
      await fetchPromise
      expect(state.isLoading.value).toBe(false)
    })
  })
  describe('refreshList', () => {
    it('should update totalCount from API response', async () => {
      const mockLoras = [
        { file_name: 'lora1.safetensors', model_name: 'LoRA 1' },
        { file_name: 'lora2.safetensors', model_name: 'LoRA 2' },
        { file_name: 'lora3.safetensors', model_name: 'LoRA 3' }
      ]
      // Reset and setup fresh mock
      resetFetchMock()
      setupFetchMock({ success: true, loras: mockLoras })
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      await state.refreshList(null)
      expect(state.totalCount.value).toBe(3)
    })
    it('should reset index to 1 when pool config hash changes', async () => {
      resetFetchMock()
      setupFetchMock({ success: true, loras: [{ file_name: 'lora1.safetensors', model_name: 'LoRA 1' }] })
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      // Set initial state
      state.currentIndex.value = 5
      state.poolConfigHash.value = 'old-hash'
      // Refresh with new config (different hash)
      const newConfig = createMockPoolConfig({
        filters: {
          baseModels: ['SDXL'],
          tags: { include: [], exclude: [] },
          folders: { include: [], exclude: [] },
          license: { noCreditRequired: false, allowSelling: false }
        }
      })
      await state.refreshList(newConfig)
      expect(state.currentIndex.value).toBe(1)
    })
    it('should clamp index when totalCount decreases', async () => {
      // Setup mock first, then create state
      resetFetchMock()
      setupFetchMock({
        success: true,
        loras: [
          { file_name: 'lora1.safetensors', model_name: 'LoRA 1' },
          { file_name: 'lora2.safetensors', model_name: 'LoRA 2' },
          { file_name: 'lora3.safetensors', model_name: 'LoRA 3' }
        ]
      })
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      // Set initial state with high index
      state.currentIndex.value = 10
      state.totalCount.value = 10
      await state.refreshList(null)
      expect(state.totalCount.value).toBe(3)
      expect(state.currentIndex.value).toBe(3) // Clamped to max
    })
    it('should update currentLoraName and currentLoraFilename', async () => {
      resetFetchMock()
      setupFetchMock({
        success: true,
        loras: [
          { file_name: 'lora1.safetensors', model_name: 'LoRA 1' },
          { file_name: 'lora2.safetensors', model_name: 'LoRA 2' }
        ]
      })
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      // Set totalCount first so setIndex works, then set index
      state.totalCount.value = 2
      state.currentIndex.value = 2
      await state.refreshList(null)
      expect(state.currentLoraFilename.value).toBe('lora2.safetensors')
    })
    it('should handle empty list gracefully', async () => {
      resetFetchMock()
      setupFetchMock({ success: true, loras: [] })
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.currentIndex.value = 5
      state.totalCount.value = 5
      await state.refreshList(null)
      expect(state.totalCount.value).toBe(0)
      // With an empty list the index clamps to Math.max(1, totalCount),
      // which is 1 when totalCount is 0.
      expect(state.currentIndex.value).toBe(1)
      expect(state.currentLoraName.value).toBe('')
      expect(state.currentLoraFilename.value).toBe('')
    })
  })
  describe('isClipStrengthDisabled computed', () => {
    it('should return true when useCustomClipRange is false', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.useCustomClipRange.value = false
      expect(state.isClipStrengthDisabled.value).toBe(true)
    })
    it('should return false when useCustomClipRange is true', () => {
      const widget = createMockWidget()
      const state = useLoraCyclerState(widget)
      state.useCustomClipRange.value = true
      expect(state.isClipStrengthDisabled.value).toBe(false)
    })
  })
})

View File

@@ -0,0 +1,175 @@
/**
* Test fixtures for LoRA Cycler testing
*/
import type { CyclerConfig, LoraPoolConfig } from '@/composables/types'
import type { CyclerLoraItem } from '@/composables/useLoraCyclerState'
/**
 * Builds a CyclerConfig populated with sensible test defaults.
 *
 * Any field may be overridden per test via `overrides`; unspecified
 * fields keep the defaults below.
 */
export function createMockCyclerConfig(overrides: Partial<CyclerConfig> = {}): CyclerConfig {
  // Baseline config: index 1 of 5, full strengths, no repeats, not paused.
  const defaults: CyclerConfig = {
    current_index: 1,
    total_count: 5,
    pool_config_hash: '',
    model_strength: 1.0,
    clip_strength: 1.0,
    use_same_clip_strength: true,
    sort_by: 'filename',
    current_lora_name: 'lora1.safetensors',
    current_lora_filename: 'lora1.safetensors',
    execution_index: null,
    next_index: null,
    repeat_count: 1,
    repeat_used: 0,
    is_paused: false
  }
  return { ...defaults, ...overrides }
}
/**
 * Builds a LoraPoolConfig for testing, defaulting to an SD 1.5 filter
 * with no tag/folder/license restrictions. Fields can be replaced via
 * `overrides`; `preview.lastUpdated` is stamped at call time.
 */
export function createMockPoolConfig(overrides: Partial<LoraPoolConfig> = {}): LoraPoolConfig {
  const base: LoraPoolConfig = {
    version: 1,
    filters: {
      baseModels: ['SD 1.5'],
      tags: { include: [], exclude: [] },
      folders: { include: [], exclude: [] },
      license: {
        noCreditRequired: false,
        allowSelling: false
      }
    },
    preview: { matchCount: 10, lastUpdated: Date.now() }
  }
  return { ...base, ...overrides }
}
/**
 * Builds `count` mock LoRA items named lora1..loraN, each with a
 * matching `.safetensors` file name and a human-readable model name.
 */
export function createMockLoraList(count: number = 5): CyclerLoraItem[] {
  const items: CyclerLoraItem[] = []
  for (let n = 1; n <= count; n++) {
    items.push({
      file_name: `lora${n}.safetensors`,
      model_name: `LoRA Model ${n}`
    })
  }
  return items
}
/**
 * Builds a minimal widget stand-in for useLoraCyclerState: just a
 * `value` slot (optionally pre-seeded) and an unset `callback`.
 */
export function createMockWidget(initialValue?: CyclerConfig) {
  const widget = {
    value: initialValue,
    callback: undefined as ((v: CyclerConfig) => void) | undefined
  }
  return widget
}
/**
 * Builds a minimal ComfyUI node stand-in for component integration
 * tests. Defaults: id 1, no pool config, empty inputs/widgets, no graph.
 */
export function createMockNode(options: {
  id?: number
  poolConfig?: LoraPoolConfig | null
} = {}) {
  const nodeId = options.id ?? 1
  const pool = options.poolConfig ?? null
  return {
    id: nodeId,
    inputs: [],
    widgets: [],
    graph: null,
    // Mirrors the real node API used by the widget to read its pool filter.
    getPoolConfig: () => pool,
    onExecuted: undefined as ((output: unknown) => void) | undefined
  }
}
/**
 * Builds a mock backend execution output. Each field is wrapped in a
 * single-element array, matching ComfyUI's list-valued node outputs.
 */
export function createMockExecutionOutput(options: {
  nextIndex?: number
  totalCount?: number
  nextLoraName?: string
  nextLoraFilename?: string
  currentLoraName?: string
  currentLoraFilename?: string
} = {}) {
  return {
    next_index: [options.nextIndex ?? 2],
    total_count: [options.totalCount ?? 5],
    next_lora_name: [options.nextLoraName ?? 'lora2.safetensors'],
    next_lora_filename: [options.nextLoraFilename ?? 'lora2.safetensors'],
    current_lora_name: [options.currentLoraName ?? 'lora1.safetensors'],
    current_lora_filename: [options.currentLoraFilename ?? 'lora1.safetensors']
  }
}
/**
 * Pre-built LoRA lists for common test pool sizes.
 * All lists share the lora{N}.safetensors naming from createMockLoraList.
 */
export const SAMPLE_LORA_LISTS = {
  // 3 LoRAs for simple cycling tests
  small: createMockLoraList(3),
  // 5 LoRAs for standard tests
  medium: createMockLoraList(5),
  // 10 LoRAs for larger tests
  large: createMockLoraList(10),
  // Empty list for edge case testing
  empty: [] as CyclerLoraItem[],
  // Single LoRA for edge case testing
  single: createMockLoraList(1)
}
/**
 * Pre-built pool configs covering the filter shapes the tests exercise:
 * base-model-only filters, a tag filter, and the null (unconfigured) case.
 */
export const SAMPLE_POOL_CONFIGS = {
  // Default SD 1.5 filter
  sd15: createMockPoolConfig({
    filters: {
      baseModels: ['SD 1.5'],
      tags: { include: [], exclude: [] },
      folders: { include: [], exclude: [] },
      license: { noCreditRequired: false, allowSelling: false }
    }
  }),
  // SDXL filter
  sdxl: createMockPoolConfig({
    filters: {
      baseModels: ['SDXL'],
      tags: { include: [], exclude: [] },
      folders: { include: [], exclude: [] },
      license: { noCreditRequired: false, allowSelling: false }
    }
  }),
  // Filter with tags
  withTags: createMockPoolConfig({
    filters: {
      baseModels: ['SD 1.5'],
      tags: { include: ['anime', 'style'], exclude: ['realistic'] },
      folders: { include: [], exclude: [] },
      license: { noCreditRequired: false, allowSelling: false }
    }
  }),
  // Empty/null config
  empty: null as LoraPoolConfig | null
}

View File

@@ -0,0 +1,910 @@
/**
* Integration tests for batch queue execution scenarios
*
* These tests simulate ComfyUI's execution modes to verify correct LoRA cycling behavior.
*/
import { describe, it, expect, beforeEach, vi } from 'vitest'
import { useLoraCyclerState } from '@/composables/useLoraCyclerState'
import type { CyclerConfig } from '@/composables/types'
import {
createMockWidget,
createMockCyclerConfig,
createMockLoraList,
createMockPoolConfig
} from '../fixtures/mockConfigs'
import { setupFetchMock, resetFetchMock } from '../setup'
import { BatchQueueSimulator, IndexTracker } from '../utils/BatchQueueSimulator'
/**
 * Creates a test harness that mimics the LoraCyclerWidget's behavior.
 *
 * Wires a mock widget + useLoraCyclerState to hand-rolled beforeQueued /
 * onExecuted hooks so tests can replay ComfyUI's queue lifecycle. The
 * execution queue below is the crux: in batch mode all beforeQueued calls
 * happen before any onExecuted, so each queued context snapshots the
 * state that must be applied when its matching execution completes.
 */
function createTestHarness(options: {
  totalCount?: number
  initialIndex?: number
  repeatCount?: number
  isPaused?: boolean
} = {}) {
  const {
    totalCount = 5,
    initialIndex = 1,
    repeatCount = 1,
    isPaused = false
  } = options
  const widget = createMockWidget() as any
  const state = useLoraCyclerState(widget)
  // Initialize state
  state.totalCount.value = totalCount
  state.currentIndex.value = initialIndex
  state.repeatCount.value = repeatCount
  state.isPaused.value = isPaused
  // Track if first execution (symbol key avoids colliding with widget fields)
  const HAS_EXECUTED = Symbol('HAS_EXECUTED')
  widget[HAS_EXECUTED] = false
  // Execution queue for batch synchronization
  interface ExecutionContext {
    isPaused: boolean
    repeatUsed: number
    repeatCount: number
    shouldAdvanceDisplay: boolean
    displayRepeatUsed: number // Value to show in UI after completion
  }
  const executionQueue: ExecutionContext[] = []
  // beforeQueued hook (mirrors LoraCyclerWidget.vue logic)
  widget.beforeQueued = () => {
    if (state.isPaused.value) {
      // Paused executions are queued but advance nothing.
      executionQueue.push({
        isPaused: true,
        repeatUsed: state.repeatUsed.value,
        repeatCount: state.repeatCount.value,
        shouldAdvanceDisplay: false,
        displayRepeatUsed: state.displayRepeatUsed.value // Keep current display value when paused
      })
      // CRITICAL: Clear execution_index when paused to force backend to use current_index
      const pausedConfig = state.buildConfig()
      pausedConfig.execution_index = null
      widget.value = pausedConfig
      return
    }
    if (widget[HAS_EXECUTED]) {
      // Subsequent queues: consume a repeat, or advance the cycle
      // once the repeat budget for the current LoRA is spent.
      if (state.repeatUsed.value < state.repeatCount.value) {
        state.repeatUsed.value++
      } else {
        state.repeatUsed.value = 1
        state.generateNextIndex()
      }
    } else {
      // First queue after (re)initialization: seed nextIndex.
      state.repeatUsed.value = 1
      state.initializeNextIndex()
      widget[HAS_EXECUTED] = true
    }
    const shouldAdvanceDisplay = state.repeatUsed.value >= state.repeatCount.value
    // Calculate the display value to show after this execution completes
    // When advancing to a new LoRA: reset to 0 (fresh start for new LoRA)
    // When repeating same LoRA: show current repeat step
    const displayRepeatUsed = shouldAdvanceDisplay ? 0 : state.repeatUsed.value
    executionQueue.push({
      isPaused: false,
      repeatUsed: state.repeatUsed.value,
      repeatCount: state.repeatCount.value,
      shouldAdvanceDisplay,
      displayRepeatUsed
    })
    widget.value = state.buildConfig()
  }
  // Mock node with onExecuted
  const node = {
    id: 1,
    onExecuted: (output: any) => {
      // Pair this completion with the oldest queued context (FIFO).
      const context = executionQueue.shift()
      // Fallback path when no context was queued: decide from live state.
      const shouldAdvanceDisplay = context
        ? context.shouldAdvanceDisplay
        : (!state.isPaused.value && state.repeatUsed.value >= state.repeatCount.value)
      // Update displayRepeatUsed (deferred like index updates)
      if (context) {
        state.displayRepeatUsed.value = context.displayRepeatUsed
      }
      // Backend outputs may be list-wrapped ([v]) or bare; handle both.
      if (shouldAdvanceDisplay && output?.next_index !== undefined) {
        const val = Array.isArray(output.next_index) ? output.next_index[0] : output.next_index
        state.currentIndex.value = val
      }
      if (output?.total_count !== undefined) {
        const val = Array.isArray(output.total_count) ? output.total_count[0] : output.total_count
        state.totalCount.value = val
      }
      if (shouldAdvanceDisplay) {
        if (output?.next_lora_name !== undefined) {
          const val = Array.isArray(output.next_lora_name) ? output.next_lora_name[0] : output.next_lora_name
          state.currentLoraName.value = val
        }
        if (output?.next_lora_filename !== undefined) {
          const val = Array.isArray(output.next_lora_filename) ? output.next_lora_filename[0] : output.next_lora_filename
          state.currentLoraFilename.value = val
        }
      }
    }
  }
  // Reset execution state (mimics manual index change)
  const resetExecutionState = () => {
    widget[HAS_EXECUTED] = false
    state.executionIndex.value = null
    state.nextIndex.value = null
    executionQueue.length = 0
  }
  return {
    widget,
    state,
    node,
    executionQueue,
    resetExecutionState,
    getConfig: () => state.buildConfig(),
    HAS_EXECUTED
  }
}
describe('Batch Queue Integration Tests', () => {
beforeEach(() => {
resetFetchMock()
})
describe('Basic Cycling', () => {
it('should cycle through N LoRAs in batch of N (batch queue mode)', async () => {
const harness = createTestHarness({ totalCount: 3 })
const simulator = new BatchQueueSimulator({ totalCount: 3 })
// Simulate batch queue of 3 prompts
await simulator.runBatchQueue(
3,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// After cycling through all 3, currentIndex should wrap back to 1
// First execution: index 1, next becomes 2
// Second execution: index 2, next becomes 3
// Third execution: index 3, next becomes 1
expect(harness.state.currentIndex.value).toBe(1)
})
it('should cycle through N LoRAs in batch of N (sequential mode)', async () => {
const harness = createTestHarness({ totalCount: 3 })
const simulator = new BatchQueueSimulator({ totalCount: 3 })
// Simulate sequential execution of 3 prompts
await simulator.runSequential(
3,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// Same result as batch mode
expect(harness.state.currentIndex.value).toBe(1)
})
it('should handle partial cycle (batch of 2 in pool of 5)', async () => {
const harness = createTestHarness({ totalCount: 5, initialIndex: 1 })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
await simulator.runBatchQueue(
2,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// After 2 executions starting from 1: 1 -> 2 -> 3
expect(harness.state.currentIndex.value).toBe(3)
})
})
describe('Repeat Functionality', () => {
it('should repeat each LoRA repeatCount times', async () => {
const harness = createTestHarness({ totalCount: 3, repeatCount: 2 })
const simulator = new BatchQueueSimulator({ totalCount: 3 })
// With repeatCount=2, need 6 executions to cycle through 3 LoRAs
await simulator.runBatchQueue(
6,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// Should have cycled back to beginning
expect(harness.state.currentIndex.value).toBe(1)
})
it('should track repeatUsed correctly during batch', async () => {
const harness = createTestHarness({ totalCount: 3, repeatCount: 3 })
// First beforeQueued: repeatUsed = 1
harness.widget.beforeQueued()
expect(harness.state.repeatUsed.value).toBe(1)
// Second beforeQueued: repeatUsed = 2
harness.widget.beforeQueued()
expect(harness.state.repeatUsed.value).toBe(2)
// Third beforeQueued: repeatUsed = 3 (will advance on next)
harness.widget.beforeQueued()
expect(harness.state.repeatUsed.value).toBe(3)
// Fourth beforeQueued: repeatUsed resets to 1, index advances
harness.widget.beforeQueued()
expect(harness.state.repeatUsed.value).toBe(1)
expect(harness.state.nextIndex.value).toBe(3) // Advanced from 2 to 3
})
it('should not advance display until repeat cycle completes', async () => {
const harness = createTestHarness({ totalCount: 5, repeatCount: 2 })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
// First execution: repeatUsed=1 < repeatCount=2, shouldAdvanceDisplay=false
// Second execution: repeatUsed=2 >= repeatCount=2, shouldAdvanceDisplay=true
const indexHistory: number[] = []
// Override onExecuted to track index changes
const originalOnExecuted = harness.node.onExecuted
harness.node.onExecuted = (output: any) => {
originalOnExecuted(output)
indexHistory.push(harness.state.currentIndex.value)
}
await simulator.runBatchQueue(
4,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// Index should only change on 2nd and 4th execution
// Starting at 1: stay 1, advance to 2, stay 2, advance to 3
expect(indexHistory).toEqual([1, 2, 2, 3])
})
it('should defer displayRepeatUsed updates until workflow completion', async () => {
const harness = createTestHarness({ totalCount: 3, repeatCount: 3 })
// Initial state
expect(harness.state.displayRepeatUsed.value).toBe(0)
// Queue 3 executions in batch mode (all beforeQueued before any onExecuted)
harness.widget.beforeQueued() // repeatUsed = 1
harness.widget.beforeQueued() // repeatUsed = 2
harness.widget.beforeQueued() // repeatUsed = 3
// displayRepeatUsed should NOT have changed yet (still 0)
// because no onExecuted has been called
expect(harness.state.displayRepeatUsed.value).toBe(0)
// Now simulate workflow completions
harness.node.onExecuted({ next_index: 1 })
expect(harness.state.displayRepeatUsed.value).toBe(1)
harness.node.onExecuted({ next_index: 1 })
expect(harness.state.displayRepeatUsed.value).toBe(2)
harness.node.onExecuted({ next_index: 2 })
// After completing repeat cycle, displayRepeatUsed resets to 0
expect(harness.state.displayRepeatUsed.value).toBe(0)
})
it('should reset displayRepeatUsed to 0 when advancing to new LoRA', async () => {
const harness = createTestHarness({ totalCount: 3, repeatCount: 2 })
const simulator = new BatchQueueSimulator({ totalCount: 3 })
const displayHistory: number[] = []
const originalOnExecuted = harness.node.onExecuted
harness.node.onExecuted = (output: any) => {
originalOnExecuted(output)
displayHistory.push(harness.state.displayRepeatUsed.value)
}
// Run 4 executions: 2 repeats of LoRA 1, 2 repeats of LoRA 2
await simulator.runBatchQueue(
4,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// displayRepeatUsed should show:
// 1st exec: 1 (first repeat of LoRA 1)
// 2nd exec: 0 (complete, reset for next LoRA)
// 3rd exec: 1 (first repeat of LoRA 2)
// 4th exec: 0 (complete, reset for next LoRA)
expect(displayHistory).toEqual([1, 0, 1, 0])
})
// Verifies displayRepeatUsed counts up while the same LoRA keeps repeating
// and only resets once its final repeat completes.
it('should show current repeat step when not advancing', async () => {
const harness = createTestHarness({ totalCount: 3, repeatCount: 4 })
const simulator = new BatchQueueSimulator({ totalCount: 3 })
const displayHistory: number[] = []
// Wrap onExecuted so we can sample displayRepeatUsed after every execution.
const originalOnExecuted = harness.node.onExecuted
harness.node.onExecuted = (output: any) => {
originalOnExecuted(output)
displayHistory.push(harness.state.displayRepeatUsed.value)
}
// Run 4 executions: all 4 repeats of the same LoRA
await simulator.runBatchQueue(
4,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// displayRepeatUsed should show:
// 1st exec: 1 (repeat 1/4, not advancing)
// 2nd exec: 2 (repeat 2/4, not advancing)
// 3rd exec: 3 (repeat 3/4, not advancing)
// 4th exec: 0 (repeat 4/4, complete, reset for next LoRA)
expect(displayHistory).toEqual([1, 2, 3, 0])
})
})
// Exercises the pause feature end-to-end: a paused cycler must freeze its
// index, exclude paused executions from repeat accounting, null out
// execution_index so the backend reuses the current LoRA, and resume
// cleanly from where it left off after unpausing.
describe('Pause Functionality', () => {
it('should maintain index when paused', async () => {
const harness = createTestHarness({ totalCount: 5, isPaused: true })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
await simulator.runBatchQueue(
3,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// Index should not advance when paused
expect(harness.state.currentIndex.value).toBe(1)
})
it('should not count paused executions toward repeat limit', async () => {
const harness = createTestHarness({ totalCount: 5, repeatCount: 2 })
// Run 2 executions while paused
harness.state.isPaused.value = true
harness.widget.beforeQueued()
harness.widget.beforeQueued()
// repeatUsed should still be 0 (paused executions don't count)
expect(harness.state.repeatUsed.value).toBe(0)
// Unpause and run
harness.state.isPaused.value = false
harness.widget.beforeQueued()
expect(harness.state.repeatUsed.value).toBe(1)
})
it('should preserve displayRepeatUsed when paused', async () => {
const harness = createTestHarness({ totalCount: 5, repeatCount: 3 })
// Run one execution to set displayRepeatUsed
harness.widget.beforeQueued()
harness.node.onExecuted({ next_index: 1 })
expect(harness.state.displayRepeatUsed.value).toBe(1)
// Pause
harness.state.isPaused.value = true
// Queue and execute while paused
harness.widget.beforeQueued()
harness.node.onExecuted({ next_index: 1 })
// displayRepeatUsed should remain at 1 (paused executions don't change it)
expect(harness.state.displayRepeatUsed.value).toBe(1)
// Queue another paused execution
harness.widget.beforeQueued()
harness.node.onExecuted({ next_index: 1 })
// Still should be 1
expect(harness.state.displayRepeatUsed.value).toBe(1)
})
it('should use same LoRA when pause is toggled mid-batch', async () => {
// This tests the critical bug scenario:
// 1. User queues multiple prompts (not paused)
// 2. All beforeQueued calls complete, each advancing execution_index
// 3. User clicks pause
// 4. onExecuted starts firing - paused executions should use current_index, not execution_index
const harness = createTestHarness({ totalCount: 5 })
// Queue first prompt (not paused) - this sets up execution_index
harness.widget.beforeQueued()
const config1 = harness.getConfig()
expect(config1.execution_index).toBeNull() // First execution uses current_index
// User clicks pause mid-batch
harness.state.isPaused.value = true
// Queue subsequent prompts while paused
harness.widget.beforeQueued()
const config2 = harness.getConfig()
// CRITICAL: execution_index should be null when paused to force backend to use current_index
expect(config2.execution_index).toBeNull()
harness.widget.beforeQueued()
const config3 = harness.getConfig()
expect(config3.execution_index).toBeNull()
// Verify execution queue has correct context
expect(harness.executionQueue.length).toBe(3)
expect(harness.executionQueue[0].isPaused).toBe(false)
expect(harness.executionQueue[1].isPaused).toBe(true)
expect(harness.executionQueue[2].isPaused).toBe(true)
})
it('should have null execution_index in widget.value when paused even after non-paused queues', async () => {
// More detailed test for the execution_index clearing behavior
// This tests that widget.value (what backend receives) has null execution_index
const harness = createTestHarness({ totalCount: 5 })
// Queue 3 prompts while not paused
harness.widget.beforeQueued()
harness.widget.beforeQueued()
harness.widget.beforeQueued()
// Verify execution_index was set by non-paused queues in widget.value
expect(harness.widget.value.execution_index).not.toBeNull()
// User pauses
harness.state.isPaused.value = true
// Queue while paused - should clear execution_index in widget.value
// This is the value that gets sent to the backend
harness.widget.beforeQueued()
expect(harness.widget.value.execution_index).toBeNull()
// State's executionIndex may still have the old value (that's fine)
// What matters is widget.value which is what the backend uses
})
it('should clear server queue when pausing mid-batch', async () => {
// This tests the fix for the batch queue pause bug:
// When user presses pause during batch execution, pending queue items should be cleared
const harness = createTestHarness({ totalCount: 5 })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
// Mock fetch to track calls to /queue; other URLs fall through to the real mock
const fetchCalls: { url: string; body: any }[] = []
const originalFetch = global.fetch
global.fetch = vi.fn().mockImplementation((url: string, options?: RequestInit) => {
if (url === '/queue') {
fetchCalls.push({ url, body: options?.body ? JSON.parse(options.body as string) : null })
return Promise.resolve({ ok: true, json: () => Promise.resolve({}) })
}
// Call through for other URLs (like cycler-list API)
return originalFetch(url, options)
}) as any
try {
// Queue 4 prompts while not paused
harness.widget.beforeQueued()
harness.widget.beforeQueued()
harness.widget.beforeQueued()
harness.widget.beforeQueued()
// Verify 4 contexts were queued
expect(harness.executionQueue.length).toBe(4)
// Simulate pressing pause (this is what handleTogglePause does in the component)
const wasPaused = harness.state.isPaused.value
harness.state.togglePause()
// When transitioning to paused, the component should:
// 1. Reset execution state
// 2. Clear execution queue
// 3. Call fetch('/queue', { clear: true })
if (!wasPaused && harness.state.isPaused.value) {
// Reset execution state (mimics component behavior)
harness.resetExecutionState()
// Clear server queue (mimics component behavior)
await fetch('/queue', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ clear: true })
})
}
// Verify execution queue was cleared
expect(harness.executionQueue.length).toBe(0)
// Verify fetch was called with correct parameters
expect(fetchCalls.length).toBe(1)
expect(fetchCalls[0].url).toBe('/queue')
expect(fetchCalls[0].body).toEqual({ clear: true })
} finally {
// Always restore the shared fetch mock so later tests are unaffected
global.fetch = originalFetch
}
})
it('should resume cycling after unpause', async () => {
const harness = createTestHarness({ totalCount: 3, initialIndex: 2 })
const simulator = new BatchQueueSimulator({ totalCount: 3 })
// Execute once while not paused
await simulator.runSingle(
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// Pause
harness.state.isPaused.value = true
// Execute twice while paused
await simulator.runBatchQueue(
2,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// Unpause and execute
harness.state.isPaused.value = false
await simulator.runSingle(
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// Should continue from where it left off (index 3 -> 1)
expect(harness.state.currentIndex.value).toBe(1)
})
})
// Manual index selection must discard all in-flight execution bookkeeping
// and restart cycling from the user-chosen position.
describe('Manual Index Change', () => {
  it('should reset execution state on manual index change', async () => {
    const harness = createTestHarness({ totalCount: 5 })
    // Queue a couple of prompts so there is transient state to discard.
    harness.widget.beforeQueued()
    harness.widget.beforeQueued()
    expect(harness.widget[harness.HAS_EXECUTED]).toBe(true)
    expect(harness.executionQueue.length).toBe(2)
    // Mimic handleIndexUpdate: the user picks index 4 by hand.
    harness.resetExecutionState()
    harness.state.setIndex(4)
    // Every piece of execution bookkeeping must be back to its initial state.
    expect(harness.widget[harness.HAS_EXECUTED]).toBe(false)
    expect(harness.state.executionIndex.value).toBeNull()
    expect(harness.state.nextIndex.value).toBeNull()
    expect(harness.executionQueue.length).toBe(0)
  })
  it('should start fresh cycle from manual index', async () => {
    const harness = createTestHarness({ totalCount: 5 })
    const simulator = new BatchQueueSimulator({ totalCount: 5 })
    const hooks = {
      beforeQueued: () => harness.widget.beforeQueued(),
      onExecuted: (output: unknown) => harness.node.onExecuted(output)
    }
    // Two executions from the default start (1) advance the index to 3.
    await simulator.runBatchQueue(2, hooks, () => harness.getConfig())
    expect(harness.state.currentIndex.value).toBe(3)
    // The user rewinds manually to index 1.
    harness.resetExecutionState()
    harness.state.setIndex(1)
    // Cycling restarts from the manual index and lands on 3 again.
    await simulator.runBatchQueue(2, hooks, () => harness.getConfig())
    expect(harness.state.currentIndex.value).toBe(3)
  })
})
// Covers the case where beforeQueued fired more times than onExecuted
// (e.g. the user cancelled mid-batch), leaving stale contexts in the queue.
describe('Execution Queue Mismatch', () => {
it('should handle interrupted execution (queue > executed)', async () => {
const harness = createTestHarness({ totalCount: 5 })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
// Queue 5 but only execute 2 (simulates cancel)
await simulator.runInterrupted(
5, // queued
2, // executed
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// 3 contexts remain in queue
expect(harness.executionQueue.length).toBe(3)
// Index should reflect only the 2 executions that completed
expect(harness.state.currentIndex.value).toBe(3)
})
it('should recover from mismatch on next manual index change', async () => {
const harness = createTestHarness({ totalCount: 5 })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
// Create mismatch: 5 queued, only 2 executed
await simulator.runInterrupted(
5,
2,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
expect(harness.executionQueue.length).toBe(3)
// Manual index change clears the stale queue
harness.resetExecutionState()
harness.state.setIndex(1)
expect(harness.executionQueue.length).toBe(0)
// Can execute normally again
await simulator.runSingle(
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
expect(harness.state.currentIndex.value).toBe(2)
})
})
// Boundary conditions: degenerate pool sizes and long stress cycles.
describe('Edge Cases', () => {
it('should handle single item pool', async () => {
const harness = createTestHarness({ totalCount: 1 })
const simulator = new BatchQueueSimulator({ totalCount: 1 })
await simulator.runBatchQueue(
3,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// Should always stay at index 1
expect(harness.state.currentIndex.value).toBe(1)
})
it('should handle empty pool gracefully', async () => {
const harness = createTestHarness({ totalCount: 0 })
// beforeQueued should still work without errors
expect(() => harness.widget.beforeQueued()).not.toThrow()
})
it('should handle rapid sequential executions', async () => {
const harness = createTestHarness({ totalCount: 5 })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
// Run 20 sequential executions
await simulator.runSequential(
20,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// Starting at 1, each execution advances the index by one; 20 executions on
// a 5-item pool is four full cycles, so the index wraps back to 1.
expect(harness.state.currentIndex.value).toBe(1)
})
it('should preserve state consistency across many cycles', async () => {
const harness = createTestHarness({ totalCount: 3, repeatCount: 2 })
const simulator = new BatchQueueSimulator({ totalCount: 3 })
// Run 100 executions in batches
for (let batch = 0; batch < 10; batch++) {
await simulator.runBatchQueue(
10,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
}
// Verify state is still valid
expect(harness.state.currentIndex.value).toBeGreaterThanOrEqual(1)
expect(harness.state.currentIndex.value).toBeLessThanOrEqual(3)
// NOTE(review): this assumes repeatUsed never rests at 0 after 100
// executions with repeatCount 2 — confirm against the composable's logic.
expect(harness.state.repeatUsed.value).toBeGreaterThanOrEqual(1)
expect(harness.state.repeatUsed.value).toBeLessThanOrEqual(2)
expect(harness.executionQueue.length).toBe(0)
})
})
// Properties that must hold after every execution regardless of sequence.
describe('Invariant Assertions', () => {
it('should always have valid index (1 <= currentIndex <= totalCount)', async () => {
const harness = createTestHarness({ totalCount: 5 })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
const checkInvariant = () => {
const { currentIndex, totalCount } = harness.state
if (totalCount.value > 0) {
expect(currentIndex.value).toBeGreaterThanOrEqual(1)
expect(currentIndex.value).toBeLessThanOrEqual(totalCount.value)
}
}
// Override onExecuted to check invariant after each execution
const originalOnExecuted = harness.node.onExecuted
harness.node.onExecuted = (output: any) => {
originalOnExecuted(output)
checkInvariant()
}
await simulator.runBatchQueue(
20,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
})
it('should always have repeatUsed <= repeatCount', async () => {
const harness = createTestHarness({ totalCount: 5, repeatCount: 3 })
const checkInvariant = () => {
expect(harness.state.repeatUsed.value).toBeLessThanOrEqual(harness.state.repeatCount.value)
}
// Check after each beforeQueued
for (let i = 0; i < 20; i++) {
harness.widget.beforeQueued()
checkInvariant()
}
})
it('should consume all execution contexts (queue empty after matching executions)', async () => {
const harness = createTestHarness({ totalCount: 5 })
const simulator = new BatchQueueSimulator({ totalCount: 5 })
await simulator.runBatchQueue(
7,
{
beforeQueued: () => harness.widget.beforeQueued(),
onExecuted: (output) => harness.node.onExecuted(output)
},
() => harness.getConfig()
)
// Every queued context must have been consumed by a matching onExecuted
expect(harness.executionQueue.length).toBe(0)
})
})
// Batch queue mode and sequential mode must converge to identical state
// after the same number of executions.
describe('Batch vs Sequential Mode Equivalence', () => {
  // Runs `count` executions in batch mode on one harness and sequential mode
  // on a second identical harness, then asserts both end in the same state.
  const expectModesToMatch = async (
    harnessOptions: { totalCount: number; repeatCount?: number },
    count: number
  ) => {
    const batchHarness = createTestHarness(harnessOptions)
    const seqHarness = createTestHarness(harnessOptions)
    const batchSimulator = new BatchQueueSimulator({ totalCount: harnessOptions.totalCount })
    const seqSimulator = new BatchQueueSimulator({ totalCount: harnessOptions.totalCount })
    await batchSimulator.runBatchQueue(
      count,
      {
        beforeQueued: () => batchHarness.widget.beforeQueued(),
        onExecuted: (output) => batchHarness.node.onExecuted(output)
      },
      () => batchHarness.getConfig()
    )
    await seqSimulator.runSequential(
      count,
      {
        beforeQueued: () => seqHarness.widget.beforeQueued(),
        onExecuted: (output) => seqHarness.node.onExecuted(output)
      },
      () => seqHarness.getConfig()
    )
    // Final state must be identical across both execution modes.
    expect(batchHarness.state.currentIndex.value).toBe(seqHarness.state.currentIndex.value)
    expect(batchHarness.state.repeatUsed.value).toBe(seqHarness.state.repeatUsed.value)
    expect(batchHarness.state.displayRepeatUsed.value).toBe(seqHarness.state.displayRepeatUsed.value)
  }
  it('should produce same final state in both modes (basic cycle)', async () => {
    await expectModesToMatch({ totalCount: 5 }, 7)
  })
  it('should produce same final state in both modes (with repeat)', async () => {
    await expectModesToMatch({ totalCount: 3, repeatCount: 2 }, 10)
  })
})
})

View File

@@ -0,0 +1,75 @@
/**
* Vitest test setup file
* Configures global mocks for ComfyUI modules and browser APIs
*/
import { vi } from 'vitest'
// Mock ComfyUI app module so extension code can register itself without a
// real graph; `_nodes` starts empty and individual tests may populate it.
vi.mock('../../../scripts/app.js', () => ({
app: {
graph: {
_nodes: []
},
registerExtension: vi.fn()
}
}))
// Mock ComfyUI loras_widget module (card add/remove become no-op spies)
vi.mock('../loras_widget.js', () => ({
addLoraCard: vi.fn(),
removeLoraCard: vi.fn()
}))
// Mock ComfyUI autocomplete module
vi.mock('../autocomplete.js', () => ({
setupAutocomplete: vi.fn()
}))
// Global fetch mock - exported so tests can access it directly.
// All helper functions below operate on this single shared instance.
export const mockFetch = vi.fn()
vi.stubGlobal('fetch', mockFetch)
// Helper to reset fetch mock between tests.
// Clears all recorded calls and implementations, then re-installs the same
// mock instance as the global `fetch`.
export function resetFetchMock() {
mockFetch.mockReset()
// Re-stub global to ensure it's the same mock (a test may have swapped global.fetch)
vi.stubGlobal('fetch', mockFetch)
}
// Helper to setup fetch mock with default success response
export function setupFetchMock(response: unknown = { success: true, loras: [] }) {
// Ensure we're using the same mock
mockFetch.mockReset()
mockFetch.mockResolvedValue({
ok: true,
json: () => Promise.resolve(response)
})
vi.stubGlobal('fetch', mockFetch)
return mockFetch
}
// Helper to setup fetch mock with error response
export function setupFetchErrorMock(error: string = 'Network error') {
mockFetch.mockReset()
mockFetch.mockRejectedValue(new Error(error))
vi.stubGlobal('fetch', mockFetch)
return mockFetch
}
// Mock btoa for hashing (jsdom should have this, but just in case)
if (typeof global.btoa === 'undefined') {
vi.stubGlobal('btoa', (str: string) => Buffer.from(str).toString('base64'))
}
// Silence console output during tests to reduce noise.
// Restore the real implementations with enableConsole() below.
vi.spyOn(console, 'log').mockImplementation(() => {})
vi.spyOn(console, 'error').mockImplementation(() => {})
vi.spyOn(console, 'warn').mockImplementation(() => {})
// Re-enable console for debugging when needed.
// NOTE(review): this relies on vi.spyOn returning the already-installed spy
// for a mocked method, so mockRestore() brings back the real console
// functions — confirm this holds for the vitest version in use.
export function enableConsole() {
vi.spyOn(console, 'log').mockRestore()
vi.spyOn(console, 'error').mockRestore()
vi.spyOn(console, 'warn').mockRestore()
}

View File

@@ -0,0 +1,230 @@
/**
* BatchQueueSimulator - Simulates ComfyUI's two execution modes
*
* ComfyUI has two distinct execution patterns:
* 1. Batch Queue Mode: ALL beforeQueued calls happen BEFORE any onExecuted calls
* 2. Sequential Mode: beforeQueued and onExecuted interleave for each prompt
*
* This simulator helps test how the widget behaves in both modes.
*/
import type { CyclerConfig } from '@/composables/types'
/** Widget lifecycle callbacks the simulator drives for each simulated prompt. */
export interface ExecutionHooks {
/** Called when a prompt is queued (before execution) */
beforeQueued: () => void
/** Called when execution completes with output */
onExecuted: (output: unknown) => void
}
/** Construction options for BatchQueueSimulator. */
export interface SimulatorOptions {
/** Total number of LoRAs in the pool */
totalCount: number
/** Function to generate output for each execution (defaults to defaultGenerateOutput) */
generateOutput?: (executionIndex: number, config: CyclerConfig) => unknown
}
/**
 * Creates execution output based on the current config state.
 * Mirrors the backend node's shape: every field is a ComfyUI-style
 * single-element array, reporting the index used for this execution and the
 * (wrapped) index that will be used next.
 */
function defaultGenerateOutput(executionIndex: number, config: CyclerConfig) {
  // The snapshotted execution_index wins; current_index is the fallback.
  const activeIdx = config.execution_index ?? config.current_index
  // Advance by one, wrapping back to 1 once the pool is exhausted.
  const candidate = activeIdx + 1
  const nextIdx = candidate > config.total_count ? 1 : candidate
  return {
    next_index: [nextIdx],
    total_count: [config.total_count],
    next_lora_name: [`lora${nextIdx}.safetensors`],
    next_lora_filename: [`lora${nextIdx}.safetensors`],
    current_lora_name: [`lora${activeIdx}.safetensors`],
    current_lora_filename: [`lora${activeIdx}.safetensors`]
  }
}
export class BatchQueueSimulator {
  // Monotonic count of onExecuted events emitted across all run* calls.
  private executionCount = 0
  private options: Required<SimulatorOptions>

  constructor(options: SimulatorOptions) {
    this.options = {
      totalCount: options.totalCount,
      generateOutput: options.generateOutput ?? defaultGenerateOutput
    }
  }

  /**
   * Reset the simulator state
   */
  reset() {
    this.executionCount = 0
  }

  /** Fires one onExecuted event for a previously snapshotted config. */
  private fireExecution(config: CyclerConfig, hooks: ExecutionHooks) {
    hooks.onExecuted(this.options.generateOutput(this.executionCount, config))
    this.executionCount += 1
  }

  /**
   * Simulates Batch Queue Mode execution.
   *
   * ComfyUI queues multiple prompts at once in this mode: ALL beforeQueued()
   * calls happen first, THEN all onExecuted() calls fire in order. This is
   * the mode used when queueing multiple prompts from the UI.
   *
   * @param count Number of prompts to simulate
   * @param hooks The widget's execution hooks
   * @param getConfig Function to get current widget config state
   */
  async runBatchQueue(
    count: number,
    hooks: ExecutionHooks,
    getConfig: () => CyclerConfig
  ): Promise<void> {
    // A full batch is an interrupted run in which every queued prompt executes.
    return this.runInterrupted(count, count, hooks, getConfig)
  }

  /**
   * Simulates Sequential Mode execution: beforeQueued() and onExecuted()
   * interleave one prompt at a time. This is the mode used in API-driven
   * execution or single prompt queuing.
   *
   * @param count Number of prompts to simulate
   * @param hooks The widget's execution hooks
   * @param getConfig Function to get current widget config state
   */
  async runSequential(
    count: number,
    hooks: ExecutionHooks,
    getConfig: () => CyclerConfig
  ): Promise<void> {
    for (let i = 0; i < count; i += 1) {
      hooks.beforeQueued()
      // Snapshot the config right after queuing, then execute immediately.
      this.fireExecution({ ...getConfig() }, hooks)
    }
  }

  /**
   * Simulates a single execution (queue + execute)
   */
  async runSingle(
    hooks: ExecutionHooks,
    getConfig: () => CyclerConfig
  ): Promise<void> {
    return this.runSequential(1, hooks, getConfig)
  }

  /**
   * Simulates interrupted execution: some beforeQueued calls never receive a
   * matching onExecuted (e.g. the user cancels execution mid-batch).
   *
   * @param queuedCount Number of prompts queued (beforeQueued called)
   * @param executedCount Number of prompts that actually executed
   * @param hooks The widget's execution hooks
   * @param getConfig Function to get current widget config state
   */
  async runInterrupted(
    queuedCount: number,
    executedCount: number,
    hooks: ExecutionHooks,
    getConfig: () => CyclerConfig
  ): Promise<void> {
    if (executedCount > queuedCount) {
      throw new Error('executedCount cannot be greater than queuedCount')
    }
    // Phase 1: queue everything, snapshotting the config after each queue call.
    const snapshots: CyclerConfig[] = []
    for (let i = 0; i < queuedCount; i += 1) {
      hooks.beforeQueued()
      snapshots.push({ ...getConfig() })
    }
    // Phase 2: only the first `executedCount` prompts actually complete.
    for (const config of snapshots.slice(0, executedCount)) {
      this.fireExecution(config, hooks)
    }
  }
}
/**
 * Adapts a widget/node pair into ExecutionHooks.
 * Missing handlers are tolerated: each hook is a no-op when the underlying
 * method is absent (optional chaining).
 */
export function createHooksFromWidget(widget: {
  beforeQueued?: () => void
}, node: {
  onExecuted?: (output: unknown) => void
}): ExecutionHooks {
  return {
    beforeQueued() {
      widget.beforeQueued?.()
    },
    onExecuted(output) {
      node.onExecuted?.(output)
    }
  }
}
/**
 * Tracks index history during simulation for assertions.
 * Feed record() a config snapshot per execution, then inspect the public
 * history arrays or use the verification helpers.
 */
export class IndexTracker {
  public indexHistory: number[] = []
  public repeatHistory: number[] = []
  public pauseHistory: boolean[] = []

  /** Drops all recorded history (assigns fresh arrays). */
  reset() {
    this.indexHistory = []
    this.repeatHistory = []
    this.pauseHistory = []
  }

  /** Records the index, repeat counter, and pause flag from one config snapshot. */
  record(config: CyclerConfig) {
    const { current_index, repeat_used, is_paused } = config
    this.indexHistory.push(current_index)
    this.repeatHistory.push(repeat_used)
    this.pauseHistory.push(is_paused)
  }

  /**
   * Get the sequence of indices that were actually used for execution.
   * Returns the live backing array, not a copy.
   */
  getExecutionIndices(): number[] {
    return this.indexHistory
  }

  /**
   * Verify that indices cycle correctly through totalCount by comparing the
   * recorded history element-wise against an expected pattern.
   */
  verifyCyclePattern(expectedPattern: number[]): boolean {
    return (
      this.indexHistory.length === expectedPattern.length &&
      expectedPattern.every((expected, i) => this.indexHistory[i] === expected)
    )
  }
}