Fix prompt injection logic and improve documentation

Co-authored-by: jags111 <5968619+jags111@users.noreply.github.com>
This commit is contained in:
copilot-swe-agent[bot]
2026-02-03 23:07:51 +00:00
parent f9ab4b04a9
commit 1cbbe4ddca
2 changed files with 8 additions and 4 deletions

View File

@@ -69,7 +69,9 @@ Here's a typical workflow using trigger words:
1. Create an **XY Input: LoRA Plot** node
2. Set `input_mode` to "X: LoRA Batch, Y: LoRA Weight"
3. Set `X_batch_path` to your LoRA directory
   - Windows: `d:\LoRas` or `C:\ComfyUI\models\loras`
   - Linux/Mac: `/path/to/loras` or `~/ComfyUI/models/loras`
4. Set `X_batch_count` to the number of LoRAs you want to test
5. In the `X_trigger_words` field, enter trigger words (one per line):
```

View File

@@ -1271,13 +1271,15 @@ class TSC_KSampler:
lora_trigger_word = lora_tuple[3] if len(lora_tuple) > 3 else ""
# Inject trigger word into positive prompt if present
# positive_prompt structure: (current_prompt, original_prompt, prompt_after_X_loop)
if lora_trigger_word:
if positive_prompt[2] is not None:
# In Y loop after X loop - build on the X loop result
positive_prompt = (positive_prompt[2] + " " + lora_trigger_word, positive_prompt[1], positive_prompt[2])
else:
# In X loop or initial - build on original and save for Y loop
modified_prompt = positive_prompt[1] + " " + lora_trigger_word
positive_prompt = (modified_prompt, positive_prompt[1], modified_prompt)
lora_filename = os.path.splitext(os.path.basename(lora_name))[0]