Mirror of https://github.com/jags111/efficiency-nodes-comfyui.git, synced 2026-05-07 01:06:42 -03:00
Compare commits
6 Commits
copilot/se
...
copilot/fi
| Author | SHA1 | Date |
|---|---|---|
|  | 6c0c6cac4e |  |
|  | bceccbcf06 |  |
|  | 3fa0e8c927 |  |
|  | 94c9d05e2e |  |
|  | b083ff3f6c |  |
|  | f9692f29ff |  |
.gitignore vendored Normal file (22 lines added)
@@ -0,0 +1,22 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+
+# Virtual environments
+venv/
+ENV/
+env/
+
+# IDEs
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+
+# OS
+.DS_Store
+Thumbs.db
__init__.py
@@ -53,6 +53,7 @@ from .py import city96_latent_upscaler
 from .py import ttl_nn_latent_upscaler
 from .py import bnk_tiled_samplers
 from .py import bnk_adv_encode
+from .py.bnk_adv_encode import normalize_prompt_text
 sys.path.remove(my_dir)
 
 from comfy import samplers
@@ -71,6 +72,9 @@ SCHEDULERS = samplers.KSampler.SCHEDULERS + ["AYS SD1", "AYS SDXL", "AYS SVD", "
 def encode_prompts(positive_prompt, negative_prompt, token_normalization, weight_interpretation, clip, clip_skip,
                    refiner_clip, refiner_clip_skip, ascore, is_sdxl, empty_latent_width, empty_latent_height,
                    return_type="both"):
+    # Ensure prompts are valid strings to prevent tokenization errors
+    positive_prompt = normalize_prompt_text(positive_prompt)
+    negative_prompt = normalize_prompt_text(negative_prompt)
 
     positive_encoded = negative_encoded = refiner_positive_encoded = refiner_negative_encoded = None
 
py/bnk_adv_encode.py
@@ -6,6 +6,34 @@ from math import gcd
 from comfy import model_management
 from comfy.sdxl_clip import SDXLClipModel, SDXLRefinerClipModel, SDXLClipG
 
+def normalize_prompt_text(text):
+    """
+    Normalize prompt text to prevent tokenization errors.
+
+    Converts None, empty strings, or whitespace-only strings to a single space.
+    Ensures the input is a string type by converting non-string values.
+    This function is designed to handle edge cases gracefully without crashing,
+    which is important for ComfyUI workflows where users might have empty prompts.
+
+    Parameters:
+        text: The input prompt text to normalize. Can be of any type, though
+              string, None, or convertible types are expected.
+
+    Returns:
+        str: A normalized string that is safe to pass to the tokenizer.
+             Returns " " (single space) for None, empty, or whitespace-only inputs.
+             Returns the original text unchanged if it's a valid non-empty string.
+             Returns str(text) for non-string types.
+    """
+    if text is None:
+        return " "
+    if not isinstance(text, str):
+        # Convert non-string types to string
+        text = str(text)
+    if not text.strip():
+        return " "
+    return text
+
 def _grouper(n, iterable):
     it = iter(iterable)
     while True:
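For orientation, here is a minimal sketch of how the new helper behaves on the edge cases its docstring describes. The assertions are illustrative only and not part of the diff; the import path assumes the helper is reachable the same way the `__init__.py` change above wires it up, and may need adjusting in other environments.

```python
# Illustrative only: exercises normalize_prompt_text as defined in this hunk.
from py.bnk_adv_encode import normalize_prompt_text  # path assumed; adjust to your setup

# None, empty, and whitespace-only prompts are coerced to a single space
assert normalize_prompt_text(None) == " "
assert normalize_prompt_text("") == " "
assert normalize_prompt_text("   \n") == " "

# Non-string input is converted with str() before the emptiness check
assert normalize_prompt_text(123) == "123"

# Ordinary prompts pass through unchanged
assert normalize_prompt_text("a photo of a cat") == "a photo of a cat"
```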
@@ -237,6 +265,8 @@ def prepareXL(embs_l, embs_g, pooled, clip_balance):
     return embs_g, pooled
 
 def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True):
+    # Ensure text is a valid string to prevent tokenization errors
+    text = normalize_prompt_text(text)
     tokenized = clip.tokenize(text, return_word_ids=True)
     if isinstance(clip.cond_stage_model, (SDXLClipModel, SDXLRefinerClipModel, SDXLClipG)):
         embs_l = None
@@ -265,6 +295,9 @@ def advanced_encode(clip, text, token_normalization, weight_interpretation, w_ma
         lambda x: (clip.encode_from_tokens({'l': x}), None),
         w_max=w_max)
 def advanced_encode_XL(clip, text1, text2, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True):
+    # Ensure texts are valid strings to prevent tokenization errors
+    text1 = normalize_prompt_text(text1)
+    text2 = normalize_prompt_text(text2)
     tokenized1 = clip.tokenize(text1, return_word_ids=True)
     tokenized2 = clip.tokenize(text2, return_word_ids=True)
 
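To make the motivation for the guard concrete, the sketch below is a hedged illustration, not part of the diff: `fake_tokenize` is a hypothetical stand-in for a tokenizer that rejects empty input, used only to show why normalizing prompts before `clip.tokenize` avoids crashes when a workflow passes an empty or `None` prompt. ComfyUI's real CLIP tokenizer is not reproduced here.

```python
from py.bnk_adv_encode import normalize_prompt_text  # path assumed; adjust to your setup

# Hypothetical stand-in for a tokenizer that rejects empty input.
def fake_tokenize(text):
    if not text:
        raise ValueError("cannot tokenize an empty prompt")
    return text.split()

# Without normalization, None or "" would raise above. After normalize_prompt_text,
# every prompt is a non-empty string ("" -> " "), so tokenization succeeds and
# whitespace-only prompts simply yield no tokens.
for raw in (None, "", "   ", "a photo of a cat"):
    safe = normalize_prompt_text(raw)
    print(repr(raw), "->", repr(safe), fake_tokenize(safe))
```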