mirror of
https://github.com/jags111/efficiency-nodes-comfyui.git
synced 2026-03-21 21:22:13 -03:00
Refactor validation into shared helper function and improve type handling
Co-authored-by: jags111 <5968619+jags111@users.noreply.github.com>
This commit is contained in:
@@ -53,6 +53,7 @@ from .py import city96_latent_upscaler
|
|||||||
from .py import ttl_nn_latent_upscaler
|
from .py import ttl_nn_latent_upscaler
|
||||||
from .py import bnk_tiled_samplers
|
from .py import bnk_tiled_samplers
|
||||||
from .py import bnk_adv_encode
|
from .py import bnk_adv_encode
|
||||||
|
from .py.bnk_adv_encode import normalize_prompt_text
|
||||||
sys.path.remove(my_dir)
|
sys.path.remove(my_dir)
|
||||||
|
|
||||||
from comfy import samplers
|
from comfy import samplers
|
||||||
@@ -73,10 +74,8 @@ def encode_prompts(positive_prompt, negative_prompt, token_normalization, weight
|
|||||||
return_type="both"):
|
return_type="both"):
|
||||||
|
|
||||||
# Ensure prompts are valid strings to prevent tokenization errors
|
# Ensure prompts are valid strings to prevent tokenization errors
|
||||||
if positive_prompt is None or (isinstance(positive_prompt, str) and not positive_prompt.strip()):
|
positive_prompt = normalize_prompt_text(positive_prompt)
|
||||||
positive_prompt = " "
|
negative_prompt = normalize_prompt_text(negative_prompt)
|
||||||
if negative_prompt is None or (isinstance(negative_prompt, str) and not negative_prompt.strip()):
|
|
||||||
negative_prompt = " "
|
|
||||||
|
|
||||||
positive_encoded = negative_encoded = refiner_positive_encoded = refiner_negative_encoded = None
|
positive_encoded = negative_encoded = refiner_positive_encoded = refiner_negative_encoded = None
|
||||||
|
|
||||||
|
|||||||
@@ -6,6 +6,21 @@ from math import gcd
|
|||||||
from comfy import model_management
|
from comfy import model_management
|
||||||
from comfy.sdxl_clip import SDXLClipModel, SDXLRefinerClipModel, SDXLClipG
|
from comfy.sdxl_clip import SDXLClipModel, SDXLRefinerClipModel, SDXLClipG
|
||||||
|
|
||||||
|
def normalize_prompt_text(text):
    """Coerce a prompt value into a safe, non-blank string.

    ``None`` and blank/whitespace-only inputs are replaced with a single
    space so downstream tokenization never receives an empty prompt;
    non-string inputs are stringified first.

    Args:
        text: The raw prompt value (may be None, a string, or any object).

    Returns:
        A non-empty string suitable for tokenization.
    """
    if text is None:
        return " "
    # Stringify anything that is not already a str before the blank check.
    normalized = text if isinstance(text, str) else str(text)
    return normalized if normalized.strip() else " "
|
||||||
|
|
||||||
def _grouper(n, iterable):
|
def _grouper(n, iterable):
|
||||||
it = iter(iterable)
|
it = iter(iterable)
|
||||||
while True:
|
while True:
|
||||||
@@ -238,8 +253,7 @@ def prepareXL(embs_l, embs_g, pooled, clip_balance):
|
|||||||
|
|
||||||
def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True):
|
def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True):
|
||||||
# Ensure text is a valid string to prevent tokenization errors
|
# Ensure text is a valid string to prevent tokenization errors
|
||||||
if text is None or (isinstance(text, str) and not text.strip()):
|
text = normalize_prompt_text(text)
|
||||||
text = " "
|
|
||||||
tokenized = clip.tokenize(text, return_word_ids=True)
|
tokenized = clip.tokenize(text, return_word_ids=True)
|
||||||
if isinstance(clip.cond_stage_model, (SDXLClipModel, SDXLRefinerClipModel, SDXLClipG)):
|
if isinstance(clip.cond_stage_model, (SDXLClipModel, SDXLRefinerClipModel, SDXLClipG)):
|
||||||
embs_l = None
|
embs_l = None
|
||||||
@@ -269,10 +283,8 @@ def advanced_encode(clip, text, token_normalization, weight_interpretation, w_ma
|
|||||||
w_max=w_max)
|
w_max=w_max)
|
||||||
def advanced_encode_XL(clip, text1, text2, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True):
|
def advanced_encode_XL(clip, text1, text2, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True):
|
||||||
# Ensure texts are valid strings to prevent tokenization errors
|
# Ensure texts are valid strings to prevent tokenization errors
|
||||||
if text1 is None or (isinstance(text1, str) and not text1.strip()):
|
text1 = normalize_prompt_text(text1)
|
||||||
text1 = " "
|
text2 = normalize_prompt_text(text2)
|
||||||
if text2 is None or (isinstance(text2, str) and not text2.strip()):
|
|
||||||
text2 = " "
|
|
||||||
tokenized1 = clip.tokenize(text1, return_word_ids=True)
|
tokenized1 = clip.tokenize(text1, return_word_ids=True)
|
||||||
tokenized2 = clip.tokenize(text2, return_word_ids=True)
|
tokenized2 = clip.tokenize(text2, return_word_ids=True)
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user