mirror of https://github.com/jags111/efficiency-nodes-comfyui.git
Update bnk_adv_encode.py

commit cc54d1d5df
parent e3674193a6
committed by GitHub
@@ -238,17 +238,24 @@ def prepareXL(embs_l, embs_g, pooled, clip_balance):
 def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True):
     tokenized = clip.tokenize(text, return_word_ids=True)
     if isinstance(clip.cond_stage_model, (SDXLClipModel, SDXLRefinerClipModel, SDXLClipG)):
         if isinstance(tokenized, dict):
             embs_l = None
             embs_g = None
             pooled = None
-            if 'l' in tokenized and isinstance(clip.cond_stage_model, SDXLClipModel):
-                embs_l, _ = advanced_encode_from_tokens(tokenized['l'],
-                                                        token_normalization,
-                                                        weight_interpretation,
-                                                        lambda x: encode_token_weights(clip, x, encode_token_weights_l),
-                                                        w_max=w_max,
-                                                        return_pooled=False)
+            if 'l' in tokenized:
+                if isinstance(clip.cond_stage_model, SDXLClipModel):
+                    embs_l, _ = advanced_encode_from_tokens(tokenized['l'],
+                                                            token_normalization,
+                                                            weight_interpretation,
+                                                            lambda x: encode_token_weights(clip, x, encode_token_weights_l),
+                                                            w_max=w_max,
+                                                            return_pooled=False)
+                else:
+                    return advanced_encode_from_tokens(tokenized['l'],
+                                                       token_normalization,
+                                                       weight_interpretation,
+                                                       lambda x: encode_token_weights(clip, x, encode_token_weights_l),
+                                                       w_max=w_max)
             if 'g' in tokenized:
                 embs_g, pooled = advanced_encode_from_tokens(tokenized['g'],
                                                              token_normalization,
@@ -259,11 +266,12 @@ def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True):
                                                              apply_to_pooled=apply_to_pooled)
             return prepareXL(embs_l, embs_g, pooled, clip_balance)
         else:
-            return advanced_encode_from_tokens(tokenized['l'],
-                                               token_normalization,
-                                               weight_interpretation,
-                                               lambda x: (clip.encode_from_tokens({'l': x}), None),
+            return advanced_encode_from_tokens(tokenized,
+                                               token_normalization,
+                                               weight_interpretation,
+                                               lambda x: (clip.encode_from_tokens(x), None),
                                                w_max=w_max)
 
 def advanced_encode_XL(clip, text1, text2, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True):
     tokenized1 = clip.tokenize(text1, return_word_ids=True)
     tokenized2 = clip.tokenize(text2, return_word_ids=True)
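Taken together, the two hunks make advanced_encode more defensive about the shape of the tokenizer output: a dict containing 'l' tokens whose model is not an SDXLClipModel now falls back to encoding the 'l' tokens directly, and a non-dict tokenization is passed through to clip.encode_from_tokens unchanged instead of being indexed with ['l']. For orientation only, here is a minimal call-site sketch (not part of the commit); it assumes a running ComfyUI session where clip is an already-loaded CLIP object, that advanced_encode returns an (embeddings, pooled) pair as the "embs_l, _ = ..." unpacking above suggests, and the two mode strings are illustrative placeholders rather than a definitive list:

    # Hypothetical call site; `clip` comes from a ComfyUI checkpoint/CLIP
    # loader and is not constructed here.
    cond, pooled = advanced_encode(
        clip,
        "a photo of a cat, (sharp focus:1.2)",  # ComfyUI-style (text:weight) prompt
        token_normalization="none",             # assumed normalization mode string
        weight_interpretation="comfy",          # assumed weight mode string
        w_max=1.0,
        clip_balance=0.5,                       # SDXL only: CLIP-L vs CLIP-G balance
        apply_to_pooled=True,
    )

On SDXL models the result flows through prepareXL, which combines the CLIP-L and CLIP-G streams (the clip_balance parameter suggests a weighted blend); on single-encoder models the tuple comes straight from advanced_encode_from_tokens.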