优化encode_token_weights函数以保持CLIP选项与ComfyUI核心编码路径一致 (#377)

Appears to be an additional update.
This commit is contained in:
izum
2026-04-01 07:42:18 +08:00
committed by GitHub
parent cc91287d72
commit 65b2c0361f

View File

def encode_token_weights_l(model, token_weight_pairs):
    """Encode weighted token pairs with the CLIP-L text encoder only.

    Returns a ``(cond, pooled)`` pair where ``pooled`` is always ``None``,
    matching the convention of the sibling encode helpers in this module.
    """
    # Keep only the conditioning tensor; clip_l's pooled output is
    # deliberately discarded for the L-only path.
    embeddings, _unused_pooled = model.clip_l.encode_token_weights(token_weight_pairs)
    return embeddings, None
def encode_token_weights(model, token_weight_pairs, encode_func):
    """Run ``encode_func`` against the text encoder with CLIP options
    prepared the same way as ComfyUI's core encode path.

    Clears any stale CLIP options, applies the requested layer override
    (if any), loads the patched model onto its compute device, and pins
    the execution device so token tensors are created on the same device
    as the active text-encoder pass.
    """
    clip_model = model.cond_stage_model

    # Start from a clean slate so options from a previous encode call
    # cannot leak into this one.
    clip_model.reset_clip_options()
    if model.layer_idx is not None:
        clip_model.set_clip_options({"layer": model.layer_idx})

    model_management.load_model_gpu(model.patcher)
    # Pin token-tensor creation to the device the patcher loaded onto,
    # mirroring ComfyUI's core encode path.
    clip_model.set_clip_options({"execution_device": model.patcher.load_device})
    return encode_func(clip_model, token_weight_pairs)
def prepareXL(embs_l, embs_g, pooled, clip_balance): def prepareXL(embs_l, embs_g, pooled, clip_balance):
l_w = 1 - max(0, clip_balance - .5) * 2 l_w = 1 - max(0, clip_balance - .5) * 2