mirror of
https://github.com/jags111/efficiency-nodes-comfyui.git
synced 2026-05-06 16:56:44 -03:00
优化encode_token_weights函数以保持CLIP选项与ComfyUI核心编码路径一致 (#377)
seems like an additional update
This commit is contained in:
@@ -221,12 +221,16 @@ def encode_token_weights_l(model, token_weight_pairs):
|
||||
l_out, _ = model.clip_l.encode_token_weights(token_weight_pairs)
|
||||
return l_out, None
|
||||
|
||||
def encode_token_weights(model, token_weight_pairs, encode_func):
    """Run *encode_func* over weighted token pairs using the model's CLIP stack.

    Keeps the CLIP options aligned with ComfyUI's core encode path:
    options are reset before applying the requested layer so settings
    left over from a previous encode cannot leak into this call, and the
    execution device is pinned to the patcher's load device so token
    tensors are created on the same device as the active text-encoder
    pass.

    Args:
        model: CLIP wrapper exposing ``cond_stage_model``, ``patcher``
            and an optional ``layer_idx``.
        token_weight_pairs: tokenized prompt with per-token weights,
            passed through unchanged to ``encode_func``.
        encode_func: callable ``(cond_stage_model, token_weight_pairs)``
            that performs the actual encoding.

    Returns:
        Whatever ``encode_func`` returns (typically embeddings and a
        pooled output).
    """
    # Drop any clip options left over from a previous encode call.
    model.cond_stage_model.reset_clip_options()
    if model.layer_idx is not None:
        model.cond_stage_model.set_clip_options({"layer": model.layer_idx})

    model_management.load_model_gpu(model.patcher)
    # Pin tensor creation to the device the text encoder was loaded on,
    # matching ComfyUI's core encode path.
    model.cond_stage_model.set_clip_options({"execution_device": model.patcher.load_device})
    return encode_func(model.cond_stage_model, token_weight_pairs)
|
||||
def encode_token_weights(model, token_weight_pairs, encode_func):
    """Encode weighted token pairs via *encode_func* on the model's CLIP stack.

    The clip options are reset and re-applied on every call so that this
    helper stays in lockstep with ComfyUI's core encode path — in
    particular, token tensors end up on the same device the active text
    encoder runs on.
    """
    clip_model = model.cond_stage_model

    # Start from a clean slate, then re-apply the requested layer (if any).
    clip_model.reset_clip_options()
    if model.layer_idx is not None:
        clip_model.set_clip_options({"layer": model.layer_idx})

    model_management.load_model_gpu(model.patcher)
    # Align tensor creation with the device the encoder was loaded onto.
    clip_model.set_clip_options({"execution_device": model.patcher.load_device})
    return encode_func(clip_model, token_weight_pairs)
|
||||
|
||||
def prepareXL(embs_l, embs_g, pooled, clip_balance):
|
||||
l_w = 1 - max(0, clip_balance - .5) * 2
|
||||
|
||||
Reference in New Issue
Block a user