Merge pull request #9 from dnl13/main

Fix for the tiles-script issue
This commit is contained in:
VALADI K JAGANATHAN
2023-11-03 18:33:43 +05:30
committed by GitHub
3 changed files with 43 additions and 27 deletions

View File

@@ -4110,8 +4110,7 @@ class TSC_Tiled_Upscaler:
@classmethod
def INPUT_TYPES(cls):
# Split the list based on the keyword "tile"
cnet_tile_filenames = [name for name in folder_paths.get_filename_list("controlnet") if "tile" in name]
#cnet_other_filenames = [name for name in folder_paths.get_filename_list("controlnet") if "tile" not in name]
cnet_filenames = [name for name in folder_paths.get_filename_list("controlnet")]
return {"required": {"upscale_by": ("FLOAT", {"default": 1.25, "min": 0.01, "max": 8.0, "step": 0.05}),
"tile_size": ("INT", {"default": 512, "min": 256, "max": MAX_RESOLUTION, "step": 64}),
@@ -4120,7 +4119,7 @@ class TSC_Tiled_Upscaler:
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
"denoise": ("FLOAT", {"default": .4, "min": 0.0, "max": 1.0, "step": 0.01}),
"use_controlnet": ("BOOLEAN", {"default": False}),
"tile_controlnet": (cnet_tile_filenames,),
"tile_controlnet": (cnet_filenames,),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
},
"optional": {"script": ("SCRIPT",)}}

View File

@@ -1,10 +1,10 @@
import torch
import numpy as np
import itertools
#from math import gcd
from math import gcd
from comfy import model_management
from comfy.sdxl_clip import SDXLClipModel
from comfy.sdxl_clip import SDXLClipModel, SDXLRefinerClipModel, SDXLClipG
def _grouper(n, iterable):
it = iter(iterable)
@@ -238,24 +238,17 @@ def prepareXL(embs_l, embs_g, pooled, clip_balance):
def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True):
tokenized = clip.tokenize(text, return_word_ids=True)
if isinstance(tokenized, dict):
if isinstance(clip.cond_stage_model, (SDXLClipModel, SDXLRefinerClipModel, SDXLClipG)):
embs_l = None
embs_g = None
pooled = None
if 'l' in tokenized:
if isinstance(clip.cond_stage_model, SDXLClipModel):
embs_l, _ = advanced_encode_from_tokens(tokenized['l'],
token_normalization,
weight_interpretation,
lambda x: encode_token_weights(clip, x, encode_token_weights_l),
w_max=w_max,
return_pooled=False)
else:
return advanced_encode_from_tokens(tokenized['l'],
token_normalization,
weight_interpretation,
lambda x: encode_token_weights(clip, x, encode_token_weights_l),
w_max=w_max)
if 'l' in tokenized and isinstance(clip.cond_stage_model, SDXLClipModel):
embs_l, _ = advanced_encode_from_tokens(tokenized['l'],
token_normalization,
weight_interpretation,
lambda x: encode_token_weights(clip, x, encode_token_weights_l),
w_max=w_max,
return_pooled=False)
if 'g' in tokenized:
embs_g, pooled = advanced_encode_from_tokens(tokenized['g'],
token_normalization,
@@ -266,11 +259,35 @@ def advanced_encode(clip, text, token_normalization, weight_interpretation, w_ma
apply_to_pooled=apply_to_pooled)
return prepareXL(embs_l, embs_g, pooled, clip_balance)
else:
return advanced_encode_from_tokens(tokenized,
token_normalization,
weight_interpretation,
lambda x: (clip.encode_from_tokens(x), None),
return advanced_encode_from_tokens(tokenized['l'],
token_normalization,
weight_interpretation,
lambda x: (clip.encode_from_tokens({'l': x}), None),
w_max=w_max)
def advanced_encode_XL(clip, text1, text2, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True):
    """Encode a pair of SDXL prompts with advanced token weighting.

    *text1* is tokenized and routed through the CLIP-L encoder branch and
    *text2* through the CLIP-G branch; the two embedding streams are then
    length-matched and merged by ``prepareXL``.

    Args:
        clip: CLIP wrapper exposing ``tokenize`` and the underlying encoders.
        text1: Prompt for the CLIP-L branch.
        text2: Prompt for the CLIP-G branch.
        token_normalization: Token-weight normalization mode (forwarded to
            ``advanced_encode_from_tokens``).
        weight_interpretation: Weighting algorithm selector (forwarded).
        w_max: Upper bound applied to token weights.
        clip_balance: Blend factor between the L and G streams in ``prepareXL``.
        apply_to_pooled: Whether weighting is also applied to the pooled output
            of the CLIP-G branch.

    Returns:
        The result of ``prepareXL`` (merged conditioning plus pooled output —
        exact type defined by ``prepareXL``, not visible here).
    """
    tokenized1 = clip.tokenize(text1, return_word_ids=True)
    tokenized2 = clip.tokenize(text2, return_word_ids=True)
    # CLIP-L branch: pooled output is discarded (return_pooled=False).
    embs_l, _ = advanced_encode_from_tokens(tokenized1['l'],
                                            token_normalization,
                                            weight_interpretation,
                                            lambda x: encode_token_weights(clip, x, encode_token_weights_l),
                                            w_max=w_max,
                                            return_pooled=False)
    # CLIP-G branch: supplies the pooled embedding used downstream.
    embs_g, pooled = advanced_encode_from_tokens(tokenized2['g'],
                                                 token_normalization,
                                                 weight_interpretation,
                                                 lambda x: encode_token_weights(clip, x, encode_token_weights_g),
                                                 w_max=w_max,
                                                 return_pooled=True,
                                                 apply_to_pooled=apply_to_pooled)
    # Bring both streams to a common sequence length before merging:
    # repeat_l == repeat_g == lcm(embs_l.shape[1], embs_g.shape[1]).
    gcd_num = gcd(embs_l.shape[1], embs_g.shape[1])
    repeat_l = int((embs_g.shape[1] / gcd_num) * embs_l.shape[1])
    repeat_g = int((embs_l.shape[1] / gcd_num) * embs_g.shape[1])
    # NOTE(review): Tensor.expand only broadcasts singleton dimensions; when
    # dim 1 is larger than 1 and differs from repeat_*, expand raises —
    # presumably ``repeat`` semantics were intended here. TODO: confirm
    # against the tensor shapes produced by advanced_encode_from_tokens.
    return prepareXL(embs_l.expand((-1,repeat_l,-1)), embs_g.expand((-1,repeat_g,-1)), pooled, clip_balance)
########################################################################################################################
from nodes import MAX_RESOLUTION

View File

@@ -4,7 +4,7 @@ import torch
from typing import List
import comfy.sample
from comfy import model_base, model_management
from comfy.samplers import KSampler, KSamplerX0Inpaint
from comfy.samplers import KSampler, KSamplerX0Inpaint, wrap_model
#from comfy.k_diffusion.external import CompVisDenoiser
import nodes
import inspect
@@ -308,10 +308,10 @@ def set_model_k(self: KSampler):
self.model_denoise = CFGNoisePredictor(self.model) # main change
if ((getattr(self.model, "parameterization", "") == "v") or
(getattr(self.model, "model_type", -1) == model_base.ModelType.V_PREDICTION)):
self.model_wrap = CompVisVDenoiser(self.model_denoise, quantize=True)
self.model_wrap = wrap_model(self.model_denoise, quantize=True)
self.model_wrap.parameterization = getattr(self.model, "parameterization", "v")
else:
self.model_wrap = CompVisDenoiser(self.model_denoise, quantize=True)
self.model_wrap = wrap_model(self.model_denoise, quantize=True)
self.model_wrap.parameterization = getattr(self.model, "parameterization", "eps")
self.model_k = KSamplerX0Inpaint(self.model_wrap)