Mirror of https://github.com/jags111/efficiency-nodes-comfyui.git, last synced 2026-05-07 01:06:42 -03:00.
Compare commits
5 Commits
copilot/fi
...
copilot/ad
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
946fca3e18 | ||
|
|
3b2a5a36ad | ||
|
|
73d807aa40 | ||
|
|
b80d754d1f | ||
|
|
a075f2b7f9 |
7
.gitignore
vendored
7
.gitignore
vendored
@@ -1,16 +1,15 @@
|
||||
# Python
|
||||
# Python cache
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
*.so
|
||||
.Python
|
||||
|
||||
# Virtual environments
|
||||
venv/
|
||||
ENV/
|
||||
env/
|
||||
ENV/
|
||||
|
||||
# IDEs
|
||||
# IDE
|
||||
.vscode/
|
||||
.idea/
|
||||
*.swp
|
||||
|
||||
11
README.md
11
README.md
@@ -131,6 +131,17 @@ Please check out our WIKI for any use cases and new developments including workf
|
||||
<img src="https://github.com/LucianoCirino/efficiency-nodes-media/blob/main/images/nodes/Image%20Overlay%20-%20Node%20Example.png" width="1080">
|
||||
</p>
|
||||
|
||||
</details>
|
||||
<!-------------------------------------------------------------------------------------------------------------------------------------------------------->
|
||||
<details>
|
||||
<summary><b>Live Preview (Eff.)</b></summary>
|
||||
<ul>
|
||||
<li>Node that displays a live preview of images while allowing them to pass through to other nodes in the workflow.</li>
|
||||
<li>Can be connected to multiple samplers sequentially to show progressive updates as the workflow advances through different stages (base sampling, refining, hi-res fix, detailing, etc.).</li>
|
||||
<li>Useful for placing preview displays outside of subgraphs or in custom locations within complex workflows.</li>
|
||||
<li>Simply connect the IMAGE output from any sampler or image processing node to this node's input, and connect its output to continue the workflow.</li>
|
||||
</ul>
|
||||
|
||||
</details>
|
||||
<!-------------------------------------------------------------------------------------------------------------------------------------------------------->
|
||||
<details>
|
||||
|
||||
@@ -53,7 +53,6 @@ from .py import city96_latent_upscaler
|
||||
from .py import ttl_nn_latent_upscaler
|
||||
from .py import bnk_tiled_samplers
|
||||
from .py import bnk_adv_encode
|
||||
from .py.bnk_adv_encode import normalize_prompt_text
|
||||
sys.path.remove(my_dir)
|
||||
|
||||
from comfy import samplers
|
||||
@@ -72,9 +71,6 @@ SCHEDULERS = samplers.KSampler.SCHEDULERS + ["AYS SD1", "AYS SDXL", "AYS SVD", "
|
||||
def encode_prompts(positive_prompt, negative_prompt, token_normalization, weight_interpretation, clip, clip_skip,
|
||||
refiner_clip, refiner_clip_skip, ascore, is_sdxl, empty_latent_width, empty_latent_height,
|
||||
return_type="both"):
|
||||
# Ensure prompts are valid strings to prevent tokenization errors
|
||||
positive_prompt = normalize_prompt_text(positive_prompt)
|
||||
negative_prompt = normalize_prompt_text(negative_prompt)
|
||||
|
||||
positive_encoded = negative_encoded = refiner_positive_encoded = refiner_negative_encoded = None
|
||||
|
||||
@@ -4017,6 +4013,34 @@ class TSC_ImageOverlay:
|
||||
# Return the edited base image
|
||||
return (base_image,)
|
||||
|
||||
########################################################################################################################
|
||||
# TSC Live Preview Node
|
||||
class TSC_LivePreview:
    """Pass-through live preview node.

    Shows a preview of the incoming images in the UI while forwarding the
    same image batch unchanged to downstream nodes, so it can be dropped
    anywhere in a workflow without breaking the chain.
    """

    # ComfyUI node contract: one IMAGE output, UI-producing node.
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "preview_image"
    OUTPUT_NODE = True
    CATEGORY = "Efficiency Nodes/Image"

    @classmethod
    def INPUT_TYPES(cls):
        # "hidden" inputs are injected by ComfyUI so the preview can embed
        # workflow metadata into the saved preview files.
        return {
            "required": {"images": ("IMAGE",)},
            "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
        }

    def preview_image(self, images, prompt=None, extra_pnginfo=None):
        """Render a UI preview via ComfyUI's PreviewImage and pass images through.

        Returns the dict form ComfyUI expects from OUTPUT_NODE nodes:
        "ui" carries the preview payload, "result" carries the unchanged
        IMAGE batch for downstream connections.
        """
        ui_payload = PreviewImage().save_images(
            images, prompt=prompt, extra_pnginfo=extra_pnginfo
        )["ui"]
        return {"ui": ui_payload, "result": (images,)}
|
||||
|
||||
########################################################################################################################
|
||||
# Noise Sources & Seed Variations
|
||||
# https://github.com/shiimizu/ComfyUI_smZNodes
|
||||
@@ -4292,6 +4316,7 @@ NODE_CLASS_MAPPINGS = {
|
||||
"Manual XY Entry Info": TSC_XYplot_Manual_XY_Entry_Info,
|
||||
"Join XY Inputs of Same Type": TSC_XYplot_JoinInputs,
|
||||
"Image Overlay": TSC_ImageOverlay,
|
||||
"Live Preview (Eff.)": TSC_LivePreview,
|
||||
"Noise Control Script": TSC_Noise_Control_Script,
|
||||
"HighRes-Fix Script": TSC_HighRes_Fix,
|
||||
"Tiled Upscaler Script": TSC_Tiled_Upscaler,
|
||||
|
||||
@@ -6,34 +6,6 @@ from math import gcd
|
||||
from comfy import model_management
|
||||
from comfy.sdxl_clip import SDXLClipModel, SDXLRefinerClipModel, SDXLClipG
|
||||
|
||||
def normalize_prompt_text(text):
    """Coerce a prompt value into a tokenizer-safe string.

    Guards against inputs that would crash CLIP tokenization in ComfyUI
    workflows (empty prompts, None, non-string widget values).

    Parameters:
        text: Prompt value of any type; strings, None, and str()-convertible
            objects are all accepted.

    Returns:
        str: " " (a single space) when the input is None, empty, or contains
        only whitespace; otherwise the input itself (converted with str()
        first if it was not already a string).
    """
    if text is None:
        return " "
    if not isinstance(text, str):
        # Non-string widget/primitive values are stringified rather than rejected.
        text = str(text)
    # A lone space is returned for blank prompts so the tokenizer always
    # receives at least one token's worth of input.
    return text if text.strip() else " "
|
||||
|
||||
def _grouper(n, iterable):
|
||||
it = iter(iterable)
|
||||
while True:
|
||||
@@ -265,8 +237,6 @@ def prepareXL(embs_l, embs_g, pooled, clip_balance):
|
||||
return embs_g, pooled
|
||||
|
||||
def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True):
|
||||
# Ensure text is a valid string to prevent tokenization errors
|
||||
text = normalize_prompt_text(text)
|
||||
tokenized = clip.tokenize(text, return_word_ids=True)
|
||||
if isinstance(clip.cond_stage_model, (SDXLClipModel, SDXLRefinerClipModel, SDXLClipG)):
|
||||
embs_l = None
|
||||
@@ -295,9 +265,6 @@ def advanced_encode(clip, text, token_normalization, weight_interpretation, w_ma
|
||||
lambda x: (clip.encode_from_tokens({'l': x}), None),
|
||||
w_max=w_max)
|
||||
def advanced_encode_XL(clip, text1, text2, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True):
|
||||
# Ensure texts are valid strings to prevent tokenization errors
|
||||
text1 = normalize_prompt_text(text1)
|
||||
text2 = normalize_prompt_text(text2)
|
||||
tokenized1 = clip.tokenize(text1, return_word_ids=True)
|
||||
tokenized2 = clip.tokenize(text2, return_word_ids=True)
|
||||
|
||||
|
||||
Reference in New Issue
Block a user