refactor: replace IO type imports with string literals

Remove direct imports of IO type constants from comfy.comfy_types and replace them with string literals "STRING" in input type definitions and return types. This improves code portability and reduces dependency on external type definitions.

Changes made across multiple files:
- Remove `from comfy.comfy_types import IO` imports
- Replace `IO.STRING` with "STRING" in INPUT_TYPES and RETURN_TYPES
- Remove unused `from server import PromptServer` import
- Move CLIPTextEncode import to function scope in prompt.py for better dependency management

This refactor maintains the same functionality while making the code more self-contained and reducing external dependencies.
This commit is contained in:
Will Miao
2025-10-14 09:12:55 +08:00
parent 1e4d1b8f15
commit c48095d9c6
6 changed files with 12 additions and 17 deletions

View File

@@ -1,7 +1,6 @@
import logging import logging
import re import re
from nodes import LoraLoader from nodes import LoraLoader
from comfy.comfy_types import IO # type: ignore
from ..utils.utils import get_lora_info from ..utils.utils import get_lora_info
from .utils import FlexibleOptionalInputType, any_type, extract_lora_name, get_loras_list, nunchaku_load_lora from .utils import FlexibleOptionalInputType, any_type, extract_lora_name, get_loras_list, nunchaku_load_lora
@@ -17,7 +16,7 @@ class LoraManagerLoader:
"required": { "required": {
"model": ("MODEL",), "model": ("MODEL",),
# "clip": ("CLIP",), # "clip": ("CLIP",),
"text": (IO.STRING, { "text": ("STRING", {
"multiline": True, "multiline": True,
"pysssss.autocomplete": False, "pysssss.autocomplete": False,
"dynamicPrompts": True, "dynamicPrompts": True,
@@ -28,7 +27,7 @@ class LoraManagerLoader:
"optional": FlexibleOptionalInputType(any_type), "optional": FlexibleOptionalInputType(any_type),
} }
RETURN_TYPES = ("MODEL", "CLIP", IO.STRING, IO.STRING) RETURN_TYPES = ("MODEL", "CLIP", "STRING", "STRING")
RETURN_NAMES = ("MODEL", "CLIP", "trigger_words", "loaded_loras") RETURN_NAMES = ("MODEL", "CLIP", "trigger_words", "loaded_loras")
FUNCTION = "load_loras" FUNCTION = "load_loras"
@@ -141,7 +140,7 @@ class LoraManagerTextLoader:
return { return {
"required": { "required": {
"model": ("MODEL",), "model": ("MODEL",),
"lora_syntax": (IO.STRING, { "lora_syntax": ("STRING", {
"defaultInput": True, "defaultInput": True,
"forceInput": True, "forceInput": True,
"tooltip": "Format: <lora:lora_name:strength> separated by spaces or punctuation" "tooltip": "Format: <lora:lora_name:strength> separated by spaces or punctuation"
@@ -153,7 +152,7 @@ class LoraManagerTextLoader:
} }
} }
RETURN_TYPES = ("MODEL", "CLIP", IO.STRING, IO.STRING) RETURN_TYPES = ("MODEL", "CLIP", "STRING", "STRING")
RETURN_NAMES = ("MODEL", "CLIP", "trigger_words", "loaded_loras") RETURN_NAMES = ("MODEL", "CLIP", "trigger_words", "loaded_loras")
FUNCTION = "load_loras_from_text" FUNCTION = "load_loras_from_text"

View File

@@ -1,4 +1,3 @@
from comfy.comfy_types import IO # type: ignore
import os import os
from ..utils.utils import get_lora_info from ..utils.utils import get_lora_info
from .utils import FlexibleOptionalInputType, any_type, extract_lora_name, get_loras_list from .utils import FlexibleOptionalInputType, any_type, extract_lora_name, get_loras_list
@@ -15,7 +14,7 @@ class LoraStacker:
def INPUT_TYPES(cls): def INPUT_TYPES(cls):
return { return {
"required": { "required": {
"text": (IO.STRING, { "text": ("STRING", {
"multiline": True, "multiline": True,
"pysssss.autocomplete": False, "pysssss.autocomplete": False,
"dynamicPrompts": True, "dynamicPrompts": True,
@@ -26,7 +25,7 @@ class LoraStacker:
"optional": FlexibleOptionalInputType(any_type), "optional": FlexibleOptionalInputType(any_type),
} }
RETURN_TYPES = ("LORA_STACK", IO.STRING, IO.STRING) RETURN_TYPES = ("LORA_STACK", "STRING", "STRING")
RETURN_NAMES = ("LORA_STACK", "trigger_words", "active_loras") RETURN_NAMES = ("LORA_STACK", "trigger_words", "active_loras")
FUNCTION = "stack_loras" FUNCTION = "stack_loras"

View File

@@ -1,5 +1,4 @@
from typing import Any, Optional from typing import Any, Optional
from nodes import CLIPTextEncode # type: ignore
class PromptLoraManager: class PromptLoraManager:
"""Encodes text (and optional trigger words) into CLIP conditioning.""" """Encodes text (and optional trigger words) into CLIP conditioning."""
@@ -55,5 +54,6 @@ class PromptLoraManager:
if trigger_words: if trigger_words:
prompt = ", ".join([trigger_words, text]) prompt = ", ".join([trigger_words, text])
from nodes import CLIPTextEncode # type: ignore
conditioning = CLIPTextEncode().encode(clip, prompt)[0] conditioning = CLIPTextEncode().encode(clip, prompt)[0]
return (conditioning, prompt,) return (conditioning, prompt,)

View File

@@ -1,6 +1,5 @@
import json import json
import re import re
from server import PromptServer # type: ignore
from .utils import FlexibleOptionalInputType, any_type from .utils import FlexibleOptionalInputType, any_type
import logging import logging

View File

@@ -1,4 +1,3 @@
from comfy.comfy_types import IO # type: ignore
import folder_paths # type: ignore import folder_paths # type: ignore
from ..utils.utils import get_lora_info from ..utils.utils import get_lora_info
from .utils import FlexibleOptionalInputType, any_type, get_loras_list from .utils import FlexibleOptionalInputType, any_type, get_loras_list
@@ -16,7 +15,7 @@ class WanVideoLoraSelect:
"required": { "required": {
"low_mem_load": ("BOOLEAN", {"default": False, "tooltip": "Load LORA models with less VRAM usage, slower loading. This affects ALL LoRAs, not just the current ones. No effect if merge_loras is False"}), "low_mem_load": ("BOOLEAN", {"default": False, "tooltip": "Load LORA models with less VRAM usage, slower loading. This affects ALL LoRAs, not just the current ones. No effect if merge_loras is False"}),
"merge_loras": ("BOOLEAN", {"default": True, "tooltip": "Merge LoRAs into the model, otherwise they are loaded on the fly. Always disabled for GGUF and scaled fp8 models. This affects ALL LoRAs, not just the current one"}), "merge_loras": ("BOOLEAN", {"default": True, "tooltip": "Merge LoRAs into the model, otherwise they are loaded on the fly. Always disabled for GGUF and scaled fp8 models. This affects ALL LoRAs, not just the current one"}),
"text": (IO.STRING, { "text": ("STRING", {
"multiline": True, "multiline": True,
"pysssss.autocomplete": False, "pysssss.autocomplete": False,
"dynamicPrompts": True, "dynamicPrompts": True,
@@ -27,7 +26,7 @@ class WanVideoLoraSelect:
"optional": FlexibleOptionalInputType(any_type), "optional": FlexibleOptionalInputType(any_type),
} }
RETURN_TYPES = ("WANVIDLORA", IO.STRING, IO.STRING) RETURN_TYPES = ("WANVIDLORA", "STRING", "STRING")
RETURN_NAMES = ("lora", "trigger_words", "active_loras") RETURN_NAMES = ("lora", "trigger_words", "active_loras")
FUNCTION = "process_loras" FUNCTION = "process_loras"

View File

@@ -1,5 +1,4 @@
from comfy.comfy_types import IO import folder_paths # type: ignore
import folder_paths
from ..utils.utils import get_lora_info from ..utils.utils import get_lora_info
from .utils import any_type from .utils import any_type
import logging import logging
@@ -20,7 +19,7 @@ class WanVideoLoraSelectFromText:
"required": { "required": {
"low_mem_load": ("BOOLEAN", {"default": False, "tooltip": "Load LORA models with less VRAM usage, slower loading. This affects ALL LoRAs, not just the current ones. No effect if merge_loras is False"}), "low_mem_load": ("BOOLEAN", {"default": False, "tooltip": "Load LORA models with less VRAM usage, slower loading. This affects ALL LoRAs, not just the current ones. No effect if merge_loras is False"}),
"merge_lora": ("BOOLEAN", {"default": True, "tooltip": "Merge LoRAs into the model, otherwise they are loaded on the fly. Always disabled for GGUF and scaled fp8 models. This affects ALL LoRAs, not just the current one"}), "merge_lora": ("BOOLEAN", {"default": True, "tooltip": "Merge LoRAs into the model, otherwise they are loaded on the fly. Always disabled for GGUF and scaled fp8 models. This affects ALL LoRAs, not just the current one"}),
"lora_syntax": (IO.STRING, { "lora_syntax": ("STRING", {
"multiline": True, "multiline": True,
"defaultInput": True, "defaultInput": True,
"forceInput": True, "forceInput": True,
@@ -34,7 +33,7 @@ class WanVideoLoraSelectFromText:
} }
} }
RETURN_TYPES = ("WANVIDLORA", IO.STRING, IO.STRING) RETURN_TYPES = ("WANVIDLORA", "STRING", "STRING")
RETURN_NAMES = ("lora", "trigger_words", "active_loras") RETURN_NAMES = ("lora", "trigger_words", "active_loras")
FUNCTION = "process_loras_from_syntax" FUNCTION = "process_loras_from_syntax"