From e252e44403f4d23bcacc086011daef157fa2fc75 Mon Sep 17 00:00:00 2001
From: Will Miao
Date: Mon, 2 Feb 2026 10:47:17 +0800
Subject: [PATCH] refactor(logging): replace print statements with logger for consistency

---
 py/nodes/save_image.py       |  9 ++++++---
 py/services/lora_scanner.py  | 16 ++++++++--------
 py/services/model_scanner.py |  2 --
 py/utils/lora_metadata.py    |  2 +-
 4 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/py/nodes/save_image.py b/py/nodes/save_image.py
index e11f031a..de9d5649 100644
--- a/py/nodes/save_image.py
+++ b/py/nodes/save_image.py
@@ -8,6 +8,9 @@ from ..metadata_collector.metadata_processor import MetadataProcessor
 from ..metadata_collector import get_metadata
 from PIL import Image, PngImagePlugin
 import piexif
+import logging
+
+logger = logging.getLogger(__name__)
 
 class SaveImageLM:
     NAME = "Save Image (LoraManager)"
@@ -385,7 +388,7 @@
                     exif_bytes = piexif.dump(exif_dict)
                     save_kwargs["exif"] = exif_bytes
                 except Exception as e:
-                    print(f"Error adding EXIF data: {e}")
+                    logger.error(f"Error adding EXIF data: {e}")
                 img.save(file_path, format="JPEG", **save_kwargs)
             elif file_format == "webp":
                 try:
@@ -403,7 +406,7 @@
                     exif_bytes = piexif.dump(exif_dict)
                     save_kwargs["exif"] = exif_bytes
                 except Exception as e:
-                    print(f"Error adding EXIF data: {e}")
+                    logger.error(f"Error adding EXIF data: {e}")
                 img.save(file_path, format="WEBP", **save_kwargs)
 
 
@@ -414,7 +417,7 @@
                 })
 
         except Exception as e:
-            print(f"Error saving image: {e}")
+            logger.error(f"Error saving image: {e}")
 
         return results
 
diff --git a/py/services/lora_scanner.py b/py/services/lora_scanner.py
index 6feff477..ee381845 100644
--- a/py/services/lora_scanner.py
+++ b/py/services/lora_scanner.py
@@ -30,36 +30,36 @@ class LoraScanner(ModelScanner):
 
     async def diagnose_hash_index(self):
         """Diagnostic method to verify hash index functionality"""
-        print("\n\n*** DIAGNOSING LORA HASH INDEX ***\n\n", file=sys.stderr)
+        logger.debug("\n\n*** DIAGNOSING LORA HASH INDEX ***\n\n")
 
         # First check if the hash index has any entries
         if hasattr(self, '_hash_index'):
             index_entries = len(self._hash_index._hash_to_path)
-            print(f"Hash index has {index_entries} entries", file=sys.stderr)
+            logger.debug(f"Hash index has {index_entries} entries")
 
             # Print a few example entries if available
             if index_entries > 0:
-                print("\nSample hash index entries:", file=sys.stderr)
+                logger.debug("\nSample hash index entries:")
                 count = 0
                 for hash_val, path in self._hash_index._hash_to_path.items():
                     if count < 5:  # Just show the first 5
-                        print(f"Hash: {hash_val[:8]}... -> Path: {path}", file=sys.stderr)
+                        logger.debug(f"Hash: {hash_val[:8]}... -> Path: {path}")
                         count += 1
                     else:
                         break
         else:
-            print("Hash index not initialized", file=sys.stderr)
+            logger.debug("Hash index not initialized")
 
         # Try looking up by a known hash for testing
         if not hasattr(self, '_hash_index') or not self._hash_index._hash_to_path:
-            print("No hash entries to test lookup with", file=sys.stderr)
+            logger.debug("No hash entries to test lookup with")
             return
 
         test_hash = next(iter(self._hash_index._hash_to_path.keys()))
         test_path = self._hash_index.get_path(test_hash)
-        print(f"\nTest lookup by hash: {test_hash[:8]}... -> {test_path}", file=sys.stderr)
+        logger.debug(f"\nTest lookup by hash: {test_hash[:8]}... -> {test_path}")
 
         # Also test reverse lookup
         test_hash_result = self._hash_index.get_hash(test_path)
-        print(f"Test reverse lookup: {test_path} -> {test_hash_result[:8]}...\n\n", file=sys.stderr)
+        logger.debug(f"Test reverse lookup: {test_path} -> {test_hash_result[:8]}...\n\n")
 
diff --git a/py/services/model_scanner.py b/py/services/model_scanner.py
index 1ac38853..3a710c86 100644
--- a/py/services/model_scanner.py
+++ b/py/services/model_scanner.py
@@ -686,7 +686,6 @@ class ModelScanner:
 
     async def _initialize_cache(self) -> None:
         """Initialize or refresh the cache"""
-        print("init start", flush=True)
         self._is_initializing = True  # Set flag
         try:
             start_time = time.time()
@@ -700,7 +699,6 @@ class ModelScanner:
             scan_result = await self._gather_model_data()
             await self._apply_scan_result(scan_result)
             await self._save_persistent_cache(scan_result)
-            print("init end", flush=True)
 
             logger.info(
                 f"{self.model_type.capitalize()} Scanner: Cache initialization completed in {time.time() - start_time:.2f} seconds, "
diff --git a/py/utils/lora_metadata.py b/py/utils/lora_metadata.py
index 7c8e7bc4..87d72516 100644
--- a/py/utils/lora_metadata.py
+++ b/py/utils/lora_metadata.py
@@ -17,7 +17,7 @@ async def extract_lora_metadata(file_path: str) -> Dict:
         base_model = determine_base_model(metadata.get("ss_base_model_version"))
         return {"base_model": base_model}
     except Exception as e:
-        print(f"Error reading metadata from {file_path}: {str(e)}")
+        logger.error(f"Error reading metadata from {file_path}: {str(e)}")
         return {"base_model": "Unknown"}
 
 async def extract_checkpoint_metadata(file_path: str) -> dict: