refactor: Update chunk size to 4MB for improved HDD throughput and optimize file writing during downloads

This commit is contained in:
Will Miao
2025-08-18 17:21:24 +08:00
parent 5d7a1dcde5
commit 05df40977d

View File

@@ -33,8 +33,8 @@ class CivitaiClient:
        }
        self._session = None
        self._session_created_at = None
-        # Set default buffer size to 1MB for higher throughput
-        self.chunk_size = 1024 * 1024
+        # Adjust chunk size based on storage type - consider making this configurable
+        self.chunk_size = 4 * 1024 * 1024  # 4MB chunks for better HDD throughput

    @property
    async def session(self) -> aiohttp.ClientSession:
@@ -153,10 +153,12 @@ class CivitaiClient:
        last_progress_report_time = datetime.now()

        # Stream download to file with progress updates using larger buffer
+        loop = asyncio.get_running_loop()
        with open(save_path, 'wb') as f:
            async for chunk in response.content.iter_chunked(self.chunk_size):
                if chunk:
-                    f.write(chunk)
+                    # Run blocking file write in executor
+                    await loop.run_in_executor(None, f.write, chunk)
                    current_size += len(chunk)

                    # Limit progress update frequency to reduce overhead