mirror of
https://github.com/willmiao/ComfyUI-Lora-Manager.git
synced 2026-03-22 13:42:12 -03:00
- Add RateLimitError import and _make_request wrapper method to handle rate limiting - Update API methods to use _make_request wrapper instead of direct downloader calls - Add explicit RateLimitError handling in API methods to properly propagate rate limit errors - Add _extract_retry_after method to parse Retry-After headers - Improve error handling by surfacing rate limit information to callers These changes ensure that rate limiting from the Civitai API is properly detected and handled, allowing callers to implement appropriate backoff strategies when rate limits are encountered.
83 lines
2.4 KiB
Python
83 lines
2.4 KiB
Python
from unittest.mock import AsyncMock
|
|
|
|
import pytest
|
|
|
|
from py.services import model_metadata_provider as provider_module
|
|
from py.services.errors import RateLimitError
|
|
from py.services.model_metadata_provider import FallbackMetadataProvider
|
|
|
|
|
|
class RateLimitThenSuccessProvider:
    """Test double: rate-limits the first lookup, then succeeds.

    The number of ``get_model_by_hash`` invocations is tracked in ``calls``.
    """

    def __init__(self) -> None:
        self.calls = 0

    async def get_model_by_hash(self, model_hash: str):
        """Raise ``RateLimitError`` (retry_after=1.0) on the first call; afterwards
        return a canned ``(metadata, error)`` success tuple."""
        self.calls += 1
        if self.calls > 1:
            return {"id": "ok"}, None
        raise RateLimitError("limited", retry_after=1.0)
|
|
|
|
|
|
class AlwaysRateLimitedProvider:
    """Test double whose lookups never succeed: every call raises ``RateLimitError``."""

    def __init__(self) -> None:
        self.calls = 0

    async def get_model_by_hash(self, model_hash: str):
        """Record the attempt, then unconditionally raise ``RateLimitError``."""
        self.calls = self.calls + 1
        raise RateLimitError("limited")
|
|
|
|
|
|
class TrackingProvider:
    """Test double that always succeeds, counting how often it is consulted."""

    def __init__(self) -> None:
        self.calls = 0

    async def get_model_by_hash(self, model_hash: str):
        """Return a canned secondary-provider ``(metadata, error)`` tuple,
        bumping the call counter."""
        self.calls = self.calls + 1
        return {"id": "secondary"}, None
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_fallback_retries_same_provider_on_rate_limit(monkeypatch):
    """A single rate-limit from the primary provider triggers a retry of the
    same provider (after awaiting ``asyncio.sleep(retry_after)``) rather than
    an immediate fallback to the secondary provider."""
    fake_sleep = AsyncMock()
    monkeypatch.setattr(provider_module.asyncio, "sleep", fake_sleep)
    # Zero out jitter so the observed sleep duration is exactly retry_after.
    monkeypatch.setattr(provider_module.random, "uniform", lambda *_: 0.0)

    rate_limited_once = RateLimitThenSuccessProvider()
    tracker = TrackingProvider()

    fallback = FallbackMetadataProvider(
        [("primary", rate_limited_once), ("secondary", tracker)],
    )

    payload, err = await fallback.get_model_by_hash("abc")

    assert payload == {"id": "ok"}
    assert err is None
    # Primary was retried after the rate limit; secondary was never consulted.
    assert rate_limited_once.calls == 2
    assert tracker.calls == 0
    fake_sleep.assert_awaited_once()
    (delay,) = fake_sleep.await_args_list[0].args
    assert delay == pytest.approx(1.0, rel=0.0, abs=1e-6)
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_fallback_respects_retry_limit(monkeypatch):
    """When rate-limiting persists past ``rate_limit_retry_limit`` attempts,
    the ``RateLimitError`` propagates (annotated with the provider name)
    instead of silently falling back to the next provider."""
    fake_sleep = AsyncMock()
    monkeypatch.setattr(provider_module.asyncio, "sleep", fake_sleep)
    monkeypatch.setattr(provider_module.random, "uniform", lambda *_: 0.0)

    always_limited = AlwaysRateLimitedProvider()
    tracker = TrackingProvider()

    fallback = FallbackMetadataProvider(
        [("primary", always_limited), ("secondary", tracker)],
        rate_limit_retry_limit=2,
    )

    with pytest.raises(RateLimitError) as exc_info:
        await fallback.get_model_by_hash("abc")

    assert exc_info.value.provider == "primary"
    # Exactly two attempts (the limit) with one sleep in between;
    # the secondary provider is never touched.
    assert always_limited.calls == 2
    assert tracker.calls == 0
    fake_sleep.assert_awaited_once()
|