mirror of
https://github.com/willmiao/ComfyUI-Lora-Manager.git
synced 2026-03-21 21:22:11 -03:00
feat: enhance search with include/exclude tokens and improved sorting
- Add token parsing to support include/exclude search terms using "-" prefix
- Implement token-based matching logic for relative path searches
- Improve search result sorting by prioritizing prefix matches and match position
- Add a frontend test for multi-token highlighting with exclusion support
This commit is contained in:
@@ -136,4 +136,23 @@ describe('AutoComplete widget interactions', () => {
|
||||
expect(input.focus).toHaveBeenCalled();
|
||||
expect(input.setSelectionRange).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('highlights multiple include tokens while ignoring excluded ones', async () => {
  // Mount a real textarea so the widget has a DOM target to attach to.
  const textarea = document.createElement('textarea');
  document.body.append(textarea);

  const { AutoComplete } = await import(AUTOCOMPLETE_MODULE);
  const widget = new AutoComplete(textarea, 'loras', { showPreview: false });

  // "flux" and "detail" are include tokens; "-beta" marks "beta" as excluded.
  const html = widget.highlightMatch(
    'models/flux/beta-detail.safetensors',
    'flux detail -beta',
  );

  // Exactly the two include tokens should be wrapped in highlight spans.
  const spanCount = (html.match(/<span/g) || []).length;
  expect(spanCount).toBe(2);
  expect(html).toContain('flux');
  expect(html).toContain('detail');
  // The excluded token must never close a highlight span.
  expect(html).not.toMatch(/beta<\/span>/i);
});
|
||||
});
|
||||
|
||||
63
tests/services/test_relative_path_search.py
Normal file
@@ -0,0 +1,63 @@
|
||||
import pytest
|
||||
|
||||
from py.services.base_model_service import BaseModelService
|
||||
from py.utils.models import BaseModelMetadata
|
||||
|
||||
|
||||
class DummyService(BaseModelService):
    """Concrete ``BaseModelService`` for tests.

    Implements the abstract response formatter as an identity passthrough so
    tests can assert on the raw model records returned by search methods.
    """

    async def format_response(self, model_data):
        # Identity passthrough: tests inspect the raw model dicts directly.
        return model_data
|
||||
|
||||
|
||||
class FakeCache:
    """In-memory stand-in for the scanner's cache object.

    Exposes the single ``raw_data`` attribute the search code reads.
    """

    def __init__(self, raw_data):
        # Copy into a fresh list so tests cannot share mutable state.
        self.raw_data = list(raw_data)
|
||||
|
||||
|
||||
class FakeScanner:
    """Minimal scanner double serving a fixed cache and fixed model roots."""

    def __init__(self, raw_data, roots):
        # Wrap the records in a FakeCache, mirroring the real scanner API.
        self._cached = FakeCache(raw_data)
        # Stored immutably; get_model_roots() hands out fresh lists.
        self._root_dirs = tuple(roots)

    async def get_cached_data(self, *_args, **_kwargs):
        # All arguments are ignored; every call yields the same cache object.
        return self._cached

    def get_model_roots(self):
        # Return a new list so callers cannot mutate the configured roots.
        return list(self._root_dirs)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_search_relative_paths_supports_multiple_tokens():
    """Every whitespace-separated token must match for a path to be returned."""
    records = [
        {"file_path": "/models/flux/detail-model.safetensors"},
        {"file_path": "/models/flux/only-flux.safetensors"},
        {"file_path": "/models/detail/flux-trained.safetensors"},
        {"file_path": "/models/detail/standalone.safetensors"},
    ]
    fake_scanner = FakeScanner(records, ["/models"])
    svc = DummyService("stub", fake_scanner, BaseModelMetadata)

    results = await svc.search_relative_paths("flux detail")

    # Only paths containing BOTH "flux" and "detail" survive, in cache order.
    expected = [
        "flux/detail-model.safetensors",
        "detail/flux-trained.safetensors",
    ]
    assert results == expected
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_search_relative_paths_excludes_tokens():
    """A "-"-prefixed token removes any path containing that term."""
    fake_scanner = FakeScanner(
        [
            {"file_path": "/models/flux/detail-model.safetensors"},
            {"file_path": "/models/flux/keep-me.safetensors"},
        ],
        ["/models"],
    )
    svc = DummyService("stub", fake_scanner, BaseModelMetadata)

    results = await svc.search_relative_paths("flux -detail")

    # "-detail" filters out detail-model; only keep-me should remain.
    assert results == ["flux/keep-me.safetensors"]
|
||||
Reference in New Issue
Block a user