From 8c5d5a8ca0ff65631fb9f8385a0e4d2b0f8e3536 Mon Sep 17 00:00:00 2001
From: Will Miao
Date: Tue, 3 Mar 2026 15:00:01 +0800
Subject: [PATCH] feat(stats): implement infinite scrolling and paginated model usage lists (fixes #812)

- Add get_model_usage_list API endpoint for paginated stats
- Replace static rendering with client-side infinite scroll logic
- Add scrollbars and max-height to model usage lists
---
 py/routes/stats_routes.py            | 75 ++++++++++++++
 static/css/components/statistics.css |  3 +
 static/js/statistics.js              | 150 ++++++++++++++-------------
 3 files changed, 156 insertions(+), 72 deletions(-)

diff --git a/py/routes/stats_routes.py b/py/routes/stats_routes.py
index a2a7d15c..0ac12348 100644
--- a/py/routes/stats_routes.py
+++ b/py/routes/stats_routes.py
@@ -209,6 +209,80 @@
             'error': str(e)
         }, status=500)
 
+    async def get_model_usage_list(self, request: web.Request) -> web.Response:
+        """Get paginated model usage list for infinite scrolling.
+
+        Query params: type (lora|checkpoint|embedding), sort (asc|desc),
+        limit, offset. Returns JSON {success, data: {items, total, type}}.
+        """
+        try:
+            await self.init_services()
+
+            model_type = request.query.get('type', 'lora')
+            sort_order = request.query.get('sort', 'desc')
+
+            try:
+                limit = int(request.query.get('limit', '50'))
+                offset = int(request.query.get('offset', '0'))
+            except ValueError:
+                limit = 50
+                offset = 0
+            # Floor negative paging values so the slice below stays well-defined
+            limit = max(limit, 0)
+            offset = max(offset, 0)
+
+            # Get usage statistics
+            usage_data = await self.usage_stats.get_stats()
+
+            # Select proper cache and usage dict based on type
+            if model_type == 'lora':
+                cache = await self.lora_scanner.get_cached_data()
+                type_usage_data = usage_data.get('loras', {})
+            elif model_type == 'checkpoint':
+                cache = await self.checkpoint_scanner.get_cached_data()
+                type_usage_data = usage_data.get('checkpoints', {})
+            elif model_type == 'embedding':
+                cache = await self.embedding_scanner.get_cached_data()
+                type_usage_data = usage_data.get('embeddings', {})
+            else:
+                return web.json_response({'success': False, 'error': f"Invalid model type: {model_type}"}, status=400)
+
+            # Create list of all models
+            all_models = []
+            for item in cache.raw_data:
+                sha256 = item.get('sha256')
+                usage_info = type_usage_data.get(sha256, {}) if sha256 else {}
+                usage_count = usage_info.get('total', 0) if isinstance(usage_info, dict) else 0
+
+                all_models.append({
+                    'name': item.get('model_name', 'Unknown'),
+                    'usage_count': usage_count,
+                    'base_model': item.get('base_model', 'Unknown'),
+                    'preview_url': config.get_preview_static_url(item.get('preview_url', '')),
+                    'folder': item.get('folder', '')
+                })
+
+            # Sort once: usage count in the requested order, name (case-insensitive) as tie-breaker
+            if sort_order == 'desc':
+                all_models.sort(key=lambda x: (-x['usage_count'], x['name'].lower()))
+            else:
+                all_models.sort(key=lambda x: (x['usage_count'], x['name'].lower()))
+
+            # Slice for pagination
+            paginated_models = all_models[offset:offset + limit]
+
+            return web.json_response({
+                'success': True,
+                'data': {
+                    'items': paginated_models,
+                    'total': len(all_models),
+                    'type': model_type
+                }
+            })
+
+        except Exception as e:
+            logger.error(f"Error getting model usage list: {e}", exc_info=True)
+            return web.json_response({
+                'success': False,
+                'error': str(e)
+            }, status=500)
+
     async def get_base_model_distribution(self, request: web.Request) -> web.Response:
         """Get base model distribution statistics"""
         try:
@@ -530,6 +530,7 @@
         # Register API routes
         app.router.add_get('/api/lm/stats/collection-overview', self.get_collection_overview)
         app.router.add_get('/api/lm/stats/usage-analytics', self.get_usage_analytics)
+        app.router.add_get('/api/lm/stats/model-usage-list', self.get_model_usage_list)
         app.router.add_get('/api/lm/stats/base-model-distribution', self.get_base_model_distribution)
         app.router.add_get('/api/lm/stats/tag-analytics', self.get_tag_analytics)
         app.router.add_get('/api/lm/stats/storage-analytics', self.get_storage_analytics)
diff --git a/static/css/components/statistics.css
b/static/css/components/statistics.css
index 7cb76d6a..27908584 100644
--- a/static/css/components/statistics.css
+++ b/static/css/components/statistics.css
@@ -196,6 +196,9 @@
     display: flex;
     flex-direction: column;
     gap: 8px;
+    max-height: 400px;
+    overflow-y: auto;
+    padding-right: 4px;
 }
 
 .model-item {
diff --git a/static/js/statistics.js b/static/js/statistics.js
index 74ce2207..ea76af59 100644
--- a/static/js/statistics.js
+++ b/static/js/statistics.js
@@ -10,6 +10,11 @@
         this.charts = {};
         this.data = {};
         this.initialized = false;
+        this.listStates = {
+            lora: { offset: 0, limit: 50, sort: 'desc', isLoading: false, hasMore: true },
+            checkpoint: { offset: 0, limit: 50, sort: 'desc', isLoading: false, hasMore: true },
+            embedding: { offset: 0, limit: 50, sort: 'desc', isLoading: false, hasMore: true }
+        };
     }
 
     async initialize() {
@@ -105,7 +110,8 @@
         this.initializeCharts();
 
         // Initialize lists and other components
-        this.renderTopModelsLists();
+        this.initializeLists();
+        this.renderLargestModelsList();
         this.renderTagCloud();
         this.renderInsights();
     }
@@ -548,86 +554,86 @@
         });
     }
 
-    renderTopModelsLists() {
-        this.renderTopLorasList();
-        this.renderTopCheckpointsList();
-        this.renderTopEmbeddingsList();
-        this.renderLargestModelsList();
+    initializeLists() {
+        const listTypes = [
+            { type: 'lora', containerId: 'topLorasList' },
+            { type: 'checkpoint', containerId: 'topCheckpointsList' },
+            { type: 'embedding', containerId: 'topEmbeddingsList' }
+        ];
+
+        listTypes.forEach(({ type, containerId }) => {
+            const container = document.getElementById(containerId);
+
+            if (container) {
+                // Handle infinite scrolling
+                container.addEventListener('scroll', () => {
+                    if (container.scrollTop + container.clientHeight >= container.scrollHeight - 50) {
+                        this.fetchAndRenderList(type, container);
+                    }
+                });
+
+                // Initial fetch
+                this.fetchAndRenderList(type, container);
+            }
+        });
     }
 
-
renderTopLorasList() { - const container = document.getElementById('topLorasList'); - if (!container || !this.data.usage?.top_loras) return; + async fetchAndRenderList(type, container) { + const state = this.listStates[type]; + if (state.isLoading || !state.hasMore) return; - const topLoras = this.data.usage.top_loras; + state.isLoading = true; - if (topLoras.length === 0) { - container.innerHTML = '
No usage data available
'; - return; + // Show loading indicator on initial load + if (state.offset === 0) { + container.innerHTML = '
Loading...
'; } - container.innerHTML = topLoras.map(lora => ` -
- ${lora.name} -
-
${lora.name}
-
${lora.base_model} • ${lora.folder}
-
-
${lora.usage_count}
-
- `).join(''); - } + try { + const url = `/api/lm/stats/model-usage-list?type=${type}&sort=${state.sort}&offset=${state.offset}&limit=${state.limit}`; + const response = await fetch(url); + if (!response.ok) throw new Error('Network response was not ok'); + + const result = await response.json(); + if (result.success) { + const items = result.data.items; + + // Remove loading indicator if it's the first page + if (state.offset === 0) { + container.innerHTML = ''; + } - renderTopCheckpointsList() { - const container = document.getElementById('topCheckpointsList'); - if (!container || !this.data.usage?.top_checkpoints) return; + if (items.length === 0 && state.offset === 0) { + container.innerHTML = '
No models found
'; + state.hasMore = false; + } else if (items.length < state.limit) { + state.hasMore = false; + } - const topCheckpoints = this.data.usage.top_checkpoints; - - if (topCheckpoints.length === 0) { - container.innerHTML = '
No usage data available
'; - return; + const html = items.map(model => ` +
+ ${model.name} +
+
${model.name}
+
${model.base_model} • ${model.folder || 'Root'}
+
+
${model.usage_count}
+
+ `).join(''); + + container.insertAdjacentHTML('beforeend', html); + state.offset += state.limit; + } + } catch (error) { + console.error(`Error loading ${type} list:`, error); + if (state.offset === 0) { + container.innerHTML = '
Error loading data
'; + } + } finally { + state.isLoading = false; } - - container.innerHTML = topCheckpoints.map(checkpoint => ` -
- ${checkpoint.name} -
-
${checkpoint.name}
-
${checkpoint.base_model} • ${checkpoint.folder}
-
-
${checkpoint.usage_count}
-
- `).join(''); - } - - renderTopEmbeddingsList() { - const container = document.getElementById('topEmbeddingsList'); - if (!container || !this.data.usage?.top_embeddings) return; - - const topEmbeddings = this.data.usage.top_embeddings; - - if (topEmbeddings.length === 0) { - container.innerHTML = '
No usage data available
'; - return; - } - - container.innerHTML = topEmbeddings.map(embedding => ` -
- ${embedding.name} -
-
${embedding.name}
-
${embedding.base_model} • ${embedding.folder}
-
-
${embedding.usage_count}
-
- `).join(''); } renderLargestModelsList() {