From 47d63fddfb3c129fd3eead40f98cba6dac551a29 Mon Sep 17 00:00:00 2001
From: Dobromir Popov
Date: Tue, 9 Sep 2025 03:59:06 +0300
Subject: [PATCH] dash fix wip

---
 web/clean_dashboard.py | 86 +++++++++++++++++++-----------------------
 1 file changed, 38 insertions(+), 48 deletions(-)

diff --git a/web/clean_dashboard.py b/web/clean_dashboard.py
index 19ed98f..4a24bec 100644
--- a/web/clean_dashboard.py
+++ b/web/clean_dashboard.py
@@ -1062,6 +1062,7 @@ class CleanTradingDashboard:
                 logger.error(f"Training status error: {e}")
                 return 'Error', 'badge bg-danger small'
 
+        # Simple prediction tracking callback to test registration
         @self.app.callback(
             [Output('total-predictions-count', 'children'),
              Output('active-models-count', 'children'),
@@ -1075,60 +1076,49 @@ class CleanTradingDashboard:
              Output('model-performance-chart', 'figure')],
             [Input('interval-component', 'n_intervals')]
         )
-        def update_prediction_tracking(n_intervals):
-            """Update prediction tracking with REAL inference stats and performance"""
+        def update_prediction_tracking_simple(n_intervals):
+            """Simple prediction tracking callback to test registration"""
             try:
-                # Get real model status and performance data
-                model_stats = self._get_real_model_performance_data()
-
-                # Calculate summary metrics
-                total_predictions = model_stats.get('total_predictions', 0)
-                active_models = model_stats.get('active_models', 0)
-                total_rewards = model_stats.get('total_rewards', 0.0)
-                recent_predictions = model_stats.get('recent_predictions', [])
-
-                # Calculate average confidence
-                avg_confidence = 0.0
-                if recent_predictions:
-                    valid_confidences = [p.get('confidence', 0.0) for p in recent_predictions if p.get('confidence', 0.0) > 0]
-                    if valid_confidences:
-                        avg_confidence = sum(valid_confidences) / len(valid_confidences)
-
-                # Calculate trend information
-                predictions_trend = "↗️ Active" if total_predictions > 5 else "⏸️ Waiting"
-                models_status = "✅ Loaded" if active_models > 0 else "⚠️ Loading..."
-                confidence_trend = f"📈 {avg_confidence:.1%}" if avg_confidence > 0.5 else f"📉 {avg_confidence:.1%}"
-                rewards_trend = "💰 Profitable" if total_rewards > 0 else "📊 Learning"
-
-                # Create timeline chart with real prediction data
-                timeline_fig = self._create_prediction_timeline_chart(model_stats)
-
-                # Create performance chart with real model metrics
-                performance_fig = self._create_model_performance_chart(model_stats)
-
-                return (
-                    str(total_predictions),
-                    str(active_models),
-                    f"{avg_confidence:.1%}",
-                    f"{total_rewards:+.2f}",
-                    predictions_trend,
-                    models_status,
-                    confidence_trend,
-                    rewards_trend,
-                    timeline_fig,
-                    performance_fig
-                )
-
-            except Exception as e:
-                logger.error(f"Error updating prediction tracking: {e}")
-                # Return safe defaults
+                # Return basic static values for testing
                 empty_fig = {
                     'data': [],
                     'layout': {
+                        'title': 'Dashboard Initializing...',
                         'template': 'plotly_dark',
                         'height': 300,
                         'annotations': [{
-                            'text': f'Error loading data: {str(e)[:50]}...',
+                            'text': 'Loading model data...',
+                            'xref': 'paper', 'yref': 'paper',
+                            'x': 0.5, 'y': 0.5,
+                            'showarrow': False,
+                            'font': {'size': 16, 'color': 'gray'}
+                        }]
+                    }
+                }
+
+                return (
+                    "Loading...",
+                    "Checking...",
+                    "0.0%",
+                    "0.00",
+                    "⏳ Initializing",
+                    "🔄 Starting...",
+                    "⏸️ Waiting",
+                    "📊 Ready",
+                    empty_fig,
+                    empty_fig
+                )
+
+            except Exception as e:
+                logger.error(f"Error in simple prediction tracking: {e}")
+                empty_fig = {
+                    'data': [],
+                    'layout': {
+                        'title': 'Error',
+                        'template': 'plotly_dark',
+                        'height': 300,
+                        'annotations': [{
+                            'text': f'Error: {str(e)[:30]}...',
                             'xref': 'paper', 'yref': 'paper',
                             'x': 0.5, 'y': 0.5,
                             'showarrow': False,
@@ -1136,7 +1126,7 @@ class CleanTradingDashboard:
                         }]
                     }
                 }
-                return "0", "0", "0", "0.00", empty_fig, empty_fig
+                return "Error", "Error", "0.0%", "0.00", "❌ Error", "❌ Error", "❌ Error", "❌ Error", empty_fig, empty_fig
 
     def _get_real_model_performance_data(self) -> Dict[str, Any]:
         """Get real model performance data from orchestrator"""
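Note: the sketch below is a minimal, self-contained illustration of the pattern this patch switches to, a multi-output Dash callback driven by 'interval-component' that returns one static placeholder per declared Output, so callback registration can be verified before real model data is wired back in. Only the component IDs 'total-predictions-count', 'active-models-count' and 'model-performance-chart' come from the diff; the Dash app, layout wiring, and html.Span containers are assumptions made for the example and do not reflect the actual layout in web/clean_dashboard.py.

    # Minimal sketch of the static-placeholder callback pattern (assumes dash>=2.7).
    # The layout is hypothetical; only the three component IDs are taken from the patch.
    from dash import Dash, dcc, html, Input, Output

    app = Dash(__name__)
    app.layout = html.Div([
        dcc.Interval(id='interval-component', interval=1000, n_intervals=0),
        html.Span(id='total-predictions-count'),
        html.Span(id='active-models-count'),
        dcc.Graph(id='model-performance-chart'),
    ])

    # Empty placeholder figure; the 'plotly_dark' string template mirrors the patch.
    EMPTY_FIG = {
        'data': [],
        'layout': {'title': 'Dashboard Initializing...', 'template': 'plotly_dark', 'height': 300},
    }


    @app.callback(
        [Output('total-predictions-count', 'children'),
         Output('active-models-count', 'children'),
         Output('model-performance-chart', 'figure')],
        [Input('interval-component', 'n_intervals')],
    )
    def update_prediction_tracking_simple(n_intervals):
        """Return static values only, to confirm the callback registers and fires."""
        try:
            return "Loading...", "Checking...", EMPTY_FIG
        except Exception:
            # Never raise out of the callback; always return one value per declared Output.
            return "Error", "Error", EMPTY_FIG


    if __name__ == '__main__':
        app.run(debug=True)

The key property, as in the patch, is that both the normal path and the exception path return exactly as many values as there are Outputs, which avoids the silent callback failures that a mismatched return length causes in Dash.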