diff --git a/web/clean_dashboard.py b/web/clean_dashboard.py
index fadf49c..19ed98f 100644
--- a/web/clean_dashboard.py
+++ b/web/clean_dashboard.py
@@ -1064,132 +1064,388 @@ class CleanTradingDashboard:
@self.app.callback(
[Output('total-predictions-count', 'children'),
- Output('pending-predictions-count', 'children'),
Output('active-models-count', 'children'),
+ Output('avg-confidence', 'children'),
Output('total-rewards-sum', 'children'),
+ Output('predictions-trend', 'children'),
+ Output('models-status', 'children'),
+ Output('confidence-trend', 'children'),
+ Output('rewards-trend', 'children'),
Output('prediction-timeline-chart', 'figure'),
Output('model-performance-chart', 'figure')],
[Input('interval-component', 'n_intervals')]
)
def update_prediction_tracking(n_intervals):
- """Update prediction tracking charts and metrics"""
+ """Update prediction tracking with REAL inference stats and performance"""
try:
- if (hasattr(self.orchestrator, 'enhanced_training_system') and
- self.orchestrator.enhanced_training_system):
-
- # Get prediction data
- stats = self.orchestrator.enhanced_training_system.get_model_performance_stats()
- models = stats.get('models', [])
- total_active = stats.get('total_active_predictions', 0)
-
- # Calculate totals
- total_predictions = sum(m.get('total_predictions', 0) for m in models)
- total_rewards = sum(m.get('total_reward', 0) for m in models)
- active_models = len(models)
-
- # Create timeline chart (simplified)
- timeline_fig = {
- 'data': [],
- 'layout': {
- 'title': 'Recent Predictions Timeline',
- 'xaxis': {'title': 'Time'},
- 'yaxis': {'title': 'Confidence'},
- 'template': 'plotly_dark',
- 'height': 300,
- 'showlegend': False
- }
- }
-
- # Add empty annotation if no data
- if not models:
- timeline_fig['layout']['annotations'] = [{
- 'text': 'No prediction data yet',
- 'xref': 'paper', 'yref': 'paper',
- 'x': 0.5, 'y': 0.5,
- 'showarrow': False,
- 'font': {'size': 16, 'color': 'gray'}
- }]
-
- # Create performance chart
- performance_fig = {
- 'data': [],
- 'layout': {
- 'title': 'Model Performance',
- 'template': 'plotly_dark',
- 'height': 300,
- 'showlegend': True
- }
- }
-
- if models:
- model_names = [m.get('model_name', 'Unknown') for m in models]
- accuracies = [m.get('accuracy', 0) * 100 for m in models]
- rewards = [m.get('total_reward', 0) for m in models]
-
- # Add accuracy bars
- performance_fig['data'].append({
- 'x': model_names,
- 'y': accuracies,
- 'type': 'bar',
- 'name': 'Accuracy (%)',
- 'marker': {'color': 'lightblue'}
- })
-
- performance_fig['layout']['xaxis'] = {'title': 'Model'}
- performance_fig['layout']['yaxis'] = {'title': 'Accuracy (%)'}
- else:
- performance_fig['layout']['annotations'] = [{
- 'text': 'No model data yet',
- 'xref': 'paper', 'yref': 'paper',
- 'x': 0.5, 'y': 0.5,
- 'showarrow': False,
- 'font': {'size': 16, 'color': 'gray'}
- }]
-
- return (
- str(total_predictions),
- str(total_active),
- str(active_models),
- f"{total_rewards:.1f}",
- timeline_fig,
- performance_fig
- )
- else:
- # Training system not available
- empty_fig = {
- 'data': [],
- 'layout': {
- 'template': 'plotly_dark',
- 'height': 300,
- 'annotations': [{
- 'text': 'Training system not available',
- 'xref': 'paper', 'yref': 'paper',
- 'x': 0.5, 'y': 0.5,
- 'showarrow': False,
- 'font': {'size': 16, 'color': 'red'}
- }]
- }
- }
-
- return "N/A", "N/A", "N/A", "N/A", empty_fig, empty_fig
-
+ # Get real model status and performance data
+ model_stats = self._get_real_model_performance_data()
+
+ # Calculate summary metrics
+ total_predictions = model_stats.get('total_predictions', 0)
+ active_models = model_stats.get('active_models', 0)
+ total_rewards = model_stats.get('total_rewards', 0.0)
+ recent_predictions = model_stats.get('recent_predictions', [])
+
+ # Calculate average confidence
+ avg_confidence = 0.0
+ if recent_predictions:
+ valid_confidences = [p.get('confidence', 0.0) for p in recent_predictions if p.get('confidence', 0.0) > 0]
+ if valid_confidences:
+ avg_confidence = sum(valid_confidences) / len(valid_confidences)
+
+ # Calculate trend information
+ predictions_trend = "↗️ Active" if total_predictions > 5 else "⏸️ Waiting"
+ models_status = "✅ Loaded" if active_models > 0 else "⚠️ Loading..."
+ confidence_trend = f"📈 {avg_confidence:.1%}" if avg_confidence > 0.5 else f"📉 {avg_confidence:.1%}"
+ rewards_trend = "💰 Profitable" if total_rewards > 0 else "📊 Learning"
+
+ # Create timeline chart with real prediction data
+ timeline_fig = self._create_prediction_timeline_chart(model_stats)
+
+ # Create performance chart with real model metrics
+ performance_fig = self._create_model_performance_chart(model_stats)
+
+ return (
+ str(total_predictions),
+ str(active_models),
+ f"{avg_confidence:.1%}",
+ f"{total_rewards:+.2f}",
+ predictions_trend,
+ models_status,
+ confidence_trend,
+ rewards_trend,
+ timeline_fig,
+ performance_fig
+ )
+
except Exception as e:
logger.error(f"Error updating prediction tracking: {e}")
- error_fig = {
+ # Return safe defaults
+ empty_fig = {
'data': [],
'layout': {
'template': 'plotly_dark',
'height': 300,
'annotations': [{
- 'text': f'Error: {str(e)}',
+ 'text': f'Error loading data: {str(e)[:50]}...',
'xref': 'paper', 'yref': 'paper',
'x': 0.5, 'y': 0.5,
'showarrow': False,
- 'font': {'size': 14, 'color': 'red'}
+ 'font': {'size': 12, 'color': 'red'}
}]
}
}
- return "Error", "Error", "Error", "Error", error_fig, error_fig
+            return "0", "0", "0.0%", "0.00", "❌ Error", "❌ Error", "❌ Error", "❌ Error", empty_fig, empty_fig
+
+ def _get_real_model_performance_data(self) -> Dict[str, Any]:
+ """Get real model performance data from orchestrator"""
+ try:
+ model_data = {
+ 'total_predictions': 0,
+ 'pending_predictions': 0,
+ 'active_models': 0,
+ 'total_rewards': 0.0,
+ 'models': [],
+ 'recent_predictions': []
+ }
+
+ if not self.orchestrator:
+ return model_data
+
+ # Get model states from orchestrator
+ model_states = getattr(self.orchestrator, 'model_states', {})
+
+ # Check each model type
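+            # Each state dict is read defensively: checkpoint_loaded, current_loss, best_loss and
+            # checkpoint_filename fall back to the defaults below when a key is missing.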
+ for model_type in ['cnn', 'dqn', 'cob_rl']:
+ if model_type in model_states:
+ state = model_states[model_type]
+ is_loaded = state.get('checkpoint_loaded', False)
+
+ if is_loaded:
+ model_data['active_models'] += 1
+
+ # Add model info (include all models, not just loaded ones)
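+                    # Per-model activity counters (e.g. cnn_training_count, cnn_inference_count) are
+                    # optional orchestrator attributes; getattr defaults keep this safe when absent.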
+ model_data['models'].append({
+ 'name': model_type.upper(),
+ 'status': 'LOADED' if is_loaded else 'FRESH',
+ 'current_loss': state.get('current_loss', 0.0),
+ 'best_loss': state.get('best_loss', None),
+ 'checkpoint_filename': state.get('checkpoint_filename', 'none'),
+ 'training_sessions': getattr(self.orchestrator, f'{model_type}_training_count', 0),
+ 'last_inference': getattr(self.orchestrator, f'{model_type}_last_inference', None),
+ 'inference_count': getattr(self.orchestrator, f'{model_type}_inference_count', 0)
+ })
+
+ # Get recent predictions from our tracking
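+            # The dashboard's own recent_decisions history feeds the summary metrics; only the
+            # most recent 20 entries are considered.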
+ if hasattr(self, 'recent_decisions') and self.recent_decisions:
+ for decision in list(self.recent_decisions)[-20:]: # Last 20 decisions
+ model_data['recent_predictions'].append({
+ 'timestamp': decision.get('timestamp', datetime.now()),
+ 'action': decision.get('action', 'UNKNOWN'),
+ 'confidence': decision.get('confidence', 0.0),
+ 'reward': decision.get('reward', 0.0),
+ 'outcome': decision.get('outcome', 'pending')
+ })
+
+ model_data['total_predictions'] = len(model_data['recent_predictions'])
+ model_data['pending_predictions'] = sum(1 for p in model_data['recent_predictions']
+ if p.get('outcome') == 'pending')
+ model_data['total_rewards'] = sum(p.get('reward', 0.0) for p in model_data['recent_predictions'])
+
+ return model_data
+
+ except Exception as e:
+ logger.error(f"Error getting real model performance data: {e}")
+ return {
+ 'total_predictions': 0,
+ 'pending_predictions': 0,
+ 'active_models': 0,
+ 'total_rewards': 0.0,
+ 'models': [],
+ 'recent_predictions': []
+ }
+
+ def _create_prediction_timeline_chart(self, model_stats: Dict[str, Any]) -> Dict[str, Any]:
+ """Create prediction timeline chart with real data"""
+ try:
+ recent_predictions = model_stats.get('recent_predictions', [])
+
+ if not recent_predictions:
+ return {
+ 'data': [],
+ 'layout': {
+ 'title': 'Recent Predictions Timeline',
+ 'template': 'plotly_dark',
+ 'height': 300,
+ 'annotations': [{
+ 'text': 'No predictions yet',
+ 'xref': 'paper', 'yref': 'paper',
+ 'x': 0.5, 'y': 0.5,
+ 'showarrow': False,
+ 'font': {'size': 16, 'color': 'gray'}
+ }]
+ }
+ }
+
+ # Prepare data for timeline
+ timestamps = []
+ confidences = []
+ rewards = []
+ actions = []
+
+ for pred in recent_predictions[-50:]: # Last 50 predictions
+ timestamps.append(pred.get('timestamp', datetime.now()))
+ confidences.append(pred.get('confidence', 0.0) * 100) # Convert to percentage
+ rewards.append(pred.get('reward', 0.0))
+ actions.append(pred.get('action', 'UNKNOWN'))
+
+ # Create timeline chart
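+            # Confidence is drawn as a line on the primary y-axis (0-100%); per-prediction
+            # rewards are bars on a secondary axis overlaid on the right.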
+ fig = {
+ 'data': [
+ {
+ 'x': timestamps,
+ 'y': confidences,
+ 'type': 'scatter',
+ 'mode': 'lines+markers',
+ 'name': 'Confidence (%)',
+ 'line': {'color': '#00ff88', 'width': 2},
+ 'marker': {'size': 6}
+ },
+ {
+ 'x': timestamps,
+ 'y': rewards,
+ 'type': 'bar',
+ 'name': 'Reward',
+ 'yaxis': 'y2',
+ 'marker': {'color': '#ff6b6b'}
+ }
+ ],
+ 'layout': {
+ 'title': 'Prediction Timeline (Last 50)',
+ 'template': 'plotly_dark',
+ 'height': 300,
+ 'xaxis': {
+ 'title': 'Time',
+ 'type': 'date'
+ },
+ 'yaxis': {
+ 'title': 'Confidence (%)',
+ 'range': [0, 100]
+ },
+ 'yaxis2': {
+ 'title': 'Reward',
+ 'overlaying': 'y',
+ 'side': 'right',
+ 'showgrid': False
+ },
+ 'showlegend': True,
+ 'legend': {'x': 0, 'y': 1}
+ }
+ }
+
+ return fig
+
+ except Exception as e:
+ logger.error(f"Error creating prediction timeline chart: {e}")
+ return {
+ 'data': [],
+ 'layout': {
+ 'title': 'Prediction Timeline',
+ 'template': 'plotly_dark',
+ 'height': 300,
+ 'annotations': [{
+ 'text': f'Chart error: {str(e)[:30]}...',
+ 'xref': 'paper', 'yref': 'paper',
+ 'x': 0.5, 'y': 0.5,
+ 'showarrow': False,
+ 'font': {'size': 12, 'color': 'red'}
+ }]
+ }
+ }
+
+ def _create_model_performance_chart(self, model_stats: Dict[str, Any]) -> Dict[str, Any]:
+ """Create model performance chart with real metrics"""
+ try:
+ models = model_stats.get('models', [])
+
+ if not models:
+ return {
+ 'data': [],
+ 'layout': {
+ 'title': 'Model Performance',
+ 'template': 'plotly_dark',
+ 'height': 300,
+ 'annotations': [{
+ 'text': 'No active models',
+ 'xref': 'paper', 'yref': 'paper',
+ 'x': 0.5, 'y': 0.5,
+ 'showarrow': False,
+ 'font': {'size': 16, 'color': 'gray'}
+ }]
+ }
+ }
+
+ # Prepare data for performance chart
+ model_names = []
+ current_losses = []
+ best_losses = []
+ training_sessions = []
+ inference_counts = []
+ statuses = []
+
+ for model in models:
+ model_names.append(model.get('name', 'Unknown'))
+ current_losses.append(model.get('current_loss', 0.0))
+                best_losses.append(model.get('best_loss') if model.get('best_loss') is not None else model.get('current_loss', 0.0))
+ training_sessions.append(model.get('training_sessions', 0))
+ inference_counts.append(model.get('inference_count', 0))
+ statuses.append(model.get('status', 'Unknown'))
+
+ # Create comprehensive performance chart
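+            # Losses are plotted as bars on the left axis; training-session and inference
+            # counts are markers on a secondary right-hand axis.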
+ fig = {
+ 'data': [
+ {
+ 'x': model_names,
+ 'y': current_losses,
+ 'type': 'bar',
+ 'name': 'Current Loss',
+ 'marker': {'color': '#ff6b6b'},
+ 'yaxis': 'y1'
+ },
+ {
+ 'x': model_names,
+ 'y': best_losses,
+ 'type': 'bar',
+ 'name': 'Best Loss',
+ 'marker': {'color': '#4ecdc4'},
+ 'yaxis': 'y1'
+ },
+ {
+ 'x': model_names,
+ 'y': training_sessions,
+ 'type': 'scatter',
+ 'mode': 'markers',
+ 'name': 'Training Sessions',
+ 'marker': {'color': '#ffd93d', 'size': 12},
+ 'yaxis': 'y2'
+ },
+ {
+ 'x': model_names,
+ 'y': inference_counts,
+ 'type': 'scatter',
+ 'mode': 'markers',
+ 'name': 'Inference Count',
+ 'marker': {'color': '#a8e6cf', 'size': 8},
+ 'yaxis': 'y2'
+ }
+ ],
+ 'layout': {
+ 'title': 'Real Model Performance & Activity',
+ 'template': 'plotly_dark',
+ 'height': 300,
+ 'xaxis': {
+ 'title': 'Model'
+ },
+ 'yaxis': {
+ 'title': 'Loss',
+ 'side': 'left'
+ },
+ 'yaxis2': {
+ 'title': 'Activity Count',
+ 'side': 'right',
+ 'overlaying': 'y',
+ 'showgrid': False
+ },
+ 'showlegend': True,
+ 'legend': {'x': 0, 'y': 1}
+ }
+ }
+
+ # Add status annotations with more detail
+ annotations = []
+ for i, (name, status) in enumerate(zip(model_names, statuses)):
+ color = '#00ff88' if status == 'LOADED' else '#ff6b6b'
+                    # '<br>' produces a line break inside a Plotly annotation's text
+                    loss_text = f"{status}<br>Loss: {current_losses[i]:.4f}"
+                    if training_sessions[i] > 0:
+                        loss_text += f"<br>Trained: {training_sessions[i]}x"
+                    if inference_counts[i] > 0:
+                        loss_text += f"<br>Inferred: {inference_counts[i]}x"
+
+ annotations.append({
+ 'text': loss_text,
+ 'x': name,
+ 'y': max(current_losses[i] * 1.1, 0.01),
+ 'xref': 'x',
+ 'yref': 'y',
+ 'showarrow': False,
+ 'font': {'color': color, 'size': 8},
+ 'align': 'center'
+ })
+
+ fig['layout']['annotations'] = annotations
+
+ return fig
+
+ except Exception as e:
+ logger.error(f"Error creating model performance chart: {e}")
+ return {
+ 'data': [],
+ 'layout': {
+ 'title': 'Model Performance',
+ 'template': 'plotly_dark',
+ 'height': 300,
+ 'annotations': [{
+ 'text': f'Chart error: {str(e)[:30]}...',
+ 'xref': 'paper', 'yref': 'paper',
+ 'x': 0.5, 'y': 0.5,
+ 'showarrow': False,
+ 'font': {'size': 12, 'color': 'red'}
+ }]
+ }
+ }
+
@self.app.callback(
[Output('eth-cob-content', 'children'),
diff --git a/web/layout_manager.py b/web/layout_manager.py
index f596540..ba85c58 100644
--- a/web/layout_manager.py
+++ b/web/layout_manager.py
@@ -37,33 +37,37 @@ class DashboardLayoutManager:
"🧠 Model Predictions & Performance Tracking"
], className="text-light mb-3"),
- # Summary cards row
+ # Summary cards row - Enhanced with real metrics
html.Div([
html.Div([
html.Div([
html.H6("0", id="total-predictions-count", className="mb-0 text-primary"),
- html.Small("Total Predictions", className="text-light")
+ html.Small("Recent Signals", className="text-light"),
+ html.Small("", id="predictions-trend", className="d-block text-xs text-muted")
], className="card-body text-center p-2 bg-dark")
], className="card col-md-3 mx-1 bg-dark border-secondary"),
-
- html.Div([
- html.Div([
- html.H6("0", id="pending-predictions-count", className="mb-0 text-warning"),
- html.Small("Pending Resolution", className="text-light")
- ], className="card-body text-center p-2 bg-dark")
- ], className="card col-md-3 mx-1 bg-dark border-secondary"),
-
+
html.Div([
html.Div([
html.H6("0", id="active-models-count", className="mb-0 text-info"),
- html.Small("Active Models", className="text-light")
+ html.Small("Loaded Models", className="text-light"),
+ html.Small("", id="models-status", className="d-block text-xs text-success")
], className="card-body text-center p-2 bg-dark")
], className="card col-md-3 mx-1 bg-dark border-secondary"),
-
+
html.Div([
html.Div([
- html.H6("0.0", id="total-rewards-sum", className="mb-0 text-success"),
- html.Small("Total Rewards", className="text-light")
+ html.H6("0.00", id="avg-confidence", className="mb-0 text-warning"),
+ html.Small("Avg Confidence", className="text-light"),
+ html.Small("", id="confidence-trend", className="d-block text-xs text-muted")
+ ], className="card-body text-center p-2 bg-dark")
+ ], className="card col-md-3 mx-1 bg-dark border-secondary"),
+
+ html.Div([
+ html.Div([
+ html.H6("+0.00", id="total-rewards-sum", className="mb-0 text-success"),
+ html.Small("Total Rewards", className="text-light"),
+ html.Small("", id="rewards-trend", className="d-block text-xs text-muted")
], className="card-body text-center p-2 bg-dark")
], className="card col-md-3 mx-1 bg-dark border-secondary")
], className="row mb-3"),