update dash with model performance

commit 2f51966fa8
parent 55fb865e7f
Author: Dobromir Popov
Date: 2025-09-09 03:51:04 +03:00

2 changed files with 379 additions and 119 deletions


@@ -1064,132 +1064,388 @@ class CleanTradingDashboard:
         @self.app.callback(
             [Output('total-predictions-count', 'children'),
-             Output('pending-predictions-count', 'children'),
              Output('active-models-count', 'children'),
+             Output('avg-confidence', 'children'),
              Output('total-rewards-sum', 'children'),
+             Output('predictions-trend', 'children'),
+             Output('models-status', 'children'),
+             Output('confidence-trend', 'children'),
+             Output('rewards-trend', 'children'),
              Output('prediction-timeline-chart', 'figure'),
              Output('model-performance-chart', 'figure')],
             [Input('interval-component', 'n_intervals')]
         )
         def update_prediction_tracking(n_intervals):
-            """Update prediction tracking charts and metrics"""
+            """Update prediction tracking with REAL inference stats and performance"""
             try:
-                if (hasattr(self.orchestrator, 'enhanced_training_system') and
-                    self.orchestrator.enhanced_training_system):
-                    # Get prediction data
-                    stats = self.orchestrator.enhanced_training_system.get_model_performance_stats()
-                    models = stats.get('models', [])
-                    total_active = stats.get('total_active_predictions', 0)
-
-                    # Calculate totals
-                    total_predictions = sum(m.get('total_predictions', 0) for m in models)
-                    total_rewards = sum(m.get('total_reward', 0) for m in models)
-                    active_models = len(models)
-
-                    # Create timeline chart (simplified)
-                    timeline_fig = {
-                        'data': [],
-                        'layout': {
-                            'title': 'Recent Predictions Timeline',
-                            'xaxis': {'title': 'Time'},
-                            'yaxis': {'title': 'Confidence'},
-                            'template': 'plotly_dark',
-                            'height': 300,
-                            'showlegend': False
-                        }
-                    }
-
-                    # Add empty annotation if no data
-                    if not models:
-                        timeline_fig['layout']['annotations'] = [{
-                            'text': 'No prediction data yet',
-                            'xref': 'paper', 'yref': 'paper',
-                            'x': 0.5, 'y': 0.5,
-                            'showarrow': False,
-                            'font': {'size': 16, 'color': 'gray'}
-                        }]
-
-                    # Create performance chart
-                    performance_fig = {
-                        'data': [],
-                        'layout': {
-                            'title': 'Model Performance',
-                            'template': 'plotly_dark',
-                            'height': 300,
-                            'showlegend': True
-                        }
-                    }
-
-                    if models:
-                        model_names = [m.get('model_name', 'Unknown') for m in models]
-                        accuracies = [m.get('accuracy', 0) * 100 for m in models]
-                        rewards = [m.get('total_reward', 0) for m in models]
-
-                        # Add accuracy bars
-                        performance_fig['data'].append({
-                            'x': model_names,
-                            'y': accuracies,
-                            'type': 'bar',
-                            'name': 'Accuracy (%)',
-                            'marker': {'color': 'lightblue'}
-                        })
-                        performance_fig['layout']['xaxis'] = {'title': 'Model'}
-                        performance_fig['layout']['yaxis'] = {'title': 'Accuracy (%)'}
-                    else:
-                        performance_fig['layout']['annotations'] = [{
-                            'text': 'No model data yet',
-                            'xref': 'paper', 'yref': 'paper',
-                            'x': 0.5, 'y': 0.5,
-                            'showarrow': False,
-                            'font': {'size': 16, 'color': 'gray'}
-                        }]
-
-                    return (
-                        str(total_predictions),
-                        str(total_active),
-                        str(active_models),
-                        f"{total_rewards:.1f}",
-                        timeline_fig,
-                        performance_fig
-                    )
-                else:
-                    # Training system not available
-                    empty_fig = {
-                        'data': [],
-                        'layout': {
-                            'template': 'plotly_dark',
-                            'height': 300,
-                            'annotations': [{
-                                'text': 'Training system not available',
-                                'xref': 'paper', 'yref': 'paper',
-                                'x': 0.5, 'y': 0.5,
-                                'showarrow': False,
-                                'font': {'size': 16, 'color': 'red'}
-                            }]
-                        }
-                    }
-                    return "N/A", "N/A", "N/A", "N/A", empty_fig, empty_fig
-
-            except Exception as e:
-                logger.error(f"Error updating prediction tracking: {e}")
-                error_fig = {
-                    'data': [],
-                    'layout': {
-                        'template': 'plotly_dark',
-                        'height': 300,
-                        'annotations': [{
-                            'text': f'Error: {str(e)}',
-                            'xref': 'paper', 'yref': 'paper',
-                            'x': 0.5, 'y': 0.5,
-                            'showarrow': False,
-                            'font': {'size': 14, 'color': 'red'}
-                        }]
-                    }
-                }
-                return "Error", "Error", "Error", "Error", error_fig, error_fig
+                # Get real model status and performance data
+                model_stats = self._get_real_model_performance_data()
+
+                # Calculate summary metrics
+                total_predictions = model_stats.get('total_predictions', 0)
+                active_models = model_stats.get('active_models', 0)
+                total_rewards = model_stats.get('total_rewards', 0.0)
+                recent_predictions = model_stats.get('recent_predictions', [])
+
+                # Calculate average confidence
+                avg_confidence = 0.0
+                if recent_predictions:
+                    valid_confidences = [p.get('confidence', 0.0) for p in recent_predictions if p.get('confidence', 0.0) > 0]
+                    if valid_confidences:
+                        avg_confidence = sum(valid_confidences) / len(valid_confidences)
+
+                # Calculate trend information
+                predictions_trend = "↗️ Active" if total_predictions > 5 else "⏸️ Waiting"
+                models_status = "✅ Loaded" if active_models > 0 else "⚠️ Loading..."
+                confidence_trend = f"📈 {avg_confidence:.1%}" if avg_confidence > 0.5 else f"📉 {avg_confidence:.1%}"
+                rewards_trend = "💰 Profitable" if total_rewards > 0 else "📊 Learning"
+
+                # Create timeline chart with real prediction data
+                timeline_fig = self._create_prediction_timeline_chart(model_stats)
+
+                # Create performance chart with real model metrics
+                performance_fig = self._create_model_performance_chart(model_stats)
+
+                return (
+                    str(total_predictions),
+                    str(active_models),
+                    f"{avg_confidence:.1%}",
+                    f"{total_rewards:+.2f}",
+                    predictions_trend,
+                    models_status,
+                    confidence_trend,
+                    rewards_trend,
+                    timeline_fig,
+                    performance_fig
+                )
+
+            except Exception as e:
+                logger.error(f"Error updating prediction tracking: {e}")
+                # Return safe defaults
+                empty_fig = {
+                    'data': [],
+                    'layout': {
+                        'template': 'plotly_dark',
+                        'height': 300,
+                        'annotations': [{
+                            'text': f'Error loading data: {str(e)[:50]}...',
+                            'xref': 'paper', 'yref': 'paper',
+                            'x': 0.5, 'y': 0.5,
+                            'showarrow': False,
+                            'font': {'size': 12, 'color': 'red'}
+                        }]
+                    }
+                }
+                return "0", "0", "0", "0.00", empty_fig, empty_fig
+
+        def _get_real_model_performance_data(self) -> Dict[str, Any]:
+            """Get real model performance data from orchestrator"""
+            try:
+                model_data = {
+                    'total_predictions': 0,
+                    'pending_predictions': 0,
+                    'active_models': 0,
+                    'total_rewards': 0.0,
+                    'models': [],
+                    'recent_predictions': []
+                }
+
+                if not self.orchestrator:
+                    return model_data
+
+                # Get model states from orchestrator
+                model_states = getattr(self.orchestrator, 'model_states', {})
+
+                # Check each model type
+                for model_type in ['cnn', 'dqn', 'cob_rl']:
+                    if model_type in model_states:
+                        state = model_states[model_type]
+                        is_loaded = state.get('checkpoint_loaded', False)
+                        if is_loaded:
+                            model_data['active_models'] += 1
+
+                        # Add model info (include all models, not just loaded ones)
+                        model_data['models'].append({
+                            'name': model_type.upper(),
+                            'status': 'LOADED' if is_loaded else 'FRESH',
+                            'current_loss': state.get('current_loss', 0.0),
+                            'best_loss': state.get('best_loss', None),
+                            'checkpoint_filename': state.get('checkpoint_filename', 'none'),
+                            'training_sessions': getattr(self.orchestrator, f'{model_type}_training_count', 0),
+                            'last_inference': getattr(self.orchestrator, f'{model_type}_last_inference', None),
+                            'inference_count': getattr(self.orchestrator, f'{model_type}_inference_count', 0)
+                        })
+
+                # Get recent predictions from our tracking
+                if hasattr(self, 'recent_decisions') and self.recent_decisions:
+                    for decision in list(self.recent_decisions)[-20:]:  # Last 20 decisions
+                        model_data['recent_predictions'].append({
+                            'timestamp': decision.get('timestamp', datetime.now()),
+                            'action': decision.get('action', 'UNKNOWN'),
+                            'confidence': decision.get('confidence', 0.0),
+                            'reward': decision.get('reward', 0.0),
+                            'outcome': decision.get('outcome', 'pending')
+                        })
+
+                model_data['total_predictions'] = len(model_data['recent_predictions'])
+                model_data['pending_predictions'] = sum(1 for p in model_data['recent_predictions']
+                                                        if p.get('outcome') == 'pending')
+                model_data['total_rewards'] = sum(p.get('reward', 0.0) for p in model_data['recent_predictions'])
+
+                return model_data
+
+            except Exception as e:
+                logger.error(f"Error getting real model performance data: {e}")
+                return {
+                    'total_predictions': 0,
+                    'pending_predictions': 0,
+                    'active_models': 0,
+                    'total_rewards': 0.0,
+                    'models': [],
+                    'recent_predictions': []
+                }
+
+        def _create_prediction_timeline_chart(self, model_stats: Dict[str, Any]) -> Dict[str, Any]:
+            """Create prediction timeline chart with real data"""
+            try:
+                recent_predictions = model_stats.get('recent_predictions', [])
+
+                if not recent_predictions:
+                    return {
+                        'data': [],
+                        'layout': {
+                            'title': 'Recent Predictions Timeline',
+                            'template': 'plotly_dark',
+                            'height': 300,
+                            'annotations': [{
+                                'text': 'No predictions yet',
+                                'xref': 'paper', 'yref': 'paper',
+                                'x': 0.5, 'y': 0.5,
+                                'showarrow': False,
+                                'font': {'size': 16, 'color': 'gray'}
+                            }]
+                        }
+                    }
+
+                # Prepare data for timeline
+                timestamps = []
+                confidences = []
+                rewards = []
+                actions = []
+
+                for pred in recent_predictions[-50:]:  # Last 50 predictions
+                    timestamps.append(pred.get('timestamp', datetime.now()))
+                    confidences.append(pred.get('confidence', 0.0) * 100)  # Convert to percentage
+                    rewards.append(pred.get('reward', 0.0))
+                    actions.append(pred.get('action', 'UNKNOWN'))
+
+                # Create timeline chart
+                fig = {
+                    'data': [
+                        {
+                            'x': timestamps,
+                            'y': confidences,
+                            'type': 'scatter',
+                            'mode': 'lines+markers',
+                            'name': 'Confidence (%)',
+                            'line': {'color': '#00ff88', 'width': 2},
+                            'marker': {'size': 6}
+                        },
+                        {
+                            'x': timestamps,
+                            'y': rewards,
+                            'type': 'bar',
+                            'name': 'Reward',
+                            'yaxis': 'y2',
+                            'marker': {'color': '#ff6b6b'}
+                        }
+                    ],
+                    'layout': {
+                        'title': 'Prediction Timeline (Last 50)',
+                        'template': 'plotly_dark',
+                        'height': 300,
+                        'xaxis': {
+                            'title': 'Time',
+                            'type': 'date'
+                        },
+                        'yaxis': {
+                            'title': 'Confidence (%)',
+                            'range': [0, 100]
+                        },
+                        'yaxis2': {
+                            'title': 'Reward',
+                            'overlaying': 'y',
+                            'side': 'right',
+                            'showgrid': False
+                        },
+                        'showlegend': True,
+                        'legend': {'x': 0, 'y': 1}
+                    }
+                }
+
+                return fig
+
+            except Exception as e:
+                logger.error(f"Error creating prediction timeline chart: {e}")
+                return {
+                    'data': [],
+                    'layout': {
+                        'title': 'Prediction Timeline',
+                        'template': 'plotly_dark',
+                        'height': 300,
+                        'annotations': [{
+                            'text': f'Chart error: {str(e)[:30]}...',
+                            'xref': 'paper', 'yref': 'paper',
+                            'x': 0.5, 'y': 0.5,
+                            'showarrow': False,
+                            'font': {'size': 12, 'color': 'red'}
+                        }]
+                    }
+                }
+
+        def _create_model_performance_chart(self, model_stats: Dict[str, Any]) -> Dict[str, Any]:
+            """Create model performance chart with real metrics"""
+            try:
+                models = model_stats.get('models', [])
+
+                if not models:
+                    return {
+                        'data': [],
+                        'layout': {
+                            'title': 'Model Performance',
+                            'template': 'plotly_dark',
+                            'height': 300,
+                            'annotations': [{
+                                'text': 'No active models',
+                                'xref': 'paper', 'yref': 'paper',
+                                'x': 0.5, 'y': 0.5,
+                                'showarrow': False,
+                                'font': {'size': 16, 'color': 'gray'}
+                            }]
+                        }
+                    }
+
+                # Prepare data for performance chart
+                model_names = []
+                current_losses = []
+                best_losses = []
+                training_sessions = []
+                inference_counts = []
+                statuses = []
+
+                for model in models:
+                    model_names.append(model.get('name', 'Unknown'))
+                    current_losses.append(model.get('current_loss', 0.0))
+                    best_losses.append(model.get('best_loss', model.get('current_loss', 0.0)))
+                    training_sessions.append(model.get('training_sessions', 0))
+                    inference_counts.append(model.get('inference_count', 0))
+                    statuses.append(model.get('status', 'Unknown'))
+
+                # Create comprehensive performance chart
+                fig = {
+                    'data': [
+                        {
+                            'x': model_names,
+                            'y': current_losses,
+                            'type': 'bar',
+                            'name': 'Current Loss',
+                            'marker': {'color': '#ff6b6b'},
+                            'yaxis': 'y1'
+                        },
+                        {
+                            'x': model_names,
+                            'y': best_losses,
+                            'type': 'bar',
+                            'name': 'Best Loss',
+                            'marker': {'color': '#4ecdc4'},
+                            'yaxis': 'y1'
+                        },
+                        {
+                            'x': model_names,
+                            'y': training_sessions,
+                            'type': 'scatter',
+                            'mode': 'markers',
+                            'name': 'Training Sessions',
+                            'marker': {'color': '#ffd93d', 'size': 12},
+                            'yaxis': 'y2'
+                        },
+                        {
+                            'x': model_names,
+                            'y': inference_counts,
+                            'type': 'scatter',
+                            'mode': 'markers',
+                            'name': 'Inference Count',
+                            'marker': {'color': '#a8e6cf', 'size': 8},
+                            'yaxis': 'y2'
+                        }
+                    ],
+                    'layout': {
+                        'title': 'Real Model Performance & Activity',
+                        'template': 'plotly_dark',
+                        'height': 300,
+                        'xaxis': {
+                            'title': 'Model'
+                        },
+                        'yaxis': {
+                            'title': 'Loss',
+                            'side': 'left'
+                        },
+                        'yaxis2': {
+                            'title': 'Activity Count',
+                            'side': 'right',
+                            'overlaying': 'y',
+                            'showgrid': False
+                        },
+                        'showlegend': True,
+                        'legend': {'x': 0, 'y': 1}
+                    }
+                }
+
+                # Add status annotations with more detail
+                annotations = []
+                for i, (name, status) in enumerate(zip(model_names, statuses)):
+                    color = '#00ff88' if status == 'LOADED' else '#ff6b6b'
+                    loss_text = f"{status}<br>Loss: {current_losses[i]:.4f}"
+                    if training_sessions[i] > 0:
+                        loss_text += f"<br>Trained: {training_sessions[i]}x"
+                    if inference_counts[i] > 0:
+                        loss_text += f"<br>Inferred: {inference_counts[i]}x"
+
+                    annotations.append({
+                        'text': loss_text,
+                        'x': name,
+                        'y': max(current_losses[i] * 1.1, 0.01),
+                        'xref': 'x',
+                        'yref': 'y',
+                        'showarrow': False,
+                        'font': {'color': color, 'size': 8},
+                        'align': 'center'
+                    })
+
+                fig['layout']['annotations'] = annotations
+
+                return fig
+
+            except Exception as e:
+                logger.error(f"Error creating model performance chart: {e}")
+                return {
+                    'data': [],
+                    'layout': {
+                        'title': 'Model Performance',
+                        'template': 'plotly_dark',
+                        'height': 300,
+                        'annotations': [{
+                            'text': f'Chart error: {str(e)[:30]}...',
+                            'xref': 'paper', 'yref': 'paper',
+                            'x': 0.5, 'y': 0.5,
+                            'showarrow': False,
+                            'font': {'size': 12, 'color': 'red'}
+                        }]
+                    }
+                }
+                return "0", "0", "0.0%", "0.00", "❌ Error", "❌ Error", "❌ Error", "❌ Error", error_fig, error_fig

         @self.app.callback(
             [Output('eth-cob-content', 'children'),
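Note on the hunk above: the three new helpers share one implicit data contract. _get_real_model_performance_data builds a dict, and the two chart builders only read the keys shown below. A minimal sketch of that shape with purely illustrative values (not taken from a live run; the checkpoint filename and numbers are made up):

    from datetime import datetime

    # Hypothetical example of the model_stats dict consumed by the chart helpers.
    model_stats = {
        'total_predictions': 12,       # len(recent_predictions)
        'pending_predictions': 3,      # entries whose 'outcome' is still 'pending'
        'active_models': 2,            # models whose checkpoint_loaded flag is True
        'total_rewards': 1.25,         # sum of per-prediction rewards
        'models': [{
            'name': 'DQN',             # model_type.upper()
            'status': 'LOADED',        # or 'FRESH' when no checkpoint was loaded
            'current_loss': 0.0421,
            'best_loss': 0.0387,
            'checkpoint_filename': 'dqn_best.pt',   # illustrative only
            'training_sessions': 5,
            'last_inference': None,
            'inference_count': 140,
        }],
        'recent_predictions': [{
            'timestamp': datetime(2025, 9, 9, 3, 50),
            'action': 'BUY',
            'confidence': 0.62,        # plotted as a percentage on the timeline
            'reward': 0.10,
            'outcome': 'pending',
        }],
    }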


@@ -37,33 +37,37 @@ class DashboardLayoutManager:
                     "🧠 Model Predictions & Performance Tracking"
                 ], className="text-light mb-3"),
-                # Summary cards row
+                # Summary cards row - Enhanced with real metrics
                 html.Div([
                     html.Div([
                         html.Div([
                             html.H6("0", id="total-predictions-count", className="mb-0 text-primary"),
-                            html.Small("Total Predictions", className="text-light")
-                        ], className="card-body text-center p-2 bg-dark")
-                    ], className="card col-md-3 mx-1 bg-dark border-secondary"),
-                    html.Div([
-                        html.Div([
-                            html.H6("0", id="pending-predictions-count", className="mb-0 text-warning"),
-                            html.Small("Pending Resolution", className="text-light")
+                            html.Small("Recent Signals", className="text-light"),
+                            html.Small("", id="predictions-trend", className="d-block text-xs text-muted")
                         ], className="card-body text-center p-2 bg-dark")
                     ], className="card col-md-3 mx-1 bg-dark border-secondary"),
                     html.Div([
                         html.Div([
                             html.H6("0", id="active-models-count", className="mb-0 text-info"),
-                            html.Small("Active Models", className="text-light")
+                            html.Small("Loaded Models", className="text-light"),
+                            html.Small("", id="models-status", className="d-block text-xs text-success")
                         ], className="card-body text-center p-2 bg-dark")
                     ], className="card col-md-3 mx-1 bg-dark border-secondary"),
                     html.Div([
                         html.Div([
-                            html.H6("0.0", id="total-rewards-sum", className="mb-0 text-success"),
-                            html.Small("Total Rewards", className="text-light")
+                            html.H6("0.00", id="avg-confidence", className="mb-0 text-warning"),
+                            html.Small("Avg Confidence", className="text-light"),
+                            html.Small("", id="confidence-trend", className="d-block text-xs text-muted")
+                        ], className="card-body text-center p-2 bg-dark")
+                    ], className="card col-md-3 mx-1 bg-dark border-secondary"),
+                    html.Div([
+                        html.Div([
+                            html.H6("+0.00", id="total-rewards-sum", className="mb-0 text-success"),
+                            html.Small("Total Rewards", className="text-light"),
+                            html.Small("", id="rewards-trend", className="d-block text-xs text-muted")
                         ], className="card-body text-center p-2 bg-dark")
                     ], className="card col-md-3 mx-1 bg-dark border-secondary")
                 ], className="row mb-3"),
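The callback in the first file and the cards above communicate only through component ids, and Dash matches the returned tuple to the Output list positionally, so the ten ids here must stay in the same order as the ten return values. A stripped-down, self-contained sketch of that wiring (ids mirror the layout, but the figures, values, and 2-second interval are placeholders; the real project builds its layout through DashboardLayoutManager rather than inline):

    # Minimal standalone sketch of the id/Output wiring assumed by this commit.
    from dash import Dash, dcc, html, Input, Output

    app = Dash(__name__)

    app.layout = html.Div([
        html.H6("0", id="total-predictions-count"),
        html.Small("", id="predictions-trend"),
        html.H6("0", id="active-models-count"),
        html.Small("", id="models-status"),
        html.H6("0.00", id="avg-confidence"),
        html.Small("", id="confidence-trend"),
        html.H6("+0.00", id="total-rewards-sum"),
        html.Small("", id="rewards-trend"),
        dcc.Graph(id="prediction-timeline-chart"),
        dcc.Graph(id="model-performance-chart"),
        dcc.Interval(id="interval-component", interval=2000, n_intervals=0),
    ])

    @app.callback(
        [Output("total-predictions-count", "children"),
         Output("active-models-count", "children"),
         Output("avg-confidence", "children"),
         Output("total-rewards-sum", "children"),
         Output("predictions-trend", "children"),
         Output("models-status", "children"),
         Output("confidence-trend", "children"),
         Output("rewards-trend", "children"),
         Output("prediction-timeline-chart", "figure"),
         Output("model-performance-chart", "figure")],
        [Input("interval-component", "n_intervals")],
    )
    def update_prediction_tracking(n_intervals):
        # Placeholder values: the tuple order must match the Output list above.
        empty_fig = {"data": [], "layout": {"template": "plotly_dark", "height": 300}}
        return ("0", "0", "0.0%", "+0.00", "⏸️ Waiting", "⚠️ Loading...",
                "📉 0.0%", "📊 Learning", empty_fig, empty_fig)

    if __name__ == "__main__":
        app.run(debug=True)  # older Dash versions use app.run_server()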