Fix CNN model checkpoint saving and harden training-metrics collection

This commit is contained in:
Dobromir Popov
2025-09-09 03:43:20 +03:00
parent a3029d09c2
commit 55fb865e7f
2 changed files with 59 additions and 21 deletions

View File

@@ -7346,7 +7346,7 @@ class CleanTradingDashboard:
}
metadata = save_checkpoint(
model=checkpoint_data,
model=model, # Pass the actual model, not checkpoint_data
model_name="enhanced_cnn",
model_type="cnn",
performance_metrics=performance_metrics,
@@ -8016,21 +8016,32 @@ class CleanTradingDashboard:
def get_model_performance_metrics(self) -> Dict[str, Any]:
    """Get detailed performance metrics for all models.

    Returns:
        Mapping of model name -> metrics dict with keys:
        'avg_training_time_ms', 'max_training_time_ms', 'min_training_time_ms',
        'total_calls', and 'training_frequency_hz'. Returns {} when no
        training metrics have been recorded yet; on unexpected errors,
        returns zeroed metrics for each expected model so callers that
        index by model name do not fail.
    """
    try:
        # Two attribute names exist across dashboard versions; prefer the
        # newer 'training_performance_metrics', fall back to the legacy
        # 'training_performance'.
        training_metrics = None
        if hasattr(self, 'training_performance_metrics'):
            training_metrics = self.training_performance_metrics
        elif hasattr(self, 'training_performance'):
            training_metrics = self.training_performance
        if not training_metrics:
            return {}

        performance_metrics = {}
        for model_name, metrics in training_metrics.items():
            # Use .get() so partially-populated metric dicts don't raise KeyError.
            training_times = metrics.get('training_times', [])
            total_calls = metrics.get('total_calls', 0)
            if training_times:
                avg_training = sum(training_times) / len(training_times)
                max_training = max(training_times)
                min_training = min(training_times)
                performance_metrics[model_name] = {
                    # Times are recorded in seconds; report in milliseconds.
                    'avg_training_time_ms': round(avg_training * 1000, 2),
                    'max_training_time_ms': round(max_training * 1000, 2),
                    'min_training_time_ms': round(min_training * 1000, 2),
                    'total_calls': total_calls,
                    # Guard against division by zero for degenerate timings.
                    'training_frequency_hz': round(1.0 / avg_training if avg_training > 0 else 0, 1)
                }
            else:
                # No timing samples yet — report zeros but keep the real call count.
                performance_metrics[model_name] = {
                    'avg_training_time_ms': 0,
                    'max_training_time_ms': 0,
                    'min_training_time_ms': 0,
                    'total_calls': total_calls,
                    'training_frequency_hz': 0
                }
        return performance_metrics
    except Exception as e:
        logger.error(f"Error getting performance metrics: {e}")
        # Return a zeroed dict for each expected model to prevent downstream
        # KeyErrors in callers that index by model name.
        return {
            'decision': {'avg_training_time_ms': 0, 'max_training_time_ms': 0, 'min_training_time_ms': 0, 'total_calls': 0, 'training_frequency_hz': 0},
            'cob_rl': {'avg_training_time_ms': 0, 'max_training_time_ms': 0, 'min_training_time_ms': 0, 'total_calls': 0, 'training_frequency_hz': 0},
            'dqn': {'avg_training_time_ms': 0, 'max_training_time_ms': 0, 'min_training_time_ms': 0, 'total_calls': 0, 'training_frequency_hz': 0},
            'cnn': {'avg_training_time_ms': 0, 'max_training_time_ms': 0, 'min_training_time_ms': 0, 'total_calls': 0, 'training_frequency_hz': 0},
            'transformer': {'avg_training_time_ms': 0, 'max_training_time_ms': 0, 'min_training_time_ms': 0, 'total_calls': 0, 'training_frequency_hz': 0}
        }
def create_clean_dashboard(data_provider: Optional[DataProvider] = None, orchestrator: Optional[TradingOrchestrator] = None, trading_executor: Optional[TradingExecutor] = None):