logging
This commit is contained in:
@ -1872,32 +1872,67 @@ class EnhancedRealtimeTrainingSystem:
|
||||
def _log_training_progress(self):
    """Log a comprehensive progress report for the enhanced training system.

    Emits a multi-line report through the module logger covering: buffer
    sizes, real-time data-stream depths, recent DQN/CNN losses, the latest
    validation score, the training-interval configuration, and recent
    prediction statistics. A machine-readable ``stats`` summary dict is also
    logged on a single line for log scraping. Every 20th iteration the
    orchestrator's detailed model statistics are logged as well, when the
    orchestrator supports it.

    Raises: nothing — any failure is caught and logged at ERROR so that a
    reporting problem can never interrupt the training loop.
    """
    try:
        # Machine-readable snapshot; enriched below and logged as one line.
        stats = {
            'iteration': self.training_iteration,
            'experience_buffer': len(self.experience_buffer),
            'priority_buffer': len(self.priority_buffer),
            'dqn_memory': self._get_dqn_memory_size(),
            'data_streams': {
                'ohlcv_1m': len(self.real_time_data['ohlcv_1m']),
                'ticks': len(self.real_time_data['ticks']),
                'cob_snapshots': len(self.real_time_data['cob_snapshots']),
                'market_events': len(self.real_time_data['market_events'])
            }
        }

        logger.info("=" * 60)
        logger.info("ENHANCED TRAINING SYSTEM PROGRESS REPORT")
        logger.info("=" * 60)

        # Basic training statistics
        logger.info(f"Training Iteration: {self.training_iteration}")
        logger.info(f"Experience Buffer: {len(self.experience_buffer)} samples")
        logger.info(f"Priority Buffer: {len(self.priority_buffer)} samples")
        logger.info(f"DQN Memory: {self._get_dqn_memory_size()} experiences")

        # Data stream statistics
        logger.info("\nDATA STREAMS:")
        logger.info(f"  OHLCV 1m: {len(self.real_time_data['ohlcv_1m'])} records")
        logger.info(f"  Ticks: {len(self.real_time_data['ticks'])} records")
        logger.info(f"  COB Snapshots: {len(self.real_time_data['cob_snapshots'])} records")
        logger.info(f"  Market Events: {len(self.real_time_data['market_events'])} records")

        # Performance metrics
        logger.info("\nPERFORMANCE METRICS:")
        if self.performance_history['dqn_losses']:
            dqn_losses = list(self.performance_history['dqn_losses'])
            # Compute the rolling average once; reuse for dict and report.
            dqn_avg_loss = np.mean(dqn_losses[-10:])
            stats['dqn_avg_loss'] = dqn_avg_loss
            dqn_recent_loss = dqn_losses[-1]  # non-empty: guarded above
            logger.info(f"  DQN Average Loss (10): {dqn_avg_loss:.4f}")
            logger.info(f"  DQN Recent Loss: {dqn_recent_loss:.4f}")

        if self.performance_history['cnn_losses']:
            cnn_losses = list(self.performance_history['cnn_losses'])
            cnn_avg_loss = np.mean(cnn_losses[-10:])
            stats['cnn_avg_loss'] = cnn_avg_loss
            cnn_recent_loss = cnn_losses[-1]  # non-empty: guarded above
            logger.info(f"  CNN Average Loss (10): {cnn_avg_loss:.4f}")
            logger.info(f"  CNN Recent Loss: {cnn_recent_loss:.4f}")

        if self.performance_history['validation_scores']:
            validation_score = self.performance_history['validation_scores'][-1]['combined_score']
            stats['validation_score'] = validation_score
            logger.info(f"  Validation Score: {validation_score:.3f}")

        # Training configuration
        logger.info("\nTRAINING CONFIGURATION:")
        logger.info(f"  DQN Training Interval: {self.training_config['dqn_training_interval']} iterations")
        logger.info(f"  CNN Training Interval: {self.training_config['cnn_training_interval']} iterations")
        logger.info(f"  COB RL Training Interval: {self.training_config['cob_rl_training_interval']} iterations")
        logger.info(f"  Validation Interval: {self.training_config['validation_interval']} iterations")

        # Prediction statistics (prediction_history is optional on self)
        if hasattr(self, 'prediction_history') and self.prediction_history:
            logger.info("\nPREDICTION STATISTICS:")
            # A [-10:] slice already returns the whole list when shorter.
            recent_predictions = list(self.prediction_history)[-10:]
            logger.info(f"  Recent Predictions: {len(recent_predictions)}")
            if recent_predictions:
                avg_confidence = np.mean([p.get('confidence', 0) for p in recent_predictions])
                logger.info(f"  Average Confidence: {avg_confidence:.3f}")

        logger.info("=" * 60)

        # Single-line machine-readable summary, emitted after the dict is
        # fully populated with the loss/validation fields above.
        logger.info(f"ENHANCED TRAINING PROGRESS: {stats}")

        # Periodic comprehensive logging (every 20th iteration)
        if self.training_iteration % 20 == 0:
            logger.info("PERIODIC ENHANCED TRAINING COMPREHENSIVE LOG:")
            if hasattr(self.orchestrator, 'log_model_statistics'):
                self.orchestrator.log_model_statistics(detailed=True)

    except Exception as e:
        # Single ERROR record (the diff previously double-logged this).
        logger.error(f"Error logging enhanced training progress: {e}")
|
||||
|
||||
def _validation_worker(self):
|
||||
"""Background worker for continuous validation"""
|
||||
|
Reference in New Issue
Block a user