#!/usr/bin/env python3
"""Test script to verify model stats functionality."""
import sys
import os

# Make the project root importable when run directly as a script.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

import logging

from core.orchestrator import TradingOrchestrator

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def test_model_stats() -> bool:
    """Exercise the orchestrator's model-training-stats API end to end.

    Creates a TradingOrchestrator (which initializes model states), syncs
    those states with the dashboard, logs the current per-model stats,
    then updates the CNN loss and logs the refreshed stats to confirm the
    update took effect.

    Returns:
        True if every step completed, False if any step raised.
    """
    try:
        logger.info("Testing model stats functionality...")

        # Create orchestrator instance (this will initialize model states)
        orchestrator = TradingOrchestrator()

        # Sync with dashboard values
        orchestrator.sync_model_states_with_dashboard()

        # Get current model stats
        stats = orchestrator.get_model_training_stats()

        logger.info("Current model training stats:")
        for model_name, model_stats in stats.items():
            # A model with no recorded loss has not trained yet.
            if model_stats['current_loss'] is not None:
                logger.info(
                    "  %s: %.4f loss, %.1f%% improvement",
                    model_name.upper(),
                    model_stats['current_loss'],
                    model_stats['improvement_pct'],
                )
            else:
                logger.info("  %s: No training data yet", model_name.upper())

        # Test updating a model loss
        orchestrator.update_model_loss('cnn', 0.0001)
        logger.info("Updated CNN loss to 0.0001")

        # Get updated stats and verify the CNN entry reflects the change
        updated_stats = orchestrator.get_model_training_stats()
        cnn_stats = updated_stats['cnn']
        logger.info(
            "CNN updated: %.4f loss, %.1f%% improvement",
            cnn_stats['current_loss'],
            cnn_stats['improvement_pct'],
        )

        return True

    except Exception as e:
        # logger.exception records the full traceback (logger.error did not),
        # which is what a failing test script should leave behind.
        logger.exception("❌ Model stats test failed: %s", e)
        return False


if __name__ == "__main__":
    success = test_model_stats()
    sys.exit(0 if success else 1)