#!/usr/bin/env python3
"""
Test Model Statistics Implementation

This script tests the new model statistics tracking functionality.
"""

import asyncio

from core.orchestrator import TradingOrchestrator
from core.data_provider import DataProvider


async def test_model_statistics():
    """Test the model statistics tracking."""
    print("=== Testing Model Statistics ===")

    # 1. Initialize the orchestrator
    print("1. Initializing orchestrator...")
    data_provider = DataProvider()
    orchestrator = TradingOrchestrator(data_provider=data_provider)

    # Give the orchestrator a moment to finish initializing
    await asyncio.sleep(2)

    # 2. Statistics before any predictions have run
    print("\n2. Initial model statistics:")
    orchestrator.log_model_statistics()

    # 3. Run a few prediction batches to generate statistics
    print("\n3. Running predictions to generate statistics...")
    for i in range(5):
        print(f"   Running prediction batch {i + 1}/5...")
        predictions = await orchestrator._get_all_predictions('ETH/USDT')
        print(f"   Got {len(predictions)} predictions")
        await asyncio.sleep(1)  # Small delay between batches

    # 4. Statistics after the prediction batches
    print("\n4. Updated model statistics:")
    orchestrator.log_model_statistics(detailed=True)

    # 5. Per-model summary as plain dictionaries (JSON-friendly)
    print("\n5. Statistics summary (JSON format):")
    summary = orchestrator.get_model_statistics_summary()
    for model_name, stats in summary.items():
        print(f"   {model_name}: {stats}")

    # 6. Look up statistics for each model individually
    print("\n6. Individual model statistics:")
    for model_name in orchestrator.model_statistics.keys():
        stats = orchestrator.get_model_statistics(model_name)
        if stats:
            print(f"   {model_name}: {stats.total_inferences} inferences, "
                  f"rate={stats.inference_rate_per_minute:.1f}/min")

    print("\n✅ Model statistics test completed successfully!")


if __name__ == "__main__":
    asyncio.run(test_model_statistics())
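

# ---------------------------------------------------------------------------
# Optional: an assertion-based variant of the checks above. This is a minimal
# sketch only, assuming the ModelStatistics objects expose the same
# `total_inferences` and `inference_rate_per_minute` attributes used in
# test_model_statistics(). It is not wired into the script's entry point and
# is included purely for illustration of how the printed checks could become
# automated assertions.
# ---------------------------------------------------------------------------

async def _assertion_sketch():
    """Sketch: verify statistics invariants instead of printing them."""
    orchestrator = TradingOrchestrator(data_provider=DataProvider())
    await asyncio.sleep(2)  # allow initialization, as in the main test

    # One prediction batch should be enough to register inference activity.
    await orchestrator._get_all_predictions('ETH/USDT')

    # Every tracked model should appear in the summary.
    summary = orchestrator.get_model_statistics_summary()
    assert summary, "expected statistics for at least one model"

    for model_name in orchestrator.model_statistics:
        stats = orchestrator.get_model_statistics(model_name)
        if stats:
            # Counters and rates should be non-negative after a batch.
            assert stats.total_inferences >= 0
            assert stats.inference_rate_per_minute >= 0.0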