#!/usr/bin/env python3
"""
Test script to debug dashboard data flow issues.

This script tests whether the dashboard can properly retrieve and display model data.
"""

import sys
import os
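# Make the repository root importable so the project-local web/ and core/
# packages resolve when this script is run directly.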
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

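# DEBUG-level logging so each step of the data flow shows up in the output.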
import logging
logging.basicConfig(level=logging.DEBUG)

from web.clean_dashboard import CleanTradingDashboard
from core.orchestrator import TradingOrchestrator
from core.data_provider import DataProvider


def test_dashboard_data_flow():
    """Test whether the dashboard retrieves model data correctly."""

    print("🧪 DASHBOARD DATA FLOW TEST")
    print("=" * 50)

    try:
        # Initialize the core components the dashboard depends on
        data_provider = DataProvider()
        orchestrator = TradingOrchestrator(data_provider=data_provider)

        print("✅ Orchestrator initialized")
        print(f"   Model registry models: {list(orchestrator.model_registry.get_all_models().keys())}")
        print(f"   Model toggle states: {list(orchestrator.model_toggle_states.keys())}")

        # Initialize the dashboard on top of those components
        dashboard = CleanTradingDashboard(
            data_provider=data_provider,
            orchestrator=orchestrator
        )

        print("✅ Dashboard initialized")

        # Test available models
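        # _get_available_models is a private dashboard helper, called directly
        # here to see exactly what the UI layer is given.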
        available_models = dashboard._get_available_models()
        print(f"   Available models: {list(available_models.keys())}")

        # Test training metrics
        print("\n📊 Testing training metrics...")
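        # Collect each model's toggle state; these are passed straight
        # through to _get_training_metrics below.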
        toggle_states = {}
        for model_name in available_models:
            toggle_states[model_name] = orchestrator.get_model_toggle_state(model_name)

        print(f"   Toggle states: {list(toggle_states.keys())}")

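        # _get_training_metrics builds the raw metrics payload that the
        # component manager formats for display below; if this dict is empty
        # or malformed, the problem is upstream of the UI formatting step.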
        metrics_data = dashboard._get_training_metrics(toggle_states)
        print(f"   Metrics data type: {type(metrics_data)}")

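        # A healthy result is a dict containing a 'loaded_models' mapping;
        # anything else means the metrics payload has the wrong shape.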
        if metrics_data and isinstance(metrics_data, dict):
            print(f"   Metrics keys: {list(metrics_data.keys())}")
            if 'loaded_models' in metrics_data:
                loaded_models = metrics_data['loaded_models']
                print(f"   Loaded models count: {len(loaded_models)}")
                for model_name, model_info in loaded_models.items():
                    print(f"    - {model_name}: active={model_info.get('active', False)}")
            else:
                print("   ❌ No 'loaded_models' in metrics_data!")
        else:
            print(f"   ❌ Invalid metrics_data: {metrics_data}")

        # Test component manager formatting
        print("\n🎨 Testing component manager...")
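        # format_training_metrics turns the raw metrics dict into the
        # components the dashboard actually renders; an empty result here
        # would explain a blank panel even when metrics_data looks correct.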
        formatted_components = dashboard.component_manager.format_training_metrics(metrics_data)
        print(f"   Formatted components type: {type(formatted_components)}")
        print(f"   Formatted components count: {len(formatted_components) if formatted_components else 0}")

        if formatted_components:
            print("   ✅ Component manager returned formatted data")
        else:
            print("   ❌ Component manager returned empty data")

        print("\n🚀 Dashboard data flow test completed!")
        return True

    except Exception as e:
        print(f"❌ Test failed with error: {e}")
        import traceback
        traceback.print_exc()
        return False


if __name__ == "__main__":
    test_dashboard_data_flow()