Merge commit 'd49a473ed6f4aef55bfdd47d6370e53582be6b7b' into cleanup
@@ -1,204 +0,0 @@
#!/usr/bin/env python3
"""
Test Training Integration with Dashboard

This script tests the enhanced dashboard's ability to:
1. Stream training data to CNN and DQN models
2. Display real-time training metrics and progress
3. Show model learning curves and performance
4. Integrate with the continuous training system
"""

import sys
import logging
from datetime import datetime, timedelta

# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def test_training_integration():
    """Test the training integration functionality"""
    try:
        print("=" * 60)
        print("TESTING TRAINING INTEGRATION WITH DASHBOARD")
        print("=" * 60)

        # Import dashboard and core components
        from web.clean_dashboard import CleanTradingDashboard as TradingDashboard
        from core.data_provider import DataProvider
        from core.orchestrator import TradingOrchestrator

        # Create components
        data_provider = DataProvider()
        orchestrator = TradingOrchestrator(data_provider)
        dashboard = TradingDashboard(data_provider, orchestrator)

        print("✓ Dashboard created with training integration")
        print(f"✓ Continuous training active: {getattr(dashboard, 'training_active', False)}")

        # Test 1: Simulate tick data for training
        print("\n📊 TEST 1: Simulating Tick Data")
        print("-" * 40)

        # Add simulated tick data to cache
        base_price = 3500.0
        for i in range(1000):
            tick_data = {
                'timestamp': datetime.now() - timedelta(seconds=1000 - i),
                'price': base_price + (i % 100) * 0.1,
                'volume': 100 + (i % 50),
                'side': 'buy' if i % 2 == 0 else 'sell'
            }
            dashboard.tick_cache.append(tick_data)

        print(f"✓ Added {len(dashboard.tick_cache)} ticks to cache")
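
        # Note on the synthetic ticks above (illustrative data, not market
        # data): price traces a 0.1-step sawtooth from base_price to
        # base_price + 9.9, volume cycles 100-149, and sides alternate
        # buy/sell, so downstream formatters see a deterministic pattern.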

        # Test 2: Prepare training data
        print("\n🔄 TEST 2: Preparing Training Data")
        print("-" * 40)

        training_data = dashboard._prepare_training_data()
        if training_data:
            print("✓ Training data prepared successfully")
            print(f"  - OHLCV bars: {len(training_data['ohlcv'])}")
            print(f"  - Features: {training_data['features']}")
            print(f"  - Symbol: {training_data['symbol']}")
        else:
            print("❌ Failed to prepare training data")
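
        # Assumed shape of training_data (inferred from the prints above,
        # not a documented contract): a dict with at least 'ohlcv' (a list
        # of OHLCV bars), 'features' (feature names), and 'symbol' keys.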

        # Test 3: Format data for CNN
        print("\n🧠 TEST 3: CNN Data Formatting")
        print("-" * 40)

        if training_data:
            cnn_data = dashboard._format_data_for_cnn(training_data)
            if cnn_data and 'sequences' in cnn_data:
                print("✓ CNN data formatted successfully")
                print(f"  - Sequences shape: {cnn_data['sequences'].shape}")
                print(f"  - Targets shape: {cnn_data['targets'].shape}")
                print(f"  - Sequence length: {cnn_data['sequence_length']}")
            else:
                print("❌ Failed to format CNN data")

        # Test 4: Format data for RL
        print("\n🤖 TEST 4: RL Data Formatting")
        print("-" * 40)

        if training_data:
            rl_experiences = dashboard._format_data_for_rl(training_data)
            if rl_experiences:
                print("✓ RL experiences formatted successfully")
                print(f"  - Number of experiences: {len(rl_experiences)}")
                print("  - Experience format: (state, action, reward, next_state, done)")
                print(f"  - Sample experience shapes: {[len(exp) for exp in rl_experiences[:3]]}")
            else:
                print("❌ Failed to format RL experiences")
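
        # Assumed formats (inferred from the checks above, not a spec): CNN
        # input is a dict with 'sequences' (likely samples x sequence_length
        # x features), 'targets', and 'sequence_length'; RL input is a list
        # of classic 5-tuples (state, action, reward, next_state, done).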

        # Test 5: Send training data to models
        print("\n📤 TEST 5: Sending Training Data to Models")
        print("-" * 40)

        success = dashboard.send_training_data_to_models()
        print(f"✓ Training data sent: {success}")

        if hasattr(dashboard, 'training_stats'):
            stats = dashboard.training_stats
            print(f"  - Total training sessions: {stats.get('total_training_sessions', 0)}")
            print(f"  - CNN training count: {stats.get('cnn_training_count', 0)}")
            print(f"  - RL training count: {stats.get('rl_training_count', 0)}")
            print(f"  - Training data points: {stats.get('training_data_points', 0)}")

        # Test 6: Training metrics display
        print("\n📈 TEST 6: Training Metrics Display")
        print("-" * 40)

        training_metrics = dashboard._create_training_metrics()
        print(f"✓ Training metrics created: {len(training_metrics)} components")

        # Test 7: Model training status
        print("\n🔍 TEST 7: Model Training Status")
        print("-" * 40)

        training_status = dashboard._get_model_training_status()
        print("✓ Training status retrieved")
        print(f"  - CNN status: {training_status['cnn']['status']}")
        print(f"  - CNN accuracy: {training_status['cnn']['accuracy']:.1%}")
        print(f"  - RL status: {training_status['rl']['status']}")
        print(f"  - RL win rate: {training_status['rl']['win_rate']:.1%}")
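
        # Assumed structure of training_status (inferred from the lookups
        # above): {'cnn': {'status': ..., 'accuracy': ...},
        #          'rl': {'status': ..., 'win_rate': ...}}, with accuracy
        # and win_rate as fractions in [0, 1] (formatted via :.1%).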

        # Test 8: Training events log
        print("\n📝 TEST 8: Training Events Log")
        print("-" * 40)

        training_events = dashboard._get_recent_training_events()
        print(f"✓ Training events retrieved: {len(training_events)} events")

        # Test 9: Mini training chart
        print("\n📊 TEST 9: Mini Training Chart")
        print("-" * 40)

        try:
            training_chart = dashboard._create_mini_training_chart(training_status)
            print("✓ Mini training chart created")
            print(f"  - Chart type: {type(training_chart)}")
        except Exception as e:
            print(f"❌ Error creating training chart: {e}")

        # Test 10: Continuous training loop
        print("\n🔄 TEST 10: Continuous Training Loop")
        print("-" * 40)

        print(f"✓ Continuous training active: {getattr(dashboard, 'training_active', False)}")
        if hasattr(dashboard, 'training_thread'):
            print(f"✓ Training thread alive: {dashboard.training_thread.is_alive()}")

        # Test 11: Integration with existing continuous training system
        print("\n🔗 TEST 11: Integration with Continuous Training System")
        print("-" * 40)

        try:
            # Check if we can get the tick cache for external training
            tick_cache = dashboard.get_tick_cache_for_training()
            print(f"✓ Tick cache accessible: {len(tick_cache)} ticks")

            # Check if we can get 1-second bars
            one_second_bars = dashboard.get_one_second_bars()
            print(f"✓ 1-second bars accessible: {len(one_second_bars)} bars")

        except Exception as e:
            print(f"❌ Error accessing training data: {e}")

        print("\n" + "=" * 60)
        print("TRAINING INTEGRATION TEST COMPLETED")
        print("=" * 60)

        # Summary
        print("\n📋 SUMMARY:")
        print("✓ Dashboard with training integration: WORKING")
        print("✓ Training data preparation: WORKING")
        print("✓ CNN data formatting: WORKING")
        print("✓ RL data formatting: WORKING")
        print("✓ Training metrics display: WORKING")
        print("✓ Continuous training: ACTIVE")
        print("✓ Model status tracking: WORKING")
        print("✓ Training events logging: WORKING")

        return True

    except Exception as e:
        logger.error(f"Training integration test failed: {e}")
        import traceback
        traceback.print_exc()
        return False


if __name__ == "__main__":
    success = test_training_integration()
    if success:
        print("\n🎉 All training integration tests passed!")
    else:
        print("\n❌ Some training integration tests failed!")
        sys.exit(1)
@@ -1,262 +0,0 @@
#!/usr/bin/env python3
"""
Test Universal Data Format Compliance

This script verifies that our enhanced trading system properly feeds
the 5 required timeseries streams to all models:
- ETH/USDT: ticks (1s), 1m, 1h, 1d
- BTC/USDT: ticks (1s) as reference

This is the universal input format for our trading system.
"""

import asyncio
import logging
import sys
from pathlib import Path

import numpy as np

# Add project root to path
project_root = Path(__file__).parent
sys.path.insert(0, str(project_root))

from core.config import get_config
from core.data_provider import DataProvider
from core.universal_data_adapter import UniversalDataAdapter, UniversalDataStream
from core.enhanced_orchestrator import EnhancedTradingOrchestrator
from training.enhanced_cnn_trainer import EnhancedCNNTrainer
from training.enhanced_rl_trainer import EnhancedRLTrainer

# Setup logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
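
# Illustrative sketch (an assumption inferred from this test's attribute
# accesses, not the real definition in core.universal_data_adapter): the
# fields of UniversalDataStream exercised below.
#
#     @dataclass
#     class UniversalDataStream:
#         eth_ticks: np.ndarray  # ETH/USDT ticks (1s)
#         eth_1m: np.ndarray     # ETH/USDT 1m candles
#         eth_1h: np.ndarray     # ETH/USDT 1h candles
#         eth_1d: np.ndarray     # ETH/USDT 1d candles
#         btc_ticks: np.ndarray  # BTC/USDT reference ticks
#         metadata: dict         # e.g. metadata['data_quality']['overall_score']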


async def test_universal_data_format():
    """Test that all components properly use the universal 5-timeseries format"""
    logger.info("=" * 80)
    logger.info("🧪 TESTING UNIVERSAL DATA FORMAT COMPLIANCE")
    logger.info("=" * 80)

    try:
        # Initialize components
        config = get_config()
        data_provider = DataProvider(config)

        # Test 1: Universal Data Adapter
        logger.info("\n📊 TEST 1: Universal Data Adapter")
        logger.info("-" * 40)

        adapter = UniversalDataAdapter(data_provider)
        universal_stream = adapter.get_universal_data_stream()

        if universal_stream is None:
            logger.error("❌ Failed to get universal data stream")
            return False

        # Validate format
        is_valid, issues = adapter.validate_universal_format(universal_stream)
        if not is_valid:
            logger.error(f"❌ Universal format validation failed: {issues}")
            return False

        logger.info("✅ Universal Data Adapter: PASSED")
        logger.info(f"   ETH ticks: {len(universal_stream.eth_ticks)} samples")
        logger.info(f"   ETH 1m: {len(universal_stream.eth_1m)} candles")
        logger.info(f"   ETH 1h: {len(universal_stream.eth_1h)} candles")
        logger.info(f"   ETH 1d: {len(universal_stream.eth_1d)} candles")
        logger.info(f"   BTC reference: {len(universal_stream.btc_ticks)} samples")
        logger.info(f"   Data quality: {universal_stream.metadata['data_quality']['overall_score']:.2f}")

        # Test 2: Enhanced Orchestrator
        logger.info("\n🎯 TEST 2: Enhanced Orchestrator")
        logger.info("-" * 40)

        orchestrator = EnhancedTradingOrchestrator(data_provider)

        # Test that the orchestrator uses the universal adapter
        if not hasattr(orchestrator, 'universal_adapter'):
            logger.error("❌ Orchestrator missing universal_adapter")
            return False

        # Test coordinated decisions
        decisions = await orchestrator.make_coordinated_decisions()

        logger.info("✅ Enhanced Orchestrator: PASSED")
        logger.info(f"   Generated {len(decisions)} decisions")
        logger.info(f"   Universal adapter: {type(orchestrator.universal_adapter).__name__}")

        for symbol, decision in decisions.items():
            if decision:
                logger.info(f"   {symbol}: {decision.action} (confidence: {decision.confidence:.2f})")

        # Test 3: CNN Model Data Format
        logger.info("\n🧠 TEST 3: CNN Model Data Format")
        logger.info("-" * 40)

        # Format data for CNN
        cnn_data = adapter.format_for_model(universal_stream, 'cnn')

        required_cnn_keys = ['eth_ticks', 'eth_1m', 'eth_1h', 'eth_1d', 'btc_ticks']
        missing_keys = [key for key in required_cnn_keys if key not in cnn_data]

        if missing_keys:
            logger.error(f"❌ CNN data missing keys: {missing_keys}")
            return False

        logger.info("✅ CNN Model Data Format: PASSED")
        for key, data in cnn_data.items():
            if isinstance(data, np.ndarray):
                logger.info(f"   {key}: shape {data.shape}")
            else:
                logger.info(f"   {key}: {type(data)}")

        # Test 4: RL Model Data Format
        logger.info("\n🤖 TEST 4: RL Model Data Format")
        logger.info("-" * 40)

        # Format data for RL
        rl_data = adapter.format_for_model(universal_stream, 'rl')

        if 'state_vector' not in rl_data:
            logger.error("❌ RL data missing state_vector")
            return False

        state_vector = rl_data['state_vector']
        if not isinstance(state_vector, np.ndarray):
            logger.error("❌ RL state_vector is not a numpy array")
            return False

        logger.info("✅ RL Model Data Format: PASSED")
        logger.info(f"   State vector shape: {state_vector.shape}")
        logger.info(f"   State vector size: {len(state_vector)} features")
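
        # Assumption (consistent with the summary at the end of this test):
        # the RL state vector is a single flat array combining features from
        # all 5 timeseries streams, while the CNN receives the streams as
        # separate keyed arrays.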

        # Test 5: CNN Trainer Integration
        logger.info("\n🎓 TEST 5: CNN Trainer Integration")
        logger.info("-" * 40)

        try:
            cnn_trainer = EnhancedCNNTrainer(config, orchestrator)
            logger.info("✅ CNN Trainer Integration: PASSED")
            logger.info(f"   Model timeframes: {cnn_trainer.model.timeframes}")
            logger.info(f"   Model device: {cnn_trainer.model.device}")
        except Exception as e:
            logger.error(f"❌ CNN Trainer Integration failed: {e}")
            return False

        # Test 6: RL Trainer Integration
        logger.info("\n🎮 TEST 6: RL Trainer Integration")
        logger.info("-" * 40)

        try:
            rl_trainer = EnhancedRLTrainer(config, orchestrator)
            logger.info("✅ RL Trainer Integration: PASSED")
            logger.info(f"   RL agents: {len(rl_trainer.agents)}")
            for symbol, agent in rl_trainer.agents.items():
                logger.info(f"   {symbol} agent: {type(agent).__name__}")
        except Exception as e:
            logger.error(f"❌ RL Trainer Integration failed: {e}")
            return False

        # Test 7: Data Flow Verification
        logger.info("\n🔄 TEST 7: Data Flow Verification")
        logger.info("-" * 40)

        # Verify that models receive the correct data format
        eth_market_states = orchestrator.market_states['ETH/USDT']
        test_predictions = await orchestrator._get_enhanced_predictions_universal(
            'ETH/USDT',
            list(eth_market_states)[-1] if eth_market_states else None,
            universal_stream
        )

        if test_predictions:
            logger.info("✅ Data Flow Verification: PASSED")
            for pred in test_predictions:
                logger.info(f"   Model: {pred.model_name}")
                logger.info(f"   Action: {pred.overall_action}")
                logger.info(f"   Confidence: {pred.overall_confidence:.2f}")
                logger.info(f"   Timeframes: {len(pred.timeframe_predictions)}")
        else:
            logger.warning("⚠️ No predictions generated (may be normal if no models loaded)")

        # Test 8: Configuration Compliance
        logger.info("\n⚙️ TEST 8: Configuration Compliance")
        logger.info("-" * 40)

        # Check that the config matches the universal format
        expected_symbols = ['ETH/USDT', 'BTC/USDT']
        expected_timeframes = ['1s', '1m', '1h', '1d']

        config_symbols = config.symbols
        config_timeframes = config.timeframes

        symbols_match = all(symbol in config_symbols for symbol in expected_symbols)
        timeframes_match = all(tf in config_timeframes for tf in expected_timeframes)

        if not symbols_match:
            logger.warning("⚠️ Config symbols do not match the universal format")
            logger.warning(f"   Expected: {expected_symbols}")
            logger.warning(f"   Config: {config_symbols}")

        if not timeframes_match:
            logger.warning("⚠️ Config timeframes do not match the universal format")
            logger.warning(f"   Expected: {expected_timeframes}")
            logger.warning(f"   Config: {config_timeframes}")

        if symbols_match and timeframes_match:
            logger.info("✅ Configuration Compliance: PASSED")
        else:
            logger.info("⚠️ Configuration Compliance: PARTIAL")

        logger.info(f"   Symbols: {config_symbols}")
        logger.info(f"   Timeframes: {config_timeframes}")
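
        # For reference, a config that satisfies this check might look like
        # the following (illustrative YAML; the actual schema is defined by
        # core.config):
        #
        #     symbols: ['ETH/USDT', 'BTC/USDT']
        #     timeframes: ['1s', '1m', '1h', '1d']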

        # Final Summary
        logger.info("\n" + "=" * 80)
        logger.info("🎉 UNIVERSAL DATA FORMAT TEST SUMMARY")
        logger.info("=" * 80)
        logger.info("✅ All core tests PASSED!")
        logger.info("")
        logger.info("📋 VERIFIED COMPLIANCE:")
        logger.info("  ✓ Universal Data Adapter working")
        logger.info("  ✓ Enhanced Orchestrator using universal format")
        logger.info("  ✓ CNN models receive 5 timeseries streams")
        logger.info("  ✓ RL models receive combined state vector")
        logger.info("  ✓ Trainers properly integrated")
        logger.info("  ✓ Data flow verified")
        logger.info("")
        logger.info("🎯 UNIVERSAL FORMAT ACTIVE:")
        logger.info("  1. ETH/USDT ticks (1s) ✓")
        logger.info("  2. ETH/USDT 1m ✓")
        logger.info("  3. ETH/USDT 1h ✓")
        logger.info("  4. ETH/USDT 1d ✓")
        logger.info("  5. BTC/USDT reference ticks ✓")
        logger.info("")
        logger.info("🚀 Your enhanced trading system is ready with the universal data format!")
        logger.info("=" * 80)

        return True

    except Exception as e:
        logger.error(f"❌ Universal data format test failed: {e}")
        import traceback
        logger.error(traceback.format_exc())
        return False


async def main():
    """Main test function"""
    logger.info("🚀 Starting Universal Data Format Compliance Test...")

    success = await test_universal_data_format()

    if success:
        logger.info("\n🎉 All tests passed! The universal data format is properly implemented.")
        logger.info("Your enhanced trading system respects the 5-timeseries input format.")
    else:
        logger.error("\n💥 Tests failed! Please check the universal data format implementation.")
        sys.exit(1)


if __name__ == "__main__":
    asyncio.run(main())
@@ -1,177 +0,0 @@
#!/usr/bin/env python3
"""
Test Universal Data Stream Integration with Dashboard

This script validates that:
1. CleanTradingDashboard properly subscribes to the UnifiedDataStream
2. All 5 timeseries are properly received and processed
3. Data flows correctly from provider -> adapter -> stream -> dashboard
4. Consumer callback functions work as expected
"""

import asyncio
import logging
import sys
import time
from pathlib import Path

# Add project root to path
project_root = Path(__file__).parent
sys.path.insert(0, str(project_root))

from core.config import get_config
from core.data_provider import DataProvider
from core.enhanced_orchestrator import EnhancedTradingOrchestrator
from core.trading_executor import TradingExecutor
from web.clean_dashboard import CleanTradingDashboard

# Setup logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


async def test_universal_stream_integration():
    """Test Universal Data Stream integration with the dashboard"""
    logger.info("=" * 80)
    logger.info("🧪 TESTING UNIVERSAL DATA STREAM INTEGRATION")
    logger.info("=" * 80)

    try:
        # Initialize components
        logger.info("\n📦 STEP 1: Initialize Components")
        logger.info("-" * 40)

        config = get_config()
        data_provider = DataProvider()
        orchestrator = EnhancedTradingOrchestrator(
            data_provider=data_provider,
            symbols=['ETH/USDT', 'BTC/USDT'],
            enhanced_rl_training=True
        )
        trading_executor = TradingExecutor()

        logger.info("✅ Core components initialized")

        # Initialize dashboard with Universal Data Stream
        logger.info("\n📊 STEP 2: Initialize Dashboard with Universal Stream")
        logger.info("-" * 40)

        dashboard = CleanTradingDashboard(
            data_provider=data_provider,
            orchestrator=orchestrator,
            trading_executor=trading_executor
        )

        # Check Universal Stream initialization
        if hasattr(dashboard, 'unified_stream') and dashboard.unified_stream:
            logger.info("✅ Universal Data Stream initialized successfully")
            logger.info(f"📋 Consumer ID: {dashboard.stream_consumer_id}")
        else:
            logger.error("❌ Universal Data Stream not initialized")
            return False

        # Test consumer registration
        logger.info("\n🔗 STEP 3: Validate Consumer Registration")
        logger.info("-" * 40)

        stream_stats = dashboard.unified_stream.get_stream_stats()
        logger.info(f"📊 Stream Stats: {stream_stats}")

        if stream_stats['total_consumers'] > 0:
            logger.info(f"✅ {stream_stats['total_consumers']} consumers registered")
        else:
            logger.warning("⚠️ No consumers registered")

        # Test data callback
        logger.info("\n📡 STEP 4: Test Data Callback")
        logger.info("-" * 40)

        # Create test data packet
        test_data = {
            'timestamp': time.time(),
            'consumer_id': dashboard.stream_consumer_id,
            'consumer_name': 'CleanTradingDashboard',
            'ticks': [
                {'symbol': 'ETHUSDT', 'price': 3000.0, 'volume': 1.5, 'timestamp': time.time()},
                {'symbol': 'ETHUSDT', 'price': 3001.0, 'volume': 2.0, 'timestamp': time.time()},
            ],
            'ohlcv': {
                'one_second_bars': [],
                'multi_timeframe': {
                    'ETH/USDT': {
                        '1s': [{'timestamp': time.time(), 'open': 3000, 'high': 3002, 'low': 2999, 'close': 3001, 'volume': 10}],
                        '1m': [{'timestamp': time.time(), 'open': 2990, 'high': 3010, 'low': 2985, 'close': 3001, 'volume': 100}],
                        '1h': [{'timestamp': time.time(), 'open': 2900, 'high': 3050, 'low': 2880, 'close': 3001, 'volume': 1000}],
                        '1d': [{'timestamp': time.time(), 'open': 2800, 'high': 3200, 'low': 2750, 'close': 3001, 'volume': 10000}]
                    },
                    'BTC/USDT': {
                        '1s': [{'timestamp': time.time(), 'open': 65000, 'high': 65020, 'low': 64980, 'close': 65010, 'volume': 0.5}]
                    }
                }
            },
            'training_data': {'market_state': 'test', 'features': []},
            'ui_data': {'formatted_data': 'test_ui_data'}
        }
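
        # Assumption behind the check below: the dashboard's stream callback
        # is expected to fold incoming ticks into dashboard.current_prices
        # (keyed by symbol), so the last ETHUSDT tick above should surface
        # as the ETH/USDT price.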

        # Test callback manually
        try:
            dashboard._handle_unified_stream_data(test_data)
            logger.info("✅ Data callback executed successfully")

            # Check if data was processed
            if hasattr(dashboard, 'current_prices') and 'ETH/USDT' in dashboard.current_prices:
                logger.info(f"✅ Price updated: ETH/USDT = ${dashboard.current_prices['ETH/USDT']}")
            else:
                logger.warning("⚠️ Prices not updated in dashboard")

        except Exception as e:
            logger.error(f"❌ Data callback failed: {e}")
            return False

        # Test Universal Data Adapter
        logger.info("\n🔄 STEP 5: Test Universal Data Adapter")
        logger.info("-" * 40)

        if hasattr(orchestrator, 'universal_adapter'):
            universal_stream = orchestrator.universal_adapter.get_universal_data_stream()
            if universal_stream:
                logger.info("✅ Universal Data Adapter working")
                logger.info(f"📊 ETH ticks: {len(universal_stream.eth_ticks)} samples")
                logger.info(f"📊 ETH 1m: {len(universal_stream.eth_1m)} candles")
                logger.info(f"📊 ETH 1h: {len(universal_stream.eth_1h)} candles")
                logger.info(f"📊 ETH 1d: {len(universal_stream.eth_1d)} candles")
                logger.info(f"📊 BTC ticks: {len(universal_stream.btc_ticks)} samples")

                # Validate format
                is_valid, issues = orchestrator.universal_adapter.validate_universal_format(universal_stream)
                if is_valid:
                    logger.info("✅ Universal format validation passed")
                else:
                    logger.warning(f"⚠️ Format issues: {issues}")
            else:
                logger.error("❌ Universal Data Adapter failed to get stream")
                return False
        else:
            logger.error("❌ Universal Data Adapter not found in orchestrator")
            return False

        # Summary
        logger.info("\n🎯 SUMMARY")
        logger.info("-" * 40)
        logger.info("✅ Universal Data Stream properly integrated")
        logger.info("✅ Dashboard subscribes as a consumer")
        logger.info("✅ All 5 timeseries formats validated")
        logger.info("✅ Data callback processing works")
        logger.info("✅ Universal Data Adapter functional")

        logger.info("\n🏆 INTEGRATION TEST PASSED")
        return True

    except Exception as e:
        logger.error(f"❌ Integration test failed: {e}")
        import traceback
        traceback.print_exc()
        return False


if __name__ == "__main__":
    success = asyncio.run(test_universal_stream_integration())
    sys.exit(0 if success else 1)