#!/usr/bin/env python3
"""
Test COB Data Integration

This script tests that COB data is properly flowing through to BaseDataInput
and being used in the CNN model predictions.
"""

import logging
import os
import sys
import time

# Add the project root to the import path so the core package resolves.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from core.orchestrator import TradingOrchestrator
from core.config import get_config

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

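
# Both tests below inspect the COB slice of the BaseDataInput feature vector by
# counting non-zero entries in positions 7500-7700, the range the inline
# comments assume follows the OHLCV and BTC sections. The helper here is a
# small optional sketch of that check, based on the same assumed layout;
# adjust the bounds if the layout differs in your build.
def count_nonzero_cob_features(features, start=7500, end=7700):
    """Count non-zero values in the assumed COB slice of a feature vector."""
    return sum(1 for value in features[start:end] if value != 0.0)

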
def test_cob_data_flow():
    """Test that COB data flows through to BaseDataInput."""
    logger.info("=== Testing COB Data Integration ===")

    try:
        # Initialize the orchestrator
        config = get_config()
        orchestrator = TradingOrchestrator(
            symbol="ETH/USDT",
            config=config
        )

        logger.info("✅ Orchestrator initialized")

        # Check whether COB integration is available
        if orchestrator.cob_integration:
            logger.info("✅ COB integration is available")
        else:
            logger.warning("⚠️ COB integration is not available")

        # Give COB data a few seconds to arrive
        logger.info("Waiting for COB data...")
        time.sleep(5)

        # Test building BaseDataInput
        symbol = "ETH/USDT"
        base_data = orchestrator.build_base_data_input(symbol)

        if base_data:
            logger.info("✅ BaseDataInput created successfully")

            # Check whether COB data is present
            if base_data.cob_data:
                logger.info("✅ COB data is present in BaseDataInput")
                logger.info(f"   COB current price: {base_data.cob_data.current_price}")
                logger.info(f"   COB bucket size: {base_data.cob_data.bucket_size}")
                logger.info(f"   COB price buckets: {len(base_data.cob_data.price_buckets)} buckets")
                logger.info(f"   COB bid/ask imbalance: {len(base_data.cob_data.bid_ask_imbalance)} entries")

                # Test feature vector generation
                features = base_data.get_feature_vector()
                logger.info(f"✅ Feature vector generated: {len(features)} features")

                # Check whether the COB features are non-zero (indicating real data).
                # COB features occupy positions 7500-7700, after the OHLCV and BTC data.
                cob_features = features[7500:7700]  # 200 COB features
                non_zero_cob = sum(1 for f in cob_features if f != 0.0)

                if non_zero_cob > 0:
                    logger.info(f"✅ COB features contain real data: {non_zero_cob}/200 non-zero features")
                else:
                    logger.warning("⚠️ COB features are all zeros (no real COB data)")
            else:
                logger.warning("⚠️ COB data is None in BaseDataInput")

                # Check whether COB data is at least present in the cache
                if hasattr(orchestrator, 'data_integration'):
                    cached_cob = orchestrator.data_integration.cache.get('cob_data', symbol)
                    if cached_cob:
                        logger.info("✅ COB data found in cache but not in BaseDataInput")
                    else:
                        logger.warning("⚠️ No COB data in cache either")

            # Test a CNN prediction with the BaseDataInput
            if orchestrator.cnn_adapter:
                logger.info("Testing CNN prediction with BaseDataInput...")
                try:
                    prediction = orchestrator.cnn_adapter.predict(base_data)
                    if prediction:
                        logger.info("✅ CNN prediction successful")
                        logger.info(f"   Action: {prediction.predictions['action']}")
                        logger.info(f"   Confidence: {prediction.confidence:.3f}")
                        logger.info(f"   Pivot price: {prediction.predictions.get('pivot_price', 'N/A')}")
                    else:
                        logger.warning("⚠️ CNN prediction returned None")
                except Exception as e:
                    logger.error(f"❌ CNN prediction failed: {e}")
            else:
                logger.warning("⚠️ CNN adapter not available")
        else:
            logger.error("❌ Failed to create BaseDataInput")

        # Check the orchestrator's latest COB data
        if hasattr(orchestrator, 'latest_cob_data') and orchestrator.latest_cob_data:
            logger.info(f"✅ Orchestrator has COB data for symbols: {list(orchestrator.latest_cob_data.keys())}")
            for sym, cob_data in orchestrator.latest_cob_data.items():
                logger.info(f"   {sym}: {len(cob_data)} COB data fields")
        else:
            logger.warning("⚠️ No COB data in orchestrator.latest_cob_data")

        return base_data is not None and base_data.cob_data is not None

    except Exception as e:
        logger.error(f"❌ Test failed: {e}")
        import traceback
        traceback.print_exc()
        return False


def test_cob_cache_updates():
    """Test that COB data updates are properly cached."""
    logger.info("=== Testing COB Cache Updates ===")

    try:
        # Initialize the orchestrator
        config = get_config()
        orchestrator = TradingOrchestrator(
            symbol="ETH/USDT",
            config=config
        )

        # Check the initial cache state
        symbol = "ETH/USDT"
        initial_cob = orchestrator.data_integration.cache.get('cob_data', symbol)
        logger.info(f"Initial COB data in cache: {initial_cob is not None}")

        # Simulate a COB data update
        mock_cob_data = {
            'current_price': 3000.0,
            'price_buckets': {
                2999.0: {'bid_volume': 100.0, 'ask_volume': 80.0, 'total_volume': 180.0, 'imbalance': 0.11},
                3000.0: {'bid_volume': 150.0, 'ask_volume': 120.0, 'total_volume': 270.0, 'imbalance': 0.11},
                3001.0: {'bid_volume': 90.0, 'ask_volume': 110.0, 'total_volume': 200.0, 'imbalance': -0.10}
            },
            'bid_ask_imbalance': {2999.0: 0.11, 3000.0: 0.11, 3001.0: -0.10},
            'volume_weighted_prices': {2999.0: 2999.5, 3000.0: 3000.2, 3001.0: 3000.8},
            'order_flow_metrics': {'total_volume': 650.0, 'avg_imbalance': 0.04},
            'ma_1s_imbalance': {3000.0: 0.05},
            'ma_5s_imbalance': {3000.0: 0.03}
        }

        # Trigger the COB data update through the dashboard callback
        logger.info("Simulating COB data update...")
        orchestrator._on_cob_dashboard_data(symbol, mock_cob_data)

        # Check whether the cache was updated
        updated_cob = orchestrator.data_integration.cache.get('cob_data', symbol)
        if updated_cob:
            logger.info("✅ COB data successfully updated in cache")
            logger.info(f"   Current price: {updated_cob.current_price}")
            logger.info(f"   Price buckets: {len(updated_cob.price_buckets)}")
        else:
            logger.warning("⚠️ COB data not found in cache after update")

        # Test BaseDataInput with the updated COB data
        base_data = orchestrator.build_base_data_input(symbol)
        if base_data and base_data.cob_data:
            logger.info("✅ BaseDataInput now contains COB data")

            # Test the feature vector with real COB data
            features = base_data.get_feature_vector()
            cob_features = features[7500:7700]  # 200 COB features
            non_zero_cob = sum(1 for f in cob_features if f != 0.0)
            logger.info(f"✅ COB features with real data: {non_zero_cob}/200 non-zero")
        else:
            logger.warning("⚠️ BaseDataInput still doesn't have COB data")

        return updated_cob is not None

    except Exception as e:
        logger.error(f"❌ Cache update test failed: {e}")
        return False


def main():
    """Run all COB integration tests."""
    logger.info("Starting COB Data Integration Tests")

    # Test 1: COB data flow
    test1_passed = test_cob_data_flow()

    # Test 2: COB cache updates
    test2_passed = test_cob_cache_updates()

    # Summary
    logger.info("=== Test Summary ===")
    logger.info(f"COB Data Flow: {'✅ PASSED' if test1_passed else '❌ FAILED'}")
    logger.info(f"COB Cache Updates: {'✅ PASSED' if test2_passed else '❌ FAILED'}")

    if test1_passed and test2_passed:
        logger.info("🎉 All tests passed! COB data integration is working.")
        logger.info("The system now:")
        logger.info("  - Properly integrates COB data into BaseDataInput")
        logger.info("  - Updates the cache when COB data arrives")
        logger.info("  - Includes COB features in the CNN model input")
    else:
        logger.error("❌ Some tests failed. COB integration needs attention.")
        if not test1_passed:
            logger.error("  - COB data is not flowing to BaseDataInput")
        if not test2_passed:
            logger.error("  - COB cache updates are not working")


if __name__ == "__main__":
    main()