#!/usr/bin/env python3
"""
Enhanced Extrema Training Test Suite

Tests the complete extrema training system including:
1. 200-candle 1m context data loading
2. Local extrema detection (bottoms and tops)
3. Training on not-so-perfect opportunities
4. Dashboard integration with extrema information
5. Reusable functionality across different dashboards

This test suite verifies all components work together correctly.
"""

# Standard library
import sys
import os
import asyncio
import logging
import time
from datetime import datetime, timedelta
from typing import Dict, List, Any

# Third-party
import numpy as np
import pandas as pd

# Make the project root importable regardless of the current working directory.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

# Suite-wide logging configuration.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


def test_extrema_trainer_initialization():
    """Test 1: Extrema trainer initialization and basic functionality.

    Returns:
        tuple[bool, ExtremaTrainer | None]: success flag and the trainer
        instance (None on failure) so later tests can reuse it.
    """
    banner = "=" * 60
    print("\n" + banner)
    print("TEST 1: Extrema Trainer Initialization")
    print(banner)

    try:
        from core.extrema_trainer import ExtremaTrainer
        from core.data_provider import DataProvider

        provider = DataProvider()
        symbol_list = ['ETHUSDT', 'BTCUSDT']

        trainer = ExtremaTrainer(
            data_provider=provider,
            symbols=symbol_list,
            window_size=10,
        )

        # Sanity-check the trainer's initial state.
        assert trainer.symbols == symbol_list
        assert trainer.window_size == 10
        assert len(trainer.detected_extrema) == len(symbol_list)
        assert len(trainer.context_data) == len(symbol_list)

        print("✅ Extrema trainer initialized successfully")
        print(f" - Symbols: {symbol_list}")
        print(f" - Window size: {trainer.window_size}")
        print(f" - Context data containers: {len(trainer.context_data)}")
        print(f" - Extrema containers: {len(trainer.detected_extrema)}")

        return True, trainer

    except Exception as e:
        print(f"❌ Extrema trainer initialization failed: {e}")
        return False, None


def test_context_data_loading(extrema_trainer):
|
|
"""Test 2: 200-candle 1m context data loading"""
|
|
print("\n" + "="*60)
|
|
print("TEST 2: 200-Candle 1m Context Data Loading")
|
|
print("="*60)
|
|
|
|
try:
|
|
# Initialize context data
|
|
start_time = time.time()
|
|
results = extrema_trainer.initialize_context_data()
|
|
load_time = time.time() - start_time
|
|
|
|
# Verify results
|
|
successful_loads = sum(1 for success in results.values() if success)
|
|
total_symbols = len(extrema_trainer.symbols)
|
|
|
|
print(f"✅ Context data loading completed in {load_time:.2f} seconds")
|
|
print(f" - Success rate: {successful_loads}/{total_symbols} symbols")
|
|
|
|
# Check context data details
|
|
for symbol in extrema_trainer.symbols:
|
|
context = extrema_trainer.context_data[symbol]
|
|
candles_loaded = len(context.candles)
|
|
features_available = context.features is not None
|
|
|
|
print(f" - {symbol}: {candles_loaded} candles, features: {'✅' if features_available else '❌'}")
|
|
|
|
if features_available:
|
|
print(f" Features shape: {context.features.shape}")
|
|
|
|
# Test context feature retrieval
|
|
for symbol in extrema_trainer.symbols:
|
|
features = extrema_trainer.get_context_features_for_model(symbol)
|
|
if features is not None:
|
|
print(f" - {symbol} model features: {features.shape}")
|
|
else:
|
|
print(f" - {symbol} model features: Not available")
|
|
|
|
return successful_loads > 0
|
|
|
|
except Exception as e:
|
|
print(f"❌ Context data loading failed: {e}")
|
|
return False
|
|
|
|
def test_extrema_detection(extrema_trainer):
|
|
"""Test 3: Local extrema detection (bottoms and tops)"""
|
|
print("\n" + "="*60)
|
|
print("TEST 3: Local Extrema Detection")
|
|
print("="*60)
|
|
|
|
try:
|
|
# Run batch extrema detection
|
|
start_time = time.time()
|
|
detection_results = extrema_trainer.run_batch_detection()
|
|
detection_time = time.time() - start_time
|
|
|
|
# Analyze results
|
|
total_extrema = sum(len(extrema_list) for extrema_list in detection_results.values())
|
|
|
|
print(f"✅ Extrema detection completed in {detection_time:.2f} seconds")
|
|
print(f" - Total extrema detected: {total_extrema}")
|
|
|
|
# Detailed breakdown by symbol
|
|
for symbol, extrema_list in detection_results.items():
|
|
if extrema_list:
|
|
bottoms = len([e for e in extrema_list if e.extrema_type == 'bottom'])
|
|
tops = len([e for e in extrema_list if e.extrema_type == 'top'])
|
|
avg_confidence = np.mean([e.confidence for e in extrema_list])
|
|
|
|
print(f" - {symbol}: {len(extrema_list)} extrema (bottoms: {bottoms}, tops: {tops})")
|
|
print(f" Average confidence: {avg_confidence:.3f}")
|
|
|
|
# Show recent extrema details
|
|
for extrema in extrema_list[-2:]: # Last 2 extrema
|
|
print(f" {extrema.extrema_type.upper()} @ ${extrema.price:.2f} "
|
|
f"(confidence: {extrema.confidence:.3f}, action: {extrema.optimal_action})")
|
|
|
|
# Test perfect moves for CNN
|
|
perfect_moves = extrema_trainer.get_perfect_moves_for_cnn(count=20)
|
|
print(f" - Perfect moves for CNN training: {len(perfect_moves)}")
|
|
|
|
if perfect_moves:
|
|
for move in perfect_moves[:3]: # Show first 3
|
|
print(f" {move['optimal_action']} {move['symbol']} @ {move['timestamp'].strftime('%H:%M:%S')} "
|
|
f"(outcome: {move['actual_outcome']:.3f}, confidence: {move['confidence_should_have_been']:.3f})")
|
|
|
|
return total_extrema > 0
|
|
|
|
except Exception as e:
|
|
print(f"❌ Extrema detection failed: {e}")
|
|
return False
|
|
|
|
def test_context_data_updates(extrema_trainer):
|
|
"""Test 4: Context data updates and continuous extrema detection"""
|
|
print("\n" + "="*60)
|
|
print("TEST 4: Context Data Updates and Continuous Detection")
|
|
print("="*60)
|
|
|
|
try:
|
|
# Test single symbol update
|
|
symbol = extrema_trainer.symbols[0]
|
|
|
|
print(f"Testing context update for {symbol}...")
|
|
start_time = time.time()
|
|
update_results = extrema_trainer.update_context_data(symbol)
|
|
update_time = time.time() - start_time
|
|
|
|
print(f"✅ Context update completed in {update_time:.2f} seconds")
|
|
print(f" - Update result for {symbol}: {'✅' if update_results.get(symbol, False) else '❌'}")
|
|
|
|
# Test all symbols update
|
|
print("Testing context update for all symbols...")
|
|
start_time = time.time()
|
|
all_update_results = extrema_trainer.update_context_data()
|
|
all_update_time = time.time() - start_time
|
|
|
|
successful_updates = sum(1 for success in all_update_results.values() if success)
|
|
|
|
print(f"✅ All symbols update completed in {all_update_time:.2f} seconds")
|
|
print(f" - Success rate: {successful_updates}/{len(extrema_trainer.symbols)} symbols")
|
|
|
|
# Check for new extrema after updates
|
|
new_extrema = extrema_trainer.run_batch_detection()
|
|
new_total = sum(len(extrema_list) for extrema_list in new_extrema.values())
|
|
|
|
print(f" - New extrema detected after update: {new_total}")
|
|
|
|
return successful_updates > 0
|
|
|
|
except Exception as e:
|
|
print(f"❌ Context data updates failed: {e}")
|
|
return False
|
|
|
|
def test_extrema_stats_and_training_data(extrema_trainer):
|
|
"""Test 5: Extrema statistics and training data retrieval"""
|
|
print("\n" + "="*60)
|
|
print("TEST 5: Extrema Statistics and Training Data")
|
|
print("="*60)
|
|
|
|
try:
|
|
# Get comprehensive stats
|
|
stats = extrema_trainer.get_extrema_stats()
|
|
|
|
print("✅ Extrema statistics retrieved successfully")
|
|
print(f" - Total extrema detected: {stats.get('total_extrema_detected', 0)}")
|
|
print(f" - Training queue size: {stats.get('training_queue_size', 0)}")
|
|
print(f" - Window size: {stats.get('window_size', 0)}")
|
|
|
|
# Confidence thresholds
|
|
thresholds = stats.get('confidence_thresholds', {})
|
|
print(f" - Confidence thresholds: min={thresholds.get('min', 0):.2f}, max={thresholds.get('max', 0):.2f}")
|
|
|
|
# Context data status
|
|
context_status = stats.get('context_data_status', {})
|
|
for symbol, status in context_status.items():
|
|
candles = status.get('candles_loaded', 0)
|
|
features = status.get('features_available', False)
|
|
last_update = status.get('last_update', 'Unknown')
|
|
print(f" - {symbol}: {candles} candles, features: {'✅' if features else '❌'}, updated: {last_update}")
|
|
|
|
# Recent extrema breakdown
|
|
recent_extrema = stats.get('recent_extrema', {})
|
|
if recent_extrema:
|
|
print(f" - Recent extrema: {recent_extrema.get('bottoms', 0)} bottoms, {recent_extrema.get('tops', 0)} tops")
|
|
print(f" - Average confidence: {recent_extrema.get('avg_confidence', 0):.3f}")
|
|
print(f" - Average outcome: {recent_extrema.get('avg_outcome', 0):.3f}")
|
|
|
|
# Test training data retrieval
|
|
training_data = extrema_trainer.get_extrema_training_data(count=10, min_confidence=0.4)
|
|
print(f" - Training data (min confidence 0.4): {len(training_data)} cases")
|
|
|
|
if training_data:
|
|
high_confidence_cases = len([case for case in training_data if case.confidence > 0.7])
|
|
print(f" - High confidence cases (>0.7): {high_confidence_cases}")
|
|
|
|
return True
|
|
|
|
except Exception as e:
|
|
print(f"❌ Extrema statistics retrieval failed: {e}")
|
|
return False
|
|
|
|
def test_enhanced_orchestrator_integration():
    """Test 6: Enhanced orchestrator integration with extrema trainer.

    Returns:
        tuple[bool, EnhancedTradingOrchestrator | None]: success flag and
        the orchestrator instance (None on failure) for downstream tests.
    """
    banner = "=" * 60
    print("\n" + banner)
    print("TEST 6: Enhanced Orchestrator Integration")
    print(banner)

    try:
        from core.enhanced_orchestrator import EnhancedTradingOrchestrator
        from core.data_provider import DataProvider

        # The orchestrator is expected to build its own extrema trainer.
        orchestrator = EnhancedTradingOrchestrator(DataProvider())

        assert hasattr(orchestrator, 'extrema_trainer')
        assert orchestrator.extrema_trainer is not None

        print("✅ Enhanced orchestrator initialized with extrema trainer")
        print(f" - Extrema trainer symbols: {orchestrator.extrema_trainer.symbols}")

        # Stats should be reachable through the orchestrator facade.
        extrema_stats = orchestrator.get_extrema_stats()
        print(f" - Extrema stats available: {'✅' if extrema_stats else '❌'}")

        if extrema_stats:
            print(f" - Total extrema: {extrema_stats.get('total_extrema_detected', 0)}")
            print(f" - Training queue: {extrema_stats.get('training_queue_size', 0)}")

        # Context features, limited to the first two symbols.
        for symbol in orchestrator.symbols[:2]:
            context_features = orchestrator.get_context_features_for_model(symbol)
            if context_features is None:
                print(f" - {symbol} context features: Not available")
            else:
                print(f" - {symbol} context features: {context_features.shape}")

        # CNN training feed via the orchestrator.
        perfect_moves = orchestrator.get_perfect_moves_for_cnn(count=10)
        print(f" - Perfect moves for CNN: {len(perfect_moves)}")

        return True, orchestrator

    except Exception as e:
        print(f"❌ Enhanced orchestrator integration failed: {e}")
        return False, None


def test_dashboard_integration(orchestrator):
|
|
"""Test 7: Dashboard integration with extrema information"""
|
|
print("\n" + "="*60)
|
|
print("TEST 7: Dashboard Integration")
|
|
print("="*60)
|
|
|
|
try:
|
|
from web.old_archived.scalping_dashboard import RealTimeScalpingDashboard
|
|
|
|
# Initialize dashboard with enhanced orchestrator
|
|
dashboard = RealTimeScalpingDashboard(orchestrator=orchestrator)
|
|
|
|
print("✅ Dashboard initialized with enhanced orchestrator")
|
|
|
|
# Test sensitivity learning info (should include extrema stats)
|
|
sensitivity_info = dashboard._get_sensitivity_learning_info()
|
|
|
|
print("✅ Sensitivity learning info retrieved")
|
|
print(f" - Info structure: {list(sensitivity_info.keys())}")
|
|
|
|
# Check for extrema information
|
|
if 'extrema' in sensitivity_info:
|
|
extrema_info = sensitivity_info['extrema']
|
|
print(f" - Extrema info available: ✅")
|
|
print(f" - Total extrema detected: {extrema_info.get('total_extrema_detected', 0)}")
|
|
print(f" - Training queue size: {extrema_info.get('training_queue_size', 0)}")
|
|
|
|
recent_extrema = extrema_info.get('recent_extrema', {})
|
|
if recent_extrema:
|
|
print(f" - Recent bottoms: {recent_extrema.get('bottoms', 0)}")
|
|
print(f" - Recent tops: {recent_extrema.get('tops', 0)}")
|
|
print(f" - Average confidence: {recent_extrema.get('avg_confidence', 0):.3f}")
|
|
|
|
# Check for context data information
|
|
if 'context_data' in sensitivity_info:
|
|
context_info = sensitivity_info['context_data']
|
|
print(f" - Context data info available: ✅")
|
|
print(f" - Symbols with context: {len(context_info)}")
|
|
|
|
for symbol, status in list(context_info.items())[:2]: # Show first 2
|
|
candles = status.get('candles_loaded', 0)
|
|
features = status.get('features_available', False)
|
|
print(f" - {symbol}: {candles} candles, features: {'✅' if features else '❌'}")
|
|
|
|
# Test model training status creation
|
|
try:
|
|
training_status = dashboard._create_model_training_status()
|
|
print("✅ Model training status created successfully")
|
|
print(f" - Status type: {type(training_status)}")
|
|
except Exception as e:
|
|
print(f"⚠️ Model training status creation had issues: {e}")
|
|
|
|
return True
|
|
|
|
except Exception as e:
|
|
print(f"❌ Dashboard integration failed: {e}")
|
|
return False
|
|
|
|
def test_reusability_across_dashboards():
    """Test 8: Reusability of extrema trainer across different dashboards.

    Returns:
        bool: True when every reusable accessor worked for all dashboard types.
    """
    banner = "=" * 60
    print("\n" + banner)
    print("TEST 8: Reusability Across Different Dashboards")
    print(banner)

    try:
        from core.extrema_trainer import ExtremaTrainer
        from core.data_provider import DataProvider

        # One trainer instance shared by several simulated dashboards.
        shared_trainer = ExtremaTrainer(
            data_provider=DataProvider(),
            symbols=['ETHUSDT'],
            window_size=8,  # deliberately different from the default test setup
        )
        shared_trainer.initialize_context_data()

        print("✅ Shared extrema trainer created")
        print(f" - Window size: {shared_trainer.window_size}")
        print(f" - Symbols: {shared_trainer.symbols}")

        # Exercise every reusable accessor once per dashboard flavor.
        for dashboard_type in ('scalping', 'swing', 'analysis'):
            print(f"\n Testing {dashboard_type} dashboard usage:")

            shared_trainer.get_extrema_stats()
            print(f" - {dashboard_type}: Extrema stats retrieved ✅")

            features = shared_trainer.get_context_features_for_model('ETHUSDT')
            if features is None:
                print(f" - {dashboard_type}: Context features not available ❌")
            else:
                print(f" - {dashboard_type}: Context features available ✅ {features.shape}")

            training_data = shared_trainer.get_extrema_training_data(count=5)
            print(f" - {dashboard_type}: Training data retrieved ✅ ({len(training_data)} cases)")

            perfect_moves = shared_trainer.get_perfect_moves_for_cnn(count=5)
            print(f" - {dashboard_type}: Perfect moves retrieved ✅ ({len(perfect_moves)} moves)")

        print("\n✅ Extrema trainer successfully reused across different dashboard types")

        return True

    except Exception as e:
        print(f"❌ Reusability test failed: {e}")
        return False


def run_comprehensive_test_suite():
    """Run the complete test suite and print a summary.

    Returns:
        tuple[int, int]: (number of tests passed, total tests run).
    """
    wide = "=" * 80
    print("🚀 ENHANCED EXTREMA TRAINING TEST SUITE")
    print(wide)
    print("Testing 200-candle context data, extrema detection, and dashboard integration")
    print(wide)

    test_results = []

    # Phase 1: stand-alone trainer tests; the dependent tests only run
    # when initialization produced a usable trainer instance.
    ok, extrema_trainer = test_extrema_trainer_initialization()
    test_results.append(("Extrema Trainer Initialization", ok))

    if ok and extrema_trainer:
        trainer_tests = (
            ("200-Candle Context Data Loading", test_context_data_loading),
            ("Local Extrema Detection", test_extrema_detection),
            ("Context Data Updates", test_context_data_updates),
            ("Extrema Stats and Training Data", test_extrema_stats_and_training_data),
        )
        for label, test_fn in trainer_tests:
            test_results.append((label, test_fn(extrema_trainer)))

    # Phase 2: orchestrator integration, then dashboard on top of it.
    ok, orchestrator = test_enhanced_orchestrator_integration()
    test_results.append(("Enhanced Orchestrator Integration", ok))

    if ok and orchestrator:
        test_results.append(("Dashboard Integration", test_dashboard_integration(orchestrator)))

    # Phase 3: reusability check runs regardless of earlier outcomes.
    test_results.append(("Reusability Across Dashboards", test_reusability_across_dashboards()))

    # Summary table.
    print("\n" + wide)
    print("🏁 TEST SUITE RESULTS")
    print(wide)

    passed = sum(1 for _, success in test_results if success)
    total = len(test_results)

    for test_name, success in test_results:
        status = "✅ PASSED" if success else "❌ FAILED"
        print(f"{test_name:<40} {status}")

    print(wide)
    print(f"OVERALL RESULT: {passed}/{total} tests passed ({passed/total*100:.1f}%)")

    if passed == total:
        print("🎉 ALL TESTS PASSED! Enhanced extrema training system is working correctly.")
    elif passed >= total * 0.8:
        print("✅ MOSTLY SUCCESSFUL! System is functional with minor issues.")
    else:
        print("⚠️ SIGNIFICANT ISSUES DETECTED! Please review failed tests.")

    print(wide)

    return passed, total


if __name__ == "__main__":
    try:
        passed, total = run_comprehensive_test_suite()

        # Shell-friendly exit codes: 0 = all passed, 1 = some failures.
        sys.exit(0 if passed == total else 1)

    except KeyboardInterrupt:
        print("\n\n⚠️ Test suite interrupted by user")
        sys.exit(2)
    except Exception as e:
        print(f"\n\n❌ Test suite crashed: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(3)