data capture implemented - needed for training
test_training_status.py (new file, 239 lines)
@@ -0,0 +1,239 @@
#!/usr/bin/env python3
"""
Training Status Audit - Check if models are actively training
"""

import asyncio
import sys
import time
from pathlib import Path

sys.path.append(str(Path('.').absolute()))

from core.enhanced_orchestrator import EnhancedTradingOrchestrator
from core.data_provider import DataProvider
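# Note: core.enhanced_orchestrator and core.data_provider are project-local
# modules; the sys.path tweak above assumes the script is launched from the
# repository root.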

async def check_training_status():
    print("=" * 70)
    print("TRAINING STATUS AUDIT")
    print("=" * 70)

    try:
        data_provider = DataProvider()
        orchestrator = EnhancedTradingOrchestrator(
            data_provider=data_provider,
            symbols=['ETH/USDT', 'BTC/USDT'],
            enhanced_rl_training=True
        )

        print("✓ Enhanced Orchestrator created")
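
        # The checks below only look at attributes the orchestrator is expected
        # to expose (DQN agent, Williams CNN, COB integration, training queues);
        # accesses are guarded with hasattr/getattr or try/except so a missing
        # component is reported rather than crashing the audit.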

        # 1. Check DQN Agent Status
        print("\n--- DQN AGENT STATUS ---")
        if hasattr(orchestrator, 'sensitivity_dqn_agent'):
            dqn_agent = orchestrator.sensitivity_dqn_agent
            print(f"DQN Agent: {dqn_agent}")

            if dqn_agent is not None:
                print(f"DQN Agent Type: {type(dqn_agent)}")

                # Check if it has training stats
                if hasattr(dqn_agent, 'get_enhanced_training_stats'):
                    try:
                        stats = dqn_agent.get_enhanced_training_stats()
                        print(f"DQN Training Stats: {stats}")
                    except Exception as e:
                        print(f"Error getting DQN stats: {e}")

                # Check memory and training status
                if hasattr(dqn_agent, 'memory'):
                    print(f"DQN Memory Size: {len(dqn_agent.memory)}")
                if hasattr(dqn_agent, 'batch_size'):
                    print(f"DQN Batch Size: {dqn_agent.batch_size}")
                if hasattr(dqn_agent, 'epsilon'):
                    print(f"DQN Epsilon: {dqn_agent.epsilon}")

                # Check if training is possible
                can_train = hasattr(dqn_agent, 'replay') and hasattr(dqn_agent, 'memory')
                print(f"DQN Can Train: {can_train}")

            else:
                print("❌ DQN Agent is None - needs initialization")
                try:
                    orchestrator._initialize_sensitivity_dqn()
                    print("✓ DQN Agent initialized")
                    dqn_agent = orchestrator.sensitivity_dqn_agent
                    print(f"New DQN Agent: {type(dqn_agent)}")
                except Exception as e:
                    print(f"Error initializing DQN: {e}")
        else:
            print("❌ No DQN agent attribute found")
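
        # "DQN Can Train" above is a heuristic: it only verifies that the agent
        # exposes a replay() method and a memory buffer, not that replay() is
        # actually being called by a training loop.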

        # 2. Check CNN Status
        print("\n--- CNN MODEL STATUS ---")
        if hasattr(orchestrator, 'williams_structure'):
            williams = orchestrator.williams_structure
            print(f"Williams CNN: {williams}")

            if williams is not None:
                print(f"Williams Type: {type(williams)}")

                # Check if it has training stats
                if hasattr(williams, 'get_training_stats'):
                    try:
                        stats = williams.get_training_stats()
                        print(f"CNN Training Stats: {stats}")
                    except Exception as e:
                        print(f"Error getting CNN stats: {e}")

                # Check if it's enabled
                print(f"Williams Enabled: {getattr(orchestrator, 'williams_enabled', False)}")
            else:
                print("❌ Williams CNN is None")
        else:
            print("❌ No Williams CNN attribute found")
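
        # williams_structure is assumed to be the Williams market-structure
        # component that wraps the CNN; williams_enabled is read with a False
        # default so a missing flag is reported as disabled rather than failing.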

        # 3. Check COB Integration Training
        print("\n--- COB INTEGRATION STATUS ---")
        if hasattr(orchestrator, 'cob_integration'):
            cob = orchestrator.cob_integration
            print(f"COB Integration: {cob}")

            if cob is not None:
                print(f"COB Type: {type(cob)}")

                # Check if COB is started
                cob_active = getattr(orchestrator, 'cob_integration_active', False)
                print(f"COB Active: {cob_active}")

                # Try to start COB if not active
                if not cob_active:
                    print("Starting COB integration...")
                    try:
                        await orchestrator.start_cob_integration()
                        print("✓ COB integration started")
                    except Exception as e:
                        print(f"Error starting COB: {e}")

                # Get COB stats
                try:
                    stats = cob.get_statistics()
                    print(f"COB Statistics: {stats}")
                except Exception as e:
                    print(f"Error getting COB stats: {e}")

                # Check COB feature generation
                cob_features = getattr(orchestrator, 'latest_cob_features', {})
                print(f"COB Features Available: {list(cob_features.keys())}")
            else:
                print("❌ COB Integration is None")
        else:
            print("❌ No COB integration attribute found")
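
        # start_cob_integration() is awaited, which is why this audit runs as a
        # coroutine under asyncio.run(); COB features are read from
        # latest_cob_features (presumably populated by the COB data feed).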

        # 4. Check Training Queues and Learning
        print("\n--- TRAINING ACTIVITY STATUS ---")

        # Check extrema trainer
        if hasattr(orchestrator, 'extrema_trainer'):
            extrema = orchestrator.extrema_trainer
            print(f"Extrema Trainer: {extrema}")
            if extrema and hasattr(extrema, 'get_training_stats'):
                try:
                    stats = extrema.get_training_stats()
                    print(f"Extrema Training Stats: {stats}")
                except Exception as e:
                    print(f"Error getting extrema stats: {e}")

        # Check negative case trainer
        if hasattr(orchestrator, 'negative_case_trainer'):
            negative = orchestrator.negative_case_trainer
            print(f"Negative Case Trainer: {negative}")

        # Check recent decisions and training queues
        if hasattr(orchestrator, 'recent_decisions'):
            recent_decisions = orchestrator.recent_decisions
            print(f"Recent Decisions: {len(recent_decisions) if recent_decisions else 0}")

        if hasattr(orchestrator, 'sensitivity_learning_queue'):
            queue = orchestrator.sensitivity_learning_queue
            print(f"Sensitivity Learning Queue: {len(queue) if queue else 0}")

        if hasattr(orchestrator, 'rl_evaluation_queue'):
            queue = orchestrator.rl_evaluation_queue
            print(f"RL Evaluation Queue: {len(queue) if queue else 0}")
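
        # Queue lengths are a cheap proxy for data capture: if the sensitivity
        # learning and RL evaluation queues grow over time, new training cases
        # are being collected even if no gradient update has run yet.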

        # 5. Test Signal Generation and Training
        print("\n--- TESTING SIGNAL GENERATION ---")

        # Generate a test decision to see if training is triggered
        try:
            print("Making coordinated decisions...")
            decisions = await orchestrator.make_coordinated_decisions()
            print(f"Decisions Generated: {len(decisions) if decisions else 0}")

            for symbol, decision in (decisions or {}).items():
                if decision:
                    print(f"{symbol}: {decision.action} (confidence: {decision.confidence:.3f})")
                else:
                    print(f"{symbol}: No decision")

        except Exception as e:
            print(f"Error making decisions: {e}")
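
        # make_coordinated_decisions() is expected to return a mapping of
        # symbol -> decision, where each decision carries .action and
        # .confidence; that is the only shape the loop above relies on.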

        # 6. Wait and check for training activity
        print("\n--- MONITORING TRAINING ACTIVITY (10 seconds) ---")

        initial_stats = {}

        # Capture initial state
        if hasattr(orchestrator, 'sensitivity_dqn_agent') and orchestrator.sensitivity_dqn_agent:
            if hasattr(orchestrator.sensitivity_dqn_agent, 'memory'):
                initial_stats['dqn_memory'] = len(orchestrator.sensitivity_dqn_agent.memory)

        # Wait and monitor
        for i in range(10):
            await asyncio.sleep(1)
            print(f"Monitoring... {i+1}/10")

            # Check if any training happened
            if hasattr(orchestrator, 'sensitivity_dqn_agent') and orchestrator.sensitivity_dqn_agent:
                if hasattr(orchestrator.sensitivity_dqn_agent, 'memory'):
                    current_memory = len(orchestrator.sensitivity_dqn_agent.memory)
                    if current_memory != initial_stats.get('dqn_memory', 0):
                        print(f"🔥 DQN training detected! Memory: {initial_stats.get('dqn_memory', 0)} -> {current_memory}")
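
        # Memory growth shows that new experiences are being captured; confirming
        # actual gradient updates would need a training-step or loss counter on
        # the agent, which this audit does not assume exists.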

        # Final status
        print("\n--- FINAL TRAINING STATUS ---")

        # Check if models are actively learning
        dqn_learning = False
        cnn_learning = False
        cob_learning = False

        if hasattr(orchestrator, 'sensitivity_dqn_agent') and orchestrator.sensitivity_dqn_agent:
            memory = getattr(orchestrator.sensitivity_dqn_agent, 'memory', [])
            batch_size = getattr(orchestrator.sensitivity_dqn_agent, 'batch_size', 32)
            dqn_learning = len(memory) >= batch_size if hasattr(memory, '__len__') else False

        print(f"DQN Learning Ready: {dqn_learning}")
        print(f"CNN Learning Ready: {cnn_learning}")
        print(f"COB Learning Ready: {cob_learning}")
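
        # cnn_learning and cob_learning stay False for now: readiness checks for
        # the CNN and COB models are not implemented in this audit yet.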

        # GPU Utilization Check
        try:
            import GPUtil
            gpus = GPUtil.getGPUs()
            if gpus:
                for gpu in gpus:
                    print(f"GPU {gpu.id}: {gpu.load*100:.1f}% utilization, {gpu.memoryUtil*100:.1f}% memory")
            else:
                print("No GPUs detected")
        except ImportError:
            print("GPUtil not available - cannot check GPU status")
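
        # GPUtil is an optional dependency; the ImportError fallback keeps the
        # audit usable on machines without it.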

    except Exception as e:
        print(f"Error in training status check: {e}")
        import traceback
        traceback.print_exc()
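
# A minimal way to run this audit, assuming the core package is importable from
# the repository root:
#
#   python test_training_status.py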

if __name__ == "__main__":
    asyncio.run(check_training_status())