fix model mappings, dashboard updates, trading
@@ -32,6 +32,7 @@ from core.data_provider import DataProvider
 from core.enhanced_orchestrator import EnhancedTradingOrchestrator
 from core.trading_executor import TradingExecutor
 from web.clean_dashboard import CleanTradingDashboard as TradingDashboard
+from utils.tensorboard_logger import TensorBoardLogger

 logger = logging.getLogger(__name__)

@@ -69,6 +70,15 @@ class EnhancedRLTrainingIntegrator:
             'cob_features_available': 0
         }

+        # Initialize TensorBoard logger
+        experiment_name = f"enhanced_rl_training_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
+        self.tb_logger = TensorBoardLogger(
+            log_dir="runs",
+            experiment_name=experiment_name,
+            enabled=True
+        )
+        logger.info(f"TensorBoard logging enabled for experiment: {experiment_name}")
+
         logger.info("Enhanced RL Training Integrator initialized")

     async def start_integration(self):
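The TensorBoardLogger wrapper imported and constructed above is not part of this diff. As a point of reference, here is a minimal sketch of the interface the call sites in this commit assume (log_scalar, log_scalars, log_histogram), built on torch.utils.tensorboard.SummaryWriter and writing under log_dir/experiment_name; the actual utils/tensorboard_logger.py may differ:

# Hypothetical sketch of the TensorBoardLogger interface assumed by this commit;
# the real utils/tensorboard_logger.py is not shown in this diff.
from pathlib import Path
import numpy as np
from torch.utils.tensorboard import SummaryWriter

class TensorBoardLogger:
    def __init__(self, log_dir: str, experiment_name: str, enabled: bool = True):
        self.enabled = enabled
        # One event-file directory per experiment, e.g. runs/enhanced_rl_training_20240101_120000
        self.writer = SummaryWriter(log_dir=str(Path(log_dir) / experiment_name)) if enabled else None

    def log_scalar(self, tag: str, value: float, step: int):
        # Single scalar, e.g. log_scalar('Rewards/Enhanced', reward, step)
        if self.enabled:
            self.writer.add_scalar(tag, value, step)

    def log_scalars(self, main_tag: str, values: dict, step: int):
        # Group of related scalars plotted together under one main tag
        if self.enabled:
            self.writer.add_scalars(main_tag, values, step)

    def log_histogram(self, tag: str, values, step: int):
        # Distribution of a feature vector
        if self.enabled:
            self.writer.add_histogram(tag, np.asarray(values), step)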
@@ -217,6 +227,19 @@ class EnhancedRLTrainingIntegrator:
             logger.info(f" * Std: {feature_std:.6f}")
             logger.info(f" * Range: [{feature_min:.6f}, {feature_max:.6f}]")

+            # Log feature statistics to TensorBoard
+            step = self.training_stats['total_episodes']
+            self.tb_logger.log_scalars('Features/Distribution', {
+                'non_zero_percentage': non_zero_features/len(state_vector)*100,
+                'mean': feature_mean,
+                'std': feature_std,
+                'min': feature_min,
+                'max': feature_max
+            }, step)
+
+            # Log feature histogram to TensorBoard
+            self.tb_logger.log_histogram('Features/Values', state_vector, step)
+
             # Check if features are properly distributed
             if non_zero_features > len(state_vector) * 0.1:  # At least 10% non-zero
                 logger.info(" * GOOD: Features are well distributed")
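For context, the statistics fed into Features/Distribution above come straight from the state vector. A minimal sketch of how they can be computed with numpy, assuming state_vector is a 1-D array as the surrounding code suggests:

import numpy as np

state_vector = np.asarray(state_vector, dtype=np.float64)  # assumed 1-D RL state
non_zero_features = int(np.count_nonzero(state_vector))
feature_mean = float(state_vector.mean())
feature_std = float(state_vector.std())
feature_min = float(state_vector.min())
feature_max = float(state_vector.max())
non_zero_pct = non_zero_features / len(state_vector) * 100  # the percentage logged above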
@@ -262,6 +285,18 @@ class EnhancedRLTrainingIntegrator:
                 logger.info(" - Enhanced pivot-based reward system: WORKING")
                 self.training_stats['enhanced_reward_calculations'] += 1

+                # Log reward metrics to TensorBoard
+                step = self.training_stats['enhanced_reward_calculations']
+                self.tb_logger.log_scalar('Rewards/Enhanced', enhanced_reward, step)
+
+                # Log reward components to TensorBoard
+                self.tb_logger.log_scalars('Rewards/Components', {
+                    'pnl_component': trade_outcome['net_pnl'],
+                    'confidence': trade_decision['confidence'],
+                    'volatility': market_data['volatility'],
+                    'order_flow_strength': market_data['order_flow_strength']
+                }, step)
+
             else:
                 logger.error(" - FAILED: Enhanced reward calculation method not available")

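The enhanced reward calculation itself lives outside this hunk. Purely to illustrate how the four components logged to Rewards/Components might combine into one scalar, here is a hypothetical weighted shaping; the weights and the formula are illustrative assumptions, not the orchestrator's actual code:

# Hypothetical illustration only -- not the orchestrator's actual reward formula.
def enhanced_reward_sketch(trade_outcome, trade_decision, market_data,
                           w_pnl=1.0, w_conf=0.1, w_vol=0.05, w_flow=0.05):
    """Combine the components logged to 'Rewards/Components' into one scalar."""
    reward = w_pnl * trade_outcome['net_pnl']
    reward += w_conf * trade_decision['confidence']        # favor confident decisions
    reward -= w_vol * market_data['volatility']            # penalize volatile regimes
    reward += w_flow * market_data['order_flow_strength']  # favor aligned order flow
    return reward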
@@ -325,20 +360,66 @@ class EnhancedRLTrainingIntegrator:
                 # Make coordinated decisions using enhanced orchestrator
                 decisions = await self.enhanced_orchestrator.make_coordinated_decisions()

+                # Track iteration metrics for TensorBoard
+                iteration_metrics = {
+                    'decisions_count': len(decisions),
+                    'confidence_avg': 0.0,
+                    'state_size_avg': 0.0,
+                    'successful_states': 0
+                }
+
                 # Process each decision
                 for symbol, decision in decisions.items():
                     if decision:
                         logger.info(f" {symbol}: {decision.action} (confidence: {decision.confidence:.3f})")

+                        # Track confidence for TensorBoard
+                        iteration_metrics['confidence_avg'] += decision.confidence
+
                         # Build comprehensive state for this decision
                         comprehensive_state = self.enhanced_orchestrator.build_comprehensive_rl_state(symbol)

                         if comprehensive_state is not None:
-                            logger.info(f" - Comprehensive state: {len(comprehensive_state)} features")
+                            state_size = len(comprehensive_state)
+                            logger.info(f" - Comprehensive state: {state_size} features")
                             self.training_stats['total_episodes'] += 1
+
+                            # Track state size for TensorBoard
+                            iteration_metrics['state_size_avg'] += state_size
+                            iteration_metrics['successful_states'] += 1
+
+                            # Log individual state metrics to TensorBoard
+                            self.tb_logger.log_state_metrics(
+                                symbol=symbol,
+                                state_info={
+                                    'size': state_size,
+                                    'quality': 1.0 if state_size == 13400 else 0.8,
+                                    'feature_counts': {
+                                        'total': state_size,
+                                        'non_zero': np.count_nonzero(comprehensive_state)
+                                    }
+                                },
+                                step=self.training_stats['total_episodes']
+                            )
                         else:
                             logger.warning(f" - Failed to build comprehensive state for {symbol}")

+                # Calculate averages for TensorBoard
+                if decisions:
+                    iteration_metrics['confidence_avg'] /= len(decisions)
+
+                if iteration_metrics['successful_states'] > 0:
+                    iteration_metrics['state_size_avg'] /= iteration_metrics['successful_states']
+
+                # Log iteration metrics to TensorBoard
+                self.tb_logger.log_scalars('Training/Iteration', {
+                    'iteration': iteration + 1,
+                    'decisions_count': iteration_metrics['decisions_count'],
+                    'confidence_avg': iteration_metrics['confidence_avg'],
+                    'state_size_avg': iteration_metrics['state_size_avg'],
+                    'successful_states': iteration_metrics['successful_states']
+                }, iteration + 1)
+
                 # Wait between iterations
                 await asyncio.sleep(2)
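log_state_metrics is a custom method on the logger that this hunk calls with a nested state_info dict. Judging from the call site, it presumably flattens that dict into per-symbol scalar tags; a plausible sketch, reusing the same SummaryWriter as the other helpers (the real method may flatten differently):

# Plausible sketch of log_state_metrics, inferred from the call site above.
def log_state_metrics(self, symbol: str, state_info: dict, step: int):
    if not self.enabled:
        return
    prefix = f"State/{symbol}"
    self.writer.add_scalar(f"{prefix}/size", state_info['size'], step)
    self.writer.add_scalar(f"{prefix}/quality", state_info['quality'], step)
    # Flatten nested feature counts into individual scalar tags
    for name, count in state_info['feature_counts'].items():
        self.writer.add_scalar(f"{prefix}/features_{name}", count, step)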
@@ -357,16 +438,33 @@ class EnhancedRLTrainingIntegrator:
         logger.info(f" - Pivot features extracted: {self.training_stats['pivot_features_extracted']}")

         # Calculate success rates
         state_success_rate = 0
         if self.training_stats['total_episodes'] > 0:
             state_success_rate = self.training_stats['successful_state_builds'] / self.training_stats['total_episodes'] * 100
             logger.info(f" - State building success rate: {state_success_rate:.1f}%")

+        # Log final statistics to TensorBoard
+        self.tb_logger.log_scalars('Integration/Statistics', {
+            'total_episodes': self.training_stats['total_episodes'],
+            'successful_state_builds': self.training_stats['successful_state_builds'],
+            'enhanced_reward_calculations': self.training_stats['enhanced_reward_calculations'],
+            'comprehensive_features_used': self.training_stats['comprehensive_features_used'],
+            'pivot_features_extracted': self.training_stats['pivot_features_extracted'],
+            'state_success_rate': state_success_rate
+        }, 0)  # Use step 0 for final summary stats
+
         # Integration status
         if self.training_stats['comprehensive_features_used'] > 0:
             logger.info("STATUS: COMPREHENSIVE RL TRAINING INTEGRATION SUCCESSFUL! ✅")
             logger.info("The system is now using the full 13,400 feature comprehensive state.")
+
+            # Log success status to TensorBoard
+            self.tb_logger.log_scalar('Integration/Success', 1.0, 0)
         else:
             logger.warning("STATUS: Integration partially successful - some fallbacks may occur")
+
+            # Log partial success status to TensorBoard
+            self.tb_logger.log_scalar('Integration/Success', 0.5, 0)

 async def main():
     """Main entry point"""
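With these hooks in place, each run writes its event files under runs/enhanced_rl_training_<timestamp>, so the new metrics can be inspected with the standard TensorBoard CLI, e.g. tensorboard --logdir runs.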