Author: Dobromir Popov
Date: 2025-06-27 03:30:21 +03:00
Parent: d791ab8b14
Commit: 601e44de25
5 changed files with 1025 additions and 346 deletions


@ -140,6 +140,12 @@ class CleanTradingDashboard:
self.total_fees = 0.0
self.current_position = None
# ENHANCED: Model control toggles - separate inference and training
self.dqn_inference_enabled = True # Default: enabled
self.dqn_training_enabled = True # Default: enabled
self.cnn_inference_enabled = True
self.cnn_training_enabled = True
# Leverage management - adjustable x1 to x100
self.current_leverage = 50 # Default x50 leverage
self.min_leverage = 1
@ -1094,46 +1100,64 @@ class CleanTradingDashboard:
logger.debug(f"Error adding prediction accuracy feedback to chart: {e}")
def _get_recent_dqn_predictions(self, symbol: str) -> List[Dict]:
"""Get recent DQN predictions from enhanced training system (forward-looking only)"""
"""Get recent DQN predictions from orchestrator with sample generation"""
try:
predictions = []
# Get REAL forward-looking predictions from enhanced training system
# Generate sample predictions if needed (for display purposes)
if hasattr(self.orchestrator, 'generate_sample_predictions_for_display'):
self.orchestrator.generate_sample_predictions_for_display(symbol)
# Get REAL predictions from orchestrator
if hasattr(self.orchestrator, 'recent_dqn_predictions'):
predictions.extend(list(self.orchestrator.recent_dqn_predictions.get(symbol, [])))
# Get from enhanced training system as additional source
if hasattr(self, 'training_system') and self.training_system:
if hasattr(self.training_system, 'recent_dqn_predictions'):
predictions.extend(self.training_system.recent_dqn_predictions.get(symbol, []))
# Get from orchestrator as fallback
if hasattr(self.orchestrator, 'recent_dqn_predictions'):
predictions.extend(self.orchestrator.recent_dqn_predictions.get(symbol, []))
# Remove duplicates and sort by timestamp
unique_predictions = []
seen_timestamps = set()
for pred in predictions:
timestamp_key = pred.get('timestamp', datetime.now()).isoformat()
if timestamp_key not in seen_timestamps:
unique_predictions.append(pred)
seen_timestamps.add(timestamp_key)
# REMOVED: Mock prediction generation - now using REAL predictions only
# No more artificial past predictions or random data
return sorted(predictions, key=lambda x: x.get('timestamp', datetime.now()))
return sorted(unique_predictions, key=lambda x: x.get('timestamp', datetime.now()))
except Exception as e:
logger.debug(f"Error getting DQN predictions: {e}")
return []
def _get_recent_cnn_predictions(self, symbol: str) -> List[Dict]:
"""Get recent CNN predictions from enhanced training system (forward-looking only)"""
"""Get recent CNN predictions from orchestrator with sample generation"""
try:
predictions = []
# Get REAL forward-looking predictions from enhanced training system
# Sample predictions are generated in DQN method to avoid duplication
# Get REAL predictions from orchestrator
if hasattr(self.orchestrator, 'recent_cnn_predictions'):
predictions.extend(list(self.orchestrator.recent_cnn_predictions.get(symbol, [])))
# Get from enhanced training system as additional source
if hasattr(self, 'training_system') and self.training_system:
if hasattr(self.training_system, 'recent_cnn_predictions'):
predictions.extend(self.training_system.recent_cnn_predictions.get(symbol, []))
# Get from orchestrator as fallback
if hasattr(self.orchestrator, 'recent_cnn_predictions'):
predictions.extend(self.orchestrator.recent_cnn_predictions.get(symbol, []))
# Remove duplicates and sort by timestamp
unique_predictions = []
seen_timestamps = set()
for pred in predictions:
timestamp_key = pred.get('timestamp', datetime.now()).isoformat()
if timestamp_key not in seen_timestamps:
unique_predictions.append(pred)
seen_timestamps.add(timestamp_key)
# REMOVED: Mock prediction generation - now using REAL predictions only
# No more artificial past predictions or random data
return sorted(predictions, key=lambda x: x.get('timestamp', datetime.now()))
return sorted(unique_predictions, key=lambda x: x.get('timestamp', datetime.now()))
except Exception as e:
logger.debug(f"Error getting CNN predictions: {e}")
@ -1159,77 +1183,88 @@ class CleanTradingDashboard:
return []
def _add_signals_to_mini_chart(self, fig: go.Figure, symbol: str, ws_data_1s: pd.DataFrame, row: int = 2):
"""Add ALL signals (executed and non-executed) to the 1s mini chart"""
"""Add ALL signals (executed and non-executed) to the 1s mini chart - FIXED PERSISTENCE"""
try:
if not self.recent_decisions:
return
# Show ALL signals on the mini chart - MORE SIGNALS for better visibility
all_signals = self.recent_decisions[-100:] # Last 100 signals (increased from 50)
# Show ALL signals on the mini chart - EXTEND HISTORY for better visibility
all_signals = self.recent_decisions[-200:] # Last 200 signals (increased from 100)
buy_signals = []
sell_signals = []
current_time = datetime.now()
for signal in all_signals:
# Try to get full timestamp first, fall back to string timestamp
signal_time = self._get_signal_attribute(signal, 'full_timestamp')
if not signal_time:
signal_time = self._get_signal_attribute(signal, 'timestamp')
# IMPROVED: Try multiple timestamp fields for better compatibility
signal_time = None
# STREAMLINED: Handle both dict and TradingDecision object types with SINGLE timestamp field
signal_dict = signal.__dict__ if hasattr(signal, '__dict__') else signal
# UNIFIED: Use only 'timestamp' field throughout the project
if 'timestamp' in signal_dict and signal_dict['timestamp']:
timestamp_val = signal_dict['timestamp']
if isinstance(timestamp_val, datetime):
signal_time = timestamp_val
elif isinstance(timestamp_val, str):
try:
# Handle time-only format with current date
if ':' in timestamp_val and len(timestamp_val.split(':')) >= 2:
time_parts = timestamp_val.split(':')
signal_time = current_time.replace(
hour=int(time_parts[0]),
minute=int(time_parts[1]),
second=int(time_parts[2]) if len(time_parts) > 2 else 0,
microsecond=0
)
# FIXED: Handle day boundary properly
if signal_time > current_time + timedelta(minutes=5):
signal_time -= timedelta(days=1)
else:
signal_time = pd.to_datetime(timestamp_val)
except Exception as e:
logger.debug(f"Error parsing timestamp {timestamp_val}: {e}")
continue
# Skip if no valid timestamp
if not signal_time:
continue
# Get signal attributes with safe defaults
signal_price = self._get_signal_attribute(signal, 'price', 0)
signal_action = self._get_signal_attribute(signal, 'action', 'HOLD')
signal_confidence = self._get_signal_attribute(signal, 'confidence', 0)
is_executed = self._get_signal_attribute(signal, 'executed', False)
is_manual = self._get_signal_attribute(signal, 'manual', False)
if signal_time and signal_price and signal_confidence and signal_confidence > 0:
# FIXED: Same timestamp conversion as main chart
if isinstance(signal_time, str):
try:
# Handle time-only format with current date
if ':' in signal_time and len(signal_time.split(':')) == 3:
now = datetime.now()
time_parts = signal_time.split(':')
signal_time = now.replace(
hour=int(time_parts[0]),
minute=int(time_parts[1]),
second=int(time_parts[2]),
microsecond=0
)
# Handle day boundary issues
if signal_time > now + timedelta(minutes=5):
signal_time -= timedelta(days=1)
else:
signal_time = pd.to_datetime(signal_time)
except Exception as e:
logger.debug(f"Error parsing mini chart timestamp {signal_time}: {e}")
continue
elif not isinstance(signal_time, datetime):
# Convert other timestamp formats to datetime
try:
signal_time = pd.to_datetime(signal_time)
except Exception as e:
logger.debug(f"Error converting mini chart timestamp to datetime: {e}")
continue
signal_data = {
'x': signal_time,
'y': signal_price,
'confidence': signal_confidence,
'executed': is_executed
}
if signal_action == 'BUY':
buy_signals.append(signal_data)
elif signal_action == 'SELL':
sell_signals.append(signal_data)
# Only show signals with valid data
if not signal_price or signal_confidence <= 0 or signal_action == 'HOLD':
continue
signal_data = {
'x': signal_time,
'y': signal_price,
'confidence': signal_confidence,
'executed': is_executed,
'manual': is_manual
}
if signal_action == 'BUY':
buy_signals.append(signal_data)
elif signal_action == 'SELL':
sell_signals.append(signal_data)
# Add ALL BUY signals to mini chart
# Add ALL BUY signals to mini chart with ENHANCED VISIBILITY
if buy_signals:
# Split into executed and non-executed
# Split into executed and non-executed, manual and ML-generated
executed_buys = [s for s in buy_signals if s['executed']]
pending_buys = [s for s in buy_signals if not s['executed']]
manual_buys = [s for s in buy_signals if s.get('manual', False)]
ml_buys = [s for s in buy_signals if not s.get('manual', False) and s['executed']] # ML-generated executed trades
# Executed buy signals (solid green triangles)
# EXECUTED buy signals (solid green triangles) - MOST VISIBLE
if executed_buys:
fig.add_trace(
go.Scatter(
@ -1238,12 +1273,12 @@ class CleanTradingDashboard:
mode='markers',
marker=dict(
symbol='triangle-up',
size=10,
size=12, # Larger size for better visibility
color='rgba(0, 255, 100, 1.0)',
line=dict(width=2, color='green')
line=dict(width=3, color='darkgreen') # Thicker border
),
name='BUY (Executed)',
showlegend=False,
showlegend=True,
hovertemplate="<b>BUY EXECUTED</b><br>" +
"Price: $%{y:.2f}<br>" +
"Time: %{x}<br>" +
@ -1252,6 +1287,54 @@ class CleanTradingDashboard:
),
row=row, col=1
)
# MANUAL buy signals (bright blue stars) - HIGHLY VISIBLE
if manual_buys:
fig.add_trace(
go.Scatter(
x=[s['x'] for s in manual_buys],
y=[s['y'] for s in manual_buys],
mode='markers',
marker=dict(
symbol='star',
size=15, # Even larger for manual trades
color='rgba(0, 150, 255, 1.0)',
line=dict(width=3, color='darkblue')
),
name='BUY (Manual)',
showlegend=True,
hovertemplate="<b>MANUAL BUY</b><br>" +
"Price: $%{y:.2f}<br>" +
"Time: %{x}<br>" +
"Confidence: %{customdata:.1%}<extra></extra>",
customdata=[s['confidence'] for s in manual_buys]
),
row=row, col=1
)
# ML-GENERATED buy signals (bright cyan diamonds) - HIGHLY VISIBLE
if ml_buys:
fig.add_trace(
go.Scatter(
x=[s['x'] for s in ml_buys],
y=[s['y'] for s in ml_buys],
mode='markers',
marker=dict(
symbol='diamond',
size=13, # Large size for ML trades
color='rgba(0, 255, 255, 1.0)',
line=dict(width=3, color='darkcyan')
),
name='BUY (ML)',
showlegend=True,
hovertemplate="<b>ML BUY</b><br>" +
"Price: $%{y:.2f}<br>" +
"Time: %{x}<br>" +
"Confidence: %{customdata:.1%}<extra></extra>",
customdata=[s['confidence'] for s in ml_buys]
),
row=row, col=1
)
# Pending/non-executed buy signals (hollow green triangles)
if pending_buys:
@ -1266,9 +1349,9 @@ class CleanTradingDashboard:
color='rgba(0, 255, 100, 0.5)',
line=dict(width=2, color='green')
),
name='📊 BUY (Signal)',
showlegend=False,
hovertemplate="<b>📊 BUY SIGNAL</b><br>" +
name='BUY (Signal)',
showlegend=True,
hovertemplate="<b>BUY SIGNAL</b><br>" +
"Price: $%{y:.2f}<br>" +
"Time: %{x}<br>" +
"Confidence: %{customdata:.1%}<extra></extra>",
@ -1277,13 +1360,15 @@ class CleanTradingDashboard:
row=row, col=1
)
# Add ALL SELL signals to mini chart
# Add ALL SELL signals to mini chart with ENHANCED VISIBILITY
if sell_signals:
# Split into executed and non-executed
# Split into executed and non-executed, manual and ML-generated
executed_sells = [s for s in sell_signals if s['executed']]
pending_sells = [s for s in sell_signals if not s['executed']]
manual_sells = [s for s in sell_signals if s.get('manual', False)]
ml_sells = [s for s in sell_signals if not s.get('manual', False) and s['executed']] # ML-generated executed trades
# Executed sell signals (solid red triangles)
# EXECUTED sell signals (solid red triangles) - MOST VISIBLE
if executed_sells:
fig.add_trace(
go.Scatter(
@ -1292,12 +1377,12 @@ class CleanTradingDashboard:
mode='markers',
marker=dict(
symbol='triangle-down',
size=10,
size=12, # Larger size for better visibility
color='rgba(255, 100, 100, 1.0)',
line=dict(width=2, color='red')
line=dict(width=3, color='darkred') # Thicker border
),
name='SELL (Executed)',
showlegend=False,
showlegend=True,
hovertemplate="<b>SELL EXECUTED</b><br>" +
"Price: $%{y:.2f}<br>" +
"Time: %{x}<br>" +
@ -1307,6 +1392,54 @@ class CleanTradingDashboard:
row=row, col=1
)
# MANUAL sell signals (bright orange stars) - HIGHLY VISIBLE
if manual_sells:
fig.add_trace(
go.Scatter(
x=[s['x'] for s in manual_sells],
y=[s['y'] for s in manual_sells],
mode='markers',
marker=dict(
symbol='star',
size=15, # Even larger for manual trades
color='rgba(255, 150, 0, 1.0)',
line=dict(width=3, color='darkorange')
),
name='SELL (Manual)',
showlegend=True,
hovertemplate="<b>MANUAL SELL</b><br>" +
"Price: $%{y:.2f}<br>" +
"Time: %{x}<br>" +
"Confidence: %{customdata:.1%}<extra></extra>",
customdata=[s['confidence'] for s in manual_sells]
),
row=row, col=1
)
# ML-GENERATED sell signals (bright magenta diamonds) - HIGHLY VISIBLE
if ml_sells:
fig.add_trace(
go.Scatter(
x=[s['x'] for s in ml_sells],
y=[s['y'] for s in ml_sells],
mode='markers',
marker=dict(
symbol='diamond',
size=13, # Large size for ML trades
color='rgba(255, 0, 255, 1.0)',
line=dict(width=3, color='darkmagenta')
),
name='SELL (ML)',
showlegend=True,
hovertemplate="<b>ML SELL</b><br>" +
"Price: $%{y:.2f}<br>" +
"Time: %{x}<br>" +
"Confidence: %{customdata:.1%}<extra></extra>",
customdata=[s['confidence'] for s in ml_sells]
),
row=row, col=1
)
# Pending/non-executed sell signals (hollow red triangles)
if pending_sells:
fig.add_trace(
@ -1320,9 +1453,9 @@ class CleanTradingDashboard:
color='rgba(255, 100, 100, 0.5)',
line=dict(width=2, color='red')
),
name='📊 SELL (Signal)',
showlegend=False,
hovertemplate="<b>📊 SELL SIGNAL</b><br>" +
name='SELL (Signal)',
showlegend=True,
hovertemplate="<b>SELL SIGNAL</b><br>" +
"Price: $%{y:.2f}<br>" +
"Time: %{x}<br>" +
"Confidence: %{customdata:.1%}<extra></extra>",
@ -1330,10 +1463,17 @@ class CleanTradingDashboard:
),
row=row, col=1
)
# Log signal counts for debugging with detailed breakdown
total_signals = len(buy_signals) + len(sell_signals)
if total_signals > 0:
manual_count = len([s for s in buy_signals + sell_signals if s.get('manual', False)])
ml_count = len([s for s in buy_signals + sell_signals if not s.get('manual', False) and s['executed']])
logger.debug(f"[MINI-CHART] Added {total_signals} signals: {len(buy_signals)} BUY, {len(sell_signals)} SELL ({manual_count} manual, {ml_count} ML)")
except Exception as e:
logger.warning(f"Error adding signals to mini chart: {e}")
def _add_trades_to_chart(self, fig: go.Figure, symbol: str, df_main: pd.DataFrame, row: int = 1):
"""Add executed trades to the chart"""
try:
@ -1590,10 +1730,17 @@ class CleanTradingDashboard:
except (TypeError, ZeroDivisionError):
return default_improvement
# 1. DQN Model Status - using orchestrator SSOT with real training detection
# 1. DQN Model Status - using orchestrator SSOT with SEPARATE TOGGLES for inference and training
dqn_state = model_states.get('dqn', {})
dqn_training_status = self._is_model_actually_training('dqn')
dqn_active = dqn_training_status['is_training']
# SEPARATE TOGGLES: Inference and Training can be controlled independently
dqn_inference_enabled = getattr(self, 'dqn_inference_enabled', True) # Default: enabled
dqn_training_enabled = getattr(self, 'dqn_training_enabled', True) # Default: enabled
dqn_checkpoint_loaded = dqn_state.get('checkpoint_loaded', False)
# DQN is active if checkpoint is loaded AND inference is enabled
dqn_active = dqn_checkpoint_loaded and dqn_inference_enabled
dqn_prediction_count = len(self.recent_decisions) if signal_generation_active else 0
if signal_generation_active and len(self.recent_decisions) > 0:
@ -1620,13 +1767,27 @@ class CleanTradingDashboard:
dqn_state.get('current_loss', dqn_state.get('initial_loss', 0.2850)),
0.0 if not dqn_active else 94.9 # No improvement shown when the model is not active
),
'checkpoint_loaded': dqn_state.get('checkpoint_loaded', False),
'checkpoint_loaded': dqn_checkpoint_loaded,
'model_type': 'DQN',
'description': 'Deep Q-Network Agent (Data Bus Input)',
'prediction_count': dqn_prediction_count,
'epsilon': 1.0,
'training_evidence': dqn_training_status['evidence'],
'training_steps': dqn_training_status['training_steps']
'training_steps': dqn_training_status['training_steps'],
# ENHANCED: Add separate toggles and checkpoint information for tooltips
'inference_enabled': dqn_inference_enabled,
'training_enabled': dqn_training_enabled,
'status_details': {
'checkpoint_loaded': dqn_checkpoint_loaded,
'inference_enabled': dqn_inference_enabled,
'training_enabled': dqn_training_enabled,
'is_training': dqn_training_status['is_training']
},
'checkpoint_info': {
'filename': dqn_state.get('checkpoint_filename', 'none'),
'created_at': dqn_state.get('created_at', 'Unknown'),
'performance_score': dqn_state.get('performance_score', 0.0)
}
}
loaded_models['dqn'] = dqn_model_info
@ -1653,7 +1814,13 @@ class CleanTradingDashboard:
'checkpoint_loaded': cnn_state.get('checkpoint_loaded', False),
'model_type': 'CNN',
'description': 'Williams Market Structure CNN (Data Bus Input)',
'pivot_prediction': cnn_prediction
'pivot_prediction': cnn_prediction,
# ENHANCED: Add checkpoint information for tooltips
'checkpoint_info': {
'filename': cnn_state.get('checkpoint_filename', 'none'),
'created_at': cnn_state.get('created_at', 'Unknown'),
'performance_score': cnn_state.get('performance_score', 0.0)
}
}
loaded_models['cnn'] = cnn_model_info
@ -1708,7 +1875,13 @@ class CleanTradingDashboard:
'checkpoint_loaded': decision_state.get('checkpoint_loaded', False),
'model_type': 'DECISION',
'description': 'Final Decision Model (Trained on Signals Only)',
'inputs': 'Data Bus + All Model Outputs'
'inputs': 'Data Bus + All Model Outputs',
# ENHANCED: Add checkpoint information for tooltips
'checkpoint_info': {
'filename': decision_state.get('checkpoint_filename', 'none'),
'created_at': decision_state.get('created_at', 'Unknown'),
'performance_score': decision_state.get('performance_score', 0.0)
}
}
loaded_models['decision'] = decision_model_info
@ -2297,7 +2470,7 @@ class CleanTradingDashboard:
# return []
def _execute_manual_trade(self, action: str):
"""Execute manual trading action - FIXED to properly execute and track trades"""
"""Execute manual trading action - ENHANCED with PERSISTENT SIGNAL STORAGE"""
try:
if not self.trading_executor:
logger.warning("No trading executor available")
@ -2344,11 +2517,12 @@ class CleanTradingDashboard:
logger.warning(f"Failed to capture model inputs with COB data: {e}")
model_inputs = {}
# Create manual trading decision with FULL TIMESTAMP for chart persistence
# Create manual trading decision with ENHANCED TIMESTAMP STORAGE for PERSISTENT CHART DISPLAY
now = datetime.now()
decision = {
'timestamp': now.strftime('%H:%M:%S'),
'full_timestamp': now, # Store full datetime for better chart positioning
'timestamp': now.strftime('%H:%M:%S'), # String format for display
'full_timestamp': now, # Full datetime for accurate chart positioning
'creation_time': now, # ADDITIONAL: Store creation time for persistence tracking
'action': action,
'confidence': 1.0, # Manual trades have 100% confidence
'price': current_price,
@ -2356,9 +2530,11 @@ class CleanTradingDashboard:
'size': 0.01,
'executed': False,
'blocked': False,
'manual': True,
'manual': True, # CRITICAL: Mark as manual for special handling
'reason': f'Manual {action} button',
'model_inputs': model_inputs # Store for training
'model_inputs': model_inputs, # Store for training
'persistent': True, # MARK for persistent display
'chart_priority': 'HIGH' # High priority for chart display
}
# Execute through trading executor
@ -2366,6 +2542,7 @@ class CleanTradingDashboard:
result = self.trading_executor.execute_trade(symbol, action, 0.01) # Small size for testing
if result:
decision['executed'] = True
decision['execution_time'] = datetime.now() # Track execution time
logger.info(f"Manual {action} executed at ${current_price:.2f}")
# Sync position from trading executor after execution
@ -2497,7 +2674,6 @@ class CleanTradingDashboard:
self.pending_trade_case_id = base_case_id
except Exception as e:
logger.warning(f"Failed to store opening trade as base case: {e}")
self.pending_trade_case_id = None
else:
decision['executed'] = False
@ -2511,12 +2687,29 @@ class CleanTradingDashboard:
decision['block_reason'] = str(e)
logger.error(f"Manual {action} failed with error: {e}")
# Add to recent decisions for display
# ENHANCED: Add to recent decisions with PRIORITY INSERTION for better persistence
self.recent_decisions.append(decision)
# Keep more decisions for longer history - extend to 200 decisions
if len(self.recent_decisions) > 200:
self.recent_decisions = self.recent_decisions[-200:]
# CONSERVATIVE: Keep MORE decisions for longer history - extend to 300 decisions
if len(self.recent_decisions) > 300:
# When trimming, PRESERVE MANUAL TRADES at higher priority
manual_decisions = [d for d in self.recent_decisions if self._get_signal_attribute(d, 'manual', False)]
other_decisions = [d for d in self.recent_decisions if not self._get_signal_attribute(d, 'manual', False)]
# Keep all manual decisions + most recent other decisions
max_other_decisions = 300 - len(manual_decisions)
if max_other_decisions > 0:
trimmed_decisions = manual_decisions + other_decisions[-max_other_decisions:]
else:
# If too many manual decisions, keep most recent ones
trimmed_decisions = manual_decisions[-300:]
self.recent_decisions = trimmed_decisions
logger.debug(f"Trimmed decisions: kept {len(manual_decisions)} manual + {len(trimmed_decisions) - len(manual_decisions)} other")
# LOG the manual trade execution with enhanced details
status = "EXECUTED" if decision['executed'] else ("BLOCKED" if decision['blocked'] else "PENDING")
logger.info(f"[MANUAL-{status}] {action} trade at ${current_price:.2f} - Decision stored with enhanced persistence")
except Exception as e:
logger.error(f"Error executing manual {action}: {e}")
@ -2548,10 +2741,6 @@ class CleanTradingDashboard:
market_state['volume_sma_20'] = float(volumes[-20:].mean())
market_state['volume_ratio'] = float(volumes[-1] / volumes[-20:].mean())
# Trend features
market_state['price_momentum_5'] = float((prices[-1] - prices[-5]) / prices[-5])
market_state['price_momentum_20'] = float((prices[-1] - prices[-20]) / prices[-20])
# Add timestamp features
now = datetime.now()
market_state['hour_of_day'] = now.hour
@ -2847,65 +3036,78 @@ class CleanTradingDashboard:
return default
def _clear_old_signals_for_tick_range(self):
"""Clear old signals that are outside the current tick cache time range - CONSERVATIVE APPROACH"""
"""Clear old signals that are outside the current tick cache time range - VERY CONSERVATIVE"""
try:
if not self.tick_cache or len(self.tick_cache) == 0:
return
# Only clear if we have a LOT of signals (more than 500) to prevent memory issues
if len(self.recent_decisions) <= 500:
logger.debug(f"Signal count ({len(self.recent_decisions)}) below threshold - not clearing old signals")
# MUCH MORE CONSERVATIVE: Only clear if we have excessive signals (1000+)
if len(self.recent_decisions) <= 1000:
logger.debug(f"Signal count ({len(self.recent_decisions)}) below conservative threshold - preserving all signals")
return
# Get the time range of the current tick cache - use much older time to preserve more signals
# Get the time range of the current tick cache - use VERY old time to preserve signals
oldest_tick_time = self.tick_cache[0].get('datetime')
if not oldest_tick_time:
return
# Make the cutoff time much more conservative - keep signals from last 2 hours
cutoff_time = oldest_tick_time - timedelta(hours=2)
# EXTENDED PRESERVATION: Keep signals from last 6 hours (was 2 hours)
cutoff_time = oldest_tick_time - timedelta(hours=6)
# Filter recent_decisions to only keep signals within extended time range
# Filter recent_decisions to only keep signals within EXTENDED time range
filtered_decisions = []
for signal in self.recent_decisions:
signal_time = self._get_signal_attribute(signal, 'timestamp')
signal_time = self._get_signal_attribute(signal, 'full_timestamp')
if not signal_time:
signal_time = self._get_signal_attribute(signal, 'timestamp')
if signal_time:
# Convert signal timestamp to datetime for comparison
try:
if isinstance(signal_time, str):
# Handle time-only format (HH:MM:SS)
if ':' in signal_time and len(signal_time.split(':')) == 3:
if ':' in signal_time and len(signal_time.split(':')) >= 2:
signal_datetime = datetime.now().replace(
hour=int(signal_time.split(':')[0]),
minute=int(signal_time.split(':')[1]),
second=int(signal_time.split(':')[2]),
second=int(signal_time.split(':')[2]) if len(signal_time.split(':')) > 2 else 0,
microsecond=0
)
# Handle day boundary
if signal_datetime > datetime.now() + timedelta(minutes=5):
signal_datetime -= timedelta(days=1)
else:
signal_datetime = pd.to_datetime(signal_time)
else:
signal_datetime = signal_time
# Keep signal if it's within the extended time range (2+ hours)
# PRESERVE MORE: Keep signal if it's within the EXTENDED time range (6+ hours)
if signal_datetime >= cutoff_time:
filtered_decisions.append(signal)
else:
# EXTRA PRESERVATION: Keep manual trades regardless of age
if self._get_signal_attribute(signal, 'manual', False):
filtered_decisions.append(signal)
logger.debug("Preserved manual trade signal despite age")
except Exception:
# Keep signal if we can't parse the timestamp
# ALWAYS PRESERVE if we can't parse the timestamp
filtered_decisions.append(signal)
else:
# Keep signal if no timestamp
# ALWAYS PRESERVE if no timestamp
filtered_decisions.append(signal)
# Only update if we actually reduced the count significantly
if len(filtered_decisions) < len(self.recent_decisions) * 0.8: # Only if we remove more than 20%
# Only update if we significantly reduced the count (more than 30% reduction)
reduction_threshold = 0.7 # Apply the cleanup only when it would remove more than 30% of signals
if len(filtered_decisions) < len(self.recent_decisions) * reduction_threshold:
original_count = len(self.recent_decisions)
self.recent_decisions = filtered_decisions
logger.debug(f"Conservative signal cleanup: kept {len(filtered_decisions)} signals (removed {len(self.recent_decisions) - len(filtered_decisions)})")
logger.info(f"CONSERVATIVE signal cleanup: kept {len(filtered_decisions)} signals (removed {original_count - len(filtered_decisions)})")
else:
logger.debug(f"Conservative signal cleanup: no significant reduction needed")
logger.debug(f"CONSERVATIVE signal cleanup: no significant reduction needed (kept {len(self.recent_decisions)} signals)")
except Exception as e:
logger.warning(f"Error clearing old signals: {e}")
logger.warning(f"Error in conservative signal cleanup: {e}")
def _initialize_enhanced_training_system(self):
"""Initialize enhanced training system for model predictions"""
@ -3049,6 +3251,42 @@ class CleanTradingDashboard:
def get_cob_data(self, symbol: str) -> Optional[Dict]:
"""Get latest COB data for a symbol"""
try:
# First try to get from orchestrator's COB integration
if hasattr(self.orchestrator, 'cob_integration') and self.orchestrator.cob_integration:
cob_snapshot = self.orchestrator.cob_integration.get_consolidated_orderbook(symbol)
if cob_snapshot:
# Convert COB snapshot to dashboard format
bids = []
asks = []
# Convert consolidated levels to simple format
for bid in cob_snapshot.consolidated_bids[:20]:
bids.append({
'price': bid.price,
'size': bid.total_size,
'total': bid.total_volume_usd
})
for ask in cob_snapshot.consolidated_asks[:20]:
asks.append({
'price': ask.price,
'size': ask.total_size,
'total': ask.total_volume_usd
})
return {
'symbol': symbol,
'bids': bids,
'asks': asks,
'stats': {
'spread_bps': cob_snapshot.spread_bps,
'imbalance': cob_snapshot.liquidity_imbalance,
'mid_price': cob_snapshot.volume_weighted_mid,
'total_liquidity': cob_snapshot.total_bid_liquidity + cob_snapshot.total_ask_liquidity
}
}
# Fallback to cached data
return self.latest_cob_data.get(symbol)
except Exception as e:
logger.debug(f"Error getting COB data: {e}")
@ -3757,34 +3995,40 @@ class CleanTradingDashboard:
logger.debug(f"Ignoring BTC signal: {symbol}")
return
# Convert orchestrator decision to dashboard format with FULL TIMESTAMP
# Convert orchestrator decision to dashboard format with ENHANCED PERSISTENCE
# Handle both TradingDecision objects and dictionary formats
now = datetime.now()
if hasattr(decision, 'action'):
# This is a TradingDecision object (dataclass)
dashboard_decision = {
'timestamp': now.strftime('%H:%M:%S'),
'full_timestamp': now, # Add full timestamp for chart persistence
'timestamp': now, # UNIFIED: Use datetime object directly throughout
'action': decision.action,
'confidence': decision.confidence,
'price': decision.price,
'symbol': getattr(decision, 'symbol', 'ETH/USDT'), # Add symbol field
'executed': True, # Orchestrator decisions are executed
'blocked': False,
'manual': False
'manual': False, # ML-generated trade
'source': 'ORCHESTRATOR', # Mark source for tracking
'persistent': True, # MARK for persistent display
'chart_priority': 'HIGH', # High priority for chart display
'model_generated': True # CRITICAL: Mark as ML-generated
}
else:
# This is a dictionary format
dashboard_decision = {
'timestamp': now.strftime('%H:%M:%S'),
'full_timestamp': now, # Add full timestamp for chart persistence
'timestamp': now, # UNIFIED: Use datetime object directly throughout
'action': decision.get('action', 'UNKNOWN'),
'confidence': decision.get('confidence', 0),
'price': decision.get('price', 0),
'symbol': decision.get('symbol', 'ETH/USDT'), # Add symbol field
'executed': True, # Orchestrator decisions are executed
'blocked': False,
'manual': False
'manual': False, # ML-generated trade
'source': 'ORCHESTRATOR', # Mark source for tracking
'persistent': True, # MARK for persistent display
'chart_priority': 'HIGH', # High priority for chart display
'model_generated': True # CRITICAL: Mark as ML-generated
}
# Only show ETH signals in dashboard
@ -3818,15 +4062,30 @@ class CleanTradingDashboard:
# HOLD signals or no trading executor
dashboard_decision['executed'] = True if action == 'HOLD' else False
# Add to recent decisions
# ENHANCED: Add to recent decisions with PRIORITY PRESERVATION for ML-generated signals
self.recent_decisions.append(dashboard_decision)
# Keep more decisions for longer history - extend to 200 decisions
if len(self.recent_decisions) > 200:
self.recent_decisions = self.recent_decisions[-200:]
# CONSERVATIVE: Keep MORE decisions for longer history - extend to 300 decisions
if len(self.recent_decisions) > 300:
# When trimming, PRESERVE ML-GENERATED TRADES and MANUAL TRADES at higher priority
manual_decisions = [d for d in self.recent_decisions if self._get_signal_attribute(d, 'manual', False)]
ml_decisions = [d for d in self.recent_decisions if self._get_signal_attribute(d, 'model_generated', False)]
other_decisions = [d for d in self.recent_decisions if not self._get_signal_attribute(d, 'manual', False) and not self._get_signal_attribute(d, 'model_generated', False)]
# Keep all manual + ML decisions + most recent other decisions
priority_decisions = manual_decisions + ml_decisions
max_other_decisions = 300 - len(priority_decisions)
if max_other_decisions > 0:
trimmed_decisions = priority_decisions + other_decisions[-max_other_decisions:]
else:
# If too many priority decisions, keep most recent ones
trimmed_decisions = priority_decisions[-300:]
self.recent_decisions = trimmed_decisions
logger.debug(f"Trimmed decisions: kept {len(manual_decisions)} manual + {len(ml_decisions)} ML + {len(trimmed_decisions) - len(priority_decisions)} other")
execution_status = "EXECUTED" if dashboard_decision['executed'] else "BLOCKED" if dashboard_decision.get('blocked') else "PENDING"
logger.info(f"[{execution_status}] ETH orchestrator signal: {dashboard_decision['action']} (conf: {dashboard_decision['confidence']:.2f})")
logger.info(f"[ML-{execution_status}] ETH orchestrator signal: {dashboard_decision['action']} (conf: {dashboard_decision['confidence']:.2f}) - Enhanced persistence")
else:
logger.debug(f"Non-ETH signal ignored: {dashboard_decision.get('symbol', 'UNKNOWN')}")