fix emojis
@@ -1962,6 +1962,10 @@ class CleanTradingDashboard:
        def update_price_chart(n, pivots_value, relayout_data):
            """Update price chart every second, persisting user zoom/pan"""
            try:
                # Validate and train on predictions every update (once per second)
                # This checks if any predictions can be validated against real candles
                self._validate_and_train_on_predictions('ETH/USDT')

                show_pivots = bool(pivots_value and 'enabled' in pivots_value)
                fig, legend_children = self._create_price_chart('ETH/USDT', show_pivots=show_pivots, return_legend=True)
@@ -4061,6 +4065,107 @@ class CleanTradingDashboard:
            logger.debug(f"Error getting CNN predictions: {e}")
            return []

    def _get_live_transformer_prediction_with_next_candles(self, symbol: str = 'ETH/USDT') -> Optional[Dict]:
        """
        Get live transformer prediction including next_candles for ghost candle display
        This makes a real-time prediction with the transformer model
        """
        try:
            if not self.orchestrator or not hasattr(self.orchestrator, 'primary_transformer') or not self.orchestrator.primary_transformer:
                return None

            transformer = self.orchestrator.primary_transformer
            transformer.eval()

            # Get recent market data for all timeframes
            price_data_1s = self.data_provider.get_ohlcv(symbol, '1s', limit=200)
            price_data_1m = self.data_provider.get_ohlcv(symbol, '1m', limit=150)
            price_data_1h = self.data_provider.get_ohlcv(symbol, '1h', limit=24)
            price_data_1d = self.data_provider.get_ohlcv(symbol, '1d', limit=14)
            btc_data_1m = self.data_provider.get_ohlcv('BTC/USDT', '1m', limit=150)

            if not price_data_1m or len(price_data_1m) < 10:
                return None

            # Convert to tensors (simplified - you may need proper normalization)
            import torch
            device = next(transformer.parameters()).device

            def ohlcv_to_tensor(data, limit=None):
                if not data:
                    return None
                data = data[-limit:] if limit and len(data) > limit else data
                arr = np.array([[d['open'], d['high'], d['low'], d['close'], d['volume']] for d in data], dtype=np.float32)
                return torch.from_numpy(arr).unsqueeze(0).to(device)  # Add batch dim
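            # Illustrative sketch only (an assumption, not this commit's change): if the
            # checkpoint was trained on normalized features, a per-feature min-max scaling
            # over the window could be applied inside ohlcv_to_tensor before batching, e.g.:
            #     lo, hi = arr.min(axis=0), arr.max(axis=0)
            #     arr = (arr - lo) / (hi - lo + 1e-8)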

            # Create input tensors
            inputs = {
                'price_data_1s': ohlcv_to_tensor(price_data_1s, 200),
                'price_data_1m': ohlcv_to_tensor(price_data_1m, 150),
                'price_data_1h': ohlcv_to_tensor(price_data_1h, 24),
                'price_data_1d': ohlcv_to_tensor(price_data_1d, 14),
                'btc_data_1m': ohlcv_to_tensor(btc_data_1m, 150)
            }

            # Forward pass
            with torch.no_grad():
                outputs = transformer(**inputs)

            # Extract next_candles predictions
            next_candles = outputs.get('next_candles', {})
            if not next_candles:
                return None

            # Convert tensors to lists for JSON serialization
            predicted_candles = {}
            for tf, candle_tensor in next_candles.items():
                if candle_tensor is not None:
                    # candle_tensor shape: [batch, 5] where 5 is [O, H, L, C, V]
                    candle_values = candle_tensor.squeeze(0).cpu().numpy().tolist()
                    predicted_candles[tf] = candle_values

            # Get current price for action determination
            current_price = price_data_1m[-1]['close']
            predicted_1m_close = predicted_candles.get('1m', [0, 0, 0, current_price, 0])[3]

            # Determine action based on price change
            price_change = (predicted_1m_close - current_price) / current_price
            if price_change > 0.001:
                action = 'BUY'
            elif price_change < -0.001:
                action = 'SELL'
            else:
                action = 'HOLD'
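            # Worked example (illustrative numbers, not from live data): with
            # current_price = 3000.0 and predicted_1m_close = 3004.5,
            # price_change = 4.5 / 3000 = 0.0015 (+0.15%), which clears the
            # 0.1% threshold, so the action is 'BUY'.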

            # Get confidence from outputs if available
            confidence = 0.7  # Default confidence
            if 'confidence' in outputs:
                conf_tensor = outputs['confidence']
                confidence = float(conf_tensor.squeeze(0).cpu().numpy()[0])

            prediction = {
                'timestamp': datetime.now(),
                'symbol': symbol,
                'action': action,
                'confidence': confidence,
                'predicted_price': predicted_1m_close,
                'current_price': current_price,
                'price_change': price_change,
                'next_candles': predicted_candles,  # This is what the frontend needs!
                'type': 'transformer_prediction'
            }

            # Store prediction for tracking
            self.orchestrator.store_transformer_prediction(symbol, prediction)

            logger.debug(f"Generated transformer prediction with next_candles for {len(predicted_candles)} timeframes")

            return prediction

        except Exception as e:
            logger.error(f"Error getting live transformer prediction: {e}", exc_info=True)
            return None

    def _get_recent_transformer_predictions(self, symbol: str) -> List[Dict]:
        """Get recent Transformer predictions from orchestrator"""
        try:
@@ -4109,6 +4214,217 @@ class CleanTradingDashboard:
            logger.debug(f"Error getting prediction accuracy history: {e}")
            return []

    def _validate_and_train_on_predictions(self, symbol: str = 'ETH/USDT'):
        """
        Validate pending predictions against real candles and train on them
        This is called periodically to check if predictions can be validated
        """
        try:
            if not self.orchestrator or not hasattr(self.orchestrator, 'recent_transformer_predictions'):
                return

            # Get recent predictions for this symbol
            predictions = self.orchestrator.recent_transformer_predictions.get(symbol, [])
            if not predictions:
                return

            for prediction in list(predictions):
                # Skip if already validated
                if prediction.get('validated', False):
                    continue

                # Check if prediction has next_candles
                next_candles = prediction.get('next_candles', {})
                if not next_candles:
                    continue

                pred_timestamp = prediction.get('timestamp')
                if not pred_timestamp:
                    continue

                # Check each timeframe
                for timeframe, predicted_ohlcv in next_candles.items():
                    try:
                        # Calculate when this prediction should be validated
                        # For '1s' prediction, validate after 1 second
                        # For '1m' prediction, validate after 60 seconds
                        validation_delay = {'1s': 1, '1m': 60, '1h': 3600, '1d': 86400}.get(timeframe, 60)

                        # Check if enough time has passed
                        current_time = datetime.now()
                        if not isinstance(pred_timestamp, datetime):
                            pred_timestamp = pd.to_datetime(pred_timestamp)

                        time_elapsed = (current_time - pred_timestamp).total_seconds()
                        if time_elapsed < validation_delay:
                            continue  # Not ready to validate yet

                        # Get the actual candle at the predicted time
                        target_time = pred_timestamp + timedelta(seconds=validation_delay)
                        actual_candles = self.data_provider.get_ohlcv(symbol, timeframe, limit=10)

                        if not actual_candles:
                            continue

                        # Find the candle closest to target_time
                        actual_candle = None
                        for candle in actual_candles:
                            candle_time = candle.get('timestamp') or candle.get('time')
                            if not candle_time:
                                continue
                            if not isinstance(candle_time, datetime):
                                candle_time = pd.to_datetime(candle_time)

                            # Check if this is the target candle (within 1 second tolerance)
                            if abs((candle_time - target_time).total_seconds()) < 1:
                                actual_candle = candle
                                break

                        if not actual_candle:
                            continue  # Actual candle not available yet

                        # Extract actual OHLCV
                        actual_ohlcv = [
                            actual_candle['open'],
                            actual_candle['high'],
                            actual_candle['low'],
                            actual_candle['close'],
                            actual_candle.get('volume', 0)
                        ]

                        # Calculate accuracy
                        errors = {
                            'open': abs(predicted_ohlcv[0] - actual_ohlcv[0]),
                            'high': abs(predicted_ohlcv[1] - actual_ohlcv[1]),
                            'low': abs(predicted_ohlcv[2] - actual_ohlcv[2]),
                            'close': abs(predicted_ohlcv[3] - actual_ohlcv[3]),
                            'volume': abs(predicted_ohlcv[4] - actual_ohlcv[4])
                        }

                        pct_errors = {
                            'open': (errors['open'] / actual_ohlcv[0]) * 100 if actual_ohlcv[0] > 0 else 0,
                            'high': (errors['high'] / actual_ohlcv[1]) * 100 if actual_ohlcv[1] > 0 else 0,
                            'low': (errors['low'] / actual_ohlcv[2]) * 100 if actual_ohlcv[2] > 0 else 0,
                            'close': (errors['close'] / actual_ohlcv[3]) * 100 if actual_ohlcv[3] > 0 else 0,
                        }

                        avg_pct_error = (pct_errors['open'] + pct_errors['high'] + pct_errors['low'] + pct_errors['close']) / 4
                        accuracy = max(0, 100 - avg_pct_error)
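                        # Worked example (illustrative numbers): if the predicted close is
                        # 3005.0 and the actual close is 3000.0, the close error is 5.0,
                        # i.e. about 0.167%. If the average OHLC percentage error comes out
                        # to 0.2%, then accuracy = 100 - 0.2 = 99.8.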

                        # Check direction correctness
                        pred_direction = 'up' if predicted_ohlcv[3] >= predicted_ohlcv[0] else 'down'
                        actual_direction = 'up' if actual_ohlcv[3] >= actual_ohlcv[0] else 'down'
                        direction_correct = pred_direction == actual_direction

                        logger.info(f"Validated {timeframe} prediction: accuracy={accuracy:.1f}%, direction_correct={direction_correct}")

                        # Train on this validated prediction
                        self._train_transformer_on_validated_prediction(
                            symbol=symbol,
                            timeframe=timeframe,
                            predicted_ohlcv=predicted_ohlcv,
                            actual_ohlcv=actual_ohlcv,
                            accuracy=accuracy,
                            direction_correct=direction_correct
                        )

                        # Mark prediction as validated
                        prediction['validated'] = True

                    except Exception as e:
                        logger.debug(f"Error validating {timeframe} prediction: {e}")
                        continue

        except Exception as e:
            logger.error(f"Error in _validate_and_train_on_predictions: {e}", exc_info=True)

    def _train_transformer_on_validated_prediction(self, symbol: str, timeframe: str,
                                                   predicted_ohlcv: list, actual_ohlcv: list,
                                                   accuracy: float, direction_correct: bool):
        """
        Train transformer on validated prediction using backpropagation
        This implements online learning from prediction errors
        """
        try:
            if not self.orchestrator or not hasattr(self.orchestrator, 'primary_transformer'):
                return

            transformer = self.orchestrator.primary_transformer
            if not HAS_TORCH:
                return

            # Calculate sample weight based on accuracy
            # Low accuracy = higher weight (learn more from mistakes)
            if accuracy < 50:
                sample_weight = 3.0
            elif accuracy < 70:
                sample_weight = 2.0
            elif accuracy < 85:
                sample_weight = 1.0
            else:
                sample_weight = 0.5

            if not direction_correct:
                sample_weight *= 1.5  # Wrong direction is critical
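            # Worked example (illustrative numbers): a prediction with 45% accuracy starts
            # at weight 3.0; if its direction was also wrong, the final sample_weight is
            # 3.0 * 1.5 = 4.5, so that update contributes the most to the gradient step.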

            logger.info(f"[{timeframe}] Training on validated prediction: accuracy={accuracy:.1f}%, weight={sample_weight:.1f}x")

            # Get market state for training
            market_data_1m = self.data_provider.get_ohlcv(symbol, '1m', limit=150)
            if not market_data_1m or len(market_data_1m) < 10:
                return

            # Prepare training batch
            device = next(transformer.parameters()).device
            transformer.train()

            def ohlcv_to_tensor(data, limit=None):
                if not data:
                    return None
                data = data[-limit:] if limit and len(data) > limit else data
                arr = np.array([[d['open'], d['high'], d['low'], d['close'], d['volume']] for d in data], dtype=np.float32)
                return torch.from_numpy(arr).unsqueeze(0).to(device)

            # Create input tensors
            price_data_1s = self.data_provider.get_ohlcv(symbol, '1s', limit=200)
            price_data_1h = self.data_provider.get_ohlcv(symbol, '1h', limit=24)
            price_data_1d = self.data_provider.get_ohlcv(symbol, '1d', limit=14)
            btc_data_1m = self.data_provider.get_ohlcv('BTC/USDT', '1m', limit=150)

            inputs = {
                'price_data_1s': ohlcv_to_tensor(price_data_1s, 200),
                'price_data_1m': ohlcv_to_tensor(market_data_1m, 150),
                'price_data_1h': ohlcv_to_tensor(price_data_1h, 24),
                'price_data_1d': ohlcv_to_tensor(price_data_1d, 14),
                'btc_data_1m': ohlcv_to_tensor(btc_data_1m, 150)
            }

            # Forward pass
            outputs = transformer(**inputs)

            # Get predicted candle for this timeframe
            next_candles = outputs.get('next_candles', {})
            if timeframe not in next_candles:
                return

            pred_tensor = next_candles[timeframe]  # [batch, 5]
            actual_tensor = torch.tensor([actual_ohlcv], dtype=torch.float32, device=device)  # [batch, 5]

            # Calculate loss
            criterion = torch.nn.MSELoss()
            loss = criterion(pred_tensor, actual_tensor) * sample_weight

            # Backpropagation
            optimizer = torch.optim.Adam(transformer.parameters(), lr=0.0001)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
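            # Note on the current behavior (observation, not a change made here): a fresh
            # Adam optimizer is constructed for every validated prediction, so its moment
            # estimates do not carry over between online updates; likewise, MSE over raw
            # OHLCV values can let the volume term dominate the loss when volume errors
            # are orders of magnitude larger than price errors.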

            logger.info(f"[{timeframe}] Backpropagation complete: loss={loss.item():.6f}")

        except Exception as e:
            logger.error(f"Error training on validated prediction: {e}", exc_info=True)

    def _add_signals_to_mini_chart(self, fig: go.Figure, symbol: str, ws_data_1s: pd.DataFrame, row: int = 2):
        """Add signals to the 1s mini chart - LIMITED TO PRICE DATA TIME RANGE"""
        try:
@@ -6247,20 +6563,23 @@ class CleanTradingDashboard:
                            'type': 'cnn_pivot'
                        }

            # Get latest Transformer prediction
            # Get latest Transformer prediction with next_candles for ghost candle display
            if self.orchestrator and hasattr(self.orchestrator, 'primary_transformer'):
                try:
                    if hasattr(self.orchestrator, 'get_latest_transformer_prediction'):
                        transformer_pred = self.orchestrator.get_latest_transformer_prediction()
                        if transformer_pred:
                            latest_predictions['transformer'] = {
                                'timestamp': transformer_pred.get('timestamp', datetime.now()),
                                'action': transformer_pred.get('action', 'PRICE_PREDICTION'),
                                'confidence': transformer_pred.get('confidence', 0),
                                'predicted_price': transformer_pred.get('predicted_price', 0),
                                'price_change': transformer_pred.get('price_change', 0),
                                'type': 'transformer_prediction'
                            }
                    # Get live prediction with next_candles
                    transformer_pred = self._get_live_transformer_prediction_with_next_candles('ETH/USDT')
                    if transformer_pred:
                        latest_predictions['transformer'] = {
                            'timestamp': transformer_pred.get('timestamp', datetime.now()),
                            'action': transformer_pred.get('action', 'PRICE_PREDICTION'),
                            'confidence': transformer_pred.get('confidence', 0),
                            'predicted_price': transformer_pred.get('predicted_price', 0),
                            'price_change': transformer_pred.get('price_change', 0),
                            'type': 'transformer_prediction',
                            # Add predicted_candle data for ghost candle display
                            'predicted_candle': transformer_pred.get('next_candles', {})
                        }
                        logger.debug(f"Sent transformer prediction with {len(transformer_pred.get('next_candles', {}))} timeframe candles to frontend")
                except Exception as e:
                    logger.debug(f"Error getting transformer prediction: {e}")
@@ -8036,13 +8355,13 @@ class CleanTradingDashboard:
            logger.info("=" * 60)
            logger.info(" SESSION CLEAR COMPLETED SUCCESSFULLY")
            logger.info("=" * 60)
            logger.info("📊 Session P&L reset to $0.00")
            logger.info("📈 All positions closed")
            logger.info("📋 Trade history cleared")
            logger.info("🎯 Success rate calculations reset")
            logger.info("📈 Model performance metrics reset")
            logger.info("Session P&L reset to $0.00")
            logger.info("All positions closed")
            logger.info("Trade history cleared")
            logger.info("Success rate calculations reset")
            logger.info("Model performance metrics reset")
            logger.info(" All caches cleared")
            logger.info("📁 Trade log files cleared")
            logger.info("Trade log files cleared")
            logger.info("=" * 60)

        except Exception as e:
@@ -8277,7 +8596,7 @@ class CleanTradingDashboard:
            self.trading_executor._last_stats_update = None

            logger.info(" Trading executor state cleared completely")
            logger.info("📊 Success rate calculations will start fresh")
            logger.info("Success rate calculations will start fresh")

        except Exception as e:
            logger.error(f"Error clearing trading executor state: {e}")
@@ -8319,13 +8638,13 @@ class CleanTradingDashboard:
            try:
                # Store Decision Fusion model
                if hasattr(self.orchestrator, 'decision_fusion_network') and self.orchestrator.decision_fusion_network:
                    logger.info("💾 Storing Decision Fusion model...")
                    logger.info("Storing Decision Fusion model...")
                    # Add storage logic here
            except Exception as e:
                logger.warning(f" Failed to store Decision Fusion model: {e}")

            # 5. Verification Step - Try to load checkpoints to verify they work
            logger.info("🔍 Verifying stored checkpoints...")
            logger.info("Verifying stored checkpoints...")

            for model_name, checkpoint_path in stored_models:
                try:
@@ -8379,7 +8698,7 @@ class CleanTradingDashboard:
                with open(metadata_path, 'w') as f:
                    json.dump(metadata, f, indent=2)

                logger.info(f"📋 Stored session metadata: {metadata_path}")
                logger.info(f"Stored session metadata: {metadata_path}")

            except Exception as e:
                logger.warning(f"Failed to store metadata: {e}")
@@ -8388,7 +8707,7 @@ class CleanTradingDashboard:
            if hasattr(self.orchestrator, '_save_ui_state'):
                try:
                    self.orchestrator._save_ui_state()
                    logger.info("💾 Saved orchestrator UI state")
                    logger.info("Saved orchestrator UI state")
                except Exception as e:
                    logger.warning(f"Failed to save UI state: {e}")
@@ -8397,10 +8716,10 @@ class CleanTradingDashboard:
            successful_verifications = len([r for r in verification_results if r[1]])

            if stored_models:
                logger.info(f"📊 STORAGE SUMMARY:")
                logger.info(f"STORAGE SUMMARY:")
                logger.info(f" Models stored: {successful_stores}")
                logger.info(f" Verifications passed: {successful_verifications}/{len(verification_results)}")
                logger.info(f" 📋 Models: {[name for name, _ in stored_models]}")
                logger.info(f" Models: {[name for name, _ in stored_models]}")

            # Update button display with success info
            return True
@@ -9290,7 +9609,7 @@ class CleanTradingDashboard:
            self.cob_cache[symbol]['websocket_status'] = websocket_status
            self.cob_cache[symbol]['source'] = source

            logger.debug(f"📊 Enhanced COB update for {symbol}: {websocket_status} via {source}")
            logger.debug(f"Enhanced COB update for {symbol}: {websocket_status} via {source}")

        except Exception as e:
            logger.error(f" Error handling enhanced COB update for {symbol}: {e}")