listen to all IPs
@@ -3206,6 +3206,12 @@ class RealTrainingAdapter:
         # Include trend vector if available
         if 'trend_vector' in outputs:
             result_dict['trend_vector'] = outputs['trend_vector']
 
+        # DEBUG: Log if we have predicted candles
+        if predicted_candles_denorm:
+            logger.info(f"🔮 Generated prediction with {len(predicted_candles_denorm)} timeframe candles: {list(predicted_candles_denorm.keys())}")
+        else:
+            logger.warning("⚠️ No predicted candles in model output!")
+
         return result_dict
 
@@ -3993,6 +3999,7 @@ class RealTrainingAdapter:
                 predicted_candle_clean[tf] = candle_data
 
             prediction_data['predicted_candle'] = predicted_candle_clean
+            logger.info(f"📊 Storing prediction with ghost candles for {len(predicted_candle_clean)} timeframes: {list(predicted_candle_clean.keys())}")
 
             # Use actual predicted price from candle close (ensure it's a Python float)
             predicted_price_val = None
@@ -4011,6 +4018,7 @@ class RealTrainingAdapter:
                 prediction_data['price_change'] = 1.0 if prediction['action'] == 'BUY' else -1.0
             else:
                 # Fallback to estimated price if no candle prediction
+                logger.warning(f"!!! No predicted_candle in prediction object - ghost candles will not appear!")
                 prediction_data['predicted_price'] = prediction.get('predicted_price', current_price * (1.01 if prediction['action'] == 'BUY' else 0.99))
                 prediction_data['price_change'] = 1.0 if prediction['action'] == 'BUY' else -1.0
 
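For context, the fallback branch above estimates the target price as a fixed 1% move in the trade direction when the model produced no candle prediction. A minimal sketch of that arithmetic, with hypothetical values (only `current_price` and `prediction` mirror names from the diff):

# Illustration only: fallback price when 'predicted_price' is absent
current_price = 2500.0
prediction = {'action': 'BUY'}  # no 'predicted_price' key

# 1% above current price for BUY, 1% below for SELL
fallback = current_price * (1.01 if prediction['action'] == 'BUY' else 0.99)
predicted_price = prediction.get('predicted_price', fallback)
print(predicted_price)  # 2525.0 for a BUY at 2500.0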
@@ -1384,6 +1384,61 @@ class AnnotationDashboard:
                     'error': {'code': 'RECALC_ERROR', 'message': str(e)}
                 })
 
+        @self.server.route('/api/chart-data', methods=['GET'])
+        def get_chart_data_get():
+            """GET endpoint for chart data (used by initial chart load)"""
+            try:
+                symbol = request.args.get('symbol', 'ETH/USDT')
+                timeframe = request.args.get('timeframe', '1m')
+                limit = int(request.args.get('limit', 2500))
+
+                webui_logger.info(f"Chart data GET request: {symbol} {timeframe} limit={limit}")
+
+                if not self.data_loader:
+                    return jsonify({
+                        'success': False,
+                        'error': {'code': 'DATA_LOADER_UNAVAILABLE', 'message': 'Data loader not available'}
+                    })
+
+                # Fetch data using data loader
+                df = self.data_loader.get_data(
+                    symbol=symbol,
+                    timeframe=timeframe,
+                    limit=limit,
+                    direction='latest'
+                )
+
+                if df is not None and not df.empty:
+                    webui_logger.info(f"  {timeframe}: {len(df)} candles")
+
+                    # Get pivot points
+                    pivot_markers = {}
+                    if len(df) >= 50:
+                        pivot_markers = self._get_pivot_markers_for_timeframe(symbol, timeframe, df)
+
+                    chart_data = {
+                        timeframe: {
+                            'timestamps': df.index.strftime('%Y-%m-%d %H:%M:%S').tolist(),
+                            'open': df['open'].tolist(),
+                            'high': df['high'].tolist(),
+                            'low': df['low'].tolist(),
+                            'close': df['close'].tolist(),
+                            'volume': df['volume'].tolist(),
+                            'pivot_markers': pivot_markers
+                        }
+                    }
+
+                    return jsonify({'success': True, 'data': chart_data})
+                else:
+                    return jsonify({
+                        'success': False,
+                        'error': {'code': 'NO_DATA', 'message': f'No data available for {symbol} {timeframe}'}
+                    })
+
+            except Exception as e:
+                webui_logger.error(f"Error in chart-data GET: {e}")
+                return jsonify({'success': False, 'error': {'code': 'ERROR', 'message': str(e)}})
+
         @self.server.route('/api/chart-data', methods=['POST'])
         def get_chart_data():
             """Get chart data for specified symbol and timeframes with infinite scroll support"""
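Assuming the dashboard is reachable on port 8051, the new GET route could be exercised roughly as below; the host, port, and response handling are illustrative, only the query parameters and response shape come from the diff:

import requests

# Hypothetical client call against the new GET /api/chart-data route
resp = requests.get(
    "http://localhost:8051/api/chart-data",
    params={"symbol": "ETH/USDT", "timeframe": "1m", "limit": 500},
    timeout=10,
)
payload = resp.json()
if payload.get("success"):
    candles = payload["data"]["1m"]
    print(f"received {len(candles['close'])} closes, "
          f"{len(candles['pivot_markers'])} pivot markers")
else:
    print("error:", payload.get("error"))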
@@ -2405,13 +2460,17 @@ class AnnotationDashboard:
                     }
                 })
 
-        @self.server.route('/api/live-updates', methods=['POST'])
+        @self.server.route('/api/live-updates', methods=['GET', 'POST'])
         def get_live_updates():
             """Get live chart and prediction updates (polling endpoint)"""
             try:
-                data = request.get_json()
-                symbol = data.get('symbol', 'ETH/USDT')
-                timeframe = data.get('timeframe', '1m')
+                # Support both GET and POST
+                if request.method == 'POST':
+                    data = request.get_json() or {}
+                else:
+                    data = {}
+                symbol = data.get('symbol', request.args.get('symbol', 'ETH/USDT'))
+                timeframe = data.get('timeframe', request.args.get('timeframe', '1m'))
 
                 response = {
                     'success': True,
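Because the route now accepts GET as well as POST, a polling client can pass the symbol and timeframe either as query parameters or as a JSON body. A hedged sketch of both call styles (URL and port are assumptions):

import requests

base = "http://localhost:8051/api/live-updates"

# GET: parameters travel as query args (newly supported by this commit)
r1 = requests.get(base, params={"symbol": "ETH/USDT", "timeframe": "1m"}, timeout=10)

# POST: parameters travel as a JSON body (pre-existing behaviour)
r2 = requests.post(base, json={"symbol": "ETH/USDT", "timeframe": "1m"}, timeout=10)

print(r1.json().get("success"), r2.json().get("success"))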
@@ -3243,9 +3302,11 @@ class AnnotationDashboard:
             except Exception as e:
                 logger.debug(f"Error clearing old cached predictions: {e}")
 
-    def run(self, host='127.0.0.1', port=8051, debug=False):
-        """Run the application"""
+    def run(self, host='0.0.0.0', port=8051, debug=False):
+        """Run the application - binds to all interfaces by default"""
         logger.info(f"Starting Annotation Dashboard on http://{host}:{port}")
+        logger.info(f"Access locally at: http://localhost:{port}")
+        logger.info(f"Access from network at: http://<your-ip>:{port}")
 
         if self.has_socketio:
             logger.info("Running with WebSocket support (SocketIO)")
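Binding to 0.0.0.0 makes the server listen on every network interface, so the dashboard becomes reachable from other machines on the LAN rather than only via localhost. A sketch of how the changed default might be used; the constructor call is an assumption, only the run() signature comes from the diff:

# Hypothetical launch script
dashboard = AnnotationDashboard()

# Default now listens on all interfaces (reachable as http://<your-ip>:8051)
dashboard.run(port=8051)

# To restore the old local-only behaviour, pass the loopback address explicitly:
# dashboard.run(host='127.0.0.1', port=8051)

Note that listening on all interfaces exposes the dashboard to anything on the network, so this default is best reserved for trusted environments.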
@@ -599,10 +599,11 @@ class AdvancedTradingTransformer(nn.Module):
            batched_tfs = stacked_tfs.reshape(batch_size * num_tfs, seq_len, self.config.d_model)
 
            # Apply single cross-timeframe attention layer
-           batched_tfs = self.cross_timeframe_layer(batched_tfs)
+           # Use new variable to avoid inplace modification issues
+           cross_tf_encoded = self.cross_timeframe_layer(batched_tfs)
 
            # Reshape back: [batch*num_tfs, seq_len, d_model] -> [batch, num_tfs, seq_len, d_model]
-           cross_tf_output = batched_tfs.reshape(batch_size, num_tfs, seq_len, self.config.d_model)
+           cross_tf_output = cross_tf_encoded.reshape(batch_size, num_tfs, seq_len, self.config.d_model)
 
            # Average across timeframes to get unified representation
            # [batch, num_tfs, seq_len, d_model] -> [batch, seq_len, d_model]
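The rename is defensive: the failure it guards against is autograd's "modified by an inplace operation" error, raised when a tensor the backward pass still needs gets mutated after it was saved. A minimal, self-contained illustration of that error class (not the transformer code itself):

import torch

a = torch.randn(3, requires_grad=True)
b = a.exp()          # exp's backward re-uses its *output*, so autograd saves b
b += 1               # in-place update bumps b's version counter

try:
    b.sum().backward()
except RuntimeError as e:
    # "one of the variables needed for gradient computation has been
    #  modified by an inplace operation"
    print(type(e).__name__, e)

Keeping the pre-attention tensor (batched_tfs) and the post-attention tensor (cross_tf_encoded) under separate names makes it harder to accidentally mutate something autograd has saved for backward.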
@@ -1346,6 +1347,10 @@ class TradingTransformerTrainer:
        for param in self.model.parameters():
            if param.grad is not None:
                param.grad = None
 
+       # Clear CUDA cache to prevent tensor version conflicts
+       if torch.cuda.is_available():
+           torch.cuda.empty_cache()
+
        # OPTIMIZATION: Only move batch to device if not already there
        # Check if first tensor is already on correct device
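The manual `param.grad = None` loop has the same effect as `optimizer.zero_grad(set_to_none=True)`, and `torch.cuda.empty_cache()` releases unreferenced blocks from the caching allocator back to the driver. A compressed sketch of the resulting step preamble under assumed names (model, optimizer, and batch are placeholders, not the trainer's actual members):

import torch
import torch.nn as nn

model = nn.Linear(8, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)

def train_step(batch: dict) -> torch.Tensor:
    # Drop stale gradients without allocating zero tensors
    optimizer.zero_grad(set_to_none=True)

    # Release cached allocator blocks (only meaningful when CUDA is present)
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

    # .to(device) is a no-op if the tensor already lives on the target device
    x = batch['x'].to(device)

    loss = model(x).sum()
    loss.backward()
    optimizer.step()
    return loss.detach()

loss = train_step({'x': torch.randn(4, 8)})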