try to fix chart updates - wip
@@ -768,9 +768,8 @@ class AnnotationDashboard:
         # Backtest runner for replaying visible chart with predictions
         self.backtest_runner = BacktestRunner()
 
-        # Prediction cache for training: stores inference inputs/outputs to compare with actual candles
-        # Format: {symbol: {timeframe: [{'timestamp': ts, 'inputs': {...}, 'outputs': {...}, 'norm_params': {...}}, ...]}}
-        self.prediction_cache = {}
+        # NOTE: Prediction caching is now handled by InferenceFrameReference system
+        # See ANNOTATE/core/inference_training_system.py for the unified implementation
 
         # Check if we should auto-load a model at startup
         auto_load_model = os.getenv('AUTO_LOAD_MODEL', 'Transformer')  # Default: Transformer
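
For reference, the removed prediction_cache followed the nested-dict format documented in the deleted comment. A minimal standalone sketch of that shape and its insert-and-trim discipline; the symbol, timeframe, and entry values are hypothetical:

from datetime import datetime, timezone

prediction_cache = {}  # {symbol: {timeframe: [entry, ...]}}

def cache_prediction(cache, symbol, timeframe, entry, max_entries=100):
    """Insert an entry, creating the nested levels on demand, then trim."""
    bucket = cache.setdefault(symbol, {}).setdefault(timeframe, [])
    bucket.append(entry)
    # Trim in place so any other reference to the list stays valid
    del bucket[:-max_entries]

cache_prediction(prediction_cache, 'ETH/USDT', '1m', {
    'timestamp': datetime.now(timezone.utc),
    'inputs': {}, 'outputs': {}, 'norm_params': {},
})

Trimming with del keeps the list object stable; the removed code instead reassigned a slice, which replaces the list each time it overflows.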
@@ -2636,6 +2635,7 @@ class AnnotationDashboard:
 
         response = {
             'success': True,
+            'server_time': datetime.now(timezone.utc).isoformat(),  # Add server timestamp to detect stale data
             'chart_updates': {},  # Dict of timeframe -> chart_update
             'prediction': None  # Single prediction for all timeframes
         }
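
The new server_time field lets a consumer decide whether a payload is stale. A sketch of the client-side check, assuming a skew threshold that the commit does not specify:

from datetime import datetime, timezone

MAX_SKEW_SECONDS = 10.0  # hypothetical threshold, not from this commit

def is_stale(response: dict, max_skew: float = MAX_SKEW_SECONDS) -> bool:
    """True if the server produced this payload more than max_skew seconds ago."""
    server_time = datetime.fromisoformat(response['server_time'])
    age = (datetime.now(timezone.utc) - server_time).total_seconds()
    return age > max_skew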
@@ -3445,34 +3445,8 @@ class AnnotationDashboard:
             elif '1s' in predicted_candles_denorm:
                 predicted_price = predicted_candles_denorm['1s'][3]
 
-            # CACHE inference data for later training
-            # Store inputs, outputs, and normalization params so we can train when actual candle arrives
-            if symbol not in self.prediction_cache:
-                self.prediction_cache[symbol] = {}
-            if timeframe not in self.prediction_cache[symbol]:
-                self.prediction_cache[symbol][timeframe] = []
-
-            # Store cached inference data (convert tensors to CPU for storage)
-            cached_data = {
-                'timestamp': timestamp,
-                'symbol': symbol,
-                'timeframe': timeframe,
-                'model_inputs': {k: v.cpu().clone() if isinstance(v, torch.Tensor) else v
-                                 for k, v in market_data.items()},
-                'model_outputs': {k: v.cpu().clone() if isinstance(v, torch.Tensor) else v
-                                  for k, v in outputs.items()},
-                'normalization_params': norm_params,
-                'predicted_candle': predicted_candles_denorm.get(timeframe),
-                'prediction_steps': prediction_steps
-            }
-
-            self.prediction_cache[symbol][timeframe].append(cached_data)
-
-            # Keep only last 100 predictions per symbol/timeframe to prevent memory bloat
-            if len(self.prediction_cache[symbol][timeframe]) > 100:
-                self.prediction_cache[symbol][timeframe] = self.prediction_cache[symbol][timeframe][-100:]
-
-            logger.debug(f"Cached prediction for {symbol} {timeframe} @ {timestamp.isoformat()}")
+            # NOTE: Caching is now handled by InferenceFrameReference system in real_training_adapter
+            # This provides more efficient reference-based storage without copying 600 candles
 
             # Return prediction result (same format as before for compatibility)
             return {
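
The removed caching code copied tensors with .cpu().clone() before storing them. A small helper isolating that pattern; the added .detach() is a suggestion, not in the original, and it severs the autograd graph so cached entries do not keep GPU memory or graph history alive:

import torch

def snapshot(tensors: dict) -> dict:
    """Copy tensor values off the GPU before caching.

    detach() drops the autograd graph, cpu() moves the data off the device,
    and clone() guards against later in-place writes to the source tensor.
    """
    return {k: v.detach().cpu().clone() if isinstance(v, torch.Tensor) else v
            for k, v in tensors.items()}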
@@ -3492,69 +3466,8 @@ class AnnotationDashboard:
             logger.debug(traceback.format_exc())
             return None
 
-    def get_cached_predictions_for_training(self, symbol: str, timeframe: str, actual_candle_timestamp) -> List[Dict]:
-        """
-        Retrieve cached predictions that match a specific candle timestamp for training
-
-        When an actual candle arrives, we can:
-        1. Find cached predictions made before this candle
-        2. Compare predicted vs actual candle values
-        3. Calculate loss and do backpropagation
-
-        Args:
-            symbol: Trading symbol
-            timeframe: Timeframe
-            actual_candle_timestamp: Timestamp of the actual candle that just arrived
-
-        Returns:
-            List of cached prediction dicts that should be trained on
-        """
-        try:
-            if symbol not in self.prediction_cache:
-                return []
-            if timeframe not in self.prediction_cache[symbol]:
-                return []
-
-            # Find predictions made before this candle timestamp
-            # Predictions should be for candles that have now completed
-            matching_predictions = []
-            actual_time = actual_candle_timestamp if isinstance(actual_candle_timestamp, datetime) else datetime.fromisoformat(str(actual_candle_timestamp).replace('Z', '+00:00'))
-
-            for cached_pred in self.prediction_cache[symbol][timeframe]:
-                pred_time = cached_pred['timestamp']
-                if isinstance(pred_time, str):
-                    pred_time = datetime.fromisoformat(pred_time.replace('Z', '+00:00'))
-
-                # Prediction should be for a candle that comes after the prediction time
-                # We match predictions that were made before the actual candle closed
-                if pred_time < actual_time:
-                    matching_predictions.append(cached_pred)
-
-            return matching_predictions
-
-        except Exception as e:
-            logger.error(f"Error getting cached predictions for training: {e}")
-            return []
-
-    def clear_old_cached_predictions(self, symbol: str, timeframe: str, before_timestamp: datetime):
-        """
-        Clear cached predictions older than a certain timestamp
-
-        Useful for cleaning up old predictions that are no longer needed
-        """
-        try:
-            if symbol not in self.prediction_cache:
-                return
-            if timeframe not in self.prediction_cache[symbol]:
-                return
-
-            self.prediction_cache[symbol][timeframe] = [
-                pred for pred in self.prediction_cache[symbol][timeframe]
-                if pred['timestamp'] >= before_timestamp
-            ]
-
-        except Exception as e:
-            logger.debug(f"Error clearing old cached predictions: {e}")
+    # REMOVED: Unused prediction caching methods
+    # Now using InferenceFrameReference system for unified prediction storage and training
 
     def run(self, host='0.0.0.0', port=8051, debug=False):
         """Run the application - binds to all interfaces by default"""
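
The removed get_cached_predictions_for_training() docstring describes the intended train-on-arrival loop: find predictions made before the candle closed, compare them with the actual candle, and backpropagate. A hedged sketch of that loop; the model call signature and output key are assumptions, and the forward pass is re-run because the cached outputs were copied to CPU without a graph and cannot be backpropagated directly:

import torch
import torch.nn.functional as F

def train_on_actual_candle(model, optimizer, cached_predictions, actual_ohlcv):
    """Replay cached inference inputs against the candle that just closed."""
    model.train()
    for pred in cached_predictions:
        optimizer.zero_grad()
        # Cached outputs are detached CPU copies, so gradients must come
        # from a fresh forward pass over the cached inputs.
        outputs = model(**pred['model_inputs'])                       # assumed signature
        loss = F.mse_loss(outputs['predicted_candle'], actual_ohlcv)  # assumed key
        loss.backward()
        optimizer.step()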