Merge branch 'cleanup' of https://git.d-popov.com/popov/gogo2 into cleanup
@@ -16,7 +16,7 @@ sys.path.insert(0, str(parent_dir))
from flask import Flask, render_template, request, jsonify, send_file
from dash import Dash, html
import logging
from datetime import datetime
from datetime import datetime, timezone
from typing import Optional, Dict, List, Any
import json
import pandas as pd
@@ -538,6 +538,9 @@ class AnnotationDashboard:
engineio_logger=False
)
self.has_socketio = True
# Pass socketio to training adapter for live trade updates
if self.training_adapter:
self.training_adapter.socketio = self.socketio
logger.info("SocketIO initialized for real-time updates")
except ImportError:
self.socketio = None
@@ -586,6 +589,8 @@ class AnnotationDashboard:
self.annotation_manager = AnnotationManager()
# Use REAL training adapter - NO SIMULATION!
self.training_adapter = RealTrainingAdapter(None, self.data_provider)
# Pass socketio to training adapter for live trade updates
self.training_adapter.socketio = None # Will be set after socketio initialization
# Backtest runner for replaying visible chart with predictions
self.backtest_runner = BacktestRunner()

@@ -626,63 +631,38 @@ class AnnotationDashboard:
if not self.orchestrator:
logger.info("Initializing TradingOrchestrator...")
self.orchestrator = TradingOrchestrator(
data_provider=self.data_provider,
config=self.config
data_provider=self.data_provider
)
self.training_adapter.orchestrator = self.orchestrator
logger.info("TradingOrchestrator initialized")

# Get checkpoint info before loading
checkpoint_info = self._get_best_checkpoint_info(model_name)

# Load the specific model
# Check if the specific model is already initialized
if model_name == 'Transformer':
logger.info("Loading Transformer model...")
self.orchestrator.load_transformer_model()
self.loaded_models['Transformer'] = self.orchestrator.primary_transformer_trainer

# Store checkpoint info in orchestrator for UI access
if checkpoint_info:
self.orchestrator.transformer_checkpoint_info = {
'status': 'loaded',
'filename': checkpoint_info.get('filename', 'unknown'),
'epoch': checkpoint_info.get('epoch', 0),
'loss': checkpoint_info.get('loss', 0.0),
'accuracy': checkpoint_info.get('accuracy', 0.0),
'loaded_at': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}

logger.info("Transformer model loaded successfully")
logger.info("Checking Transformer model...")
if self.orchestrator.primary_transformer:
self.loaded_models['Transformer'] = self.orchestrator.primary_transformer
logger.info("Transformer model loaded successfully")
else:
logger.warning("Transformer model not initialized in orchestrator")
return

elif model_name == 'CNN':
logger.info("Loading CNN model...")
self.orchestrator.load_cnn_model()
self.loaded_models['CNN'] = self.orchestrator.cnn_model

# Store checkpoint info
if checkpoint_info:
self.orchestrator.cnn_checkpoint_info = {
'status': 'loaded',
'filename': checkpoint_info.get('filename', 'unknown'),
'loaded_at': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}

logger.info("CNN model loaded successfully")
logger.info("Checking CNN model...")
if self.orchestrator.cnn_model:
self.loaded_models['CNN'] = self.orchestrator.cnn_model
logger.info("CNN model loaded successfully")
else:
logger.warning("CNN model not initialized in orchestrator")
return

elif model_name == 'DQN':
logger.info("Loading DQN model...")
self.orchestrator.load_dqn_model()
self.loaded_models['DQN'] = self.orchestrator.dqn_agent

# Store checkpoint info
if checkpoint_info:
self.orchestrator.dqn_checkpoint_info = {
'status': 'loaded',
'filename': checkpoint_info.get('filename', 'unknown'),
'loaded_at': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}

logger.info("DQN model loaded successfully")
logger.info("Checking DQN model...")
if self.orchestrator.rl_agent:
self.loaded_models['DQN'] = self.orchestrator.rl_agent
logger.info("DQN model loaded successfully")
else:
logger.warning("DQN model not initialized in orchestrator")
return

else:
logger.warning(f"Unknown model name: {model_name}")
@@ -1741,6 +1721,9 @@ class AnnotationDashboard:
# CRITICAL: Get current symbol to filter annotations
current_symbol = data.get('symbol', 'ETH/USDT')

# Get primary timeframe for display (optional)
timeframe = data.get('timeframe', '1m')

# If no specific annotations provided, use all for current symbol
if not annotation_ids:
annotations = self.annotation_manager.get_annotations(symbol=current_symbol)
@@ -1769,12 +1752,14 @@ class AnnotationDashboard:
}
})

logger.info(f"Starting REAL training with {len(test_cases)} test cases for model {model_name}")
logger.info(f"Starting REAL training with {len(test_cases)} test cases ({len(annotation_ids)} annotations) for model {model_name} on {timeframe}")

# Start REAL training (NO SIMULATION!)
training_id = self.training_adapter.start_training(
model_name=model_name,
test_cases=test_cases
test_cases=test_cases,
annotation_count=len(annotation_ids),
timeframe=timeframe
)

return jsonify({
@@ -2392,6 +2377,55 @@ class AnnotationDashboard:
except Exception as e:
logger.error(f"Error handling prediction request: {e}")
emit('prediction_error', {'error': str(e)})

@self.socketio.on('prediction_accuracy')
def handle_prediction_accuracy(data):
"""
Handle validated prediction accuracy - trigger incremental training

This is called when frontend validates a prediction against actual candle.
We use this data to incrementally train the model for continuous improvement.
"""
from flask_socketio import emit
try:
timeframe = data.get('timeframe')
timestamp = data.get('timestamp')
predicted = data.get('predicted') # [O, H, L, C, V]
actual = data.get('actual') # [O, H, L, C]
errors = data.get('errors') # {open, high, low, close}
pct_errors = data.get('pctErrors')
direction_correct = data.get('directionCorrect')
accuracy = data.get('accuracy')

if not all([timeframe, timestamp, predicted, actual]):
logger.warning("Incomplete prediction accuracy data received")
return

logger.info(f"[{timeframe}] Prediction validated: {accuracy:.1f}% accuracy, direction: {direction_correct}")
logger.debug(f" Errors: O={pct_errors['open']:.2f}% H={pct_errors['high']:.2f}% L={pct_errors['low']:.2f}% C={pct_errors['close']:.2f}%")

# Trigger incremental training on this validated prediction
self._train_on_validated_prediction(
timeframe=timeframe,
timestamp=timestamp,
predicted=predicted,
actual=actual,
errors=errors,
direction_correct=direction_correct,
accuracy=accuracy
)

# Send confirmation back to frontend
emit('training_update', {
'status': 'training_triggered',
'timestamp': timestamp,
'accuracy': accuracy,
'message': f'Incremental training triggered on validated prediction'
})

except Exception as e:
logger.error(f"Error handling prediction accuracy: {e}", exc_info=True)
emit('training_error', {'error': str(e)})
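
Editor's note: a minimal sketch of the payload the 'prediction_accuracy' handler above expects from the frontend. The field names come straight from the handler; the concrete values are illustrative assumptions only.

# Hypothetical example payload for the 'prediction_accuracy' event (values made up for illustration)
example_accuracy_event = {
    'timeframe': '1m',
    'timestamp': '2024-01-01 12:00:00',
    'predicted': [2450.0, 2455.0, 2448.0, 2452.0, 1250.0],  # [O, H, L, C, V]
    'actual': [2450.0, 2456.0, 2449.0, 2453.0],             # [O, H, L, C]
    'errors': {'open': 0.0, 'high': 1.0, 'low': 1.0, 'close': 1.0},
    'pctErrors': {'open': 0.0, 'high': 0.04, 'low': 0.04, 'close': 0.04},
    'directionCorrect': True,
    'accuracy': 96.0
}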

def _start_live_update_thread(self):
"""Start background thread for live updates"""
@@ -2415,24 +2449,44 @@ class AnnotationDashboard:
for timeframe in ['1s', '1m']:
room = f"{symbol}_{timeframe}"

# Get latest candle
# Get latest candles (need last 2 to determine confirmation status)
try:
candles = self.data_provider.get_ohlcv(symbol, timeframe, limit=1)
candles = self.data_provider.get_ohlcv(symbol, timeframe, limit=2)
if candles and len(candles) > 0:
latest_candle = candles[-1]

# Emit chart update
# Determine if candle is confirmed (closed)
# For 1s: candle is confirmed when next candle starts (2s delay)
# For others: candle is confirmed when next candle starts
is_confirmed = len(candles) >= 2 # If we have 2 candles, the first is confirmed

# Format timestamp consistently
timestamp = latest_candle.get('timestamp')
if isinstance(timestamp, str):
# Already formatted
formatted_timestamp = timestamp
else:
# Convert to ISO string then format
from datetime import datetime
if isinstance(timestamp, datetime):
formatted_timestamp = timestamp.strftime('%Y-%m-%d %H:%M:%S')
else:
formatted_timestamp = str(timestamp)

# Emit chart update with full candle data
self.socketio.emit('chart_update', {
'symbol': symbol,
'timeframe': timeframe,
'candle': {
'timestamp': latest_candle.get('timestamp'),
'open': latest_candle.get('open'),
'high': latest_candle.get('high'),
'low': latest_candle.get('low'),
'close': latest_candle.get('close'),
'volume': latest_candle.get('volume')
}
'timestamp': formatted_timestamp,
'open': float(latest_candle.get('open', 0)),
'high': float(latest_candle.get('high', 0)),
'low': float(latest_candle.get('low', 0)),
'close': float(latest_candle.get('close', 0)),
'volume': float(latest_candle.get('volume', 0))
},
'is_confirmed': is_confirmed, # True if this candle is closed/confirmed
'has_previous': len(candles) >= 2 # True if we have previous candle for validation
}, room=room)

# Get prediction if model is loaded
@@ -2453,6 +2507,144 @@ class AnnotationDashboard:
self._live_update_thread = threading.Thread(target=live_update_worker, daemon=True)
self._live_update_thread.start()

def _train_on_validated_prediction(self, timeframe: str, timestamp: str, predicted: list,
actual: list, errors: dict, direction_correct: bool, accuracy: float):
"""
Incrementally train model on validated prediction

This implements online learning where each validated prediction becomes
a training sample, with loss weighting based on prediction accuracy.
"""
try:
if not self.training_adapter:
logger.warning("Training adapter not available for incremental training")
return

if not self.orchestrator or not hasattr(self.orchestrator, 'primary_transformer'):
logger.warning("Transformer model not available for incremental training")
return

# Get the transformer trainer
trainer = getattr(self.orchestrator, 'primary_transformer_trainer', None)
if not trainer:
logger.warning("Transformer trainer not available")
return

# Calculate sample weight based on accuracy
# Low accuracy predictions get higher weight (we need to learn from mistakes)
# High accuracy predictions get lower weight (model already knows this)
if accuracy < 50:
sample_weight = 3.0 # Learn hard from bad predictions
elif accuracy < 70:
sample_weight = 2.0 # Moderate learning
elif accuracy < 85:
sample_weight = 1.0 # Normal learning
else:
sample_weight = 0.5 # Light touch-up for good predictions

# Also weight by direction correctness
if not direction_correct:
sample_weight *= 1.5 # Wrong direction is critical - learn more

logger.info(f"[{timeframe}] Incremental training: accuracy={accuracy:.1f}%, weight={sample_weight:.1f}x")

# Create training sample from validated prediction
# We need to fetch the market state at that timestamp
symbol = 'ETH/USDT' # TODO: Get from active trading pair

training_sample = {
'symbol': symbol,
'timestamp': timestamp,
'predicted_candle': predicted, # [O, H, L, C, V]
'actual_candle': actual, # [O, H, L, C]
'errors': errors,
'accuracy': accuracy,
'direction_correct': direction_correct,
'sample_weight': sample_weight
}

# Get market state at that timestamp
try:
market_state = self._fetch_market_state_at_timestamp(symbol, timestamp, timeframe)
training_sample['market_state'] = market_state
except Exception as e:
logger.warning(f"Could not fetch market state: {e}")
return

# Convert to transformer batch format
batch = self.training_adapter._convert_prediction_to_batch(training_sample, timeframe)
if not batch:
logger.warning("Could not convert validated prediction to training batch")
return

# Train on this batch with sample weighting
with torch.enable_grad():
trainer.model.train()
result = trainer.train_step(batch, accumulate_gradients=False, sample_weight=sample_weight)

if result:
loss = result.get('total_loss', 0)
candle_accuracy = result.get('candle_accuracy', 0)

logger.info(f"[{timeframe}] Trained on validated prediction: loss={loss:.4f}, new_acc={candle_accuracy:.2%}")

# Save checkpoint periodically (every 10 incremental steps)
if not hasattr(self, '_incremental_training_steps'):
self._incremental_training_steps = 0

self._incremental_training_steps += 1

if self._incremental_training_steps % 10 == 0:
logger.info(f"Saving checkpoint after {self._incremental_training_steps} incremental training steps")
trainer.save_checkpoint(
filepath=None, # Auto-generate path
metadata={
'training_type': 'incremental_online',
'steps': self._incremental_training_steps,
'last_accuracy': accuracy
}
)

except Exception as e:
logger.error(f"Error in incremental training: {e}", exc_info=True)

def _fetch_market_state_at_timestamp(self, symbol: str, timestamp: str, timeframe: str) -> Dict:
"""Fetch market state at a specific timestamp for training"""
try:
from datetime import datetime
import pandas as pd

# Parse timestamp
ts = pd.Timestamp(timestamp)

# Get historical data for multiple timeframes
market_state = {'timeframes': {}, 'secondary_timeframes': {}}

for tf in ['1s', '1m', '1h']:
try:
df = self.data_provider.get_historical_data(symbol, tf, limit=200)
if df is not None and not df.empty:
# Find data up to (but not including) the target timestamp
df_before = df[df.index < ts]
if not df_before.empty:
recent = df_before.tail(200)
market_state['timeframes'][tf] = {
'timestamps': recent.index.strftime('%Y-%m-%d %H:%M:%S').tolist(),
'open': recent['open'].tolist(),
'high': recent['high'].tolist(),
'low': recent['low'].tolist(),
'close': recent['close'].tolist(),
'volume': recent['volume'].tolist()
}
except Exception as e:
logger.warning(f"Could not fetch {tf} data: {e}")

return market_state

except Exception as e:
logger.error(f"Error fetching market state: {e}")
return {}

def _get_live_prediction(self, symbol: str, timeframe: str, prediction_steps: int = 1):
"""Get live prediction from model"""
try:
@@ -2471,7 +2663,7 @@ class AnnotationDashboard:
return {
'symbol': symbol,
'timeframe': timeframe,
'timestamp': datetime.now().isoformat(),
'timestamp': datetime.now(timezone.utc).isoformat(),
'action': random.choice(['BUY', 'SELL', 'HOLD']),
'confidence': random.uniform(0.6, 0.95),
'predicted_price': candles[-1].get('close', 0) * (1 + random.uniform(-0.01, 0.01)),

@@ -10,6 +10,7 @@
/* Chart Panel */
.chart-panel {
height: calc(100vh - 150px);
transition: all 0.3s ease;
}

.chart-panel .card-body {
@@ -17,6 +18,29 @@
overflow: hidden;
}

/* Maximized Chart View */
.chart-maximized {
width: 100% !important;
max-width: 100% !important;
flex: 0 0 100% !important;
transition: all 0.3s ease;
}

.chart-panel-maximized {
height: calc(100vh - 80px) !important;
position: fixed;
top: 60px;
left: 0;
right: 0;
z-index: 1040;
margin: 0 !important;
border-radius: 0 !important;
}

.chart-panel-maximized .card-body {
height: calc(100% - 60px);
}

#chart-container {
height: 100%;
overflow-y: auto;
@@ -236,11 +260,32 @@
padding: 1rem;
}

/* Maximized View - Larger Charts */
.chart-panel-maximized .chart-plot {
height: 400px;
}

@media (min-width: 1400px) {
.chart-panel-maximized .chart-plot {
height: 450px;
}
}

@media (min-width: 1920px) {
.chart-panel-maximized .chart-plot {
height: 500px;
}
}

/* Responsive Adjustments */
@media (max-width: 1200px) {
.chart-plot {
height: 250px;
}

.chart-panel-maximized .chart-plot {
height: 350px;
}
}

@media (max-width: 768px) {

File diff suppressed because it is too large
@@ -99,6 +99,18 @@ class LiveUpdatesWebSocket {
console.error('Prediction error:', data);
});

this.socket.on('executed_trade', (data) => {
console.log('Executed trade received:', data);
if (this.onExecutedTrade) {
this.onExecutedTrade(data);
}
});

this.socket.on('training_update', (data) => {
console.log('Training update received:', data);
// Training feedback from incremental learning
});

// Error events
this.socket.on('connect_error', (error) => {
console.error('WebSocket connection error:', error);
@@ -230,6 +242,26 @@ document.addEventListener('DOMContentLoaded', function() {
}
};

window.liveUpdatesWS.onExecutedTrade = function(data) {
// Visualize executed trade on chart
if (window.appState && window.appState.chartManager) {
window.appState.chartManager.addExecutedTradeMarker(data.trade, data.position_state);
}

// Update position state display
if (typeof updatePositionStateDisplay === 'function') {
updatePositionStateDisplay(data.position_state, data.session_metrics);
}

// Log trade details
console.log('Executed Trade:', {
action: data.trade.action,
price: data.trade.price,
pnl: data.trade.pnl ? `$${data.trade.pnl.toFixed(2)} (${data.trade.pnl_pct.toFixed(2)}%)` : 'N/A',
position: data.position_state.has_position ? `${data.position_state.position_type.toUpperCase()} @ $${data.position_state.entry_price}` : 'CLOSED'
});
};

// Auto-connect
console.log('Auto-connecting to WebSocket...');
window.liveUpdatesWS.connect();
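
Editor's note: a hedged sketch of the server-side payload that the 'executed_trade' handler above appears to consume. The field names are inferred from the JavaScript reads here and in updatePositionStateDisplay further down; the emit site, room handling, and all values are assumptions.

# Assumed shape of the server-side emit consumed by the JS handler above (illustrative only)
self.socketio.emit('executed_trade', {
    'trade': {
        'action': 'BUY',
        'price': 2450.0,
        'pnl': 12.34,   # realized PnL in dollars (may be absent while a position is open)
        'pnl_pct': 0.5
    },
    'position_state': {
        'has_position': True,
        'position_type': 'long',
        'entry_price': 2450.0,
        'unrealized_pnl': 0.0
    },
    'session_metrics': {
        'total_pnl': 12.34,
        'win_rate': 50.0,
        'win_count': 1,
        'total_trades': 2
    }
}, room=room)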

@@ -101,6 +101,23 @@
if (typeof checkActiveTraining === 'function') {
checkActiveTraining();
}

// Keyboard shortcuts for chart maximization
document.addEventListener('keydown', function(e) {
// ESC key to exit maximized mode
if (e.key === 'Escape') {
const chartArea = document.querySelector('.chart-maximized');
if (chartArea) {
document.getElementById('maximize-btn').click();
}
}

// F key to toggle maximize (when not typing in input)
if (e.key === 'f' && !e.ctrlKey && !e.metaKey &&
!['INPUT', 'TEXTAREA', 'SELECT'].includes(document.activeElement.tagName)) {
document.getElementById('maximize-btn').click();
}
});

// Setup keyboard shortcuts
setupKeyboardShortcuts();

@@ -14,6 +14,9 @@
<button type="button" class="btn btn-outline-light" id="reset-zoom-btn" title="Reset Zoom">
<i class="fas fa-expand"></i>
</button>
<button type="button" class="btn btn-outline-light" id="maximize-btn" title="Maximize Chart Area">
<i class="fas fa-arrows-alt"></i>
</button>
<button type="button" class="btn btn-outline-light" id="fullscreen-btn" title="Fullscreen">
<i class="fas fa-expand-arrows-alt"></i>
</button>
@@ -110,6 +113,41 @@
}
});

document.getElementById('maximize-btn').addEventListener('click', function () {
const mainRow = document.querySelector('.row.mt-3');
const leftSidebar = mainRow.querySelector('.col-md-2:first-child');
const chartArea = mainRow.querySelector('.col-md-8');
const rightSidebar = mainRow.querySelector('.col-md-2:last-child');
const chartPanel = document.querySelector('.chart-panel');
const maximizeIcon = this.querySelector('i');

// Toggle maximize state
if (chartArea.classList.contains('chart-maximized')) {
// Restore normal view
leftSidebar.style.display = '';
rightSidebar.style.display = '';
chartArea.classList.remove('chart-maximized');
chartPanel.classList.remove('chart-panel-maximized');
maximizeIcon.className = 'fas fa-arrows-alt';
this.title = 'Maximize Chart Area';
} else {
// Maximize chart area
leftSidebar.style.display = 'none';
rightSidebar.style.display = 'none';
chartArea.classList.add('chart-maximized');
chartPanel.classList.add('chart-panel-maximized');
maximizeIcon.className = 'fas fa-compress-arrows-alt';
this.title = 'Restore Normal View';
}

// Update chart layouts after transition
setTimeout(() => {
if (window.appState && window.appState.chartManager) {
window.appState.chartManager.updateChartLayout();
}
}, 350);
});

document.getElementById('fullscreen-btn').addEventListener('click', function () {
const chartContainer = document.getElementById('chart-container');
if (chartContainer.requestFullscreen) {

@@ -40,9 +40,13 @@
role="progressbar" style="width: 0%"></div>
</div>
<div class="small">
<div>Epoch: <span id="training-epoch">0</span>/<span id="training-total-epochs">0</span></div>
<div>Loss: <span id="training-loss">--</span></div>
<div>GPU: <span id="training-gpu-util">--</span>% | CPU: <span id="training-cpu-util">--</span>%</div>
<div>Annotations: <span id="training-annotation-count" class="fw-bold text-primary">--</span></div>
<div>Timeframe: <span id="training-timeframe" class="fw-bold text-info">--</span></div>
<div class="mt-1 pt-1 border-top">
<div>Epoch: <span id="training-epoch">0</span>/<span id="training-total-epochs">0</span></div>
<div>Loss: <span id="training-loss">--</span></div>
<div>GPU: <span id="training-gpu-util">--</span>% | CPU: <span id="training-cpu-util">--</span>%</div>
</div>
</div>
</div>
</div>
@@ -139,12 +143,42 @@
<!-- Inference Status -->
<div id="inference-status" style="display: none;">
<div class="alert alert-success py-2 px-2 mb-2">
<div class="d-flex align-items-center mb-1">
<div class="spinner-border spinner-border-sm me-2" role="status">
<span class="visually-hidden">Running...</span>
<div class="d-flex align-items-center justify-content-between mb-1">
<div class="d-flex align-items-center">
<div class="spinner-border spinner-border-sm me-2" role="status">
<span class="visually-hidden">Running...</span>
</div>
<strong class="small">🔴 LIVE</strong>
</div>
<!-- Model Performance -->
<div class="small text-end">
<div style="font-size: 0.65rem;">Acc: <span id="live-accuracy" class="fw-bold text-success">--</span></div>
<div style="font-size: 0.65rem;">Loss: <span id="live-loss" class="fw-bold text-warning">--</span></div>
</div>
<strong class="small">🔴 LIVE</strong>
</div>

<!-- Position & PnL Status -->
<div class="mb-2 p-2" style="background-color: rgba(0,0,0,0.1); border-radius: 4px;">
<div class="small">
<div class="d-flex justify-content-between">
<span>Position:</span>
<span id="position-status" class="fw-bold text-info">NO POSITION</span>
</div>
<div class="d-flex justify-content-between" id="floating-pnl-row" style="display: none !important;">
<span>Floating PnL:</span>
<span id="floating-pnl" class="fw-bold">--</span>
</div>
<div class="d-flex justify-content-between">
<span>Session PnL:</span>
<span id="session-pnl" class="fw-bold text-success">+$0.00</span>
</div>
<div class="d-flex justify-content-between" style="font-size: 0.7rem; color: #9ca3af;">
<span>Win Rate:</span>
<span id="win-rate">0% (0/0)</span>
</div>
</div>
</div>

<div class="small">
<div>Timeframe: <span id="active-timeframe" class="fw-bold text-primary">--</span></div>
<div>Signal: <span id="latest-signal" class="fw-bold">--</span></div>
@@ -195,6 +229,15 @@
// Resume tracking
activeTrainingId = data.session.training_id;
showTrainingStatus();

// Populate annotation count and timeframe if available
if (data.session.annotation_count) {
document.getElementById('training-annotation-count').textContent = data.session.annotation_count;
}
if (data.session.timeframe) {
document.getElementById('training-timeframe').textContent = data.session.timeframe.toUpperCase();
}

pollTrainingProgress(activeTrainingId);
} else {
console.log('No active training session');
@@ -274,6 +317,36 @@

console.log(`✓ Models available: ${data.available_count}, loaded: ${data.loaded_count}`);

// Auto-select Transformer (or any loaded model) if available
let modelToSelect = null;
// First try to find Transformer
const transformerModel = data.models.find(m => {
const modelName = (m && typeof m === 'object' && m.name) ? m.name : String(m);
const isLoaded = (m && typeof m === 'object' && 'loaded' in m) ? m.loaded : false;
return modelName === 'Transformer' && isLoaded;
});

if (transformerModel) {
modelToSelect = 'Transformer';
} else {
// If Transformer not loaded, find any loaded model
const loadedModel = data.models.find(m => {
const isLoaded = (m && typeof m === 'object' && 'loaded' in m) ? m.loaded : false;
return isLoaded;
});
if (loadedModel) {
const modelName = (loadedModel && typeof loadedModel === 'object' && loadedModel.name) ? loadedModel.name : String(loadedModel);
modelToSelect = modelName;
}
}

// Auto-select if found
if (modelToSelect) {
modelSelect.value = modelToSelect;
selectedModel = modelToSelect;
console.log(`✓ Auto-selected loaded model: ${modelToSelect}`);
}

// Update button state for currently selected model
updateButtonState();
} else {
@@ -418,10 +491,17 @@
// Show training status
showTrainingStatus();

// Get primary timeframe for training
const primaryTimeframe = document.getElementById('primary-timeframe-select').value;

// Reset progress
document.getElementById('training-progress-bar').style.width = '0%';
document.getElementById('training-epoch').textContent = '0';
document.getElementById('training-loss').textContent = '--';

// Set annotation count and timeframe
document.getElementById('training-annotation-count').textContent = annotationIds.length;
document.getElementById('training-timeframe').textContent = primaryTimeframe.toUpperCase();

// Start training request
fetch('/api/train-model', {
@@ -430,7 +510,8 @@
body: JSON.stringify({
model_name: modelName,
annotation_ids: annotationIds,
symbol: appState.currentSymbol // CRITICAL: Filter by current symbol
symbol: appState.currentSymbol, // CRITICAL: Filter by current symbol
timeframe: primaryTimeframe // Primary timeframe for display
})
})
.then(response => response.json())
@@ -977,6 +1058,70 @@
}
}

function updatePositionStateDisplay(positionState, sessionMetrics) {
/**
* Update live trading panel with current position and PnL info
*/
try {
// Update position status
const positionStatusEl = document.getElementById('position-status');
const floatingPnlRow = document.getElementById('floating-pnl-row');
const floatingPnlEl = document.getElementById('floating-pnl');

if (positionState.has_position) {
const posType = positionState.position_type.toUpperCase();
const entryPrice = positionState.entry_price.toFixed(2);
positionStatusEl.textContent = `${posType} @ $${entryPrice}`;
positionStatusEl.className = posType === 'LONG' ? 'fw-bold text-success' : 'fw-bold text-danger';

// Show floating PnL
if (floatingPnlRow) {
floatingPnlRow.style.setProperty('display', 'flex', 'important'); // 'flex !important' is not a valid value for style.display
floatingPnlRow.classList.remove('d-none');
}
const unrealizedPnl = positionState.unrealized_pnl || 0;
const pnlColor = unrealizedPnl >= 0 ? 'text-success' : 'text-danger';
const pnlSign = unrealizedPnl >= 0 ? '+' : '';
floatingPnlEl.textContent = `${pnlSign}${unrealizedPnl.toFixed(2)}%`;
floatingPnlEl.className = `fw-bold ${pnlColor}`;
} else {
positionStatusEl.textContent = 'NO POSITION';
positionStatusEl.className = 'fw-bold text-secondary';

// Hide floating PnL row
if (floatingPnlRow) {
floatingPnlRow.style.setProperty('display', 'none', 'important');
floatingPnlRow.classList.add('d-none');
}
}

// Update session PnL
const sessionPnlEl = document.getElementById('session-pnl');
if (sessionPnlEl && sessionMetrics) {
const totalPnl = sessionMetrics.total_pnl || 0;
const pnlColor = totalPnl >= 0 ? 'text-success' : 'text-danger';
const pnlSign = totalPnl >= 0 ? '+' : '';
sessionPnlEl.textContent = `${pnlSign}$${totalPnl.toFixed(2)}`;
sessionPnlEl.className = `fw-bold ${pnlColor}`;

// Update win rate
const winRateEl = document.getElementById('win-rate');
if (winRateEl) {
const winRate = sessionMetrics.win_rate || 0;
const winCount = sessionMetrics.win_count || 0;
const totalTrades = sessionMetrics.total_trades || 0;
winRateEl.textContent = `${winRate.toFixed(1)}% (${winCount}/${totalTrades})`;
}
}

} catch (error) {
console.error('Error updating position state display:', error);
}
}

// Make function globally accessible for WebSocket handler
window.updatePositionStateDisplay = updatePositionStateDisplay;

function updatePredictionHistory() {
const historyDiv = document.getElementById('prediction-history');
if (predictionHistory.length === 0) {