Fix model mappings, dash updates, and trading

This commit is contained in:
Dobromir Popov
2025-07-22 15:44:59 +03:00
parent 3e35b9cddb
commit 1a54fb1d56
32 changed files with 6168 additions and 857 deletions

View File

@@ -88,10 +88,23 @@ except ImportError:
logger.warning("Universal Data Adapter not available")
# Import RL COB trader for 1B parameter model integration
from core.realtime_rl_cob_trader import RealtimeRLCOBTrader, PredictionResult
try:
from core.realtime_rl_cob_trader import RealtimeRLCOBTrader, PredictionResult
REALTIME_RL_AVAILABLE = True
except ImportError:
REALTIME_RL_AVAILABLE = False
logger.warning("Realtime RL COB trader not available")
RealtimeRLCOBTrader = None
PredictionResult = None
# Import overnight training coordinator
from core.overnight_training_coordinator import OvernightTrainingCoordinator
try:
from core.overnight_training_coordinator import OvernightTrainingCoordinator
OVERNIGHT_TRAINING_AVAILABLE = True
except ImportError:
OVERNIGHT_TRAINING_AVAILABLE = False
logger.warning("Overnight training coordinator not available")
OvernightTrainingCoordinator = None
# Single unified orchestrator with full ML capabilities
@@ -231,6 +244,19 @@ class CleanTradingDashboard:
# Initialize COB integration with enhanced WebSocket
self._initialize_cob_integration() # Use the working COB integration method
# Subscribe to COB data updates from data provider and start collection
if self.data_provider:
try:
# Start COB collection first
self.data_provider.start_cob_collection()
logger.info("Started COB collection in data provider")
# Then subscribe to updates
self.data_provider.subscribe_to_cob(self._on_cob_data_update)
logger.info("Subscribed to COB data updates from data provider")
except Exception as e:
logger.error(f"Failed to start COB collection or subscribe: {e}")
# Start signal generation loop to ensure continuous trading signals
self._start_signal_generation_loop()
@@ -251,6 +277,75 @@ class CleanTradingDashboard:
logger.debug("Clean Trading Dashboard initialized with HIGH-FREQUENCY COB integration and signal generation")
logger.info("🌙 Overnight Training Coordinator ready - call start_overnight_training() to begin")
def _on_cob_data_update(self, symbol: str, cob_data: dict):
    """Handle a COB (consolidated order book) update pushed by the data provider.

    Normalizes the payload to a dict that always carries a 'stats' section,
    caches it in self.latest_cob_data, records the receive time, refreshes the
    live price from stats['mid_price'] when positive, and appends the payload
    to a per-symbol rolling history used for moving-average calculations.

    Args:
        symbol: Trading pair, e.g. 'ETH/USDT'.
        cob_data: COB payload; expected to be a dict with a 'stats' mapping.
    """
    # Single source of truth for the default stats structure (previously this
    # dict literal was duplicated in two places in this method).
    def _default_stats() -> dict:
        return {
            'mid_price': 0.0,
            'imbalance': 0.0,
            'imbalance_5s': 0.0,
            'imbalance_15s': 0.0,
            'imbalance_60s': 0.0,
        }

    try:
        # Lazily create caches so this callback is safe even if __init__
        # has not set them up yet.
        if not hasattr(self, 'latest_cob_data'):
            self.latest_cob_data = {}

        # Coerce non-dict payloads into the expected dict structure.
        if not isinstance(cob_data, dict):
            logger.warning(f"Received non-dict COB data for {symbol}: {type(cob_data)}")
            if hasattr(cob_data, '__dict__'):
                cob_data = vars(cob_data)
            else:
                # Create a minimal valid structure
                cob_data = {
                    'symbol': symbol,
                    'timestamp': datetime.now(),
                    'stats': _default_stats(),
                }

        # Guarantee the stats section exists before any access below.
        if 'stats' not in cob_data:
            cob_data['stats'] = _default_stats()

        self.latest_cob_data[symbol] = cob_data

        # Track the last update time per symbol (epoch seconds).
        if not hasattr(self, 'cob_last_update'):
            self.cob_last_update = {}
        import time
        self.cob_last_update[symbol] = time.time()

        # Use the order-book mid price as the live price when it is valid.
        mid_price = cob_data['stats'].get('mid_price', 0)
        if mid_price and mid_price > 0:
            self.current_prices[symbol] = mid_price
            logger.debug(f"Updated price for {symbol}: ${mid_price:.2f}")

        # Rolling history (61 entries ~= one minute of ticks) for moving
        # average calculations; only tracked for the known symbols.
        if not hasattr(self, 'cob_data_history'):
            self.cob_data_history = {
                'ETH/USDT': deque(maxlen=61),
                'BTC/USDT': deque(maxlen=61),
            }
        if symbol in self.cob_data_history:
            self.cob_data_history[symbol].append(cob_data)

        logger.debug(f"Updated COB data for {symbol}: mid_price=${cob_data.get('stats', {}).get('mid_price', 0):.2f}")
    except Exception as e:
        logger.error(f"Error handling COB data update for {symbol}: {e}")
def start_overnight_training(self):
"""Start the overnight training session"""
@@ -525,13 +620,17 @@ class CleanTradingDashboard:
# Try to get price from COB data as fallback
if hasattr(self, 'latest_cob_data') and 'ETH/USDT' in self.latest_cob_data:
cob_data = self.latest_cob_data['ETH/USDT']
if 'stats' in cob_data and 'mid_price' in cob_data['stats']:
if isinstance(cob_data, dict) and 'stats' in cob_data and 'mid_price' in cob_data['stats']:
current_price = cob_data['stats']['mid_price']
price_str = f"${current_price:.2f}"
else:
price_str = "Loading..."
# Debug log to help diagnose the issue
logger.debug(f"COB data format issue: {type(cob_data)}, keys: {cob_data.keys() if isinstance(cob_data, dict) else 'N/A'}")
else:
price_str = "Loading..."
# Debug log to help diagnose the issue
logger.debug(f"No COB data available for ETH/USDT. Latest COB data keys: {self.latest_cob_data.keys() if hasattr(self, 'latest_cob_data') else 'N/A'}")
# Calculate session P&L including unrealized P&L from current position
total_session_pnl = self.session_pnl # Start with realized P&L
@@ -542,6 +641,34 @@ class CleanTradingDashboard:
size = self.current_position.get('size', 0)
entry_price = self.current_position.get('price', 0)
if entry_price and size > 0:
# Calculate unrealized P&L with current leverage
if side.upper() == 'LONG' or side.upper() == 'BUY':
raw_pnl_per_unit = current_price - entry_price
else: # SHORT or SELL
raw_pnl_per_unit = entry_price - current_price
# Apply current leverage to unrealized P&L
leveraged_unrealized_pnl = raw_pnl_per_unit * size * self.current_leverage
total_session_pnl += leveraged_unrealized_pnl
@@ -772,9 +899,24 @@ class CleanTradingDashboard:
eth_snapshot = self._get_cob_snapshot('ETH/USDT')
btc_snapshot = self._get_cob_snapshot('BTC/USDT')
# Debug: Log COB data availability - OPTIMIZED: Less frequent logging
if n % 30 == 0: # Log every 30 seconds to reduce spam and improve performance
logger.info(f"COB Update #{n % 100}: ETH snapshot: {eth_snapshot is not None}, BTC snapshot: {btc_snapshot is not None}")
# Debug: Log COB data availability more frequently to debug the issue
if n % 10 == 0: # Log every 10 seconds to debug
logger.info(f"COB Update #{n}: ETH snapshot: {eth_snapshot is not None}, BTC snapshot: {btc_snapshot is not None}")
# Check data provider COB data directly
if self.data_provider:
eth_cob = self.data_provider.get_latest_cob_data('ETH/USDT')
btc_cob = self.data_provider.get_latest_cob_data('BTC/USDT')
logger.info(f"Data Provider COB: ETH={eth_cob is not None}, BTC={btc_cob is not None}")
if eth_cob:
eth_stats = eth_cob.get('stats', {})
logger.info(f"ETH COB stats: mid_price=${eth_stats.get('mid_price', 0):.2f}")
if btc_cob:
btc_stats = btc_cob.get('stats', {})
logger.info(f"BTC COB stats: mid_price=${btc_stats.get('mid_price', 0):.2f}")
if hasattr(self, 'latest_cob_data'):
eth_data_time = self.cob_last_update.get('ETH/USDT', 0) if hasattr(self, 'cob_last_update') else 0
btc_data_time = self.cob_last_update.get('BTC/USDT', 0) if hasattr(self, 'cob_last_update') else 0
@@ -2340,18 +2482,18 @@ class CleanTradingDashboard:
return {'error': str(e), 'cob_status': 'Error Getting Status', 'orchestrator_type': 'Unknown'}
def _get_cob_snapshot(self, symbol: str) -> Optional[Any]:
"""Get COB snapshot for symbol - CENTRALIZED: Use data provider's COB data"""
"""Get COB snapshot for symbol - ENHANCED: Use data provider's WebSocket COB data"""
try:
# Priority 1: Use data provider's centralized COB data (primary source)
# Priority 1: Use data provider's latest COB data (WebSocket or REST)
if self.data_provider:
try:
cob_data = self.data_provider.get_latest_cob_data(symbol)
logger.debug(f"COB data type for {symbol}: {type(cob_data)}, data: {cob_data}")
if cob_data and isinstance(cob_data, dict):
# Validate COB data structure
if 'stats' in cob_data and cob_data['stats']:
logger.debug(f"COB snapshot available for {symbol} from centralized data provider")
stats = cob_data.get('stats', {})
if stats and stats.get('mid_price', 0) > 0:
logger.debug(f"COB snapshot available for {symbol} from data provider")
# Create a snapshot object from the data provider's data
class COBSnapshot:
@@ -2381,58 +2523,107 @@ class CleanTradingDashboard:
'total_volume_usd': ask[0] * ask[1]
})
self.stats = data.get('stats', {})
# Add direct attributes for new format compatibility
self.volume_weighted_mid = self.stats.get('mid_price', 0)
self.spread_bps = self.stats.get('spread_bps', 0)
self.liquidity_imbalance = self.stats.get('imbalance', 0)
self.total_bid_liquidity = self.stats.get('bid_liquidity', 0)
self.total_ask_liquidity = self.stats.get('ask_liquidity', 0)
# Use stats from data and calculate liquidity properly
self.stats = stats.copy()
# Calculate total liquidity from order book if not provided
bid_liquidity = stats.get('bid_liquidity', 0) or stats.get('total_bid_liquidity', 0)
ask_liquidity = stats.get('ask_liquidity', 0) or stats.get('total_ask_liquidity', 0)
# If liquidity is still 0, calculate from order book data
if bid_liquidity == 0 and self.consolidated_bids:
bid_liquidity = sum(bid['total_volume_usd'] for bid in self.consolidated_bids)
if ask_liquidity == 0 and self.consolidated_asks:
ask_liquidity = sum(ask['total_volume_usd'] for ask in self.consolidated_asks)
# Update stats with calculated liquidity
self.stats['total_bid_liquidity'] = bid_liquidity
self.stats['total_ask_liquidity'] = ask_liquidity
self.stats['bid_liquidity'] = bid_liquidity
self.stats['ask_liquidity'] = ask_liquidity
# Add direct attributes for compatibility
self.volume_weighted_mid = stats.get('mid_price', 0)
self.spread_bps = stats.get('spread_bps', 0)
self.liquidity_imbalance = stats.get('imbalance', 0)
self.total_bid_liquidity = bid_liquidity
self.total_ask_liquidity = ask_liquidity
self.exchanges_active = ['Binance'] # Default for now
return COBSnapshot(cob_data)
else:
# Data exists but no stats - this is the "Invalid COB data" case
logger.debug(f"COB data for {symbol} missing stats structure: {type(cob_data)}, keys: {list(cob_data.keys()) if isinstance(cob_data, dict) else 'not dict'}")
logger.debug(f"COB data for {symbol} missing valid stats: {stats}")
return None
else:
logger.debug(f"No COB data available for {symbol} from data provider")
logger.debug(f"No valid COB data for {symbol} from data provider")
return None
except Exception as e:
logger.error(f"Error getting COB data from data provider: {e}")
# Priority 2: Use orchestrator's COB integration (secondary source)
if hasattr(self.orchestrator, 'cob_integration') and self.orchestrator.cob_integration:
# Try to get snapshot from orchestrator's COB integration
snapshot = self.orchestrator.cob_integration.get_cob_snapshot(symbol)
if snapshot:
logger.debug(f"COB snapshot available for {symbol} from orchestrator COB integration, type: {type(snapshot)}")
# Check if it's a list (which would cause the error)
if isinstance(snapshot, list):
logger.warning(f"Orchestrator returned list instead of COB snapshot for {symbol}")
# Don't return the list, continue to other sources
else:
return snapshot
# If no snapshot, try to get from orchestrator's cached data
if hasattr(self.orchestrator, 'latest_cob_data') and symbol in self.orchestrator.latest_cob_data:
cob_data = self.orchestrator.latest_cob_data[symbol]
logger.debug(f"COB snapshot available for {symbol} from orchestrator cached data")
# Create a simple snapshot object from the cached data
class COBSnapshot:
def __init__(self, data):
self.consolidated_bids = data.get('bids', [])
self.consolidated_asks = data.get('asks', [])
self.stats = data.get('stats', {})
return COBSnapshot(cob_data)
# Priority 2: Try to get raw WebSocket data directly
if self.data_provider and hasattr(self.data_provider, 'cob_raw_ticks'):
try:
raw_ticks = self.data_provider.get_cob_raw_ticks(symbol, count=1)
if raw_ticks:
latest_tick = raw_ticks[-1]
stats = latest_tick.get('stats', {})
if stats and stats.get('mid_price', 0) > 0:
logger.debug(f"Using raw WebSocket tick for {symbol}")
# Create snapshot from raw tick
class COBSnapshot:
def __init__(self, tick_data):
bids = tick_data.get('bids', [])
asks = tick_data.get('asks', [])
self.consolidated_bids = []
for bid in bids[:20]: # Top 20 levels
self.consolidated_bids.append({
'price': bid['price'],
'size': bid['size'],
'total_size': bid['size'],
'total_volume_usd': bid['price'] * bid['size']
})
self.consolidated_asks = []
for ask in asks[:20]: # Top 20 levels
self.consolidated_asks.append({
'price': ask['price'],
'size': ask['size'],
'total_size': ask['size'],
'total_volume_usd': ask['price'] * ask['size']
})
self.stats = stats
self.volume_weighted_mid = stats.get('mid_price', 0)
self.spread_bps = stats.get('spread_bps', 0)
self.liquidity_imbalance = stats.get('imbalance', 0)
self.total_bid_liquidity = stats.get('bid_volume', 0)
self.total_ask_liquidity = stats.get('ask_volume', 0)
self.exchanges_active = ['Binance']
return COBSnapshot(latest_tick)
except Exception as e:
logger.debug(f"Error getting raw WebSocket data: {e}")
# Priority 3: Use dashboard's cached COB data (last resort fallback)
# Priority 3: Use orchestrator's COB integration (fallback)
if hasattr(self.orchestrator, 'cob_integration') and self.orchestrator.cob_integration:
try:
snapshot = self.orchestrator.cob_integration.get_cob_snapshot(symbol)
if snapshot and not isinstance(snapshot, list):
logger.debug(f"COB snapshot from orchestrator for {symbol}")
return snapshot
except Exception as e:
logger.debug(f"Error getting COB from orchestrator: {e}")
# Priority 4: Use dashboard's cached COB data (last resort)
if symbol in self.latest_cob_data and self.latest_cob_data[symbol]:
cob_data = self.latest_cob_data[symbol]
logger.debug(f"COB snapshot available for {symbol} from dashboard cached data (fallback)")
logger.debug(f"Using dashboard cached COB data for {symbol}")
# Create a simple snapshot object from the cached data
class COBSnapshot:
@@ -2460,18 +2651,40 @@ class CleanTradingDashboard:
def _get_cob_mode(self) -> str:
"""Get current COB data collection mode"""
try:
# Check if orchestrator COB integration is working
if hasattr(self.orchestrator, 'cob_integration') and self.orchestrator.cob_integration:
# Try to get a snapshot from orchestrator
snapshot = self.orchestrator.cob_integration.get_cob_snapshot('ETH/USDT')
if snapshot and hasattr(snapshot, 'consolidated_bids') and snapshot.consolidated_bids:
return "WS" # WebSocket/Advanced mode
# Check if data provider has WebSocket COB integration
if self.data_provider and hasattr(self.data_provider, 'cob_websocket'):
# Check WebSocket status
if hasattr(self.data_provider.cob_websocket, 'status'):
eth_status = self.data_provider.cob_websocket.status.get('ETH/USDT')
if eth_status and eth_status.connected:
return "WS" # WebSocket mode
# Check if we have recent WebSocket data
if hasattr(self.data_provider, 'cob_raw_ticks'):
eth_ticks = self.data_provider.cob_raw_ticks.get('ETH/USDT', [])
if eth_ticks:
import time
latest_tick = eth_ticks[-1]
tick_time = latest_tick.get('timestamp', 0)
if isinstance(tick_time, (int, float)) and (time.time() - tick_time) < 10:
return "WS" # Recent WebSocket data
# Check if fallback data is available
# Check if we have any COB data (REST fallback)
if hasattr(self, 'latest_cob_data') and 'ETH/USDT' in self.latest_cob_data:
if self.latest_cob_data['ETH/USDT']:
return "REST" # REST API fallback mode
# Check data provider cache
if self.data_provider:
latest_cob = self.data_provider.get_latest_cob_data('ETH/USDT')
if latest_cob and latest_cob.get('stats', {}).get('mid_price', 0) > 0:
# Check source to determine mode
source = latest_cob.get('source', 'unknown')
if 'websocket' in source.lower() or 'enhanced' in source.lower():
return "WS"
else:
return "REST"
return "None" # No data available
except Exception as e:
@@ -6313,16 +6526,26 @@ class CleanTradingDashboard:
"""Connect to orchestrator for real trading signals"""
try:
if self.orchestrator and hasattr(self.orchestrator, 'add_decision_callback'):
def connect_worker():
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self.orchestrator.add_decision_callback(self._on_trading_decision))
logger.info("Successfully connected to orchestrator for trading signals.")
except Exception as e:
logger.error(f"Orchestrator connection worker failed: {e}")
thread = threading.Thread(target=connect_worker, daemon=True)
thread.start()
# Directly add the callback to the orchestrator's decision_callbacks list
# This is a simpler approach that avoids async/threading issues
if hasattr(self.orchestrator, 'decision_callbacks'):
if self._on_trading_decision not in self.orchestrator.decision_callbacks:
self.orchestrator.decision_callbacks.append(self._on_trading_decision)
logger.info("Successfully connected to orchestrator for trading signals (direct method).")
else:
logger.info("Trading decision callback already registered.")
else:
# Fallback to async method if needed
def connect_worker():
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self.orchestrator.add_decision_callback(self._on_trading_decision))
logger.info("Successfully connected to orchestrator for trading signals (async method).")
except Exception as e:
logger.error(f"Orchestrator connection worker failed: {e}")
thread = threading.Thread(target=connect_worker, daemon=True)
thread.start()
else:
logger.warning("Orchestrator not available or doesn't support callbacks")
except Exception as e:

View File

@@ -382,12 +382,6 @@ class DashboardComponentManager:
mode_color = "text-success" if cob_mode == "WS" else "text-warning" if cob_mode == "REST" else "text-muted"
mode_icon = "fas fa-wifi" if cob_mode == "WS" else "fas fa-globe" if cob_mode == "REST" else "fas fa-question"
imbalance_stats_display = []
if cumulative_imbalance_stats:
imbalance_stats_display.append(html.H6("Cumulative Imbalance", className="mt-3 mb-2 small text-muted text-uppercase"))
for period, value in cumulative_imbalance_stats.items():
imbalance_stats_display.append(self._create_imbalance_stat_row(period, value))
return html.Div([
html.H6(f"{symbol} - COB Overview", className="mb-2"),
html.Div([
@@ -406,19 +400,17 @@ class DashboardComponentManager:
html.Span(imbalance_text, className=f"fw-bold small {imbalance_color}")
]),
# Multi-timeframe imbalance metrics
# Multi-timeframe imbalance metrics (single display, not duplicate)
html.Div([
html.Strong("Timeframe Imbalances:", className="small d-block mt-2 mb-1")
]),
html.Div([
self._create_timeframe_imbalance("1s", cumulative_imbalance_stats.get('1s', imbalance)),
self._create_timeframe_imbalance("5s", cumulative_imbalance_stats.get('5s', imbalance)),
self._create_timeframe_imbalance("15s", cumulative_imbalance_stats.get('15s', imbalance)),
self._create_timeframe_imbalance("60s", cumulative_imbalance_stats.get('60s', imbalance)),
self._create_timeframe_imbalance("1s", cumulative_imbalance_stats.get('1s', imbalance) if cumulative_imbalance_stats else imbalance),
self._create_timeframe_imbalance("5s", cumulative_imbalance_stats.get('5s', imbalance) if cumulative_imbalance_stats else imbalance),
self._create_timeframe_imbalance("15s", cumulative_imbalance_stats.get('15s', imbalance) if cumulative_imbalance_stats else imbalance),
self._create_timeframe_imbalance("60s", cumulative_imbalance_stats.get('60s', imbalance) if cumulative_imbalance_stats else imbalance),
], className="d-flex justify-content-between mb-2"),
html.Div(imbalance_stats_display),
html.Hr(className="my-2"),

View File

@@ -39,12 +39,23 @@ class DashboardLayoutManager:
], className="bg-dark p-2 mb-2")
def _create_interval_component(self):
"""Create the auto-refresh interval component"""
return dcc.Interval(
id='interval-component',
interval=250, # Update every 250 ms (4 Hz)
n_intervals=0
)
"""Create the auto-refresh interval components with different frequencies"""
return html.Div([
# Main interval for regular UI updates (1 second)
dcc.Interval(
id='interval-component',
interval=1000, # Update every 1000 ms (1 Hz)
n_intervals=0
),
# Slow interval for non-critical updates (5 seconds)
dcc.Interval(
id='slow-interval-component',
interval=5000, # Update every 5 seconds (0.2 Hz)
n_intervals=0
),
# WebSocket-based updates for high-frequency data (no interval needed)
html.Div(id='websocket-updates-container', style={'display': 'none'})
])
def _create_main_content(self):
"""Create the main content area"""

View File

View File

@@ -0,0 +1,173 @@
#!/usr/bin/env python3
"""
TensorBoard Component for Dashboard
This module provides a Dash component that embeds TensorBoard in the dashboard.
"""
import dash
from dash import html, dcc
import dash_bootstrap_components as dbc
import logging
from typing import Optional, Dict, Any
logger = logging.getLogger(__name__)
def create_tensorboard_tab(tensorboard_url: str = "http://localhost:6006") -> html.Div:
    """Build the dashboard tab that embeds TensorBoard in an iframe.

    Args:
        tensorboard_url: URL of the TensorBoard server

    Returns:
        html.Div: Container with an info banner, the TensorBoard iframe and a
        placeholder div for a training-metrics summary.
    """
    # Banner with a quick link to open TensorBoard outside the dashboard.
    open_link = html.A(
        "Open in New Window",
        href=tensorboard_url,
        target="_blank",
        className="ms-2 btn btn-sm btn-primary",
    )
    banner = dbc.Alert(
        [
            html.I(className="fas fa-chart-line me-2"),
            "TensorBoard Training Visualization",
            open_link,
        ],
        color="info",
        className="mb-3",
    )

    # The embedded TensorBoard UI itself.
    board_frame = html.Iframe(
        src=tensorboard_url,
        style={
            'width': '100%',
            'height': '800px',
            'border': 'none',
        },
    )

    # Placeholder populated elsewhere with summary metrics.
    metrics_summary = html.Div(
        [
            html.H5("Training Metrics Summary", className="mt-3"),
            html.Div(id="training-metrics-summary", className="mt-2"),
        ],
        className="mt-3",
    )

    return html.Div([banner, board_frame, metrics_summary])
def create_training_metrics_card() -> dbc.Card:
    """
    Create a card displaying key training metrics

    The component ids below (model-training-status, training-progress-bar,
    training-loss-value, ...) are the targets updated by
    update_training_metrics_card.

    Returns:
        dbc.Card: Dash Bootstrap card component
    """
    return dbc.Card([
        dbc.CardHeader([
            html.I(className="fas fa-brain me-2"),
            "Training Metrics"
        ]),
        dbc.CardBody([
            # Row 1: overall training status and a progress bar.
            dbc.Row([
                dbc.Col([
                    html.H6("Model Status"),
                    html.Div(id="model-training-status", children="Initializing...")
                ], width=6),
                dbc.Col([
                    html.H6("Training Progress"),
                    dbc.Progress(id="training-progress-bar", value=0, className="mb-2"),
                    html.Div(id="training-progress-text", children="0%")
                ], width=6)
            ], className="mb-3"),
            # Row 2: headline numeric metrics (filled in by callbacks).
            dbc.Row([
                dbc.Col([
                    html.H6("Loss"),
                    html.Div(id="training-loss-value", children="N/A")
                ], width=4),
                dbc.Col([
                    html.H6("Reward"),
                    html.Div(id="training-reward-value", children="N/A")
                ], width=4),
                dbc.Col([
                    html.H6("State Quality"),
                    html.Div(id="training-state-quality", children="N/A")
                ], width=4)
            ], className="mb-3"),
            # Row 3: shortcut button to the full TensorBoard UI.
            dbc.Row([
                dbc.Col([
                    html.A(
                        dbc.Button([
                            html.I(className="fas fa-chart-line me-2"),
                            "Open TensorBoard"
                        ], color="primary", size="sm", className="w-100"),
                        href="http://localhost:6006",
                        target="_blank"
                    )
                ], width=12)
            ])
        ])
    ], className="mb-3")
def create_tensorboard_status_indicator(tensorboard_url: str = "http://localhost:6006") -> html.Div:
    """Build a small status button that links out to the TensorBoard server.

    Args:
        tensorboard_url: URL of the TensorBoard server

    Returns:
        html.Div: Container wrapping the TensorBoard link button.
    """
    status_button = dbc.Button(
        [
            html.I(className="fas fa-chart-line me-2"),
            "TensorBoard",
        ],
        id="tensorboard-status-button",
        color="success",
        size="sm",
        href=tensorboard_url,
        target="_blank",
        external_link=True,
        className="ms-2",
    )
    return html.Div([status_button], id="tensorboard-status-container")
def update_training_metrics_card(metrics: Dict[str, Any]) -> Dict[str, Any]:
    """Translate raw training metrics into updates for the metrics card.

    Args:
        metrics: Mapping that may contain 'training_active' (bool), 'loss',
            'reward', 'state_quality' and 'progress' (fraction in [0, 1]).

    Returns:
        Dict: Mapping of component id -> new value/children.
    """
    # Shared "value or N/A" formatter for the numeric fields.
    def _fmt(value, spec):
        return format(value, spec) if value is not None else "N/A"

    progress = metrics.get("progress", 0)

    # Status text and colour class depend on whether training is active.
    if metrics.get("training_active", False):
        status_label, status_class = "Training Active", "text-success"
    else:
        status_label, status_class = "Training Inactive", "text-warning"

    return {
        "model-training-status": html.Span(status_label, className=status_class),
        "training-progress-bar": progress * 100,
        "training-progress-text": f"{progress:.1%}",
        "training-loss-value": _fmt(metrics.get("loss"), ".4f"),
        "training-reward-value": _fmt(metrics.get("reward"), ".4f"),
        "training-state-quality": _fmt(metrics.get("state_quality"), ".1%"),
    }

View File

@@ -0,0 +1,203 @@
#!/usr/bin/env python3
"""
TensorBoard Integration for Dashboard
This module provides integration between the trading dashboard and TensorBoard,
allowing training metrics to be visualized in real-time.
"""
import os
import sys
import subprocess
import threading
import time
import logging
import webbrowser
from pathlib import Path
from typing import Optional, Dict, Any
logger = logging.getLogger(__name__)
class TensorBoardIntegration:
    """
    TensorBoard integration for dashboard

    Provides methods to start a TensorBoard server as a subprocess, monitor
    its output, and expose basic status/metrics to the dashboard.
    """

    def __init__(self, log_dir: str = "runs", port: int = 6006):
        """
        Initialize TensorBoard integration

        Args:
            log_dir: Directory containing TensorBoard logs
            port: Port to run TensorBoard on
        """
        self.log_dir = log_dir
        self.port = port
        self.process = None  # subprocess.Popen handle once started
        self.url = f"http://localhost:{port}"
        self.is_running = False
        self.latest_metrics = {}
        # Create log directory if it doesn't exist
        os.makedirs(log_dir, exist_ok=True)

    def start_tensorboard(self, open_browser: bool = False) -> bool:
        """
        Start TensorBoard server in a separate process

        Args:
            open_browser: Whether to open browser automatically

        Returns:
            bool: True if TensorBoard was started successfully
        """
        if self.is_running:
            logger.info("TensorBoard is already running")
            return True
        try:
            # Check if TensorBoard is available
            try:
                import tensorboard
                logger.info(f"TensorBoard version {tensorboard.__version__} available")
            except ImportError:
                logger.warning("TensorBoard not installed. Install with: pip install tensorboard")
                return False

            # Check if log directory exists and has content
            log_dir_path = Path(self.log_dir)
            if not log_dir_path.exists():
                logger.warning(f"Log directory {self.log_dir} does not exist")
                os.makedirs(self.log_dir, exist_ok=True)
                logger.info(f"Created log directory {self.log_dir}")

            # Start TensorBoard process
            cmd = [
                sys.executable,
                "-m",
                "tensorboard.main",
                "--logdir", self.log_dir,
                "--port", str(self.port),
                "--reload_interval", "5",  # Reload data every 5 seconds
                "--reload_multifile", "true"  # Better handling of multiple log files
            ]
            logger.info(f"Starting TensorBoard: {' '.join(cmd)}")

            # BUGFIX: merge stderr into stdout so the monitor thread drains a
            # single pipe. Previously stderr had its own PIPE that was never
            # read, so a chatty TensorBoard could fill the OS pipe buffer and
            # block.
            self.process = subprocess.Popen(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True
            )

            # Wait a moment for TensorBoard to start
            time.sleep(2)

            # Check if process is running
            if self.process.poll() is None:
                self.is_running = True
                logger.info(f"TensorBoard started at {self.url}")

                # Open browser if requested
                if open_browser:
                    try:
                        webbrowser.open(self.url)
                        logger.info("Browser opened automatically")
                    except Exception as e:
                        logger.warning(f"Could not open browser: {e}")

                # Start monitoring thread (daemon so it never blocks shutdown)
                threading.Thread(target=self._monitor_process, daemon=True).start()
                return True
            else:
                # stderr is merged into stdout, so the combined output is the
                # first element; the second is always None here.
                output, _ = self.process.communicate()
                logger.error(f"TensorBoard failed to start: {output}")
                return False
        except Exception as e:
            logger.error(f"Error starting TensorBoard: {e}")
            return False

    def _monitor_process(self):
        """Monitor the TensorBoard process and log its combined output."""
        try:
            # readline blocks until output or EOF, so no polling loop or
            # sleep is needed; iteration ends when the process closes the
            # pipe (i.e. when it exits).
            if self.process and self.process.stdout:
                for line in iter(self.process.stdout.readline, ''):
                    line = line.strip()
                    if line:
                        logger.debug(f"TensorBoard: {line}")
            # Process has ended
            self.is_running = False
            logger.info("TensorBoard process has ended")
        except Exception as e:
            logger.error(f"Error monitoring TensorBoard process: {e}")

    def stop_tensorboard(self):
        """Stop TensorBoard server"""
        if self.process and self.process.poll() is None:
            try:
                self.process.terminate()
                self.process.wait(timeout=5)
                logger.info("TensorBoard stopped")
            except subprocess.TimeoutExpired:
                # Graceful terminate failed; force-kill as last resort.
                self.process.kill()
                logger.warning("TensorBoard process killed after timeout")
            except Exception as e:
                logger.error(f"Error stopping TensorBoard: {e}")
        self.is_running = False

    def get_tensorboard_url(self) -> str:
        """Get TensorBoard URL"""
        return self.url

    def is_tensorboard_running(self) -> bool:
        """Check if TensorBoard is running (based on the live process state)"""
        if self.process:
            return self.process.poll() is None
        return False

    def get_latest_metrics(self) -> Dict[str, Any]:
        """
        Get latest training metrics from TensorBoard

        This is a placeholder - in a real implementation, you would
        parse TensorBoard event files to extract metrics
        """
        # In a real implementation, you would parse TensorBoard event files
        # For now, return placeholder data
        return {
            "training_active": self.is_running,
            "tensorboard_url": self.url,
            "metrics_available": self.is_running
        }
# Module-level singleton holder
_tensorboard_integration = None


def get_tensorboard_integration(log_dir: str = "runs", port: int = 6006) -> TensorBoardIntegration:
    """
    Get TensorBoard integration singleton instance

    The log_dir/port arguments only take effect on the first call, which
    constructs the shared instance; later calls return the existing one.

    Args:
        log_dir: Directory containing TensorBoard logs
        port: Port to run TensorBoard on

    Returns:
        TensorBoardIntegration: Singleton instance
    """
    global _tensorboard_integration
    instance = _tensorboard_integration
    if instance is None:
        instance = TensorBoardIntegration(log_dir, port)
        _tensorboard_integration = instance
    return instance