UI: fix models info
@@ -50,7 +50,7 @@ exchanges:
   bybit:
     enabled: true
     test_mode: false # Use mainnet (your credentials are for live trading)
-    trading_mode: "live" # simulation, testnet, live
+    trading_mode: "simulation" # simulation, testnet, live
     supported_symbols: ["BTCUSDT", "ETHUSDT"] # Bybit perpetual format
     base_position_percent: 5.0
     max_position_percent: 20.0
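For context, a minimal sketch of how a config block like this might be loaded and gated in Python; the file name config.yaml and the guard logic are assumptions for illustration, not taken from this repository:

import yaml  # PyYAML

# Load the exchange settings; "config.yaml" is an assumed file name.
with open("config.yaml") as f:
    cfg = yaml.safe_load(f)

bybit = cfg["exchanges"]["bybit"]

# Only allow real orders when the config explicitly asks for live trading.
if bybit["enabled"] and bybit["trading_mode"] == "live" and not bybit["test_mode"]:
    print(f"LIVE trading enabled for {bybit['supported_symbols']}")
else:
    print(f"Running in {bybit['trading_mode']} mode - no real orders will be sent")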
@@ -1137,24 +1137,29 @@ class CleanTradingDashboard:
             [Input('slow-interval-component', 'n_intervals')] # OPTIMIZED: Move to 10s interval
         )
         def update_training_metrics(n):
-            # Get toggle states from orchestrator
-            toggle_states = {}
-            if self.orchestrator:
-                for model_name in ["dqn", "cnn", "cob_rl", "decision_fusion"]:
-                    toggle_states[model_name] = self.orchestrator.get_model_toggle_state(model_name)
-            else:
-                # Fallback to dashboard state
-                toggle_states = {
-                    "dqn": {"inference_enabled": self.dqn_inference_enabled, "training_enabled": self.dqn_training_enabled},
-                    "cnn": {"inference_enabled": self.cnn_inference_enabled, "training_enabled": self.cnn_training_enabled},
-                    "cob_rl": {"inference_enabled": True, "training_enabled": True},
-                    "decision_fusion": {"inference_enabled": True, "training_enabled": True}
-                }
             """Update training metrics"""
             try:
+                # Get toggle states from orchestrator
+                toggle_states = {}
+                if self.orchestrator:
+                    for model_name in ["dqn", "cnn", "cob_rl", "decision_fusion"]:
+                        toggle_states[model_name] = self.orchestrator.get_model_toggle_state(model_name)
+                else:
+                    # Fallback to dashboard state
+                    toggle_states = {
+                        "dqn": {"inference_enabled": self.dqn_inference_enabled, "training_enabled": self.dqn_training_enabled},
+                        "cnn": {"inference_enabled": self.cnn_inference_enabled, "training_enabled": self.cnn_training_enabled},
+                        "cob_rl": {"inference_enabled": True, "training_enabled": True},
+                        "decision_fusion": {"inference_enabled": True, "training_enabled": True}
+                    }
                 # Now using slow-interval-component (10s) - no batching needed

-                metrics_data = self._get_training_metrics()
+                metrics_data = self._get_training_metrics(toggle_states)
+                logger.debug(f"update_training_metrics callback: got metrics_data type={type(metrics_data)}")
+                if metrics_data and isinstance(metrics_data, dict):
+                    logger.debug(f"Metrics data keys: {list(metrics_data.keys())}")
+                    if 'loaded_models' in metrics_data:
+                        logger.debug(f"Loaded models count: {len(metrics_data['loaded_models'])}")
                 return self.component_manager.format_training_metrics(metrics_data)
             except PreventUpdate:
                 raise
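As a standalone illustration of the toggle-state lookup the callback now performs inside its try block, here is a minimal sketch; MockOrchestrator and its return values are assumptions for demonstration, not the repository's orchestrator:

from typing import Dict

MODEL_NAMES = ["dqn", "cnn", "cob_rl", "decision_fusion"]

class MockOrchestrator:
    """Stand-in for the real orchestrator (assumed interface)."""
    def get_model_toggle_state(self, model_name: str) -> Dict[str, bool]:
        # A real orchestrator would return the per-model UI toggle state.
        return {"inference_enabled": True, "training_enabled": model_name != "cob_rl"}

def collect_toggle_states(orchestrator=None) -> Dict[str, Dict[str, bool]]:
    """Ask the orchestrator for each model's toggles, else fall back to defaults."""
    if orchestrator:
        return {name: orchestrator.get_model_toggle_state(name) for name in MODEL_NAMES}
    # Fallback mirrors the dashboard's hard-coded defaults.
    return {name: {"inference_enabled": True, "training_enabled": True} for name in MODEL_NAMES}

print(collect_toggle_states(MockOrchestrator()))
print(collect_toggle_states(None))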
@@ -3291,7 +3296,7 @@ class CleanTradingDashboard:
             'last_training_loss': '0.000000'
         }

-    def _get_training_metrics(self) -> Dict:
+    def _get_training_metrics(self, toggle_states: Dict = None) -> Dict:
         """Get training metrics from unified orchestrator - using orchestrator as SSOT"""
         try:
             metrics = {}
@@ -3305,12 +3310,14 @@ class CleanTradingDashboard:
            if self.orchestrator and hasattr(self.orchestrator, 'get_model_states'):
                try:
                    model_states = self.orchestrator.get_model_states()
+                   logger.debug(f"Retrieved model states from orchestrator: {model_states}")
                except Exception as e:
-                   logger.debug(f"Error getting model states from orchestrator: {e}")
+                   logger.error(f"Error getting model states from orchestrator: {e}")
                    model_states = None

            # Fallback if orchestrator not available or returns None
            if model_states is None:
+               logger.warning("No model states available from orchestrator, using fallback")
                # FIXED: No longer using hardcoded placeholder loss values
                # Dashboard should show "No Data" or actual training status instead
                model_states = {
@@ -3340,6 +3347,23 @@ class CleanTradingDashboard:
            # Get enhanced training statistics if available
            enhanced_training_stats = self._get_enhanced_training_stats()

+           # DEBUG: Check if orchestrator has model statistics
+           if self.orchestrator:
+               try:
+                   all_stats = self.orchestrator.get_model_statistics()
+                   logger.debug(f"Orchestrator model statistics: {all_stats}")
+               except Exception as e:
+                   logger.debug(f"Error getting orchestrator model statistics: {e}")
+
+           # Ensure toggle_states are available
+           if toggle_states is None:
+               toggle_states = {
+                   "dqn": {"inference_enabled": True, "training_enabled": True},
+                   "cnn": {"inference_enabled": True, "training_enabled": True},
+                   "cob_rl": {"inference_enabled": True, "training_enabled": True},
+                   "decision_fusion": {"inference_enabled": True, "training_enabled": True}
+               }
+
            # Helper function to safely calculate improvement percentage
            def safe_improvement_calc(initial, current, default_improvement=0.0):
                try:
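The new _get_training_metrics signature defaults toggle_states to None and rebuilds the dict inside the function; below is a short sketch of that pattern under illustrative names (not the repository's code), which avoids sharing one mutable default dict across calls:

from typing import Dict, Optional

def get_training_metrics(toggle_states: Optional[Dict] = None) -> Dict:
    # Defaulting to None instead of a dict literal avoids the classic
    # mutable-default pitfall where a single dict is shared across calls.
    if toggle_states is None:
        toggle_states = {
            name: {"inference_enabled": True, "training_enabled": True}
            for name in ("dqn", "cnn", "cob_rl", "decision_fusion")
        }
    return {"toggle_states": toggle_states, "loaded_models": {}}

print(get_training_metrics())  # falls back to permissive defaults
print(get_training_metrics({"dqn": {"inference_enabled": False, "training_enabled": False}}))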
@@ -3844,6 +3868,15 @@ class CleanTradingDashboard:
            # Add enhanced training statistics
            metrics['enhanced_training_stats'] = enhanced_training_stats

+           # DEBUG: Log what we're returning
+           models_count = len(metrics.get('loaded_models', {}))
+           logger.info(f"Training metrics being returned: {models_count} models loaded")
+           if models_count == 0:
+               logger.warning("No models in loaded_models!")
+               logger.warning(f"Metrics keys: {list(metrics.keys())}")
+           for model_name, model_info in metrics.get('loaded_models', {}).items():
+               logger.info(f"Model {model_name}: active={model_info.get('active', False)}, checkpoint_loaded={model_info.get('checkpoint_loaded', False)}")
+
            return metrics

        except Exception as e:
@@ -713,7 +713,15 @@ class DashboardComponentManager:
     def format_training_metrics(self, metrics_data):
         """Format training metrics for display - Enhanced with loaded models"""
         try:
+            # DEBUG: Log what we're receiving
+            logger.debug(f"format_training_metrics received: {type(metrics_data)}")
+            if metrics_data:
+                logger.debug(f"Metrics keys: {list(metrics_data.keys()) if isinstance(metrics_data, dict) else 'Not a dict'}")
+                if isinstance(metrics_data, dict) and 'loaded_models' in metrics_data:
+                    logger.debug(f"Loaded models: {list(metrics_data['loaded_models'].keys())}")
+
             if not metrics_data or 'error' in metrics_data:
+                logger.warning(f"No training data or error in metrics_data: {metrics_data}")
                 return [html.P("No training data", className="text-muted small")]

             content = []