TZ wip, UI model stats fix
.vscode/tasks.json (vendored, 3 lines changed)

@@ -6,7 +6,8 @@
             "type": "shell",
             "command": "powershell",
             "args": [
-                "-ExecutionPolicy", "Bypass",
+                "-ExecutionPolicy",
+                "Bypass",
                 "-File",
                 "scripts/kill_stale_processes.ps1"
             ],

@@ -1515,9 +1515,9 @@ class DataProvider:

         # Ensure gap_start has same timezone as end_time for comparison
         if gap_start.tzinfo is None:
-            gap_start = sofia_tz.localize(gap_start)
-        elif gap_start.tzinfo != sofia_tz:
-            gap_start = gap_start.astimezone(sofia_tz)
+            gap_start = SOFIA_TZ.localize(gap_start)
+        elif gap_start.tzinfo != SOFIA_TZ:
+            gap_start = gap_start.astimezone(SOFIA_TZ)

         if gap_start < end_time:
             # Need to fill gap from cache_end to now

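The point of this guard is that Python refuses to compare naive and timezone-aware datetimes; the hunk only swaps the local sofia_tz variable for the module-level SOFIA_TZ constant. A minimal sketch of the failure mode and the fix, not part of the commit, assuming pytz and the Europe/Sofia zone:

from datetime import datetime, timedelta
import pytz

SOFIA_TZ = pytz.timezone('Europe/Sofia')

end_time = datetime.now(SOFIA_TZ)                                      # timezone-aware
gap_start = datetime.now(SOFIA_TZ).replace(tzinfo=None) - timedelta(minutes=5)  # naive wall-clock time

# gap_start < end_time here would raise:
# TypeError: can't compare offset-naive and offset-aware datetimes

if gap_start.tzinfo is None:
    gap_start = SOFIA_TZ.localize(gap_start)   # now aware, comparison is valid
print(gap_start < end_time)                    # True
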
@@ -73,6 +73,20 @@ from core.standardized_data_provider import StandardizedDataProvider
 from core.orchestrator import TradingOrchestrator
 from core.trading_executor import TradingExecutor

+# Import timezone utilities for Sofia timezone handling
+try:
+    from utils.timezone_utils import SOFIA_TZ, to_sofia, now_system
+except ImportError:
+    # Fallback if timezone utils not available
+    import pytz
+    SOFIA_TZ = pytz.timezone('Europe/Sofia')
+    def to_sofia(dt):
+        if dt.tzinfo is None:
+            dt = pytz.UTC.localize(dt)
+        return dt.astimezone(SOFIA_TZ)
+    def now_system():
+        return datetime.now(SOFIA_TZ)
+
 # Import standardized models
 from NN.models.standardized_cnn import StandardizedCNN

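A short usage sketch of the fallback helpers above, not part of the commit; it mirrors the hunk's pytz-based definitions, where naive inputs are treated as UTC:

from datetime import datetime
import pytz

SOFIA_TZ = pytz.timezone('Europe/Sofia')

def to_sofia(dt):
    # Naive datetimes are assumed to be UTC before conversion.
    if dt.tzinfo is None:
        dt = pytz.UTC.localize(dt)
    return dt.astimezone(SOFIA_TZ)

print(to_sofia(datetime(2024, 1, 15, 12, 0)))   # 2024-01-15 14:00:00+02:00 (Sofia is UTC+2 in winter)
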
@@ -668,39 +682,41 @@ class CleanTradingDashboard:
             return {'loaded_models': {}, 'total_models': 0, 'system_status': 'ERROR'}

     def _convert_utc_to_local(self, utc_timestamp):
-        """Convert UTC timestamp to local timezone for display"""
+        """Convert UTC timestamp to Sofia timezone for display"""
         try:
             if utc_timestamp is None:
-                return datetime.now()
+                return now_system()

             # Handle different input types
             if isinstance(utc_timestamp, str):
                 try:
                     utc_timestamp = pd.to_datetime(utc_timestamp)
                 except:
-                    return datetime.now()
+                    return now_system()

             # If it's already a datetime object
             if isinstance(utc_timestamp, datetime):
-                # If it has timezone info and is UTC, convert to local
+                # If it has timezone info and is UTC, convert to Sofia timezone
                 if utc_timestamp.tzinfo is not None:
                     if str(utc_timestamp.tzinfo) == 'UTC':
-                        # Convert UTC to local timezone
-                        local_timestamp = utc_timestamp.replace(tzinfo=timezone.utc).astimezone()
-                        return local_timestamp.replace(tzinfo=None)  # Remove timezone info for display
+                        # Convert UTC to Sofia timezone
+                        sofia_timestamp = utc_timestamp.replace(tzinfo=timezone.utc).astimezone(SOFIA_TZ)
+                        return sofia_timestamp.replace(tzinfo=None)  # Remove timezone info for display
                     else:
-                        # Already has timezone, convert to local
-                        return utc_timestamp.astimezone().replace(tzinfo=None)
+                        # Already has timezone, convert to Sofia
+                        return utc_timestamp.astimezone(SOFIA_TZ).replace(tzinfo=None)
                 else:
-                    # No timezone info, assume it's already local
-                    return utc_timestamp
+                    # No timezone info, assume it's UTC and convert to Sofia
+                    utc_timestamp = utc_timestamp.replace(tzinfo=timezone.utc)
+                    sofia_timestamp = utc_timestamp.astimezone(SOFIA_TZ)
+                    return sofia_timestamp.replace(tzinfo=None)

             # Fallback
-            return datetime.now()
+            return now_system()

         except Exception as e:
-            logger.debug(f"Error converting UTC to local time: {e}")
-            return datetime.now()
+            logger.debug(f"Error converting UTC to Sofia time: {e}")
+            return now_system()

     def _safe_strftime(self, timestamp_val, format_str='%H:%M:%S'):
         """Safely format timestamp, handling both string and datetime objects"""

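The behavioural change in _convert_utc_to_local is that naive timestamps are now treated as UTC and converted to Sofia time, instead of being returned unchanged. A standalone sketch of that path, not part of the commit, assuming pytz for the Sofia zone:

from datetime import datetime, timezone
import pytz

SOFIA_TZ = pytz.timezone('Europe/Sofia')

def convert_utc_to_sofia(ts: datetime) -> datetime:
    if ts.tzinfo is None:
        ts = ts.replace(tzinfo=timezone.utc)              # assume UTC when naive
    return ts.astimezone(SOFIA_TZ).replace(tzinfo=None)   # drop tzinfo for display

print(convert_utc_to_sofia(datetime(2024, 7, 1, 10, 0)))  # 2024-07-01 13:00:00 (Sofia is UTC+3 in summer)
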
@@ -3394,11 +3410,14 @@ class CleanTradingDashboard:
             # Get enhanced training statistics if available
             enhanced_training_stats = self._get_enhanced_training_stats()

-            # DEBUG: Check if orchestrator has model statistics
+            # Get real model statistics from orchestrator
+            orchestrator_stats = {}
             if self.orchestrator:
                 try:
                     all_stats = self.orchestrator.get_model_statistics()
-                    logger.debug(f"Orchestrator model statistics: {all_stats}")
+                    if all_stats:
+                        orchestrator_stats = all_stats
+                        logger.debug(f"Retrieved orchestrator model statistics for {len(all_stats)} models")
                 except Exception as e:
                     logger.debug(f"Error getting orchestrator model statistics: {e}")

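The per-model statistics objects returned by get_model_statistics() are read by attribute throughout the rest of this diff. A hypothetical sketch of their shape, inferred only from those attribute accesses and not from the orchestrator's actual definition:

from dataclasses import dataclass, field
from datetime import datetime
from collections import deque
from typing import Optional, Deque, Dict, Any

@dataclass
class ModelStatistics:
    current_loss: Optional[float] = None
    best_loss: Optional[float] = None
    accuracy: Optional[float] = None
    total_inferences: int = 0
    total_trainings: int = 0
    inference_rate_per_minute: float = 0.0
    training_rate_per_minute: float = 0.0
    average_inference_time_ms: float = 0.0
    average_training_time_ms: float = 0.0
    last_inference_time: Optional[datetime] = None
    last_training_time: Optional[datetime] = None
    predictions_history: Deque[Dict[str, Any]] = field(default_factory=lambda: deque(maxlen=100))  # maxlen is illustrative

# The dashboard then expects a mapping such as
# {'dqn_agent': ModelStatistics(...), 'enhanced_cnn': ModelStatistics(...)}.
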
@@ -3486,7 +3505,16 @@ class CleanTradingDashboard:
             dqn_active = dqn_checkpoint_loaded and dqn_inference_enabled and dqn_model_available
             dqn_prediction_count = len(self.recent_decisions) if signal_generation_active else 0

-            # Get latest DQN prediction
+            # Get latest DQN prediction from orchestrator statistics
+            dqn_stats = orchestrator_stats.get('dqn_agent')
+            if dqn_stats and dqn_stats.predictions_history:
+                # Get the most recent prediction
+                latest_pred = list(dqn_stats.predictions_history)[-1]
+                last_action = latest_pred.get('action', 'NONE')
+                last_confidence = latest_pred.get('confidence', 0.0)
+                last_timestamp = latest_pred.get('timestamp', datetime.now()).strftime('%H:%M:%S') if hasattr(latest_pred.get('timestamp'), 'strftime') else datetime.now().strftime('%H:%M:%S')
+            else:
+                # Fallback to dashboard predictions
                 dqn_latest = latest_predictions.get('dqn', {})
                 if dqn_latest:
                     last_action = dqn_latest.get('action', 'NONE')

@@ -3509,6 +3537,18 @@ class CleanTradingDashboard:
                     last_confidence = 0.68
                     last_timestamp = datetime.now().strftime('%H:%M:%S')

+            # Get real DQN statistics from orchestrator
+            dqn_stats = orchestrator_stats.get('dqn_agent')
+            dqn_current_loss = dqn_stats.current_loss if dqn_stats else None
+            dqn_best_loss = dqn_stats.best_loss if dqn_stats else None
+            dqn_accuracy = dqn_stats.accuracy if dqn_stats else None
+            dqn_total_inferences = dqn_stats.total_inferences if dqn_stats else 0
+            dqn_total_trainings = dqn_stats.total_trainings if dqn_stats else 0
+            dqn_inference_rate = dqn_stats.inference_rate_per_minute if dqn_stats else 0.0
+            dqn_training_rate = dqn_stats.training_rate_per_minute if dqn_stats else 0.0
+            dqn_avg_inference_time = dqn_stats.average_inference_time_ms if dqn_stats else 0.0
+            dqn_avg_training_time = dqn_stats.average_training_time_ms if dqn_stats else 0.0
+
             dqn_model_info = {
                 'active': dqn_active,
                 'parameters': 5000000,  # ~5M params for DQN

@@ -3516,21 +3556,22 @@ class CleanTradingDashboard:
                     'timestamp': last_timestamp,
                     'action': last_action,
                     'confidence': last_confidence,
-                    'type': dqn_latest.get('type', 'dqn_signal') if dqn_latest else 'dqn_signal'
+                    'type': 'dqn_signal'
                 },
-                # FIXED: Get REAL loss values from orchestrator model, not placeholders
-                'loss_5ma': self._get_real_model_loss('dqn'),
+                # REAL: Get actual loss values from orchestrator statistics
+                'loss_5ma': dqn_current_loss,
                 'initial_loss': dqn_state.get('initial_loss'),  # No fallback - show None if unknown
-                'best_loss': self._get_real_best_loss('dqn'),
+                'best_loss': dqn_best_loss,
+                'accuracy': dqn_accuracy,
                 'improvement': safe_improvement_calc(
                     dqn_state.get('initial_loss'),
-                    self._get_real_model_loss('dqn'),
+                    dqn_current_loss,
                     0.0  # No synthetic default improvement
                 ),
                 'checkpoint_loaded': dqn_checkpoint_loaded,
                 'model_type': 'DQN',
                 'description': 'Deep Q-Network Agent (Data Bus Input)',
-                'prediction_count': dqn_prediction_count,
+                'prediction_count': dqn_total_inferences,
                 'epsilon': 1.0,
                 'training_evidence': dqn_training_status['evidence'],
                 'training_steps': dqn_training_status['training_steps'],

@@ -3548,14 +3589,16 @@ class CleanTradingDashboard:
                     'created_at': dqn_state.get('created_at', 'Unknown'),
                     'performance_score': dqn_state.get('performance_score', 0.0)
                 },
-                # NEW: Timing information
+                # REAL: Timing information from orchestrator
                 'timing': {
-                    'last_inference': dqn_timing['last_inference'].strftime('%H:%M:%S') if dqn_timing['last_inference'] else 'None',
-                    'last_training': dqn_timing['last_training'].strftime('%H:%M:%S') if dqn_timing['last_training'] else 'None',
-                    'inferences_per_second': f"{dqn_timing['inferences_per_second']:.2f}",
-                    'predictions_24h': dqn_timing['prediction_count_24h'],
-                    'average_inference_time_ms': f"{dqn_timing.get('average_inference_time_ms', 0):.1f}",
-                    'average_training_time_ms': f"{dqn_timing.get('average_training_time_ms', 0):.1f}"
+                    'last_inference': dqn_stats.last_inference_time.strftime('%H:%M:%S') if dqn_stats and dqn_stats.last_inference_time else 'None',
+                    'last_training': dqn_stats.last_training_time.strftime('%H:%M:%S') if dqn_stats and dqn_stats.last_training_time else 'None',
+                    'inferences_per_second': f"{dqn_inference_rate/60:.2f}",
+                    'trainings_per_second': f"{dqn_training_rate/60:.2f}",
+                    'predictions_24h': dqn_total_inferences,
+                    'trainings_24h': dqn_total_trainings,
+                    'average_inference_time_ms': f"{dqn_avg_inference_time:.1f}",
+                    'average_training_time_ms': f"{dqn_avg_training_time:.1f}"
                 },
                 # NEW: Performance metrics for split-second decisions
                 'performance': self.get_model_performance_metrics().get('dqn', {}),

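The new 'timing' entries divide the orchestrator's per-minute rates by 60 because the UI labels them per second. A trivial sketch of that conversion, not part of the commit, with a hypothetical helper name:

def per_minute_to_per_second_str(rate_per_minute: float) -> str:
    # Matches the f"{rate/60:.2f}" formatting used in the timing blocks above.
    return f"{rate_per_minute / 60:.2f}"

print(per_minute_to_per_second_str(12.0))  # '0.20'
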
@@ -3571,19 +3614,29 @@ class CleanTradingDashboard:
             }
             loaded_models['dqn'] = dqn_model_info

-            # 2. CNN Model Status - using enhanced CNN adapter data
+            # 2. CNN Model Status - using real orchestrator statistics
             cnn_state = model_states.get('cnn', {})
             cnn_timing = get_model_timing_info('CNN')

-            # Get enhanced CNN panel data with detailed metrics
-            cnn_panel_data = self._update_cnn_model_panel()
-            cnn_active = cnn_panel_data.get('status') not in ['NOT_AVAILABLE', 'ERROR', 'NOT_LOADED']
+            # Get real CNN statistics from orchestrator
+            cnn_stats = orchestrator_stats.get('enhanced_cnn')
+            cnn_active = cnn_stats is not None

-            # Use enhanced CNN data for display
+            # Get latest CNN prediction from orchestrator statistics
+            if cnn_stats and cnn_stats.predictions_history:
+                # Get the most recent prediction
+                latest_pred = list(cnn_stats.predictions_history)[-1]
+                cnn_action = latest_pred.get('action', 'PATTERN_ANALYSIS')
+                cnn_confidence = latest_pred.get('confidence', 0.0)
+                cnn_timestamp = latest_pred.get('timestamp', datetime.now()).strftime('%H:%M:%S') if hasattr(latest_pred.get('timestamp'), 'strftime') else datetime.now().strftime('%H:%M:%S')
+            else:
+                # Fallback to enhanced CNN panel data
+                cnn_panel_data = self._update_cnn_model_panel()
                 cnn_action = cnn_panel_data.get('suggested_action', 'PATTERN_ANALYSIS')
                 cnn_confidence = cnn_panel_data.get('confidence', 0.0)
                 cnn_timestamp = cnn_panel_data.get('last_inference_time', 'Never')
-            cnn_pivot_price = cnn_panel_data.get('pivot_price', 'N/A')
+
+            cnn_pivot_price = 'N/A'  # Will be updated from panel data if needed

             # Parse pivot price for prediction
             cnn_predicted_price = 0

@@ -3598,6 +3651,17 @@ class CleanTradingDashboard:
             cnn_inference_enabled = cnn_toggle_state.get("inference_enabled", True)
             cnn_training_enabled = cnn_toggle_state.get("training_enabled", True)

+            # Get real CNN statistics from orchestrator
+            cnn_current_loss = cnn_stats.current_loss if cnn_stats else None
+            cnn_best_loss = cnn_stats.best_loss if cnn_stats else None
+            cnn_accuracy = cnn_stats.accuracy if cnn_stats else None
+            cnn_total_inferences = cnn_stats.total_inferences if cnn_stats else 0
+            cnn_total_trainings = cnn_stats.total_trainings if cnn_stats else 0
+            cnn_inference_rate = cnn_stats.inference_rate_per_minute if cnn_stats else 0.0
+            cnn_training_rate = cnn_stats.training_rate_per_minute if cnn_stats else 0.0
+            cnn_avg_inference_time = cnn_stats.average_inference_time_ms if cnn_stats else 0.0
+            cnn_avg_training_time = cnn_stats.average_training_time_ms if cnn_stats else 0.0
+
             cnn_model_info = {
                 'active': cnn_active,
                 'parameters': 50000000,  # ~50M params

@@ -3609,25 +3673,27 @@ class CleanTradingDashboard:
                     'pivot_price': cnn_pivot_price,
                     'type': 'enhanced_cnn_pivot'
                 },
-                'loss_5ma': float(cnn_panel_data.get('last_training_loss', '0.0').replace('f', '')),
+                # REAL: Get actual loss values from orchestrator statistics
+                'loss_5ma': cnn_current_loss,
                 'initial_loss': cnn_state.get('initial_loss'),
-                'best_loss': cnn_state.get('best_loss'),
+                'best_loss': cnn_best_loss,
+                'accuracy': cnn_accuracy,
                 'improvement': safe_improvement_calc(
                     cnn_state.get('initial_loss'),
-                    float(cnn_panel_data.get('last_training_loss', '0.0').replace('f', '')),
+                    cnn_current_loss,
                     0.0
                 ),

                 # Enhanced timing metrics
                 'enhanced_timing': {
-                    'last_inference_time': cnn_panel_data.get('last_inference_time', 'Never'),
-                    'last_inference_duration': cnn_panel_data.get('last_inference_duration', '0.0ms'),
-                    'inference_count': cnn_panel_data.get('inference_count', 0),
-                    'inference_rate': cnn_panel_data.get('inference_rate', '0.00/s'),
-                    'last_training_time': cnn_panel_data.get('last_training_time', 'Never'),
-                    'last_training_duration': cnn_panel_data.get('last_training_duration', '0.0ms'),
-                    'training_count': cnn_panel_data.get('training_count', 0),
-                    'training_samples': cnn_panel_data.get('training_samples', 0)
+                    'last_inference_time': cnn_stats.last_inference_time.strftime('%H:%M:%S') if cnn_stats and cnn_stats.last_inference_time else 'Never',
+                    'last_inference_duration': f"{cnn_avg_inference_time:.1f}ms",
+                    'inference_count': cnn_total_inferences,
+                    'inference_rate': f"{cnn_inference_rate/60:.2f}/s",
+                    'last_training_time': cnn_stats.last_training_time.strftime('%H:%M:%S') if cnn_stats and cnn_stats.last_training_time else 'Never',
+                    'last_training_duration': f"{cnn_avg_training_time:.1f}ms",
+                    'training_count': cnn_total_trainings,
+                    'training_samples': cnn_total_trainings
                 },
                 'checkpoint_loaded': cnn_state.get('checkpoint_loaded', False),
                 'model_type': 'CNN',

@@ -3639,14 +3705,16 @@ class CleanTradingDashboard:
                     'created_at': cnn_state.get('created_at', 'Unknown'),
                     'performance_score': cnn_state.get('performance_score', 0.0)
                 },
-                # NEW: Timing information
+                # REAL: Timing information from orchestrator
                 'timing': {
-                    'last_inference': cnn_timing['last_inference'].strftime('%H:%M:%S') if cnn_timing['last_inference'] else 'None',
-                    'last_training': cnn_timing['last_training'].strftime('%H:%M:%S') if cnn_timing['last_training'] else 'None',
-                    'inferences_per_second': f"{cnn_timing['inferences_per_second']:.2f}",
-                    'predictions_24h': cnn_timing['prediction_count_24h'],
-                    'average_inference_time_ms': f"{cnn_timing.get('average_inference_time_ms', 0):.1f}",
-                    'average_training_time_ms': f"{cnn_timing.get('average_training_time_ms', 0):.1f}"
+                    'last_inference': cnn_stats.last_inference_time.strftime('%H:%M:%S') if cnn_stats and cnn_stats.last_inference_time else 'None',
+                    'last_training': cnn_stats.last_training_time.strftime('%H:%M:%S') if cnn_stats and cnn_stats.last_training_time else 'None',
+                    'inferences_per_second': f"{cnn_inference_rate/60:.2f}",
+                    'trainings_per_second': f"{cnn_training_rate/60:.2f}",
+                    'predictions_24h': cnn_total_inferences,
+                    'trainings_24h': cnn_total_trainings,
+                    'average_inference_time_ms': f"{cnn_avg_inference_time:.1f}",
+                    'average_training_time_ms': f"{cnn_avg_training_time:.1f}"
                 },
                 # NEW: Performance metrics for split-second decisions
                 'performance': self.get_model_performance_metrics().get('cnn', {}),

@@ -6362,75 +6430,34 @@ class CleanTradingDashboard:
             return default

     def _get_real_model_loss(self, model_name: str) -> Optional[float]:
-        """Get REAL current loss from the actual model, not placeholders"""
+        """Get REAL current loss from the orchestrator's model statistics"""
         try:
             if not self.orchestrator:
                 return None  # No orchestrator = no real data

-            if model_name == 'dqn' and hasattr(self.orchestrator, 'rl_agent') and self.orchestrator.rl_agent:
-                # Get real loss from DQN agent
-                agent = self.orchestrator.rl_agent
-                if hasattr(agent, 'losses') and len(agent.losses) > 0:
-                    # Average of last 50 losses for current loss
-                    recent_losses = agent.losses[-50:]
-                    return sum(recent_losses) / len(recent_losses)
-                elif hasattr(agent, 'current_loss') and agent.current_loss is not None:
-                    return float(agent.current_loss)
+            # Use the orchestrator's model statistics system
+            model_stats = self.orchestrator.get_model_statistics(model_name)
+            if model_stats:
+                return model_stats.current_loss

-            elif model_name == 'cnn' and hasattr(self.orchestrator, 'cnn_model') and self.orchestrator.cnn_model:
-                # Get real loss from CNN model
-                model = self.orchestrator.cnn_model
-                if hasattr(model, 'training_losses') and len(getattr(model, 'training_losses', [])) > 0:
-                    recent_losses = getattr(model, 'training_losses', [])[-50:]
-                    return sum(recent_losses) / len(recent_losses)
-                elif hasattr(model, 'current_loss') and model.current_loss is not None:
-                    return float(model.current_loss)
-
-            elif model_name == 'decision' and hasattr(self.orchestrator, 'decision_fusion_network'):
-                # Get real loss from decision fusion
-                if hasattr(self.orchestrator, 'fusion_training_data') and len(self.orchestrator.fusion_training_data) > 0:
-                    recent_losses = [entry['loss'] for entry in self.orchestrator.fusion_training_data[-50:]]
-                    if recent_losses:
-                        return sum(recent_losses) / len(recent_losses)
-
-            # Fallback to model states
-            model_states = self.orchestrator.get_model_states() if hasattr(self.orchestrator, 'get_model_states') else {}
-            state = model_states.get(model_name, {})
-            return state.get('current_loss')  # Return None if no real data
+            return None  # Return None if no real data

         except Exception as e:
             logger.debug(f"Error getting real loss for {model_name}: {e}")
             return None  # Return None instead of synthetic data

     def _get_real_best_loss(self, model_name: str) -> Optional[float]:
-        """Get REAL best loss from the actual model"""
+        """Get REAL best loss from the orchestrator's model statistics"""
         try:
             if not self.orchestrator:
                 return None  # No orchestrator = no real data

-            if model_name == 'dqn' and hasattr(self.orchestrator, 'rl_agent') and self.orchestrator.rl_agent:
-                agent = self.orchestrator.rl_agent
-                if hasattr(agent, 'best_loss') and agent.best_loss is not None:
-                    return float(agent.best_loss)
-                elif hasattr(agent, 'losses') and len(agent.losses) > 0:
-                    return min(agent.losses)
+            # Use the orchestrator's model statistics system
+            model_stats = self.orchestrator.get_model_statistics(model_name)
+            if model_stats:
+                return model_stats.best_loss

-            elif model_name == 'cnn' and hasattr(self.orchestrator, 'cnn_model') and self.orchestrator.cnn_model:
-                model = self.orchestrator.cnn_model
-                if hasattr(model, 'best_loss') and model.best_loss is not None:
-                    return float(model.best_loss)
-                elif hasattr(model, 'training_losses') and len(getattr(model, 'training_losses', [])) > 0:
-                    return min(getattr(model, 'training_losses', []))
-
-            elif model_name == 'decision' and hasattr(self.orchestrator, 'fusion_training_data'):
-                if len(self.orchestrator.fusion_training_data) > 0:
-                    all_losses = [entry['loss'] for entry in self.orchestrator.fusion_training_data]
-                    return min(all_losses) if all_losses else None
-
-            # Fallback to model states
-            model_states = self.orchestrator.get_model_states() if hasattr(self.orchestrator, 'get_model_states') else {}
-            state = model_states.get(model_name, {})
-            return state.get('best_loss')  # Return None if no real data
+            return None  # Return None if no real data

         except Exception as e:
             logger.debug(f"Error getting best loss for {model_name}: {e}")