LR training wip
@@ -613,7 +613,11 @@ class TradingOrchestrator:
         # CRITICAL: Initialize checkpoint manager for saving training progress
         self.checkpoint_manager = None
         self.training_iterations = 0  # Track training iterations for periodic saves
-        self._initialize_checkpoint_manager()
+        try:
+            self._initialize_checkpoint_manager()
+        except Exception as e:
+            logger.error(f"Failed to initialize checkpoint manager in __init__: {e}")
+            self.checkpoint_manager = None
 
         # Initialize models, COB integration, and training system
         self._initialize_ml_models()
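The new try/except means a failure inside `_initialize_checkpoint_manager()` no longer aborts `TradingOrchestrator` construction: the manager simply stays `None` and the orchestrator runs without periodic checkpoint saves. A minimal sketch of the pattern, assuming a hypothetical `CheckpointManager` class (the real implementation is not shown in this diff):

import logging

logger = logging.getLogger(__name__)

class CheckpointManager:
    """Hypothetical stand-in for the project's real checkpoint manager."""
    def __init__(self, checkpoint_dir: str = "models/checkpoints"):
        self.checkpoint_dir = checkpoint_dir

class TradingOrchestrator:
    def __init__(self):
        # CRITICAL: Initialize checkpoint manager for saving training progress
        self.checkpoint_manager = None
        self.training_iterations = 0  # Track training iterations for periodic saves
        try:
            self._initialize_checkpoint_manager()
        except Exception as e:
            # A checkpoint-manager failure must not abort construction;
            # fall back to None and continue without periodic saves.
            logger.error(f"Failed to initialize checkpoint manager in __init__: {e}")
            self.checkpoint_manager = None

    def _initialize_checkpoint_manager(self):
        # Assumed body: construct the manager; any exception (bad path,
        # missing dependency) propagates to the handler in __init__.
        self.checkpoint_manager = CheckpointManager()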
@@ -828,7 +832,7 @@ class TradingOrchestrator:
         # Try to load best checkpoint
         checkpoint_loaded = False
         try:
-            if self.checkpoint_manager:
+            if hasattr(self, 'checkpoint_manager') and self.checkpoint_manager:
                 checkpoint_path, checkpoint_metadata = self.checkpoint_manager.load_best_checkpoint("transformer")
                 if checkpoint_path and checkpoint_metadata:
                     # Load the checkpoint
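The second hunk hardens the load path: `hasattr()` covers the case where `__init__` raised before the attribute was ever assigned, while the truthiness check covers the `None` fallback set in the first hunk. A sketch of the guarded load, assuming only the `(path, metadata)` return shape that `load_best_checkpoint("transformer")` shows above; the helper name and everything past the guard are hypothetical:

import logging

logger = logging.getLogger(__name__)

def load_best_transformer_checkpoint(orchestrator) -> bool:
    """Hypothetical helper mirroring the guarded load path in the diff."""
    checkpoint_loaded = False
    try:
        # hasattr() protects against __init__ having raised before the
        # attribute was assigned; the truthiness check catches the None
        # fallback set when initialization failed.
        if hasattr(orchestrator, 'checkpoint_manager') and orchestrator.checkpoint_manager:
            checkpoint_path, checkpoint_metadata = (
                orchestrator.checkpoint_manager.load_best_checkpoint("transformer")
            )
            if checkpoint_path and checkpoint_metadata:
                # Load the checkpoint; actual weight restoration from
                # checkpoint_path is elided in the diff.
                checkpoint_loaded = True
    except Exception as e:
        logger.error(f"Error loading best transformer checkpoint: {e}")
    return checkpoint_loaded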