LR training wip

Author: Dobromir Popov
Date:   2025-12-08 21:52:26 +02:00
parent 1ab1c02889
commit 08ee2b6a3a
3 changed files with 61 additions and 31 deletions


@@ -613,7 +613,11 @@ class TradingOrchestrator:
         # CRITICAL: Initialize checkpoint manager for saving training progress
         self.checkpoint_manager = None
         self.training_iterations = 0  # Track training iterations for periodic saves
-        self._initialize_checkpoint_manager()
+        try:
+            self._initialize_checkpoint_manager()
+        except Exception as e:
+            logger.error(f"Failed to initialize checkpoint manager in __init__: {e}")
+            self.checkpoint_manager = None
 
         # Initialize models, COB integration, and training system
         self._initialize_ml_models()
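
The first hunk turns a bare initializer call into a guarded one, so a failure in checkpoint-manager setup degrades functionality instead of killing construction. Below is a minimal, self-contained sketch of that pattern; Orchestrator and the stand-in initializer body are hypothetical, not the project's real TradingOrchestrator code.

import logging

logger = logging.getLogger(__name__)

class Orchestrator:
    def __init__(self):
        # Default to None so downstream code can probe for availability.
        self.checkpoint_manager = None
        try:
            self._initialize_checkpoint_manager()
        except Exception as e:
            # Log and continue: the orchestrator still comes up, just
            # without checkpoint persistence.
            logger.error(f"Failed to initialize checkpoint manager in __init__: {e}")
            self.checkpoint_manager = None

    def _initialize_checkpoint_manager(self):
        # Hypothetical stand-in; the real initializer might raise on a
        # missing checkpoint directory, bad config, and so on.
        raise RuntimeError("checkpoint store unavailable")

orch = Orchestrator()  # constructs despite the simulated failure
assert orch.checkpoint_manager is None

The design choice: a broken checkpoint store should cost checkpoint saving, not the whole trading orchestrator.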
@@ -828,7 +832,7 @@
         # Try to load best checkpoint
         checkpoint_loaded = False
         try:
-            if self.checkpoint_manager:
+            if hasattr(self, 'checkpoint_manager') and self.checkpoint_manager:
                 checkpoint_path, checkpoint_metadata = self.checkpoint_manager.load_best_checkpoint("transformer")
                 if checkpoint_path and checkpoint_metadata:
                     # Load the checkpoint
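
The second hunk hardens the truthiness test: if __init__ aborted before the attribute assignment ever ran, a plain "if self.checkpoint_manager:" would itself raise AttributeError. A small self-contained illustration of the failure mode and the guard; HalfConstructed is a hypothetical class for demonstration only.

class HalfConstructed:
    def __init__(self):
        raise RuntimeError("failed before checkpoint_manager was assigned")

# Simulate a partially built instance: allocated, but __init__ never ran.
obj = object.__new__(HalfConstructed)

# Unsafe: "if obj.checkpoint_manager:" would raise AttributeError here.
# Safe: the guard used in the diff.
if hasattr(obj, 'checkpoint_manager') and obj.checkpoint_manager:
    print("checkpoint manager available")
else:
    print("no checkpoint manager; skipping checkpoint load")

An equivalent one-expression form is getattr(self, 'checkpoint_manager', None), which folds the existence check and the truthiness check into a single lookup.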