live trading options
@@ -1051,6 +1051,12 @@ class TradingTransformerTrainer:
         # Move model to device
         self.model.to(self.device)
         logger.info(f"✅ Model moved to device: {self.device}")
+
+        # Log GPU info if available
+        if torch.cuda.is_available():
+            logger.info(f"  GPU: {torch.cuda.get_device_name(0)}")
+            logger.info(f"  GPU Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.2f} GB")
+
         # MEMORY OPTIMIZATION: Enable gradient checkpointing if configured
         # This trades 20% compute for 30-40% memory savings
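Note for reviewers: the MEMORY OPTIMIZATION comment above only announces the gradient-checkpointing feature; the enabling code sits outside this hunk. A minimal sketch of the usual plain-PyTorch pattern follows. The CheckpointedEncoder wrapper and use_checkpointing flag are illustrative assumptions, not code from this commit:

import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint

class CheckpointedEncoder(nn.Module):
    # Illustrative wrapper (not from this commit): recompute each layer's
    # activations during the backward pass instead of storing them,
    # trading extra forward compute for lower activation memory.
    def __init__(self, layers: nn.ModuleList, use_checkpointing: bool = True):
        super().__init__()
        self.layers = layers
        self.use_checkpointing = use_checkpointing

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        for layer in self.layers:
            if self.use_checkpointing and self.training:
                # use_reentrant=False selects the non-reentrant
                # implementation recommended by recent PyTorch releases.
                x = checkpoint(layer, x, use_reentrant=False)
            else:
                x = layer(x)
        return x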
@@ -1512,9 +1518,19 @@ class TradingTransformerTrainer:
                 del batch[key]
             del batch
 
-            # Clear CUDA cache
+            # Clear CUDA cache and log GPU memory usage
             if torch.cuda.is_available():
                 torch.cuda.empty_cache()
+
+                # Log GPU memory usage periodically (every 10 steps)
+                if not hasattr(self, '_step_counter'):
+                    self._step_counter = 0
+                self._step_counter += 1
+
+                if self._step_counter % 10 == 0:
+                    allocated = torch.cuda.memory_allocated() / 1024**2
+                    reserved = torch.cuda.memory_reserved() / 1024**2
+                    logger.debug(f"GPU Memory: {allocated:.1f}MB allocated, {reserved:.1f}MB reserved")
 
             return result
 
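Two notes on the new logging. torch.cuda.memory_allocated() reports memory currently held by live tensors, while torch.cuda.memory_reserved() reports what PyTorch's caching allocator has claimed from the driver, so reserved is always at least as large as allocated; a widening gap between the two suggests fragmentation rather than a leak. The hasattr-based lazy init works, but setting self._step_counter = 0 in __init__ would keep the hot path free of the attribute check, and calling torch.cuda.empty_cache() on every step can cost throughput because freed blocks must be re-requested from the driver. For reference, a standalone sketch of the same logging pattern; the log_gpu_memory helper name is illustrative, not from this commit:

import logging
import torch

logger = logging.getLogger(__name__)

def log_gpu_memory(step: int, every: int = 10) -> None:
    # Illustrative helper mirroring the diff: log the two allocator
    # counters once every `every` steps.
    if torch.cuda.is_available() and step % every == 0:
        allocated = torch.cuda.memory_allocated() / 1024**2  # MB held by live tensors
        reserved = torch.cuda.memory_reserved() / 1024**2    # MB cached by the allocator
        logger.debug(f"GPU Memory: {allocated:.1f}MB allocated, {reserved:.1f}MB reserved")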