From 5383ac7df85f30901e76a4978ae176a5e962cb9b Mon Sep 17 00:00:00 2001
From: Dobromir Popov
Date: Mon, 8 Dec 2025 19:37:07 +0200
Subject: [PATCH] more training fixes

---
 ANNOTATE/core/real_training_adapter.py | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/ANNOTATE/core/real_training_adapter.py b/ANNOTATE/core/real_training_adapter.py
index a98a828..c2964ec 100644
--- a/ANNOTATE/core/real_training_adapter.py
+++ b/ANNOTATE/core/real_training_adapter.py
@@ -2507,10 +2507,11 @@ class RealTrainingAdapter: