T predictions WIP
@@ -1336,13 +1336,16 @@ class RealTrainingAdapter:
        if result_1s:
            price_data_1s, norm_params_dict['1s'] = result_1s
        else:
            # Don't fail on missing 1s data, it's often unavailable in annotations
            price_data_1s = None

        result_1m = self._extract_timeframe_data(timeframes.get('1m', {}), target_seq_len) if '1m' in timeframes else None
        if result_1m:
            price_data_1m, norm_params_dict['1m'] = result_1m
        else:
            price_data_1m = None
            # Warning: 1m data is critical
            logger.warning(f"Missing 1m data for transformer batch (sample: {training_sample.get('test_case_id')})")
            return None

        result_1h = self._extract_timeframe_data(timeframes.get('1h', {}), target_seq_len) if '1h' in timeframes else None
        if result_1h:
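The unpacking above implies that _extract_timeframe_data returns a (price_data, norm_params) pair, or a falsy value when the timeframe cannot be used. A minimal sketch of the same optional-vs-required pattern generalized over timeframes; the helper name comes from the diff, while collect_timeframes, the required tuple, and the plain-dict return are hypothetical illustrations, not the adapter's actual code:

def collect_timeframes(timeframes, extract, target_seq_len, required=('1m',)):
    """Gather (price_data, norm_params) per timeframe; skip optional gaps, abort on required ones."""
    price_data, norm_params = {}, {}
    for tf in ('1s', '1m', '1h'):
        result = extract(timeframes.get(tf, {}), target_seq_len) if tf in timeframes else None
        if result:
            price_data[tf], norm_params[tf] = result
        elif tf in required:
            # Mirrors the diff: 1m is critical, so the whole sample is dropped
            return None
        else:
            # Mirrors the diff: 1s is optional and often missing in annotations
            price_data[tf] = None
    return price_data, norm_params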
@@ -1558,6 +1561,12 @@ class RealTrainingAdapter:
        # Model predicts price change ratio, not absolute price
        exit_price = training_sample.get('exit_price')

        # Handle 'expected_outcome' nesting from LivePivotTrainer
        if exit_price is None:
            expected_outcome = training_sample.get('expected_outcome', {})
            if isinstance(expected_outcome, dict):
                exit_price = expected_outcome.get('exit_price')

        if exit_price and current_price > 0:
            # Normalize: (exit_price - current_price) / current_price
            # This gives the expected price change as a ratio
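As a concrete check of the normalization described in the comments above: with current_price = 100.0 and exit_price = 103.0, the target is (103.0 - 100.0) / 100.0 = 0.03, i.e. a +3% expected move. A minimal sketch of just that formula, assuming nothing beyond what the diff states (the function name is illustrative):

def price_change_target(exit_price, current_price):
    """Expected price change as a ratio, or None when inputs are unusable."""
    if not exit_price or current_price <= 0:
        return None
    return (exit_price - current_price) / current_price

# Example: a move from 100.0 to 103.0 yields a target of 0.03
assert abs(price_change_target(103.0, 100.0) - 0.03) < 1e-9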
@@ -2547,6 +2556,7 @@ class RealTrainingAdapter:
        if session['last_candle_time'] == latest_candle_time:
            return  # Same candle, no training needed

        logger.debug(f"New candle detected: {latest_candle_time} (last: {session['last_candle_time']})")
        session['last_candle_time'] = latest_candle_time

        # Get the completed candle (second to last)
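The gate above runs training at most once per candle: if the newest candle's timestamp equals the one stored in the session, the call returns early; otherwise the session is updated and the second-to-last (completed) candle is used. A minimal sketch of that gate in isolation; the session key matches the diff, but the candles argument and function name are hypothetical:

def pop_new_completed_candle(session, candles):
    """Return the most recent closed candle exactly once, else None.

    candles is assumed ordered oldest -> newest, with the last entry still forming.
    """
    if len(candles) < 2:
        return None
    latest_candle_time = candles[-1]['timestamp']
    if session.get('last_candle_time') == latest_candle_time:
        return None  # Same candle, no training needed
    session['last_candle_time'] = latest_candle_time
    # The completed candle is the second to last; the newest one is still open
    return candles[-2]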
@@ -2613,6 +2623,7 @@ class RealTrainingAdapter:
        # Convert to batch format
        batch = self._convert_annotation_to_transformer_batch(training_sample)
        if not batch:
            logger.warning(f"Per-candle training failed: Could not convert sample to batch")
            return

        # Train on this batch
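Putting the last hunk together: convert the per-candle sample to a transformer batch, bail out if conversion fails, then run one training step. The sketch below assumes a trainer object with a train_step(batch) method; that method and its return value are hypothetical, only _convert_annotation_to_transformer_batch comes from the diff:

def _train_on_new_candle(self, training_sample, trainer):
    """One per-candle training pass: convert, validate, train (sketch)."""
    batch = self._convert_annotation_to_transformer_batch(training_sample)
    if not batch:
        logger.warning("Per-candle training failed: Could not convert sample to batch")
        return None
    # Hypothetical trainer API; the real training call lies outside this hunk
    return trainer.train_step(batch)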