normalize by unified price range

Dobromir Popov
2025-07-29 22:05:28 +03:00
parent aa2a1bf7ee
commit ab5784b890
2 changed files with 89 additions and 71 deletions
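The diff below replaces per-timeframe scaling (latest close, rolling volume mean, ATR) with one min/max price range per symbol that every timeframe shares, so 1m and 1h candles of the same symbol land on the same 0-1 scale. A minimal standalone sketch of that idea, not the repository's API (the bounds values and column names here are illustrative):

import pandas as pd

def normalize_unified(df: pd.DataFrame, price_min: float, price_max: float) -> pd.DataFrame:
    """Scale OHLC columns by one shared price range so all timeframes stay comparable."""
    price_range = price_max - price_min  # assumed non-zero for this sketch
    out = df.copy()
    for col in ("open", "high", "low", "close"):
        out[col] = (out[col] - price_min) / price_range  # same bounds for every timeframe
    return out

# The same raw price maps to the same normalized value on any timeframe.
one_minute = pd.DataFrame({"open": [3500.0], "high": [3510.0], "low": [3495.0], "close": [3505.0]})
one_hour = pd.DataFrame({"open": [3400.0], "high": [3600.0], "low": [3390.0], "close": [3505.0]})
a = normalize_unified(one_minute, price_min=3000.0, price_max=4000.0)
b = normalize_unified(one_hour, price_min=3000.0, price_max=4000.0)
assert a["close"].iloc[0] == b["close"].iloc[0] == 0.505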


@@ -3117,87 +3117,86 @@ class DataProvider:
         return basic_cols # Fallback to basic OHLCV
     def _normalize_features(self, df: pd.DataFrame, symbol: str = None) -> Optional[pd.DataFrame]:
-        """Normalize features for CNN training using pivot-based bounds when available"""
+        """Normalize features for CNN training using unified normalization across all timeframes"""
         try:
             df_norm = df.copy()
-            # Try to use pivot-based normalization if available
+            # Get unified normalization bounds for all timeframes
             if symbol and symbol in self.pivot_bounds:
                 bounds = self.pivot_bounds[symbol]
                 price_range = bounds.get_price_range()
+                volume_range = bounds.volume_max - bounds.volume_min
-                # Normalize price-based features using pivot bounds
-                price_cols = ['open', 'high', 'low', 'close', 'sma_10', 'sma_20', 'sma_50',
-                              'ema_12', 'ema_26', 'ema_50', 'bb_upper', 'bb_lower', 'bb_middle',
-                              'keltner_upper', 'keltner_lower', 'keltner_middle', 'psar', 'vwap']
-                for col in price_cols:
-                    if col in df_norm.columns:
-                        # Use pivot bounds for normalization
-                        df_norm[col] = (df_norm[col] - bounds.price_min) / price_range
-                # Normalize volume using pivot bounds
-                if 'volume' in df_norm.columns:
-                    volume_range = bounds.volume_max - bounds.volume_min
-                    if volume_range > 0:
-                        df_norm['volume'] = (df_norm['volume'] - bounds.volume_min) / volume_range
-                    else:
-                        df_norm['volume'] = 0.5 # Default to middle if no volume range
-                logger.debug(f"Applied pivot-based normalization for {symbol}")
+                logger.debug(f"Using unified pivot-based normalization for {symbol} (price_range: {price_range:.2f})")
             else:
-                # Fallback to traditional normalization when pivot bounds not available
-                logger.debug("Using traditional normalization (no pivot bounds available)")
-                for col in df_norm.columns:
-                    if col in ['open', 'high', 'low', 'close', 'sma_10', 'sma_20', 'sma_50',
-                               'ema_12', 'ema_26', 'ema_50', 'bb_upper', 'bb_lower', 'bb_middle',
-                               'keltner_upper', 'keltner_lower', 'keltner_middle', 'psar', 'vwap']:
-                        # Price-based indicators: normalize by close price
+                # Fallback: calculate unified bounds from available data
+                price_range = self._get_price_range_for_symbol(symbol) if symbol else 1000.0
+                volume_range = 1000000.0 # Default volume range
+                logger.debug(f"Using fallback unified normalization for {symbol} (price_range: {price_range:.2f})")
+            # UNIFIED NORMALIZATION: All timeframes use the same normalization range
+            # This preserves relationships between different timeframes
+            # Price-based features (OHLCV + indicators)
+            price_cols = ['open', 'high', 'low', 'close', 'sma_10', 'sma_20', 'sma_50',
+                          'ema_12', 'ema_26', 'ema_50', 'bb_upper', 'bb_lower', 'bb_middle',
+                          'keltner_upper', 'keltner_lower', 'keltner_middle', 'psar', 'vwap']
+            for col in price_cols:
+                if col in df_norm.columns:
+                    if symbol and symbol in self.pivot_bounds:
+                        # Use pivot bounds for unified normalization
+                        df_norm[col] = (df_norm[col] - bounds.price_min) / price_range
+                    else:
+                        # Fallback: normalize by current price range
                         if 'close' in df_norm.columns:
-                            base_price = df_norm['close'].iloc[-1] # Use latest close as reference
+                            base_price = df_norm['close'].iloc[-1]
                             if base_price > 0:
                                 df_norm[col] = df_norm[col] / base_price
-                    elif col == 'volume':
-                        # Volume: normalize by its own rolling mean
-                        volume_mean = df_norm[col].rolling(window=min(20, len(df_norm))).mean().iloc[-1]
-                        if volume_mean > 0:
-                            df_norm[col] = df_norm[col] / volume_mean
-            # Normalize indicators that have standard ranges (regardless of pivot bounds)
+            # Volume normalization (unified across timeframes)
+            if 'volume' in df_norm.columns:
+                if symbol and symbol in self.pivot_bounds and volume_range > 0:
+                    df_norm['volume'] = (df_norm['volume'] - bounds.volume_min) / volume_range
+                else:
+                    # Fallback: normalize by rolling mean
+                    volume_mean = df_norm['volume'].rolling(window=min(20, len(df_norm))).mean().iloc[-1]
+                    if volume_mean > 0:
+                        df_norm['volume'] = df_norm['volume'] / volume_mean
+                    else:
+                        df_norm['volume'] = 0.5
+            # Standard range indicators (already 0-1 or 0-100)
             for col in df_norm.columns:
                 if col in ['rsi_14', 'rsi_7', 'rsi_21']:
-                    # RSI: already 0-100, normalize to 0-1
+                    # RSI: 0-100 -> 0-1
                     df_norm[col] = df_norm[col] / 100.0
                 elif col in ['stoch_k', 'stoch_d']:
-                    # Stochastic: already 0-100, normalize to 0-1
+                    # Stochastic: 0-100 -> 0-1
                     df_norm[col] = df_norm[col] / 100.0
                 elif col == 'williams_r':
-                    # Williams %R: -100 to 0, normalize to 0-1
+                    # Williams %R: -100 to 0 -> 0-1
                     df_norm[col] = (df_norm[col] + 100) / 100.0
                 elif col in ['macd', 'macd_signal', 'macd_histogram']:
-                    # MACD: normalize by ATR or close price
-                    if 'atr' in df_norm.columns and df_norm['atr'].iloc[-1] > 0:
-                        df_norm[col] = df_norm[col] / df_norm['atr'].iloc[-1]
+                    # MACD: normalize by unified price range
+                    if symbol and symbol in self.pivot_bounds:
+                        df_norm[col] = df_norm[col] / price_range
                     elif 'close' in df_norm.columns and df_norm['close'].iloc[-1] > 0:
                         df_norm[col] = df_norm[col] / df_norm['close'].iloc[-1]
                 elif col in ['bb_width', 'bb_percent', 'price_position', 'trend_strength',
                              'momentum_composite', 'volatility_regime', 'pivot_price_position',
                              'pivot_support_distance', 'pivot_resistance_distance']:
-                    # Already normalized indicators: ensure 0-1 range
+                    # Already normalized: ensure 0-1 range
                     df_norm[col] = np.clip(df_norm[col], 0, 1)
                 elif col in ['atr', 'true_range']:
-                    # Volatility indicators: normalize by close price or pivot range
+                    # Volatility: normalize by unified price range
                     if symbol and symbol in self.pivot_bounds:
-                        bounds = self.pivot_bounds[symbol]
-                        df_norm[col] = df_norm[col] / bounds.get_price_range()
+                        df_norm[col] = df_norm[col] / price_range
                     elif 'close' in df_norm.columns and df_norm['close'].iloc[-1] > 0:
                         df_norm[col] = df_norm[col] / df_norm['close'].iloc[-1]
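One consequence of the hunk above: MACD and ATR are now divided by the symbol's unified price range instead of a per-timeframe ATR, which is what keeps their magnitudes comparable across timeframes (ATR on a 1m chart is far smaller than on a 1h chart, while the pivot price range is a single number per symbol). A small illustrative check with made-up values:

# Hypothetical raw values for one symbol on two timeframes.
macd_1m, atr_1m = 2.0, 4.0     # 1m candles: small ATR
macd_1h, atr_1h = 2.0, 40.0    # 1h candles: much larger ATR
price_range = 1000.0           # one unified range for the symbol

# Old scheme: identical MACD values normalize differently per timeframe.
assert macd_1m / atr_1m == 0.5
assert macd_1h / atr_1h == 0.05

# New scheme: one shared denominator, so identical raw values stay identical.
assert macd_1m / price_range == macd_1h / price_range == 0.002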
@@ -3210,12 +3209,19 @@ class DataProvider:
                 else:
                     df_norm[col] = 0
-            # Replace inf/-inf with 0
+            # Clean up any invalid values
             df_norm = df_norm.replace([np.inf, -np.inf], 0)
             # Fill any remaining NaN values
             df_norm = df_norm.fillna(0)
+            # Ensure all values are in reasonable range for neural networks
+            df_norm = np.clip(df_norm, -10, 10)
+            return df_norm
+        except Exception as e:
+            logger.error(f"Error in unified feature normalization: {e}")
+            return None
-            return df_norm
-        except Exception as e: