revision, pending fixes

Dobromir Popov
2025-09-26 10:49:45 +03:00
parent 2b09e7fb5a
commit 468a2c2a66
8 changed files with 61 additions and 53 deletions

View File

@@ -1110,6 +1110,7 @@ class DataProvider:
"""Add pivot-derived context features for normalization"""
try:
if symbol not in self.pivot_bounds:
logger.warning("Pivot bounds missing for %s; access will be blocked until real data is ready (guideline: no stubs)", symbol)
return df
bounds = self.pivot_bounds[symbol]
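A minimal sketch of how the prerequisite could be satisfied instead of only warning: lazily derive the bounds from real cached candles before giving up. `_calculate_pivot_bounds` and `get_historical_data` are assumed helpers here, not part of this commit.

```python
def _ensure_pivot_bounds(self, symbol: str):
    """Return pivot bounds for symbol, computing them from real cached data if absent."""
    if symbol in self.pivot_bounds:
        return self.pivot_bounds[symbol]
    # Assumed helper: fetch real historical candles already cached by the provider.
    candles = self.get_historical_data(symbol, timeframe='1m', limit=1500)
    if candles is None or candles.empty:
        # No real data yet; per guidelines, do not fabricate bounds.
        return None
    # Assumed helper: derive support/resistance bounds from the real candles.
    self.pivot_bounds[symbol] = self._calculate_pivot_bounds(symbol, candles)
    return self.pivot_bounds[symbol]
```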
@@ -1820,30 +1821,7 @@ class DataProvider:
df_norm = df.copy()
# Get symbol-specific price ranges for consistent normalization
symbol_price_ranges = {
'ETH/USDT': {'min': 1000, 'max': 5000}, # ETH price range
'BTC/USDT': {'min': 90000, 'max': 120000} # BTC price range
}
if symbol in symbol_price_ranges:
price_range = symbol_price_ranges[symbol]
range_size = price_range['max'] - price_range['min']
# Normalize price columns to [0, 1] range specific to symbol
price_cols = ['open', 'high', 'low', 'close']
for col in price_cols:
if col in df_norm.columns:
df_norm[col] = (df_norm[col] - price_range['min']) / range_size
df_norm[col] = np.clip(df_norm[col], 0, 1) # Ensure [0,1] range
# Normalize volume to [0, 1] using log scale
if 'volume' in df_norm.columns:
df_norm['volume'] = np.log1p(df_norm['volume'])
vol_max = df_norm['volume'].max()
if vol_max > 0:
df_norm['volume'] = df_norm['volume'] / vol_max
logger.debug(f"Applied symbol-grouped normalization for {symbol}")
# TODO(Guideline: no synthetic ranges) Replace placeholder price ranges with real statistics or remove this fallback.
# Fill any NaN values
df_norm = df_norm.fillna(0)
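One way to resolve the TODO above about placeholder price ranges is to derive per-symbol bounds from the data itself rather than a static table. This is a sketch under the assumption that a window of real candles is available; the quantile clipping and 1500-candle lookback are illustrative choices, not part of this commit.

```python
import numpy as np
import pandas as pd

def compute_price_bounds(df: pd.DataFrame, lookback: int = 1500) -> dict:
    """Derive min/max normalization bounds from real observed prices.

    Quantiles of the recent window are used so a single outlier candle
    does not dominate the normalized range.
    """
    window = df.tail(lookback)
    lows = window['low'] if 'low' in window.columns else window['close']
    highs = window['high'] if 'high' in window.columns else window['close']
    p_min = float(np.quantile(lows, 0.01))
    p_max = float(np.quantile(highs, 0.99))
    if p_max <= p_min:
        # Degenerate window (flat prices); fall back to the raw extremes.
        p_min, p_max = float(lows.min()), float(highs.max())
    return {'min': p_min, 'max': p_max}
```

The normalization loop above could then call `compute_price_bounds(df)` in place of the hardcoded `symbol_price_ranges` entry.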

View File

@@ -1843,7 +1843,7 @@ class TradingOrchestrator:
dashboard=None
)
logger.info("Enhanced training system initialized successfully")
logger.info("Enhanced training system initialized successfully")
# Auto-start training by default
logger.info("🚀 Auto-starting enhanced real-time training...")
@@ -2204,21 +2204,12 @@ class TradingOrchestrator:
return float(data_stream.current_price)
except Exception as e:
logger.debug(f"Could not get price from universal adapter: {e}")
# Fallback to default prices
default_prices = {
'ETH/USDT': 2500.0,
'BTC/USDT': 108000.0
}
return default_prices.get(symbol, 1000.0)
# TODO(Guideline: no synthetic fallback) Provide a real-time or cached market price here instead of hardcoding.
raise RuntimeError("Current price unavailable; per guidelines do not substitute synthetic values.")
except Exception as e:
logger.error(f"Error getting current price for {symbol}: {e}")
# Return default price based on symbol
if 'ETH' in symbol:
return 2500.0
elif 'BTC' in symbol:
return 108000.0
else:
return 1000.0
raise RuntimeError("Current price unavailable; per guidelines do not substitute synthetic values.")
# SINGLE-USE FUNCTION - Called only once in codebase
def _generate_fallback_prediction(self, symbol: str) -> Dict[str, Any]:
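For the TODO above about hardcoded ETH/BTC fallbacks, a degraded-mode path could serve the last real price seen within a staleness budget before raising. A sketch assuming a `self._last_prices` cache of `(price, unix_timestamp)` tuples maintained by the live feed; the cache name and the 30-second budget are illustrative, not part of this commit.

```python
import time

def _get_cached_price(self, symbol: str, max_age_s: float = 30.0) -> float:
    """Degraded-mode lookup: serve the last real tick if it is fresh enough.

    Nothing is synthesized; if no sufficiently fresh real price exists, raise.
    """
    entry = getattr(self, '_last_prices', {}).get(symbol)
    if entry:
        price, ts = entry
        if price > 0 and time.time() - ts <= max_age_s:
            return float(price)
    raise RuntimeError(f"No fresh market price cached for {symbol}")
```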
@@ -2443,7 +2434,7 @@ class TradingOrchestrator:
if df is not None and not df.empty:
loaded_data[f"{symbol}_{timeframe}"] = df
total_candles += len(df)
logger.info(f"Loaded {len(df)} {timeframe} candles for {symbol}")
logger.info(f"Loaded {len(df)} {timeframe} candles for {symbol}")
# Store in data provider's historical cache for quick access
cache_key = f"{symbol}_{timeframe}_300"
@@ -2500,7 +2491,7 @@ class TradingOrchestrator:
logger.info("Initializing Decision Fusion with multi-symbol features...")
self._initialize_decision_with_provider_data(symbol_features)
logger.info("All models initialized with data provider's normalized historical data")
logger.info("All models initialized with data provider's normalized historical data")
except Exception as e:
logger.error(f"Error initializing models with historical data: {e}")
@@ -2720,7 +2711,7 @@ class TradingOrchestrator:
logger.error(f"Error in chained inference step {step}: {e}")
break
logger.info(f"Chained inference completed: {len(predictions)} predictions generated")
logger.info(f"Chained inference completed: {len(predictions)} predictions generated")
return predictions
except Exception as e:

View File

@@ -850,6 +850,10 @@ class TradingExecutor:
"""Get trade history"""
return self.trade_history.copy()
def get_balance(self) -> Dict[str, float]:
"""TODO(Guideline: expose real account state) Return actual account balances instead of raising."""
raise NotImplementedError("Implement TradingExecutor.get_balance to supply real balance data; stubs are forbidden.")
def export_trades_to_csv(self, filename: Optional[str] = None) -> str:
"""Export trade history to CSV file with comprehensive analysis"""
import csv
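For the `get_balance` stub above, a real implementation would read either the executor's simulated ledger or the live exchange client. A sketch assuming hypothetical `self.simulation_mode`, `self.simulated_balances`, and a ccxt-style `self.exchange.fetch_balance()`; none of these names are confirmed by this commit.

```python
from typing import Dict

def get_balance(self) -> Dict[str, float]:
    """Return per-asset balances from real state (simulated ledger or live exchange)."""
    if getattr(self, 'simulation_mode', False):
        # The simulated ledger is real executor state, not a fabricated number.
        return dict(self.simulated_balances)
    # Live mode: query the exchange; 'free' balances follow ccxt-style responses.
    raw = self.exchange.fetch_balance()
    return {asset: float(amount) for asset, amount in raw.get('free', {}).items() if float(amount) > 0}
```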

View File

@@ -1,10 +1,12 @@
# Enhanced RL Training with Real Data Integration
## Implementation Complete ✅
## Pending Work (Guideline compliance required)
I have successfully implemented and integrated the comprehensive RL training system that replaces the existing mock code with real-life data processing.
Transparent note: real-data integration remains TODO; the current code still
contains mock fallbacks and placeholders. The plan below is the desired end
state once the guidelines are satisfied.
## Major Transformation: Mock → Real Data
## Outstanding Gap: Mock → Real Data (still required)
### Before (Mock Implementation)
```python

View File

@@ -190,7 +190,7 @@ def start_web_ui(port=8051):
logger.info("Clean Trading Dashboard created successfully")
logger.info("Features: Live trading, COB visualization, ML pipeline monitoring, Position management")
logger.info("Unified orchestrator with decision-making model and checkpoint management")
logger.info("Unified orchestrator with decision-making model and checkpoint management")
# Run the dashboard server (COB integration will start automatically)
dashboard.run_server(host='127.0.0.1', port=port, debug=False)

View File

@@ -0,0 +1,31 @@
# Pending Guideline Fixes (September 2025)
## Overview
The following gaps violate our "no stubs, no synthetic data" policy and must
be resolved before the dashboard can operate in production. Inline TODOs with
matching wording have been added in the codebase.
## Items
1. **Prediction aggregation**: `TradingOrchestrator._get_all_predictions` still
raises until the real ModelManager integration is written. The decision loop
intentionally skips synthetic fallback signals.
2. **Device handling for CNN checkpoints**: the orchestrator references
`self.device` while loading weights; define and manage the device before the
load occurs (see the sketch after this list).
3. **Trading balance access**: `TradingExecutor.get_balance` currently raises
`NotImplementedError`. Provide a real balance snapshot for both simulation and
live modes.
4. **Fallback pricing**: `_get_current_price` now raises when no market price
is available. Implement a real degraded-mode data path instead of hardcoded
ETH/BTC prices.
5. **Pivot context prerequisites**: ensure pivot bounds exist (or are freshly
calculated) before requesting normalized pivot features.
6. **Decision-fusion training features**: the dashboard still relies on random
vectors for decision fusion. Replace them with real feature tensors derived
from market data.
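Item 2 could be addressed roughly as follows; a minimal sketch assuming PyTorch, with `checkpoint_path` and the checkpoint key `model_state_dict` as illustrative assumptions (in the orchestrator the chosen device would be stored as `self.device` during initialization).

```python
import torch

# Select the device once, before any checkpoint load references it.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def load_cnn_checkpoint(model: torch.nn.Module, checkpoint_path: str) -> torch.nn.Module:
    """Load CNN weights onto the pre-selected device."""
    state = torch.load(checkpoint_path, map_location=device)
    # Accept either a wrapped checkpoint dict or a bare state_dict.
    model.load_state_dict(state.get('model_state_dict', state))
    return model.to(device)
```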
## Next Steps
- Prioritise restoring real prediction outputs so the orchestrator can resume
trading decisions without synthetic stand-ins.
- Sequence the remaining work so that downstream components (dashboard panels,
executor feedback) receive genuine data once more.

View File

@@ -4852,7 +4852,7 @@ class CleanTradingDashboard:
avg_reward = total_rewards / training_sessions if training_sessions > 0 else 0
avg_loss = total_losses / training_sessions if training_sessions > 0 else 0
logger.info("📊 COMPREHENSIVE TRAINING REPORT:")
logger.info("COMPREHENSIVE TRAINING REPORT:")
logger.info(f" Total Signals: {total_signals}")
logger.info(f" Success Rate: {success_rate:.1f}%")
logger.info(f" Training Sessions: {training_sessions}")
@@ -4869,20 +4869,20 @@ class CleanTradingDashboard:
# Performance analysis
if avg_loss < 0.01:
logger.info(" 🎉 EXCELLENT: Very low loss indicates strong learning")
logger.info(" EXCELLENT: Very low loss indicates strong learning")
elif avg_loss < 0.1:
logger.info(" GOOD: Moderate loss with consistent improvement")
logger.info(" GOOD: Moderate loss with consistent improvement")
elif avg_loss < 1.0:
logger.info(" ⚠️ FAIR: Loss reduction needed for better performance")
logger.info(" FAIR: Loss reduction needed for better performance")
else:
logger.info(" POOR: High loss indicates training issues")
logger.info(" POOR: High loss indicates training issues")
if abs(avg_reward) > 10:
logger.info(" 💰 STRONG REWARDS: Models responding well to feedback")
logger.info(" STRONG REWARDS: Models responding well to feedback")
elif abs(avg_reward) > 1:
logger.info(" 📈 MODERATE REWARDS: Learning progressing steadily")
logger.info(" MODERATE REWARDS: Learning progressing steadily")
else:
logger.info(" 🔄 LOW REWARDS: May need reward scaling adjustment")
logger.info(" LOW REWARDS: May need reward scaling adjustment")
except Exception as e:
logger.warning(f"Error generating training performance report: {e}")
@@ -7834,6 +7834,8 @@ class CleanTradingDashboard:
price_change = (next_price - current_price) / current_price if current_price > 0 else 0
cumulative_imbalance = current_data.get('cumulative_imbalance', {})
# TODO(Guideline: no synthetic data) Replace the random baseline with real orchestrator features.
features = np.random.randn(100)
features[0] = current_price / 10000
features[1] = price_change
@@ -7964,7 +7966,7 @@ class CleanTradingDashboard:
price_change = (next_price - current_price) / current_price if current_price > 0 else 0
cumulative_imbalance = current_data.get('cumulative_imbalance', {})
# Create decision fusion features
# TODO(Guideline: no synthetic data) Replace random feature vectors with real market-derived inputs.
features = np.random.randn(32) # Decision fusion expects 32 features
features[0] = current_price / 10000
features[1] = price_change
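To retire the random 32-dimensional baseline flagged above, the vector could be assembled from the real quantities already in scope (`current_price`, `price_change`, `cumulative_imbalance`) plus recent returns. A sketch only; the slot layout and padding scheme are illustrative, not a spec.

```python
import numpy as np

def build_fusion_features(current_price: float, price_change: float,
                          cumulative_imbalance: dict, recent_closes: np.ndarray,
                          n_features: int = 32) -> np.ndarray:
    """Assemble decision-fusion inputs from real market data (no random filler)."""
    features = np.zeros(n_features, dtype=np.float32)
    features[0] = current_price / 10000.0      # keep the existing price scaling
    features[1] = price_change
    # Up to four cumulative order-book imbalance buckets (e.g. 1s/5s/15s/60s windows).
    for i, key in enumerate(sorted(cumulative_imbalance)[:4]):
        features[2 + i] = float(cumulative_imbalance[key])
    # Fill the remaining slots with recent normalized returns instead of noise.
    if recent_closes is not None and len(recent_closes) > 1:
        returns = np.diff(recent_closes) / recent_closes[:-1]
        k = min(len(returns), n_features - 6)
        features[6:6 + k] = returns[-k:]
    return features
```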