try to fix input dimensions
@@ -99,6 +99,10 @@ class TradingOrchestrator:
        self.model_registry = model_registry or get_model_registry()
        self.enhanced_rl_training = enhanced_rl_training

        # Determine the device to use (GPU if available, else CPU)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        logger.info(f"Using device: {self.device}")

        # Configuration - AGGRESSIVE for more training data
        self.confidence_threshold = self.config.orchestrator.get('confidence_threshold', 0.15) # Lowered from 0.20
        self.confidence_threshold_close = self.config.orchestrator.get('confidence_threshold_close', 0.08) # Lowered from 0.10
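The new `self.device` handling only works end to end if the tensors fed to a moved model live on the same device. A minimal illustrative sketch (stand-in model and sizes, not the repo's classes) of the pattern this hunk introduces:

```python
import torch

# Pick the device once, as in the hunk above.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = torch.nn.Linear(403, 3).to(device)   # stand-in for DQNAgent / EnhancedCNN
state = torch.randn(1, 403)                  # features typically built on the CPU (NumPy -> tensor)

with torch.no_grad():
    q_values = model(state.to(device))       # move the input before the forward pass,
print(q_values.shape)                        # otherwise PyTorch raises a device-mismatch error
```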
@@ -226,6 +230,7 @@ class TradingOrchestrator:
            state_size = self.config.rl.get('state_size', 13800) # Enhanced with COB features
            action_size = self.config.rl.get('action_space', 3)
            self.rl_agent = DQNAgent(state_shape=state_size, n_actions=action_size)
            self.rl_agent.to(self.device) # Move DQN agent to the determined device

            # Load best checkpoint and capture initial state
            checkpoint_loaded = False
@@ -268,6 +273,7 @@ class TradingOrchestrator:
            cnn_input_shape = self.config.cnn.get('input_shape', 100)
            cnn_n_actions = self.config.cnn.get('n_actions', 3)
            self.cnn_model = EnhancedCNN(input_shape=cnn_input_shape, n_actions=cnn_n_actions)
            self.cnn_model.to(self.device) # Move CNN model to the determined device
            self.cnn_optimizer = optim.Adam(self.cnn_model.parameters(), lr=0.001) # Initialize optimizer for CNN

            # Load best checkpoint and capture initial state
@@ -300,6 +306,7 @@ class TradingOrchestrator:
            try:
                from NN.models.cnn_model import CNNModel
                self.cnn_model = CNNModel()
                self.cnn_model.to(self.device) # Move basic CNN model to the determined device
                self.cnn_optimizer = optim.Adam(self.cnn_model.parameters(), lr=0.001) # Initialize optimizer for basic CNN

                # Load checkpoint for basic CNN as well
@@ -355,6 +362,9 @@ class TradingOrchestrator:
        try:
            from NN.models.cob_rl_model import COBRLModelInterface
            self.cob_rl_agent = COBRLModelInterface()
            # Move COB RL agent to the determined device if it supports it
            if hasattr(self.cob_rl_agent, 'to'):
                self.cob_rl_agent.to(self.device)

            # Load best checkpoint and capture initial state
            checkpoint_loaded = False
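The `hasattr(self.cob_rl_agent, 'to')` guard is the duck-typed way to move wrapper interfaces that may not subclass `nn.Module`. A small stand-in helper (hypothetical name, not in the repo) capturing the same pattern:

```python
import torch

def move_to_device(obj, device):
    """Move obj to device only if it exposes a .to() method; return it either way."""
    if hasattr(obj, 'to'):
        obj.to(device)
    return obj

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = move_to_device(torch.nn.Linear(4, 2), device)    # nn.Module: moved
config = move_to_device({"lr": 1e-3}, device)            # plain dict: left untouched
```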
@@ -458,7 +468,19 @@ class TradingOrchestrator:
    def predict(self, data):
        try:
            if hasattr(self.model, 'predict'):
                return self.model.predict(data)
                # Ensure data has correct dimensions for COB RL model (2000 features)
                if isinstance(data, np.ndarray):
                    features = data.flatten()
                    # COB RL expects 2000 features
                    if len(features) < 2000:
                        padded_features = np.zeros(2000)
                        padded_features[:len(features)] = features
                        features = padded_features
                    elif len(features) > 2000:
                        features = features[:2000]
                    return self.model.predict(features)
                else:
                    return self.model.predict(data)
            return None
        except Exception as e:
            logger.error(f"Error in COB RL prediction: {e}")
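The same pad-or-truncate pattern appears twice in this commit: 2000 features for the COB RL model here and 403 features for the DQN state later. A hypothetical helper (not in the repo) that captures the shared idea:

```python
import numpy as np

def fit_to_length(features: np.ndarray, target_size: int) -> np.ndarray:
    """Flatten, then zero-pad or truncate to exactly target_size features."""
    flat = np.asarray(features, dtype=np.float32).flatten()
    if flat.size < target_size:
        return np.pad(flat, (0, target_size - flat.size), mode='constant')
    return flat[:target_size]

# Usage mirroring the two call sites in this commit:
cob_features = fit_to_length(np.random.rand(50, 20), 2000)   # COB RL expects 2000
dqn_state    = fit_to_length(np.random.rand(397), 403)       # DQN expects 403
assert cob_features.shape == (2000,) and dqn_state.shape == (403,)
```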
@@ -929,7 +951,7 @@ class TradingOrchestrator:
                logger.error(f"Error in decision callback: {e}")

        # Clean up memory periodically
        if len(self.recent_decisions[symbol]) % 50 == 0:
        if len(self.recent_decisions[symbol]) % 200 == 0:  # Reduced from 50 to 200
            self.model_registry.cleanup_all_models()

        return decision
@@ -970,119 +992,93 @@ class TradingOrchestrator:
    async def _get_cnn_predictions(self, model: CNNModelInterface, symbol: str) -> List[Prediction]:
        """Get predictions from CNN model for all timeframes with enhanced COB features"""
        predictions = []

        try:
            # Safely get timeframes from config
            timeframes = getattr(self.config, 'timeframes', None)
            if timeframes is None:
                timeframes = ['1m', '5m', '15m', '1h']  # Default timeframes

            timeframes = getattr(self.config, 'timeframes', ['1m','5m','15m','1h'])
            for timeframe in timeframes:
                # Get standard feature matrix for this timeframe
                # 1) build or fetch your feature matrix (and optionally augment with COB)…
                feature_matrix = self.data_provider.get_feature_matrix(
                    symbol=symbol,
                    timeframes=[timeframe],
                    window_size=getattr(model, 'window_size', 20)
                )

                # Enhance with COB feature matrix if available
                enhanced_features = feature_matrix
                if feature_matrix is not None and self.cob_integration:
                    try:
                        # Get COB feature matrix (5-minute history)
                        cob_feature_matrix = self.get_cob_feature_matrix(symbol, sequence_length=60)

                        if cob_feature_matrix is not None:
                            # Take the latest COB features to augment the standard features
                            latest_cob_features = cob_feature_matrix[-1:, :]  # Shape: (1, 400)

                            # Resize to match the feature matrix timeframe dimension
                            timeframe_count = feature_matrix.shape[0]
                            cob_features_expanded = np.repeat(latest_cob_features, timeframe_count, axis=0)

                            # Concatenate COB features with standard features
                            # Standard features shape: (timeframes, window_size, features)
                            # COB features shape: (timeframes, 400)
                            # We'll add COB as additional features to each timeframe
                            window_size = feature_matrix.shape[1]
                            cob_features_reshaped = cob_features_expanded.reshape(timeframe_count, 1, 400)
                            cob_features_tiled = np.tile(cob_features_reshaped, (1, window_size, 1))

                            # Concatenate along feature dimension
                            enhanced_features = np.concatenate([feature_matrix, cob_features_tiled], axis=2)

                            logger.debug(f"Enhanced CNN features with COB data for {symbol}: "
                                         f"{feature_matrix.shape} + COB -> {enhanced_features.shape}")

                    except Exception as cob_error:
                        logger.debug(f"Could not enhance CNN features with COB data: {cob_error}")
                        enhanced_features = feature_matrix

                if enhanced_features is not None:
                    # Get CNN prediction - use the actual underlying model
                    try:
                        if hasattr(model.model, 'act'):
                            # Use the CNN's act method
                            action_result = model.model.act(enhanced_features, explore=False)
                            if isinstance(action_result, tuple):
                                action_idx, confidence = action_result
                            else:
                                action_idx = action_result
                                confidence = 0.7  # Default confidence

                            # Convert to action probabilities
                            action_probs = [0.1, 0.1, 0.8]  # Default distribution
                            action_probs[action_idx] = confidence
                if feature_matrix is None:
                    continue

                # …apply COB-augmentation here (omitted for brevity)
                enhanced_features = self._augment_with_cob(feature_matrix, symbol)

                # 2) Initialize these before we call the model
                action_probs, confidence = None, None

                # 3) Try the actual model inference
                try:
                    # if your model has an .act() that returns (probs, conf)
                    if hasattr(model.model, 'act'):
                        # Flatten / reshape enhanced_features as needed…
                        x = self._prepare_cnn_input(enhanced_features)
                        action_probs, confidence = model.model.act(x, explore=False)
                    else:
                        # fallback to generic predict
                        result = model.predict(enhanced_features)
                        if isinstance(result, tuple) and len(result) == 2:
                            action_probs, confidence = result
                        else:
                            # Fallback to generic predict method
                            prediction_result = model.predict(enhanced_features)
                            if prediction_result is not None:
                                if isinstance(prediction_result, tuple) and len(prediction_result) == 2:
                                    action_probs, confidence = prediction_result
                                else:
                                    action_probs = prediction_result
                                    confidence = 0.7
                            else:
                                action_probs, confidence = None, None
                    except Exception as e:
                        logger.warning(f"CNN prediction failed: {e}")
                        action_probs, confidence = None, None

                    if action_probs is not None:
                        # Convert to prediction object
                        action_names = ['SELL', 'HOLD', 'BUY']
                        best_action_idx = np.argmax(action_probs)
                        best_action = action_names[best_action_idx]

                        prediction = Prediction(
                            action=best_action,
                            confidence=float(confidence) if confidence is not None else float(action_probs[best_action_idx]),
                            probabilities={name: float(prob) for name, prob in zip(action_names, action_probs)},
                            timeframe=timeframe,
                            timestamp=datetime.now(),
                            model_name=model.name,
                            metadata={
                                'timeframe_specific': True,
                                'cob_enhanced': enhanced_features is not feature_matrix,
                                'feature_shape': str(enhanced_features.shape)
                            }
                        )

                        predictions.append(prediction)

                        # Capture CNN prediction for dashboard visualization
                        current_price = self._get_current_price(symbol)
                        if current_price:
                            direction = best_action_idx  # 0=SELL, 1=HOLD, 2=BUY
                            pred_confidence = float(confidence) if confidence is not None else float(action_probs[best_action_idx])
                            predicted_price = current_price * (1 + (pred_confidence * 0.01 if best_action == 'BUY' else -pred_confidence * 0.01 if best_action == 'SELL' else 0))
                            self.capture_cnn_prediction(symbol, int(direction), pred_confidence, current_price, predicted_price)

                            action_probs = result
                            confidence = 0.7
                except Exception as e:
                    logger.warning(f"CNN inference failed for {symbol}@{timeframe}: {e}")
                    continue  # skip this timeframe entirely

                # 4) If we still don’t have valid probs, skip
                if action_probs is None:
                    continue

                # 5) Build your Prediction
                action_names = ['SELL','HOLD','BUY']
                best_idx = int(np.argmax(action_probs))
                best_action = action_names[best_idx]
                pred = Prediction(
                    action=best_action,
                    confidence=float(confidence),
                    probabilities={n: float(p) for n,p in zip(action_names, action_probs)},
                    timeframe=timeframe,
                    timestamp=datetime.now(),
                    model_name=model.name,
                    metadata={
                        'feature_shape': str(enhanced_features.shape),
                        'cob_enhanced': enhanced_features is not feature_matrix
                    }
                )
                predictions.append(pred)

                # …and capture for the dashboard if you like…
                current_price = self._get_current_price(symbol)
                if current_price is not None:
                    predicted_price = current_price * (1 + (0.01 * (confidence if best_action=='BUY' else -confidence if best_action=='SELL' else 0)))
                    self.capture_cnn_prediction(
                        symbol,
                        direction=best_idx,
                        confidence=confidence,
                        current_price=current_price,
                        predicted_price=predicted_price
                    )
        except Exception as e:
            logger.error(f"Error getting CNN predictions: {e}")

            logger.error(f"Orch: Error getting CNN predictions: {e}")
        return predictions


    # helper stubs for clarity
    def _augment_with_cob(self, feature_matrix, symbol):
        # your existing COB-augmentation logic…
        return feature_matrix

    def _prepare_cnn_input(self, features):
        arr = features.flatten()
        # pad/truncate to 300, reshape to (1,300)
        if len(arr) < 300:
            arr = np.pad(arr, (0, 300 - len(arr)), 'constant')
        else:
            arr = arr[:300]
        return arr.reshape(1, -1)
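Note the contract the rewritten loop relies on: `model.model.act(x, explore=False)` is now expected to return a probability vector plus a confidence, whereas the old path treated the first return value as an action index. A self-contained sketch with stand-in names (none of these classes exist in the repo) exercising that contract together with the `_prepare_cnn_input` logic:

```python
import numpy as np

def prepare_cnn_input(features, target=300):
    """Standalone copy of the _prepare_cnn_input stub above: pad/truncate, then batch of 1."""
    arr = np.asarray(features, dtype=np.float32).flatten()
    arr = np.pad(arr, (0, max(0, target - arr.size)), 'constant')[:target]
    return arr.reshape(1, -1)

class DummyCNN:
    """Stand-in honoring the assumed contract: act() returns (action_probs, confidence)."""
    def act(self, x, explore=False):
        probs = np.array([0.1, 0.2, 0.7])      # SELL, HOLD, BUY
        return probs, float(probs.max())

model = DummyCNN()
x = prepare_cnn_input(np.random.rand(5, 20, 8))      # any input shape -> (1, 300)
action_probs, confidence = model.act(x, explore=False)
best_action = ['SELL', 'HOLD', 'BUY'][int(np.argmax(action_probs))]
print(best_action, confidence)                       # BUY 0.7
```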
    async def _get_rl_prediction(self, model: RLAgentInterface, symbol: str) -> Optional[Prediction]:
        """Get prediction from RL agent"""
        try:
@@ -1230,7 +1226,20 @@ class TradingOrchestrator:
            # This would come from a portfolio manager in a real implementation
            additional_state = np.array([0.0, 1.0, 0.0])  # [position, balance, unrealized_pnl]

            return np.concatenate([state, additional_state])
            combined_state = np.concatenate([state, additional_state])

            # Ensure DQN gets exactly 403 features (expected by the model)
            target_size = 403
            if len(combined_state) < target_size:
                # Pad with zeros
                padded_state = np.zeros(target_size)
                padded_state[:len(combined_state)] = combined_state
                combined_state = padded_state
            elif len(combined_state) > target_size:
                # Truncate to target size
                combined_state = combined_state[:target_size]

            return combined_state

        return None

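Why the hard 403-feature target matters: a fully connected input layer sized for 403 features rejects any state whose length has drifted. A minimal illustration with a stand-in network (the real DQN architecture is not shown in this diff):

```python
import numpy as np
import torch
import torch.nn as nn

# Stand-in for the DQN's first layers, assuming it was built for 403 input features.
policy_net = nn.Sequential(nn.Linear(403, 64), nn.ReLU(), nn.Linear(64, 3))

good_state = torch.from_numpy(np.zeros(403, dtype=np.float32)).unsqueeze(0)
print(policy_net(good_state).shape)        # torch.Size([1, 3])

bad_state = torch.zeros(1, 397)            # e.g. a state built before the padding above was added
try:
    policy_net(bad_state)
except RuntimeError as e:
    print(f"shape mismatch: {e}")          # "mat1 and mat2 shapes cannot be multiplied ..."
```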
@@ -1547,7 +1556,9 @@ class TradingOrchestrator:
                    return torch.softmax(self.fc3(x), dim=1)

            self.decision_fusion_network = DecisionFusionNet()
            logger.info("Decision fusion network initialized")
            # Move decision fusion network to the device
            self.decision_fusion_network.to(self.device)
            logger.info(f"Decision fusion network initialized on device: {self.device}")

        except Exception as e:
            logger.warning(f"Decision fusion initialization failed: {e}")
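A minimal sketch of the pattern this hunk applies to the fusion network: build the module, move it to the shared device, and make sure its inputs are created on (or moved to) that device. `TinyFusionNet` is a generic stand-in; the real `DecisionFusionNet` layers are not shown in this diff, only its softmax output convention.

```python
import torch
import torch.nn as nn

class TinyFusionNet(nn.Module):
    """Generic stand-in for DecisionFusionNet (real architecture not in this hunk)."""
    def __init__(self, in_features: int = 6, n_actions: int = 3):
        super().__init__()
        self.fc1 = nn.Linear(in_features, 16)
        self.fc3 = nn.Linear(16, n_actions)

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        return torch.softmax(self.fc3(x), dim=1)   # same output convention as the hunk above

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = TinyFusionNet().to(device)                    # mirrors decision_fusion_network.to(self.device)

# e.g. concatenated [SELL, HOLD, BUY] probabilities from two upstream models
fused_in = torch.tensor([[0.2, 0.3, 0.5, 0.1, 0.2, 0.7]], device=device)
print(net(fused_in))                                # each row sums to 1.0
```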