fixes around pivot points and BOM matrix

Dobromir Popov
2025-06-24 21:09:35 +03:00
parent 6a4a73ff0b
commit 8685319989
7 changed files with 136 additions and 23 deletions

@@ -443,11 +443,33 @@ class EnhancedCNNModel(nn.Module):
  # Forward pass
  outputs = self.forward(x)
- # Extract results
+ # Extract results with proper shape handling
  probs = outputs['probabilities'].cpu().numpy()[0]
- confidence = outputs['confidence'].cpu().numpy()[0]
+ confidence_tensor = outputs['confidence'].cpu().numpy()
  regime = outputs['regime'].cpu().numpy()[0]
- volatility = outputs['volatility'].cpu().numpy()[0]
+ volatility = outputs['volatility'].cpu().numpy()
+ # Handle confidence shape properly
+ if isinstance(confidence_tensor, np.ndarray):
+     if confidence_tensor.ndim == 0:
+         confidence = float(confidence_tensor.item())
+     elif confidence_tensor.size == 1:
+         confidence = float(confidence_tensor.flatten()[0])
+     else:
+         confidence = float(confidence_tensor[0] if len(confidence_tensor) > 0 else 0.7)
+ else:
+     confidence = float(confidence_tensor)
+ # Handle volatility shape properly
+ if isinstance(volatility, np.ndarray):
+     if volatility.ndim == 0:
+         volatility = float(volatility.item())
+     elif volatility.size == 1:
+         volatility = float(volatility.flatten()[0])
+     else:
+         volatility = float(volatility[0] if len(volatility) > 0 else 0.0)
+ else:
+     volatility = float(volatility)
  # Determine action (0=BUY, 1=SELL for 2-action system)
  action = int(np.argmax(probs))
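
The confidence and volatility handling above (and the nearly identical hunk that follows) is one pattern: coerce a numpy result of unknown shape to a plain Python float. A minimal standalone sketch of that pattern as a reusable helper; the name `to_scalar` and the default values are illustrative, not part of the commit:

import numpy as np

def to_scalar(value, default=0.0):
    """Coerce a numpy array of any shape (or a plain number) to a Python float."""
    if isinstance(value, np.ndarray):
        if value.ndim == 0:
            return float(value.item())        # 0-d array, e.g. np.array(0.7)
        if value.size == 1:
            return float(value.flatten()[0])  # single-element array, e.g. [[0.7]]
        return float(value.flat[0]) if value.size > 0 else float(default)
    return float(value)

# e.g. confidence = to_scalar(outputs['confidence'].cpu().numpy(), default=0.7)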

@@ -396,11 +396,33 @@ class EnhancedCNNModel(nn.Module):
  # Forward pass
  outputs = self.forward(x)
- # Extract results
+ # Extract results with proper shape handling
  probs = outputs['probabilities'].cpu().numpy()[0]
- confidence = outputs['confidence'].cpu().numpy()[0]
+ confidence_tensor = outputs['confidence'].cpu().numpy()
  regime = outputs['regime'].cpu().numpy()[0]
- volatility = outputs['volatility'].cpu().numpy()[0]
+ volatility_tensor = outputs['volatility'].cpu().numpy()
+ # Handle confidence shape properly to avoid scalar conversion errors
+ if isinstance(confidence_tensor, np.ndarray):
+     if confidence_tensor.ndim == 0:
+         confidence = float(confidence_tensor.item())
+     elif confidence_tensor.size == 1:
+         confidence = float(confidence_tensor.flatten()[0])
+     else:
+         confidence = float(confidence_tensor[0] if len(confidence_tensor) > 0 else 0.7)
+ else:
+     confidence = float(confidence_tensor)
+ # Handle volatility shape properly
+ if isinstance(volatility_tensor, np.ndarray):
+     if volatility_tensor.ndim == 0:
+         volatility = float(volatility_tensor.item())
+     elif volatility_tensor.size == 1:
+         volatility = float(volatility_tensor.flatten()[0])
+     else:
+         volatility = float(volatility_tensor[0] if len(volatility_tensor) > 0 else 0.0)
+ else:
+     volatility = float(volatility_tensor)
  # Determine action (0=BUY, 1=SELL for 2-action system)
  action = int(np.argmax(probs))

@@ -1507,8 +1507,15 @@ class DataProvider:
  timeframe_secs = self.timeframe_seconds.get(timeframe, 3600)
  current_time = tick['timestamp']
- # Calculate candle start time
- candle_start = current_time.floor(f'{timeframe_secs}s')
+ # Calculate candle start time using proper datetime truncation
+ if isinstance(current_time, datetime):
+     timestamp_seconds = current_time.timestamp()
+ else:
+     timestamp_seconds = current_time.timestamp() if hasattr(current_time, 'timestamp') else current_time
+ # Truncate to timeframe boundary
+ candle_start_seconds = int(timestamp_seconds // timeframe_secs) * timeframe_secs
+ candle_start = datetime.fromtimestamp(candle_start_seconds)
  # Get current candle queue
  candle_queue = self.real_time_data[symbol][timeframe]
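
The replacement drops the pandas-style `.floor()` call and instead floors the Unix timestamp to the timeframe boundary. A self-contained sketch of that truncation, assuming a plain `datetime` input and a bar length in seconds (the function name is illustrative):

from datetime import datetime

def candle_start(timestamp: datetime, timeframe_secs: int) -> datetime:
    """Floor a timestamp to the start of its timeframe bucket."""
    seconds = timestamp.timestamp()
    start_seconds = int(seconds // timeframe_secs) * timeframe_secs
    # fromtimestamp() yields a naive local datetime, matching the diff above
    return datetime.fromtimestamp(start_seconds)

# e.g. a tick at 21:09:35 with timeframe_secs=3600 maps to the 21:00:00 candle
# (exact wall-clock result depends on the local UTC offset being a whole hour)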

@@ -1389,19 +1389,58 @@ class EnhancedTradingOrchestrator(TradingOrchestrator):
  # Get model prediction
  prediction_result = model.predict(feature_matrix)
- # Extract predictions (action probabilities)
+ # Extract predictions (action probabilities) - ensure proper array handling
  if isinstance(prediction_result, dict):
      # Get probabilities as flat array
      predictions = prediction_result.get('probabilities', [0.33, 0.33, 0.34])
      confidence = prediction_result.get('confidence', 0.7)
-     # Ensure predictions is a flat list first
+     # Convert predictions to numpy array first
      if isinstance(predictions, np.ndarray):
-         predictions = predictions.flatten().tolist()
-     elif not isinstance(predictions, list):
-         predictions = [float(predictions)]
-     # Add confidence as a single float
-     predictions.append(float(confidence))
-     # Convert to flat numpy array
-     predictions = np.array(predictions, dtype=np.float32)
+         predictions_array = predictions.flatten()
+     elif isinstance(predictions, (list, tuple)):
+         predictions_array = np.array(predictions, dtype=np.float32).flatten()
+     else:
+         predictions_array = np.array([float(predictions)], dtype=np.float32)
+     # Create final predictions array with confidence
+     # Ensure confidence is a scalar value - handle all array shapes safely
+     if isinstance(confidence, np.ndarray):
+         if confidence.ndim == 0:
+             # 0-dimensional array (scalar)
+             confidence_scalar = float(confidence.item())
+         elif confidence.size == 1:
+             # 1-element array
+             confidence_scalar = float(confidence.item())
+         else:
+             # Multi-element array - take first element or mean
+             confidence_scalar = float(confidence.flat[0])  # Use flat[0] to safely get first element
+     else:
+         confidence_scalar = float(confidence)
+     # Combine predictions and confidence as separate elements
+     predictions = np.concatenate([
+         predictions_array,
+         np.array([confidence_scalar], dtype=np.float32)
+     ])
+ elif isinstance(prediction_result, tuple) and len(prediction_result) == 2:
+     # Handle (pred_class, pred_proba) tuple from CNN models
+     pred_class, pred_proba = prediction_result
+     # Flatten and process the probability array
+     if isinstance(pred_proba, np.ndarray):
+         if pred_proba.ndim > 1:
+             # Handle 2D arrays like [[0.1, 0.2, 0.7]]
+             pred_proba_flat = pred_proba.flatten()
+         else:
+             # Already 1D
+             pred_proba_flat = pred_proba
+         # Use the probability values as the predictions array
+         predictions = pred_proba_flat.astype(np.float32)
+     else:
+         # Fallback: use class prediction only
+         predictions = np.array([float(pred_class)], dtype=np.float32)
  else:
      # Handle direct prediction result
      if isinstance(prediction_result, np.ndarray):
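
In the dict branch above, the end result is a flat float32 vector: the flattened action probabilities with the scalar confidence appended. A condensed sketch of just that combination step, with illustrative input shapes:

import numpy as np

probabilities = np.array([[0.2, 0.3, 0.5]])  # e.g. a (1, 3) model output
confidence = np.array([0.8])                 # may arrive 0-d, (1,) or larger

predictions_array = np.asarray(probabilities, dtype=np.float32).flatten()
confidence_scalar = float(np.asarray(confidence).flat[0])  # safe for any non-empty shape

predictions = np.concatenate([
    predictions_array,
    np.array([confidence_scalar], dtype=np.float32)
])
# predictions -> array([0.2, 0.3, 0.5, 0.8], dtype=float32)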

@@ -25,9 +25,11 @@ import logging
  import time
  try:
      import websockets
+     from websockets.client import connect as websockets_connect
  except ImportError:
      # Fallback for environments where websockets is not available
      websockets = None
+     websockets_connect = None
  import numpy as np
  import pandas as pd
  from datetime import datetime, timedelta
@@ -465,9 +467,9 @@ class MultiExchangeCOBProvider:
  ws_url = f"{config.websocket_url}{config.symbols_mapping[symbol].lower()}@depth20@100ms"
  logger.info(f"Connecting to Binance WebSocket: {ws_url}")
- if websockets is None:
+ if websockets is None or websockets_connect is None:
      raise ImportError("websockets module not available")
- async with websockets.connect(ws_url) as websocket:
+ async with websockets_connect(ws_url) as websocket:
      self.exchange_order_books[symbol]['binance']['connected'] = True
      logger.info(f"Connected to Binance order book stream for {symbol}")
@@ -696,9 +698,9 @@ class MultiExchangeCOBProvider:
  ws_url = f"{config.websocket_url}{config.symbols_mapping[symbol].lower()}@trade"
  logger.info(f"Connecting to Binance trade stream: {ws_url}")
- if websockets is None:
+ if websockets is None or websockets_connect is None:
      raise ImportError("websockets module not available")
- async with websockets.connect(ws_url) as websocket:
+ async with websockets_connect(ws_url) as websocket:
      logger.info(f"Connected to Binance trade stream for {symbol}")
      async for message in websocket:
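
The websockets change is an optional-import pattern: bind `connect` at import time and keep both names defined when the dependency is missing, so the check at call time stays cheap. A reduced sketch; the `stream` coroutine and its `ws_url` argument are placeholders, not code from this repository:

try:
    import websockets
    from websockets.client import connect as websockets_connect
except ImportError:
    # Fallback for environments where websockets is not available
    websockets = None
    websockets_connect = None

async def stream(ws_url: str):
    if websockets is None or websockets_connect is None:
        raise ImportError("websockets module not available")
    async with websockets_connect(ws_url) as websocket:
        async for message in websocket:
            print(message)  # handle the raw exchange message here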

main.py

@@ -142,6 +142,19 @@ def start_web_ui(port=8051):
  config = get_config()
  data_provider = DataProvider()
+ # Start real-time streaming for BOM caching (non-blocking)
+ try:
+     import threading
+     def start_streaming():
+         import asyncio
+         asyncio.run(data_provider.start_real_time_streaming())
+     streaming_thread = threading.Thread(target=start_streaming, daemon=True)
+     streaming_thread.start()
+     logger.info("[SUCCESS] Real-time streaming thread started for dashboard")
+ except Exception as e:
+     logger.warning(f"[WARNING] Dashboard streaming setup failed: {e}")
  # Load model registry for enhanced features
  try:
      from models import get_model_registry
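
The dashboard start-up runs the streaming coroutine on a daemon thread with its own event loop, so the web UI is never blocked. A minimal sketch of that arrangement; the coroutine body below is a stand-in for `data_provider.start_real_time_streaming()`:

import asyncio
import threading

async def start_real_time_streaming():
    # stand-in for the provider's streaming coroutine
    while True:
        await asyncio.sleep(1.0)

def start_streaming():
    # asyncio.run() creates and owns a fresh event loop on this thread
    asyncio.run(start_real_time_streaming())

streaming_thread = threading.Thread(target=start_streaming, daemon=True)
streaming_thread.start()  # daemon=True: does not block interpreter shutdown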

@@ -125,8 +125,16 @@ except ImportError:
  outputs = self.model(X)
  probs = F.softmax(outputs, dim=1)
- pred_class = torch.argmax(probs, dim=1).numpy()
- pred_proba = probs.numpy()
+ # Ensure proper tensor conversion to avoid scalar conversion errors
+ pred_class = torch.argmax(probs, dim=1).detach().cpu().numpy()
+ pred_proba = probs.detach().cpu().numpy()
+ # Handle single batch case - ensure scalars are properly extracted
+ if pred_class.ndim > 0 and pred_class.size == 1:
+     pred_class = pred_class.item()  # Convert to Python scalar
+ if pred_proba.ndim > 1 and pred_proba.shape[0] == 1:
+     pred_proba = pred_proba[0]  # Remove batch dimension
+ logger.debug(f"Fallback CNN prediction: class={pred_class}, proba_shape={pred_proba.shape}")
  return pred_class, pred_proba
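
For reference, the conversion chain the fallback fix relies on, in isolation: detach from the autograd graph, move to CPU, convert to numpy, then unwrap the single-sample batch. The toy logits below are only for illustration:

import torch
import torch.nn.functional as F

logits = torch.randn(1, 3, requires_grad=True)  # single-sample batch, 3 classes
probs = F.softmax(logits, dim=1)

pred_class = torch.argmax(probs, dim=1).detach().cpu().numpy()  # shape (1,)
pred_proba = probs.detach().cpu().numpy()                       # shape (1, 3)

if pred_class.ndim > 0 and pred_class.size == 1:
    pred_class = pred_class.item()  # plain Python int
if pred_proba.ndim > 1 and pred_proba.shape[0] == 1:
    pred_proba = pred_proba[0]      # drop the batch dimension -> shape (3,)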