device fix, TZ fix

This commit is contained in:
Dobromir Popov
2025-07-27 22:13:28 +03:00
parent 9e1684f9f8
commit 368c49df50
6 changed files with 194 additions and 163 deletions

View File

@@ -1056,7 +1056,55 @@ class DQNAgent:
if isinstance(state, torch.Tensor):
state = state.detach().cpu().numpy()
elif not isinstance(state, np.ndarray):
state = np.array(state, dtype=np.float32)
# Check if state is a dict or complex object
if isinstance(state, dict):
logger.error(f"State is a dict: {state}")
# Extract numerical values from dict if possible
if 'features' in state:
state = state['features']
elif 'state' in state:
state = state['state']
else:
# Try to extract all numerical values
numerical_values = []
for key, value in state.items():
if isinstance(value, (int, float)):
numerical_values.append(float(value))
elif isinstance(value, (list, np.ndarray)):
try:
# Handle nested structures safely
flattened = np.array(value).flatten()
for x in flattened:
if isinstance(x, (int, float)):
numerical_values.append(float(x))
elif hasattr(x, 'item'): # numpy scalar
numerical_values.append(float(x.item()))
except (ValueError, TypeError):
continue
elif isinstance(value, dict):
# Recursively extract from nested dicts
try:
nested_values = self._extract_numeric_from_dict(value)
numerical_values.extend(nested_values)
except Exception:
continue
if numerical_values:
state = np.array(numerical_values, dtype=np.float32)
else:
logger.error("No numerical values found in state dict, using default state")
expected_size = getattr(self, 'state_size', 403)
if isinstance(expected_size, tuple):
expected_size = np.prod(expected_size)
return np.zeros(int(expected_size), dtype=np.float32)
else:
try:
state = np.array(state, dtype=np.float32)
except (ValueError, TypeError) as e:
logger.error(f"Cannot convert state to numpy array: {type(state)}, {e}")
expected_size = getattr(self, 'state_size', 403)
if isinstance(expected_size, tuple):
expected_size = np.prod(expected_size)
return np.zeros(int(expected_size), dtype=np.float32)
# Flatten if multi-dimensional
if state.ndim > 1:
@@ -1762,3 +1810,33 @@ class DQNAgent:
except:
return 0.0
def _extract_numeric_from_dict(self, data_dict):
"""Recursively extract all numeric values from a dictionary"""
numeric_values = []
try:
for key, value in data_dict.items():
if isinstance(value, (int, float)):
numeric_values.append(float(value))
elif isinstance(value, (list, np.ndarray)):
try:
flattened = np.array(value).flatten()
for x in flattened:
if isinstance(x, (int, float)):
numeric_values.append(float(x))
elif hasattr(x, 'item'): # numpy scalar
numeric_values.append(float(x.item()))
except (ValueError, TypeError):
continue
elif isinstance(value, dict):
# Recursively extract from nested dicts
nested_values = self._extract_numeric_from_dict(value)
numeric_values.extend(nested_values)
elif isinstance(value, torch.Tensor):
try:
numeric_values.append(float(value.item()))
except Exception:
continue
except Exception as e:
logger.debug(f"Error extracting numeric values from dict: {e}")
return numeric_values
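
A minimal, self-contained sketch (with made-up example data) of what the new recursive helper does to a dict-shaped state: scalars, array-likes and nested dicts are flattened into a single float vector, while non-numeric values are skipped.

import numpy as np

def extract_numeric(d):
    """Simplified stand-in for DQNAgent._extract_numeric_from_dict."""
    values = []
    for value in d.values():
        if isinstance(value, (int, float)):
            values.append(float(value))
        elif isinstance(value, (list, np.ndarray)):
            try:
                # Flatten array-likes; bail out on anything non-numeric
                values.extend(float(x) for x in np.asarray(value, dtype=float).ravel())
            except (ValueError, TypeError):
                continue
        elif isinstance(value, dict):
            values.extend(extract_numeric(value))   # recurse into nested dicts
    return values

state = {'price': 3200.5, 'volumes': [1.2, 0.8], 'meta': {'rsi': 55.0, 'symbol': 'ETH'}}
print(np.array(extract_numeric(state), dtype=np.float32))   # -> [3200.5, 1.2, 0.8, 55.0]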

View File

@@ -1478,7 +1478,11 @@ class DataProvider:
# Check for cached data and determine what we need to fetch
cached_data = self._load_monthly_data_from_cache(symbol)
end_time = datetime.utcnow()
import pytz
utc = pytz.UTC
sofia_tz = pytz.timezone('Europe/Sofia')
end_time = datetime.utcnow().replace(tzinfo=utc).astimezone(sofia_tz)
start_time = end_time - timedelta(days=30)
if cached_data is not None and not cached_data.empty:
@@ -1496,6 +1500,12 @@ class DataProvider:
# Check if we need to fill gaps
gap_start = cache_end + timedelta(minutes=1)
# Ensure gap_start has same timezone as end_time for comparison
if gap_start.tzinfo is None:
gap_start = sofia_tz.localize(gap_start)
elif gap_start.tzinfo != sofia_tz:
gap_start = gap_start.astimezone(sofia_tz)
if gap_start < end_time:
# Need to fill gap from cache_end to now
logger.info(f"Filling gap from {gap_start} to {end_time}")
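
A short sketch of why the localization above matters: Python raises a TypeError when a naive datetime is compared with a tz-aware one, so a naive gap_start loaded from cache has to be localized to Europe/Sofia before the comparison with the tz-aware end_time. The literal date below is illustrative only.

from datetime import datetime
import pytz

sofia_tz = pytz.timezone('Europe/Sofia')
end_time = datetime.utcnow().replace(tzinfo=pytz.UTC).astimezone(sofia_tz)

gap_start = datetime(2025, 7, 27, 21, 0)    # naive timestamp, as read from cache
# gap_start < end_time here would raise: TypeError: can't compare offset-naive and offset-aware datetimes
gap_start = sofia_tz.localize(gap_start)    # make both sides tz-aware
print(gap_start < end_time)
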
@@ -1573,8 +1583,10 @@ class DataProvider:
'taker_buy_quote', 'ignore'
])
# Process columns
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms')
# Process columns with proper timezone handling
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms', utc=True)
# Convert from UTC to Europe/Sofia timezone to match cached data
df['timestamp'] = df['timestamp'].dt.tz_convert('Europe/Sofia')
for col in ['open', 'high', 'low', 'close', 'volume']:
df[col] = df[col].astype(float)
@@ -1644,8 +1656,10 @@ class DataProvider:
'taker_buy_quote', 'ignore'
])
# Process columns
batch_df['timestamp'] = pd.to_datetime(batch_df['timestamp'], unit='ms')
# Process columns with proper timezone handling
batch_df['timestamp'] = pd.to_datetime(batch_df['timestamp'], unit='ms', utc=True)
# Convert from UTC to Europe/Sofia timezone to match cached data
batch_df['timestamp'] = batch_df['timestamp'].dt.tz_convert('Europe/Sofia')
for col in ['open', 'high', 'low', 'close', 'volume']:
batch_df[col] = batch_df[col].astype(float)
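
An illustration of the timestamp handling introduced above, using a made-up kline row: Binance returns millisecond epochs, which are parsed as UTC and then converted to Europe/Sofia so freshly fetched candles line up with the tz-aware cached data.

import pandas as pd

df = pd.DataFrame({'timestamp': [1753643608000], 'close': [3200.5]})
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms', utc=True)   # epoch ms -> tz-aware UTC
df['timestamp'] = df['timestamp'].dt.tz_convert('Europe/Sofia')          # UTC -> Europe/Sofia
print(df['timestamp'].iloc[0])   # 2025-07-27 22:13:28+03:00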

View File

@@ -2377,11 +2377,10 @@ class TradingOrchestrator:
return None
async def _train_cnn_model(self, model, model_name: str, record: Dict, prediction: Dict, reward: float) -> bool:
"""Train CNN model with training samples"""
"""Train CNN model directly (no adapter)"""
try:
# Direct CNN model training (no adapter)
if hasattr(self, 'cnn_model') and self.cnn_model and 'cnn' in model_name.lower():
try:
# Direct CNN model training (no adapter)
if hasattr(self, 'cnn_model') and self.cnn_model and 'cnn' in model_name.lower():
symbol = record.get('symbol', 'ETH/USDT')
actual_action = prediction['action']
@@ -2442,46 +2441,35 @@ class TradingOrchestrator:
logger.warning(f"No model input available for CNN training")
return False
except Exception as e:
logger.error(f"Error in direct CNN training: {e}")
return False
# Try direct model training methods
# Try model interface training methods
elif hasattr(model, 'add_training_sample'):
symbol = record.get('symbol', 'ETH/USDT')
actual_action = prediction['action']
model.add_training_sample(symbol, actual_action, reward)
logger.debug(f"Added training sample to {model_name}: action={actual_action}, reward={reward:.3f}")
# Trigger training if batch size is met
if hasattr(model, 'train') and hasattr(model, 'training_data') and hasattr(model, 'batch_size'):
if len(model.training_data) >= model.batch_size:
# If model has train method, trigger training
if hasattr(model, 'train') and callable(getattr(model, 'train')):
try:
training_start_time = time.time()
training_results = model.train(epochs=1)
training_duration_ms = (time.time() - training_start_time) * 1000
if training_results and 'loss' in training_results:
current_loss = training_results['loss']
accuracy = training_results.get('accuracy', 0.0)
# Validate training results
if accuracy >= 0.99:
logger.warning(f"CNN training shows suspiciously high accuracy: {accuracy:.4f} - possible overfitting")
else:
self.update_model_loss(model_name, current_loss)
self.update_model_loss(model_name, current_loss)
self._update_model_training_statistics(model_name, current_loss, training_duration_ms)
logger.debug(f"CNN training completed: loss={current_loss:.4f}, time={training_duration_ms:.1f}ms")
return True
logger.debug(f"Model {model_name} training completed: loss={current_loss:.4f}")
else:
# Still update training statistics even if no loss returned
self._update_model_training_statistics(model_name, training_duration_ms=training_duration_ms)
return True # Sample added successfully
except Exception as e:
logger.error(f"Error training {model_name}: {e}")
# Try basic training method for EnhancedCNN
return True
# Basic acknowledgment for other training methods
elif hasattr(model, 'train'):
logger.debug(f"Using basic train method for {model_name}")
# For now, just acknowledge that training was attempted
logger.debug(f"CNN model {model_name} training acknowledged (basic train method available)")
return True
@@ -2622,7 +2610,8 @@ class TradingOrchestrator:
# Convert to tensor and ensure proper device placement
device = next(self.cnn_model.parameters()).device
features_tensor = torch.tensor(features, dtype=torch.float32, device=device)
import torch as torch_module # Explicit import to avoid scoping issues
features_tensor = torch_module.tensor(features, dtype=torch_module.float32, device=device)
# Ensure batch dimension
if features_tensor.dim() == 1:
@@ -2632,12 +2621,12 @@ class TradingOrchestrator:
self.cnn_model.eval()
# Get prediction from CNN model
with torch.no_grad():
with torch_module.no_grad():
q_values, extrema_pred, price_pred, features_refined, advanced_pred = self.cnn_model(features_tensor)
# Convert to probabilities using softmax
action_probs = torch.softmax(q_values, dim=1)
action_idx = torch.argmax(action_probs, dim=1).item()
action_probs = torch_module.softmax(q_values, dim=1)
action_idx = torch_module.argmax(action_probs, dim=1).item()
confidence = float(action_probs[0, action_idx].item())
# Map action index to action string
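
The torch_module alias above works around a Python scoping rule rather than a PyTorch issue: a function-level "import torch" makes torch a local name for the entire function body, so any earlier use of torch in that function raises UnboundLocalError. The toy functions below (hypothetical names) show the failure mode and the fix.

import torch

def broken():
    t = torch.tensor([1.0])    # UnboundLocalError: the import below makes 'torch' local to this function
    import torch
    return t

import torch as torch_module

def fixed():
    t = torch_module.tensor([1.0])   # module-level alias is unaffected by the local import
    import torch
    return t
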
@@ -2679,9 +2668,9 @@ class TradingOrchestrator:
import traceback
traceback.print_exc()
# Fallback to direct model inference using BaseDataInput (unified approach)
# Remove this fallback - direct CNN inference should work above
if not predictions:
logger.warning(f"CNN adapter failed for {symbol}, trying direct model inference with BaseDataInput")
logger.debug(f"No CNN predictions generated for {symbol} - this is expected if CNN model is not properly initialized")
try:
# Use the already available base_data (no need to rebuild)
@@ -2695,9 +2684,8 @@ class TradingOrchestrator:
# Use the model's act method with unified input
if hasattr(model.model, 'act'):
# Convert to tensor format expected by enhanced_cnn
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
features_tensor = torch.tensor(feature_vector, dtype=torch.float32, device=device)
device = torch_module.device('cuda' if torch_module.cuda.is_available() else 'cpu')
features_tensor = torch_module.tensor(feature_vector, dtype=torch_module.float32, device=device)
# Call the model's act method
action_idx, confidence, action_probs = model.model.act(features_tensor, explore=False)
@@ -3263,9 +3251,6 @@ class TradingOrchestrator:
if not self.decision_fusion_enabled:
return
import torch
import torch.nn as nn
# Create decision fusion network
class DecisionFusionNet(nn.Module):
def __init__(self, input_size=32, hidden_size=64):
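
The hunk above only shows the constructor signature; purely as a hypothetical sketch (the real layers are not visible in this diff), a fusion network with input_size=32 and hidden_size=64 could be a small MLP over the BUY/SELL/HOLD decision:

import torch
import torch.nn as nn

class DecisionFusionNet(nn.Module):
    def __init__(self, input_size=32, hidden_size=64):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, 3),   # assumed 3-way head: BUY / SELL / HOLD
        )

    def forward(self, x):
        return torch.softmax(self.net(x), dim=-1)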

View File

@@ -11,7 +11,7 @@ import os
# Add the project root to the path
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from core.enhanced_cnn_adapter import EnhancedCNNAdapter
from NN.models.enhanced_cnn import EnhancedCNN
from core.data_models import BaseDataInput, OHLCVBar
# Configure logging

View File

@@ -1,102 +0,0 @@
#!/usr/bin/env python3
"""
Test Timezone Fix
This script tests that historical data timestamps are properly converted to the Europe/Sofia timezone.
"""
import asyncio
import pandas as pd
from datetime import datetime
from core.data_provider import DataProvider
async def test_timezone_fix():
"""Test the timezone conversion fix"""
print("=== Testing Timezone Fix ===")
# Initialize data provider
print("1. Initializing data provider...")
data_provider = DataProvider()
# Wait for initialization
await asyncio.sleep(2)
# Test different timeframes
timeframes = ['1m', '1h', '1d']
symbol = 'ETH/USDT'
for timeframe in timeframes:
print(f"\n2. Testing {timeframe} data for {symbol}:")
# Get historical data
df = data_provider.get_historical_data(symbol, timeframe, limit=5)
if df is not None and not df.empty:
print(f" ✅ Got {len(df)} candles")
# Check timezone
if 'timestamp' in df.columns:
first_timestamp = df['timestamp'].iloc[0]
last_timestamp = df['timestamp'].iloc[-1]
print(f" First timestamp: {first_timestamp}")
print(f" Last timestamp: {last_timestamp}")
# Check if timezone is Europe/Sofia
if hasattr(first_timestamp, 'tz') and first_timestamp.tz is not None:
timezone_str = str(first_timestamp.tz)
if 'Europe/Sofia' in timezone_str or 'EET' in timezone_str or 'EEST' in timezone_str:
print(f" ✅ Timezone is correct: {timezone_str}")
else:
print(f" ❌ Timezone is incorrect: {timezone_str}")
else:
print(" ❌ No timezone information found")
# Show time difference from UTC
if hasattr(first_timestamp, 'utcoffset') and first_timestamp.utcoffset() is not None:
offset_hours = first_timestamp.utcoffset().total_seconds() / 3600
print(f" UTC offset: {offset_hours:+.0f} hours")
if offset_hours == 2 or offset_hours == 3: # EET (+2) or EEST (+3)
print(" ✅ UTC offset is correct for Europe/Sofia")
else:
print(f" ❌ UTC offset is incorrect: {offset_hours:+.0f} hours")
# Show sample data
print(" Sample data:")
for i in range(min(3, len(df))):
row = df.iloc[i]
print(f" {row['timestamp']}: O={row['open']:.2f} H={row['high']:.2f} L={row['low']:.2f} C={row['close']:.2f}")
else:
print(" ❌ No timestamp column found")
else:
print(f" ❌ No data available for {timeframe}")
# Test current time comparison
print(f"\n3. Current time comparison:")
current_utc = datetime.utcnow()
current_sofia = datetime.now()
print(f" Current UTC time: {current_utc}")
print(f" Current local time: {current_sofia}")
# Calculate expected offset
import pytz
sofia_tz = pytz.timezone('Europe/Sofia')
current_sofia_tz = datetime.now(sofia_tz)
offset_hours = current_sofia_tz.utcoffset().total_seconds() / 3600
print(f" Europe/Sofia current time: {current_sofia_tz}")
print(f" Current UTC offset: {offset_hours:+.0f} hours")
if offset_hours == 2:
print(" ✅ Currently in EET (Eastern European Time)")
elif offset_hours == 3:
print(" ✅ Currently in EEST (Eastern European Summer Time)")
else:
print(f" ❌ Unexpected offset: {offset_hours:+.0f} hours")
print("\n✅ Timezone fix test completed!")
if __name__ == "__main__":
asyncio.run(test_timezone_fix())
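
For reference, the core check the deleted script performed can be reproduced in a few lines (a sketch, not a restored file): confirm that the current Europe/Sofia offset is +2 (EET) or +3 (EEST).

from datetime import datetime
import pytz

now_sofia = datetime.now(pytz.timezone('Europe/Sofia'))
offset_hours = now_sofia.utcoffset().total_seconds() / 3600
assert offset_hours in (2.0, 3.0), f"unexpected UTC offset: {offset_hours:+.0f}h"
print(f"Europe/Sofia now: {now_sofia} (UTC{offset_hours:+.0f}, {'EEST' if offset_hours == 3 else 'EET'})")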

View File

@@ -5851,20 +5851,76 @@ class CleanTradingDashboard:
logger.debug(f"Base data input created successfully for {symbol}")
# Make prediction using CNN adapter
model_output = self.cnn_adapter.predict(base_data_input)
# Make prediction using CNN model directly (EnhancedCNN uses act method)
if hasattr(self.cnn_adapter, 'act'):
# Use the act method for EnhancedCNN
features = base_data_input.get_feature_vector()
# Convert to dictionary for dashboard use
prediction = {
'action': model_output.predictions.get('action', 'HOLD'),
'confidence': model_output.confidence,
'buy_probability': model_output.predictions.get('buy_probability', 0.0),
'sell_probability': model_output.predictions.get('sell_probability', 0.0),
'hold_probability': model_output.predictions.get('hold_probability', 0.0),
'timestamp': model_output.timestamp,
'hidden_states': model_output.hidden_states,
'metadata': model_output.metadata
}
# Convert to tensor and ensure proper device placement
import torch
device = next(self.cnn_adapter.parameters()).device
features_tensor = torch.tensor(features, dtype=torch.float32, device=device)
# Ensure batch dimension
if features_tensor.dim() == 1:
features_tensor = features_tensor.unsqueeze(0)
# Set model to evaluation mode
self.cnn_adapter.eval()
# Get prediction from CNN model
with torch.no_grad():
q_values, extrema_pred, price_pred, features_refined, advanced_pred = self.cnn_adapter(features_tensor)
# Convert to probabilities using softmax
action_probs = torch.softmax(q_values, dim=1)
action_idx = torch.argmax(action_probs, dim=1).item()
confidence = float(action_probs[0, action_idx].item())
# Map action index to action string
actions = ['BUY', 'SELL', 'HOLD']
action = actions[action_idx]
# Create probabilities dictionary
probabilities = {
'BUY': float(action_probs[0, 0].item()),
'SELL': float(action_probs[0, 1].item()),
'HOLD': float(action_probs[0, 2].item())
}
# Extract price predictions if available
price_prediction = None
if price_pred is not None:
price_prediction = price_pred.squeeze(0).cpu().numpy().tolist()
prediction = {
'action': action,
'confidence': confidence,
'buy_probability': probabilities['BUY'],
'sell_probability': probabilities['SELL'],
'hold_probability': probabilities['HOLD'],
'timestamp': datetime.now(),
'hidden_states': features_refined.squeeze(0).cpu().numpy().tolist() if features_refined is not None else None,
'metadata': {
'price_prediction': price_prediction,
'extrema_prediction': extrema_pred.squeeze(0).cpu().numpy().tolist() if extrema_pred is not None else None
}
}
else:
# Fallback for other CNN models that might have predict method
model_output = self.cnn_adapter.predict(base_data_input)
# Convert to dictionary for dashboard use
prediction = {
'action': model_output.predictions.get('action', 'HOLD'),
'confidence': model_output.confidence,
'buy_probability': model_output.predictions.get('buy_probability', 0.0),
'sell_probability': model_output.predictions.get('sell_probability', 0.0),
'hold_probability': model_output.predictions.get('hold_probability', 0.0),
'timestamp': model_output.timestamp,
'hidden_states': model_output.hidden_states,
'metadata': model_output.metadata
}
logger.debug(f"CNN prediction for {symbol}: {prediction['action']} ({prediction['confidence']:.3f})")
return prediction