COB integration and refactoring

Dobromir Popov
2025-06-25 02:48:00 +03:00
parent afefcea308
commit e57c6df7e1
6 changed files with 564 additions and 128 deletions


@@ -34,6 +34,7 @@ import os
# Local imports
from .cob_integration import COBIntegration
from .trading_executor import TradingExecutor
from NN.models.cob_rl_model import MassiveRLNetwork, COBRLModelInterface
logger = logging.getLogger(__name__)
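With this commit the network class lives in NN.models.cob_rl_model and is pulled in through the import above. A minimal sketch of constructing the relocated model (the constructor defaults are copied from the class definition in the removed hunk below; COBRLModelInterface is part of the same import, but its API is not visible in this diff, so only the raw network is shown):

import torch
from NN.models.cob_rl_model import MassiveRLNetwork

# Defaults mirror the removed in-file definition: 2000-dim COB features,
# 4096 hidden units, 12 transformer encoder layers (1B+ parameters)
model = MassiveRLNetwork(input_size=2000, hidden_size=4096, num_layers=12)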
@@ -90,130 +91,8 @@ class TradeSignal:
    signals_count: int
    reason: str
class MassiveRLNetwork(nn.Module):
    """
    Massive 1B+ parameter RL network optimized for real-time COB trading
    """

    def __init__(self, input_size: int = 2000, hidden_size: int = 4096, num_layers: int = 12):
        super(MassiveRLNetwork, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # Massive input processing layers
        self.input_projection = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.LayerNorm(hidden_size),
            nn.GELU(),
            nn.Dropout(0.1)
        )

        # Massive transformer-style encoder layers
        self.encoder_layers = nn.ModuleList([
            nn.TransformerEncoderLayer(
                d_model=hidden_size,
                nhead=32,  # Large number of attention heads
                dim_feedforward=hidden_size * 4,  # 16K feedforward
                dropout=0.1,
                activation='gelu',
                batch_first=True
            ) for _ in range(num_layers)
        ])

        # Market regime understanding layers
        self.regime_encoder = nn.Sequential(
            nn.Linear(hidden_size, hidden_size * 2),
            nn.LayerNorm(hidden_size * 2),
            nn.GELU(),
            nn.Dropout(0.1),
            nn.Linear(hidden_size * 2, hidden_size),
            nn.LayerNorm(hidden_size),
            nn.GELU()
        )

        # Price prediction head (main RL objective)
        self.price_head = nn.Sequential(
            nn.Linear(hidden_size, hidden_size // 2),
            nn.LayerNorm(hidden_size // 2),
            nn.GELU(),
            nn.Dropout(0.2),
            nn.Linear(hidden_size // 2, hidden_size // 4),
            nn.LayerNorm(hidden_size // 4),
            nn.GELU(),
            nn.Linear(hidden_size // 4, 3)  # DOWN, SIDEWAYS, UP
        )

        # Value estimation head for RL
        self.value_head = nn.Sequential(
            nn.Linear(hidden_size, hidden_size // 2),
            nn.LayerNorm(hidden_size // 2),
            nn.GELU(),
            nn.Dropout(0.2),
            nn.Linear(hidden_size // 2, hidden_size // 4),
            nn.LayerNorm(hidden_size // 4),
            nn.GELU(),
            nn.Linear(hidden_size // 4, 1)
        )

        # Confidence head
        self.confidence_head = nn.Sequential(
            nn.Linear(hidden_size, hidden_size // 4),
            nn.LayerNorm(hidden_size // 4),
            nn.GELU(),
            nn.Linear(hidden_size // 4, 1),
            nn.Sigmoid()
        )

        # Initialize weights
        self.apply(self._init_weights)

        # Calculate total parameters
        total_params = sum(p.numel() for p in self.parameters())
        logger.info(f"Massive RL Network initialized with {total_params:,} parameters")

    def _init_weights(self, module):
        """Initialize weights with proper scaling for large models"""
        if isinstance(module, nn.Linear):
            torch.nn.init.xavier_uniform_(module.weight)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            torch.nn.init.ones_(module.weight)
            torch.nn.init.zeros_(module.bias)

    def forward(self, x):
        """Forward pass through massive network"""
        batch_size = x.size(0)

        # Project input
        x = self.input_projection(x)  # [batch, hidden_size]

        # Add sequence dimension for transformer
        x = x.unsqueeze(1)  # [batch, 1, hidden_size]

        # Pass through transformer layers
        for layer in self.encoder_layers:
            x = layer(x)

        # Remove sequence dimension
        x = x.squeeze(1)  # [batch, hidden_size]

        # Apply regime encoding
        x = self.regime_encoder(x)

        # Generate predictions
        price_logits = self.price_head(x)
        value = self.value_head(x)
        confidence = self.confidence_head(x)

        return {
            'price_logits': price_logits,
            'value': value,
            'confidence': confidence,
            'features': x  # Hidden features for analysis
        }

# MassiveRLNetwork is now imported from NN.models.cob_rl_model
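For reference, a quick smoke-test sketch of the relocated network's forward pass. The output keys ('price_logits', 'value', 'confidence', 'features') come from the forward() shown above; the reduced hidden_size and num_layers here are assumptions to keep the example cheap to run (256 is still divisible by the 32 attention heads):

import torch
from NN.models.cob_rl_model import MassiveRLNetwork

# Shrunk hyperparameters for a fast local check; production defaults are 4096/12
model = MassiveRLNetwork(input_size=2000, hidden_size=256, num_layers=2)
model.eval()

features = torch.randn(4, 2000)  # dummy batch of 2000-dim COB feature vectors
with torch.no_grad():
    out = model(features)

direction = out['price_logits'].argmax(dim=-1)  # 0=DOWN, 1=SIDEWAYS, 2=UP
print(direction.tolist(), out['value'].shape, out['confidence'].shape)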
class RealtimeRLCOBTrader:
"""