new bot 2
@@ -133,6 +133,7 @@ class Transformer(nn.Module):
     def __init__(self, input_dim, d_model, num_heads, num_layers, d_ff, dropout=0.1):
         super(Transformer, self).__init__()
 
+        self.input_dim = input_dim
         self.candle_embedding = nn.Linear(input_dim, d_model)
         self.tick_embedding = nn.Linear(2, d_model)  # Each tick has price and quantity
 
@@ -152,10 +153,11 @@ class Transformer(nn.Module):
         self.future_ticks_decoder = Decoder(num_layers, d_model, num_heads, d_ff, dropout)
         self.future_ticks_projection = nn.Linear(d_model, 60)  # 30 ticks * (price, quantity) = 60
 
-    def forward(self, candle_data, tick_data, future_candle_mask, future_ticks_mask):
-        # candle_data: [batch_size, seq_len, input_dim]
-        # tick_data: [batch_size, tick_seq_len, 2]
-
+    def forward(self, candle_data, tick_data, future_candle_mask=None, future_ticks_mask=None):
+        # Print shapes for debugging
+        # print(f"Candle data shape: {candle_data.shape}, Expected input dim: {self.input_dim}")
+
         # Embed candle data
         candle_embedded = self.candle_embedding(candle_data)
         candle_embedded = self.positional_encoding(candle_embedded)  # Add positional info
@@ -189,7 +191,7 @@ class Transformer(nn.Module):
 
 # Example instantiation (adjust parameters for ~1B parameters)
 if __name__ == '__main__':
-    input_dim = 6 + len([5, 10, 20, 60, 120, 200])  # OHLCV + EMAs
+    input_dim = 11  # Changed from 12 to 11 to match your data
     d_model = 512  # Hidden dimension
     num_heads = 8
     num_layers = 6  # Number of encoder/decoder layers
@@ -220,3 +222,22 @@ if __name__ == '__main__':
     print("Future Candle Prediction Shape:", future_candle_pred.shape)  # Expected: [batch_size, 1, 5]
     print("Future Volume Prediction Shape:", future_volume_pred.shape)  # Expected: [batch_size, 1, 1]
     print("Future Ticks Prediction Shape:", future_ticks_pred.shape)  # Expected: [batch_size, 30, 2]
+
+# Make sure to use this when instantiating the model
+def create_model(input_dim=11):
+    d_model = 512  # Hidden dimension
+    num_heads = 8
+    num_layers = 6  # Number of encoder/decoder layers
+    d_ff = 2048  # Feedforward dimension
+    dropout = 0.1
+
+    model = Transformer(
+        input_dim=input_dim,
+        d_model=d_model,
+        num_heads=num_heads,
+        num_layers=num_layers,
+        d_ff=d_ff,
+        dropout=dropout
+    )
+
+    return model
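
A minimal usage sketch of the new create_model helper, not part of the commit: it assumes the file above is importable as model.py, that forward returns the three predictions printed in the __main__ block, and it relies on the mask arguments now defaulting to None; batch size and sequence lengths are arbitrary.

    import torch
    from model import create_model  # assumed module name

    model = create_model(input_dim=11)

    batch_size, seq_len, tick_seq_len = 4, 128, 256
    candle_data = torch.randn(batch_size, seq_len, 11)    # [batch_size, seq_len, input_dim]
    tick_data = torch.randn(batch_size, tick_seq_len, 2)  # [batch_size, tick_seq_len, 2], (price, quantity)

    # Masks can be omitted now that forward() defaults them to None.
    future_candle_pred, future_volume_pred, future_ticks_pred = model(candle_data, tick_data)

    print(future_candle_pred.shape)  # expected: [batch_size, 1, 5]
    print(future_volume_pred.shape)  # expected: [batch_size, 1, 1]
    print(future_ticks_pred.shape)   # expected: [batch_size, 30, 2]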