#!/usr/bin/env python3
import asyncio
import os
import time
from collections import deque

import ccxt.async_support as ccxt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

# -------------------------------------
# Neural Network Architecture Definition
# -------------------------------------
class TradingModel(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim):
        super().__init__()
        # A minimal feed-forward network: two hidden layers with ReLU activations.
        self.net = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, output_dim),
        )

    def forward(self, x):
        return self.net(x)
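
# Example (illustrative, using the same dimensions as main_backtest() below):
#   model = TradingModel(input_dim=8, hidden_dim=128, output_dim=3)
#   logits = model(torch.zeros(1, 8))  # shape (1, 3): one score each for SELL/HOLD/BUY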

# -------------------------------------
# Replay Buffer for Experience Storage
# -------------------------------------
class ReplayBuffer:
    def __init__(self, capacity=10000):
        self.buffer = deque(maxlen=capacity)

    def add(self, experience):
        self.buffer.append(experience)

    def sample(self, batch_size):
        indices = np.random.choice(len(self.buffer), size=batch_size, replace=False)
        return [self.buffer[i] for i in indices]

    def __len__(self):
        return len(self.buffer)
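
# Each stored experience is a (state, reward, next_state, done) tuple (see
# train_on_historical_data below). sample() draws without replacement, so callers
# must ensure len(buffer) >= batch_size; ContinuousRLAgent.train_step checks this.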

# -------------------------------------
# A Simple Indicator and Feature Preparation Function
# -------------------------------------
def compute_indicators(candle, additional_data):
    """
    Combine OHLCV candle data with extra indicator information.
    Base features: open, high, low, close, volume.
    Additional channels (e.g. sentiment score, news volume, etc.) are appended
    in the dict's insertion order, so callers should supply them consistently.
    """
    features = [
        candle.get('open', 0.0),
        candle.get('high', 0.0),
        candle.get('low', 0.0),
        candle.get('close', 0.0),
        candle.get('volume', 0.0),
    ]
    # Append additional indicators (e.g., simulated sentiment here).
    for value in additional_data.values():
        features.append(value)
    return np.array(features, dtype=np.float32)
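
# Example (illustrative): with the simulated sentiment dict built in
# BacktestEnvironment.get_state, the returned vector has 8 entries,
#   [open, high, low, close, volume, sentiment_score, news_volume, social_engagement],
# which matches input_dim = 5 + 3 in main_backtest().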

# -------------------------------------
# RL Agent that Uses the Neural Network
# -------------------------------------
class ContinuousRLAgent:
    def __init__(self, model, optimizer, replay_buffer, batch_size=32):
        self.model = model
        self.optimizer = optimizer
        self.replay_buffer = replay_buffer
        self.batch_size = batch_size
        self.loss_fn = nn.MSELoss()  # Using a simple MSE loss for demonstration.

    def act(self, state):
        """
        Given state features, output an action.
        Mapping: 0 -> SELL, 1 -> HOLD, 2 -> BUY.
        """
        state_tensor = torch.tensor(state, dtype=torch.float32).unsqueeze(0)
        with torch.no_grad():
            output = self.model(state_tensor)
        action = torch.argmax(output, dim=1).item()
        return action
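
    # Note: act() is purely greedy (argmax over the three outputs). A typical RL
    # setup would add exploration, e.g. an epsilon-greedy sketch (not used here):
    #   if np.random.rand() < epsilon:
    #       action = np.random.randint(3)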

    def train_step(self):
        """
        Sample a batch from the replay buffer and update the network.
        (Note: a real RL algorithm would use a more complex target calculation.)
        """
        if len(self.replay_buffer) < self.batch_size:
            return  # Not enough samples yet.

        batch = self.replay_buffer.sample(self.batch_size)
        states, rewards, next_states, dones = [], [], [], []
        for state, reward, next_state, done in batch:
            states.append(state)
            rewards.append(reward)
            next_states.append(next_state)
            dones.append(done)

        # Stack into a single ndarray first; building a tensor from a list of
        # ndarrays is slow and triggers a PyTorch warning.
        states_tensor = torch.tensor(np.array(states), dtype=torch.float32)
        targets_tensor = torch.tensor(rewards, dtype=torch.float32).unsqueeze(1)

        outputs = self.model(states_tensor)
        # For this simple demonstration we use the first output as the value estimate.
        predictions = outputs[:, 0].unsqueeze(1)
        loss = self.loss_fn(predictions, targets_tensor)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
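
# Illustrative only: a DQN-style update would instead regress the value of the chosen
# action toward a bootstrapped target such as
#   target = reward + gamma * max_a Q(next_state, a) * (1 - done)
# using the next_states and dones collected above; train_step() keeps a reward-only
# regression purely for demonstration.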

# -------------------------------------
# Historical Data Fetching Function
# -------------------------------------
async def fetch_historical_data(exchange, symbol, timeframe, since, end_time, batch_size=500):
    """
    Fetch historical OHLCV data for the given symbol and timeframe.
    The "since" and "end_time" parameters are in milliseconds.
    """
    candles = []
    since_ms = since
    while True:
        try:
            batch = await exchange.fetch_ohlcv(symbol, timeframe=timeframe, since=since_ms, limit=batch_size)
        except Exception as e:
            print("Error fetching historical data:", e)
            break
        if not batch:
            break
        # Convert each candle from a list to a dict, dropping anything past end_time.
        for c in batch:
            if c[0] > end_time:
                break
            candles.append({
                'timestamp': c[0],
                'open': c[1],
                'high': c[2],
                'low': c[3],
                'close': c[4],
                'volume': c[5],
            })
        last_timestamp = batch[-1][0]
        if last_timestamp >= end_time:
            break
        since_ms = last_timestamp + 1
    print(f"Fetched {len(candles)} candles.")
    return candles
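
# ccxt returns each OHLCV row as [timestamp, open, high, low, close, volume] with the
# timestamp in milliseconds, which is what the dict conversion above assumes. For the
# '1m' timeframe used below, a one-day window yields roughly 1440 candles.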

# -------------------------------------
# Backtest Environment Class Definition
# -------------------------------------
class BacktestEnvironment:
    def __init__(self, candles):
        self.candles = candles
        self.current_index = 0
        self.position = None  # Will hold a dict once a BUY is simulated.

    def reset(self):
        self.current_index = 0
        self.position = None
        return self.get_state(self.current_index)

    def get_state(self, index):
        candle = self.candles[index]
        # Simulate additional sentiment features.
        sentiment = {
            'sentiment_score': np.random.rand(),
            'news_volume': np.random.rand(),
            'social_engagement': np.random.rand(),
        }
        return compute_indicators(candle, sentiment)
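
    # Note: the sentiment values above are random placeholders, re-sampled on every
    # call, so two calls to get_state() with the same index return different feature
    # vectors; they stand in for real sentiment/news feeds.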

    def step(self, action):
        """
        Simulate a trading step.
        • Uses the current candle as state.
        • Decides on an action:
            - If not in a position and action is BUY (2), buy at the next candle's open.
            - If in a position and action is SELL (0), sell at the next candle's open.
            - Otherwise, no trade is executed.
        • Returns: (state, reward, next_state, done)
        """
        if self.current_index >= len(self.candles) - 1:
            # End of the historical data.
            return self.get_state(self.current_index), 0.0, None, True

        state = self.get_state(self.current_index)
        next_index = self.current_index + 1
        next_state = self.get_state(next_index)
        next_candle = self.candles[next_index]
        reward = 0.0

        # Action mapping: 0 -> SELL, 1 -> HOLD, 2 -> BUY.
        if self.position is None:
            if action == 2:  # BUY signal: enter a long position at the next candle's open.
                entry_price = next_candle['open']
                self.position = {'entry_price': entry_price, 'entry_index': self.current_index}
                # No immediate reward on entry.
        else:
            if action == 0:  # SELL signal: exit the long position.
                sell_price = next_candle['open']
                reward = sell_price - self.position['entry_price']
                self.position = None

        self.current_index = next_index
        done = (self.current_index >= len(self.candles) - 1)
        return state, reward, next_state, done
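
# Example (illustrative): if the agent BUYs when the next candle opens at 100.0 and
# later SELLs when a candle opens at 100.5, step() yields a reward of 0.5, i.e. the
# raw price difference for a single unit, with no fees or slippage modeled.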

# -------------------------------------
# Training Loop Over Historical Data (Backtest)
# -------------------------------------
def train_on_historical_data(env, rl_agent, num_epochs=10):
    """
    For each epoch, run through the historical data as one episode.
    At every step, let the agent decide an action and simulate a trade.
    The experience (state, reward, next_state, done) is stored and used to update the network.
    """
    for epoch in range(num_epochs):
        state = env.reset()
        done = False
        total_reward = 0.0
        steps = 0
        while not done:
            action = rl_agent.act(state)
            # step() returns the state the action was taken in as its first element;
            # we already hold it in `state`, so only the remaining values are needed.
            _, reward, next_state, done = env.step(action)
            if next_state is None:
                next_state = np.zeros_like(state)
            rl_agent.replay_buffer.add((state, reward, next_state, done))
            rl_agent.train_step()
            state = next_state
            total_reward += reward
            steps += 1
        print(f"Epoch {epoch+1}/{num_epochs} completed, total reward: {total_reward:.4f} over {steps} steps.")
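
# Illustrative extension (not implemented here): storing the chosen action as well,
#   rl_agent.replay_buffer.add((state, action, reward, next_state, done))
# would be needed for an action-value (Q-learning) style update in train_step().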

# -------------------------------------
# Main Asynchronous Function for Backtest Training
# -------------------------------------
async def main_backtest():
    # Define symbol, timeframe, and historical period.
    symbol = 'BTC/USDT'
    timeframe = '1m'
    now = int(time.time() * 1000)
    one_day_ms = 24 * 60 * 60 * 1000
    # For example, fetch a 1-day period from 2 days ago until 1 day ago.
    since = now - one_day_ms * 2
    end_time = now - one_day_ms

    # Initialize the exchange (using MEXC in this example).
    mexc_api_key = os.environ.get('MEXC_API_KEY', 'YOUR_API_KEY')
    mexc_api_secret = os.environ.get('MEXC_API_SECRET', 'YOUR_SECRET_KEY')
    exchange = ccxt.mexc({
        'apiKey': mexc_api_key,
        'secret': mexc_api_secret,
        'enableRateLimit': True,
    })
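
    # Note: public OHLCV endpoints on most ccxt-supported exchanges do not require
    # valid API keys, so the placeholder values above are typically sufficient for
    # this backtest-only script.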
print("Fetching historical data...")
|
|
candles = await fetch_historical_data(exchange, symbol, timeframe, since, end_time)
|
|
if not candles:
|
|
print("No historical data fetched.")
|
|
await exchange.close()
|
|
return
|
|
|
|
# Initialize the backtest environment with the historical candles.
|
|
env = BacktestEnvironment(candles)
|
|
|
|

    # Model dimensions: 5 base OHLCV features + 3 simulated sentiment features.
    input_dim = 5 + 3
    hidden_dim = 128
    output_dim = 3  # SELL, HOLD, BUY

    model = TradingModel(input_dim, hidden_dim, output_dim)
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
    replay_buffer = ReplayBuffer(capacity=10000)
    rl_agent = ContinuousRLAgent(model, optimizer, replay_buffer, batch_size=32)

    # Train the RL agent via backtesting.
    num_epochs = 10  # Adjust the number of epochs as needed.
    train_on_historical_data(env, rl_agent, num_epochs=num_epochs)

    # Optionally, perform a final test episode to simulate trading with the trained model.
    state = env.reset()
    done = False
    cumulative_reward = 0.0
    while not done:
        action = rl_agent.act(state)
        # Advance on next_state; the first return value is the state just acted on.
        _, reward, next_state, done = env.step(action)
        cumulative_reward += reward
        if next_state is not None:
            state = next_state
    print("Final backtest simulation cumulative profit:", cumulative_reward)

    await exchange.close()

if __name__ == "__main__":
    asyncio.run(main_backtest())