Author: Dobromir Popov
Date:   2025-02-02 00:46:58 +02:00
Parent: 24f26f5e98
Commit: 78005c2d84


@@ -7,6 +7,7 @@ if sys.platform == 'win32':
from dotenv import load_dotenv
import os
import time
import json
import ccxt.async_support as ccxt
import torch
import torch.nn as nn
@@ -14,13 +15,35 @@ import torch.optim as optim
import numpy as np
from collections import deque

# -------------------------------------
# Utility functions for caching candles to file
# -------------------------------------
CACHE_FILE = "candles_cache.json"

def load_candles_cache(filename):
    if os.path.exists(filename):
        try:
            with open(filename, "r") as f:
                data = json.load(f)
            print(f"Loaded {len(data)} candles from cache.")
            return data
        except Exception as e:
            print("Error reading cache file:", e)
    return []

def save_candles_cache(filename, candles):
    try:
        with open(filename, "w") as f:
            json.dump(candles, f)
    except Exception as e:
        print("Error saving cache file:", e)
# -------------------------------------
# Neural Network Architecture Definition
# -------------------------------------
class TradingModel(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim):
        super(TradingModel, self).__init__()
        # This is a minimal feed-forward network.
        self.net = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
@@ -56,68 +79,65 @@ def compute_indicators(candle, additional_data):
    """
    Combine OHLCV candle data with extra indicator information.
    Base features: open, high, low, close, volume.
    Additional channels (e.g. simulated sentiment) are appended.
    """
    features = [
        candle.get('open', 0.0),
        candle.get('high', 0.0),
        candle.get('low', 0.0),
        candle.get('close', 0.0),
        candle.get('volume', 0.0),
    ]
    for key, value in additional_data.items():
        features.append(value)
    return np.array(features, dtype=np.float32)
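
An illustration (not in the commit) of why input_dim is 8 in main_backtest below: five OHLCV features plus three extra channels. The channel names here are hypothetical placeholders.

candle = {'open': 2300.0, 'high': 2310.0, 'low': 2295.0, 'close': 2305.0, 'volume': 12.5}
extra = {'sentiment': 0.2, 'news_volume': 0.7, 'fear_greed': 0.5}   # hypothetical channels
features = compute_indicators(candle, extra)
print(features.shape)   # (8,) -> matches input_dim = 8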
# -------------------------------------
# RL Agent with Q-Learning Update and Epsilon-Greedy Exploration
# -------------------------------------
class ContinuousRLAgent:
    def __init__(self, model, optimizer, replay_buffer, batch_size=32, gamma=0.99):
        self.model = model
        self.optimizer = optimizer
        self.replay_buffer = replay_buffer
        self.batch_size = batch_size
        self.loss_fn = nn.MSELoss()
        self.gamma = gamma

    def act(self, state, epsilon=0.1):
        # ε-greedy: with probability epsilon take a random action
        if np.random.rand() < epsilon:
            return np.random.randint(0, 3)
        state_tensor = torch.from_numpy(np.array(state, dtype=np.float32)).unsqueeze(0)
        with torch.no_grad():
            output = self.model(state_tensor)
        action = torch.argmax(output, dim=1).item()
        return action

    def train_step(self):
        # Only train if we have enough samples
        if len(self.replay_buffer) < self.batch_size:
            return

        # Convert lists to numpy arrays in one shot for performance
        batch = self.replay_buffer.sample(self.batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)
        states_tensor = torch.from_numpy(np.array(states, dtype=np.float32))
        actions_tensor = torch.tensor(actions, dtype=torch.int64)
        rewards_tensor = torch.from_numpy(np.array(rewards, dtype=np.float32)).unsqueeze(1)
        next_states_tensor = torch.from_numpy(np.array(next_states, dtype=np.float32))
        dones_tensor = torch.tensor(dones, dtype=torch.float32).unsqueeze(1)

        # Current Q-value for the chosen actions
        Q_values = self.model(states_tensor)
        current_Q = Q_values.gather(1, actions_tensor.unsqueeze(1))
        with torch.no_grad():
            next_Q_values = self.model(next_states_tensor)
            max_next_Q = next_Q_values.max(1)[0].unsqueeze(1)
            target = rewards_tensor + self.gamma * max_next_Q * (1.0 - dones_tensor)
        loss = self.loss_fn(current_Q, target)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
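
train_step implements a one-step Q-learning update: for each sampled transition the target is r + gamma * max_a' Q(s', a'), with the bootstrap term zeroed on terminal transitions, and the same network serves as its own target (no separate target network). The ReplayBuffer it samples from is defined elsewhere in the file and is not shown in this diff; based only on how it is called here (capacity argument, add, sample, len), a minimal sketch of the assumed interface would be:

import random
from collections import deque

class ReplayBuffer:
    """Assumed interface only; the real class lives outside the shown hunks."""
    def __init__(self, capacity=10000):
        self.buffer = deque(maxlen=capacity)   # oldest experiences are evicted automatically

    def add(self, experience):
        # experience = (state, action, reward, next_state, done)
        self.buffer.append(experience)

    def sample(self, batch_size):
        # Uniform random sampling without replacement
        return random.sample(self.buffer, batch_size)

    def __len__(self):
        return len(self.buffer)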
@@ -128,7 +148,7 @@ class ContinuousRLAgent:
async def fetch_historical_data(exchange, symbol, timeframe, since, end_time, batch_size=500):
    """
    Fetch historical OHLCV data for the given symbol and timeframe.
    The 'since' and 'end_time' parameters are in milliseconds.
    """
    candles = []
    since_ms = since
@@ -140,7 +160,6 @@ async def fetch_historical_data(exchange, symbol, timeframe, since, end_time, batch_size=500):
            break
        if not batch:
            break
        for c in batch:
            candle_dict = {
                'timestamp': c[0],
@@ -158,6 +177,22 @@ async def fetch_historical_data(exchange, symbol, timeframe, since, end_time, batch_size=500):
    print(f"Fetched {len(candles)} candles.")
    return candles

async def get_cached_or_fetch_data(exchange, symbol, timeframe, since, end_time, cache_file=CACHE_FILE, batch_size=500):
    cached_candles = load_candles_cache(cache_file)
    if cached_candles:
        last_ts = cached_candles[-1]['timestamp']
        # If the cached candles do not extend to 'end_time', fetch new ones.
        if last_ts < end_time:
            print("Fetching new candles to update cache...")
            new_candles = await fetch_historical_data(exchange, symbol, timeframe, last_ts + 1, end_time, batch_size)
            cached_candles.extend(new_candles)
        else:
            print("Cache covers the requested period.")
        return cached_candles
    else:
        candles = await fetch_historical_data(exchange, symbol, timeframe, since, end_time, batch_size)
        return candles

# -------------------------------------
# Backtest Environment Class Definition
# -------------------------------------
@@ -165,7 +200,7 @@ class BacktestEnvironment:
    def __init__(self, candles):
        self.candles = candles
        self.current_index = 0
        self.position = None  # Holds an open position, if any

    def reset(self):
        self.current_index = 0
@@ -185,18 +220,15 @@ class BacktestEnvironment:
    def step(self, action):
        """
        Simulate a trading step.
        - If not in a position and action is BUY (2), buy at the next candle's open.
        - If in a position and action is SELL (0), sell at the next candle's open and compute reward.
        - Otherwise, no trade is executed.
        Returns: (state, reward, next_state, done)
        """
        if self.current_index >= len(self.candles) - 1:
            return self.get_state(self.current_index), 0.0, None, True

        current_state = self.get_state(self.current_index)
        next_index = self.current_index + 1
        next_state = self.get_state(next_index)
        current_candle = self.candles[self.current_index]
@@ -205,28 +237,26 @@ class BacktestEnvironment:
        # Action mapping: 0 -> SELL, 1 -> HOLD, 2 -> BUY.
        if self.position is None:
            if action == 2:  # BUY: enter long position at next candle's open.
                entry_price = next_candle['open']
                self.position = {'entry_price': entry_price, 'entry_index': self.current_index}
        else:
            if action == 0:  # SELL: close long position.
                sell_price = next_candle['open']
                reward = sell_price - self.position['entry_price']
                self.position = None

        self.current_index = next_index
        done = (self.current_index >= len(self.candles) - 1)
        return current_state, reward, next_state, done
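
To make the reward semantics concrete (illustration only, not in the commit): both entries and exits fill at the next candle's open, and the reward for a completed BUY -> SELL round trip is simply the difference between the two fill prices, with no fees or position sizing. The numbers below are made up.

entry_price = 2305.0    # next candle's open when the BUY was executed
sell_price = 2311.5     # next candle's open when the SELL was executed
reward = sell_price - entry_price
print(reward)           # 6.5 quote-currency units per 1 unit of the asset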
# -------------------------------------
# Training Loop Over Historical Data (Backtest)
# -------------------------------------
def train_on_historical_data(env, rl_agent, num_epochs=10, epsilon=0.1):
    """
    For each epoch, run through the entire historical data.
    At each step, choose an action using an ε-greedy policy, simulate a trade,
    store the experience (state, action, reward, next_state, done), and update the model.
    """
    for epoch in range(num_epochs):
        state = env.reset()
@@ -234,13 +264,14 @@ def train_on_historical_data(env, rl_agent, num_epochs=10):
        total_reward = 0.0
        steps = 0
        while not done:
            action = rl_agent.act(state, epsilon=epsilon)
            prev_state = state
            state, reward, next_state, done = env.step(action)
            if next_state is None:
                next_state = np.zeros_like(prev_state)
            # Store the experience including the action taken.
            rl_agent.replay_buffer.add((prev_state, action, reward, next_state, done))
            rl_agent.train_step()
            # Advance to the next state; env.step returns the pre-step state as its first element.
            state = next_state
            total_reward += reward
            steps += 1
        print(f"Epoch {epoch+1}/{num_epochs} completed, total reward: {total_reward:.4f} over {steps} steps.")
@@ -254,11 +285,11 @@ async def main_backtest():
    timeframe = '1m'
    now = int(time.time() * 1000)
    one_day_ms = 24 * 60 * 60 * 1000
    # Fetch a 1-day period from 2 days ago until 1 day ago.
    since = now - one_day_ms * 2
    end_time = now - one_day_ms

    # Initialize exchange (using MEXC for example).
    mexc_api_key = os.environ.get('MEXC_API_KEY', 'YOUR_API_KEY')
    mexc_api_secret = os.environ.get('MEXC_API_SECRET', 'YOUR_SECRET_KEY')
    exchange = ccxt.mexc({
@@ -268,38 +299,41 @@ async def main_backtest():
    })

    print("Fetching historical data...")
    candles = await get_cached_or_fetch_data(exchange, symbol, timeframe, since, end_time)
    if not candles:
        print("No historical data fetched.")
        await exchange.close()
        return

    # Save/Update the cache file.
    save_candles_cache(CACHE_FILE, candles)

    # Initialize the backtest environment with the candles.
    env = BacktestEnvironment(candles)

    # Model dimensions: 5 base OHLCV features + 3 simulated sentiment features = 8.
    input_dim = 8
    hidden_dim = 128
    output_dim = 3  # SELL, HOLD, BUY

    model = TradingModel(input_dim, hidden_dim, output_dim)
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
    replay_buffer = ReplayBuffer(capacity=10000)
    rl_agent = ContinuousRLAgent(model, optimizer, replay_buffer, batch_size=32, gamma=0.99)

    # Run training over historical data.
    num_epochs = 10  # Adjust as needed.
    train_on_historical_data(env, rl_agent, num_epochs=num_epochs, epsilon=0.1)

    # Optionally, perform a final test run (without exploration) to check cumulative profit.
    state = env.reset()
    done = False
    cumulative_reward = 0.0
    while not done:
        action = rl_agent.act(state, epsilon=0.0)
        state, reward, next_state, done = env.step(action)
        cumulative_reward += reward
        # Advance to the next state for the next decision.
        state = next_state
    print("Final backtest simulation cumulative profit:", cumulative_reward)

    await exchange.close()
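
The diff does not include the script's entry point, which presumably already exists further down the file. If you want to run this fragment on its own, something along these lines would be needed (hypothetical, not part of the commit):

import asyncio

if __name__ == "__main__":
    asyncio.run(main_backtest())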