implementations

commit 33a5588539 (parent 1a15ee934b)
@@ -121,6 +121,44 @@ def create_padding_mask(seq, pad_token=0):
     """
     return (seq == pad_token).all(dim=-1).unsqueeze(0)
 
+
+def get_aligned_candle_with_index(candles_list, base_ts):
+    """
+    Find the candle from candles_list that is closest to (and <=) base_ts.
+    Returns: (index, candle)
+    """
+    aligned_index = None
+    aligned_candle = None
+    for i in range(len(candles_list)):
+        if candles_list[i]["timestamp"] <= base_ts:
+            aligned_index = i
+            aligned_candle = candles_list[i]
+        else:
+            break
+    return aligned_index, aligned_candle
+
+
+def get_features_for_tf(candles_list, aligned_index, period=10):
+    """
+    Extract features from the candle at aligned_index.
+    If aligned_index is None, return a zeroed feature vector.
+    """
+    if aligned_index is None:
+        return [0.0] * 7  # return zeroed feature vector
+    candle = candles_list[aligned_index]
+    # Simple features: open, high, low, close, volume, and two EMAs.
+    close_prices = [c["close"] for c in candles_list[:aligned_index+1]]
+    ema_short = calculate_ema(candles_list[:aligned_index+1], period=period)[-1]
+    ema_long = calculate_ema(candles_list[:aligned_index+1], period=period*2)[-1]
+    features = [
+        candle["open"],
+        candle["high"],
+        candle["low"],
+        candle["close"],
+        candle["volume"],
+        ema_short,
+        ema_long
+    ]
+    return features
+
+
 # Example usage (within a larger training loop):
 if __name__ == '__main__':
     # Dummy data for demonstration
@@ -156,3 +194,13 @@ if __name__ == '__main__':
     print("\nMask:\n", mask)
     padding_mask = create_padding_mask(torch.tensor(candle_features))
     print(f"\nPadding mask: {padding_mask}")
+
+    # Example usage of the new functions
+    index, candle = get_aligned_candle_with_index(candles_data, 1678886570000)
+    if candle:
+        print(f"\nAligned candle: {candle}")
+    else:
+        print("\nNo aligned candle found.")
+
+    features = get_features_for_tf(candles_data, index)
+    print(f"\nFeatures for timeframe: {features}")
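The key property of this helper pair is that each timeframe is sampled at or before the base timestamp, never after it. A minimal sketch of the intended behaviour, with invented timestamps and values (none of this is in the commit; it also assumes the pre-existing calculate_ema returns one EMA value per input candle):

# Illustration only: hypothetical candles, not repository data.
candles = [
    {"timestamp": 1000, "open": 1.0, "high": 2.0, "low": 0.5, "close": 1.5, "volume": 10.0},
    {"timestamp": 2000, "open": 1.5, "high": 2.5, "low": 1.0, "close": 2.0, "volume": 12.0},
    {"timestamp": 3000, "open": 2.0, "high": 3.0, "low": 1.8, "close": 2.8, "volume": 8.0},
]

idx, candle = get_aligned_candle_with_index(candles, base_ts=2500)
# idx == 1: the candle stamped 2000 is the latest one at or before base_ts=2500.

features = get_features_for_tf(candles, idx, period=2)
# 7 floats: open, high, low, close, volume, short EMA, long EMA.

idx_none, _ = get_aligned_candle_with_index(candles, base_ts=500)
# idx_none is None (no candle at or before 500), so the features fall back to zeros:
assert get_features_for_tf(candles, idx_none) == [0.0] * 7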
@@ -7,7 +7,7 @@ from collections import deque
 import ccxt.async_support as ccxt
 from dotenv import load_dotenv
+import platform
 
 
 class LiveDataManager:
     def __init__(self, symbol, exchange_name='mexc', window_size=120):
@@ -20,6 +20,7 @@ class LiveDataManager:
         self.last_candle_time = None
         self.exchange = self._initialize_exchange()
         self.lock = asyncio.Lock()  # Lock to prevent race conditions
+        self.is_windows = platform.system() == 'Windows'
 
     def _initialize_exchange(self):
         exchange_class = getattr(ccxt, self.exchange_name)
@@ -41,15 +42,23 @@ class LiveDataManager:
         print(f"Fetching initial candles for {self.symbol}...")
         now = int(time.time() * 1000)
         since = now - self.window_size * 60 * 1000
-        try:
-            candles = await self.exchange.fetch_ohlcv(self.symbol, '1m', since=since, limit=self.window_size)
-            for candle in candles:
-                self.candles.append(self._format_candle(candle))
-            if candles:
-                self.last_candle_time = candles[-1][0]
-            print(f"Fetched {len(candles)} initial candles.")
-        except Exception as e:
-            print(f"Error fetching initial candles: {e}")
+        retries = 3
+        for attempt in range(retries):
+            try:
+                candles = await self.exchange.fetch_ohlcv(self.symbol, '1m', since=since, limit=self.window_size)
+                for candle in candles:
+                    self.candles.append(self._format_candle(candle))
+                if candles:
+                    self.last_candle_time = candles[-1][0]
+                print(f"Fetched {len(candles)} initial candles.")
+                return  # Exit the function if successful
+            except Exception as e:
+                print(f"Attempt {attempt + 1} failed: {e}")
+                if self.is_windows and "aiodns needs a SelectorEventLoop" in str(e):
+                    print("aiodns issue detected on Windows. This is a known problem with aiodns and ccxt on Windows.")
+                if attempt < retries - 1:
+                    await asyncio.sleep(5)  # Wait before retrying
+        print("Failed to fetch initial candles after multiple retries.")
 
     def _format_candle(self, candle_data):
         return {
@@ -112,16 +121,23 @@ class LiveDataManager:
     async def fetch_and_process_ticks(self):
         async with self.lock:
             since = None if not self.ticks else self.ticks[-1]['timestamp']
-            try:
-                # Use fetch_trades (or appropriate method for your exchange) for live ticks.
-                ticks = await self.exchange.fetch_trades(self.symbol, since=since)
-                for tick in ticks:
-                    formatted_tick = self._format_tick(tick)
-                    if formatted_tick:  # Add the check here
-                        self.ticks.append(formatted_tick)
-                        await self._update_candle(formatted_tick)
-            except Exception as e:
-                print(f"Error fetching ticks: {e}")
+            retries = 3
+            for attempt in range(retries):
+                try:
+                    # Use fetch_trades (or appropriate method for your exchange) for live ticks.
+                    ticks = await self.exchange.fetch_trades(self.symbol, since=since)
+                    for tick in ticks:
+                        formatted_tick = self._format_tick(tick)
+                        if formatted_tick:  # Add the check here
+                            self.ticks.append(formatted_tick)
+                            await self._update_candle(formatted_tick)
+                    break  # Exit the retry loop if successful
+                except Exception as e:
+                    print(f"Error fetching ticks (attempt {attempt + 1}): {e}")
+                    if self.is_windows and "aiodns needs a SelectorEventLoop" in str(e):
+                        print("aiodns issue detected on Windows. This is a known problem with aiodns and ccxt on Windows.")
+                    if attempt < retries - 1:
+                        await asyncio.sleep(5)  # Wait before retrying
 
     async def get_data(self):
         async with self.lock:
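Both methods now wrap their ccxt calls in the same retry scaffolding: three attempts, a five-second pause, and a Windows-specific aiodns hint. A sketch of how that shared pattern could be expressed as one coroutine is below; it is illustrative only and not part of the commit, and the helper name `_retry_fetch` and its parameters are invented here:

import asyncio
import platform

async def _retry_fetch(coro_factory, retries=3, delay=5):
    """Run coro_factory() up to `retries` times, sleeping `delay` seconds between attempts."""
    is_windows = platform.system() == 'Windows'
    for attempt in range(retries):
        try:
            return await coro_factory()
        except Exception as e:
            print(f"Attempt {attempt + 1} failed: {e}")
            if is_windows and "aiodns needs a SelectorEventLoop" in str(e):
                print("aiodns issue detected on Windows.")
            if attempt < retries - 1:
                await asyncio.sleep(delay)
    return None

# Hypothetical usage inside the initial-candle fetch:
# candles = await _retry_fetch(lambda: self.exchange.fetch_ohlcv(
#     self.symbol, '1m', since=since, limit=self.window_size))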
@@ -8,6 +8,15 @@ from data.live_data import LiveDataManager
 from model.transformer import Transformer
 from training.train import train
 from data.data_utils import preprocess_data  # Import preprocess_data
+import ccxt.async_support as ccxt
+import time
+import os
+import numpy as np
+import matplotlib.pyplot as plt
+from model.trading_model import TradingModel
+from training.rl_agent import ContinuousRLAgent, ReplayBuffer
+from training.train_historical import train_on_historical_data, load_best_checkpoint, save_candles_cache, CACHE_FILE, BEST_DIR
+from data.data_utils import get_aligned_candle_with_index, get_features_for_tf
 
 async def main():
     symbol = 'BTC/USDT'
@@ -43,5 +52,245 @@ async def main():
         print("Training stopped manually.")
     finally:
         await data_manager.close()
+
+
+# -------------------------------------
+# Main Asynchronous Function for Training & Charting
+# -------------------------------------
+async def main_backtest():
+    symbol = 'BTC/USDT'
+    # Define timeframes: we'll use 5 different ones.
+    timeframes = ["1m", "5m", "15m", "1h", "1d"]
+    now = int(time.time() * 1000)
+    # Use the base timeframe period of 1500 candles. For 1m, that is 1500 minutes.
+    period_ms = 1500 * 60 * 1000
+    since = now - period_ms
+    end_time = now
+
+    # Initialize exchange using MEXC (or your preferred exchange).
+    mexc_api_key = os.environ.get('MEXC_API_KEY', 'YOUR_API_KEY')
+    mexc_api_secret = os.environ.get('MEXC_API_SECRET', 'YOUR_SECRET_KEY')
+    exchange = ccxt.mexc({
+        'apiKey': mexc_api_key,
+        'secret': mexc_api_secret,
+        'enableRateLimit': True,
+    })
+
+    candles_dict = {}
+    for tf in timeframes:
+        print(f"Fetching historical data for timeframe {tf}...")
+        candles = await fetch_historical_data(exchange, symbol, tf, since, end_time, batch_size=500)
+        candles_dict[tf] = candles
+
+    # Optionally, save the multi-timeframe cache.
+    save_candles_cache(CACHE_FILE, candles_dict)
+
+    # Create the backtest environment using multi-timeframe data.
+    env = BacktestEnvironment(candles_dict, base_tf="1m", timeframes=timeframes)
+
+    # Neural Network dimensions: each timeframe produces 7 features.
+    input_dim = len(timeframes) * 7  # 7 features * 5 timeframes = 35.
+    hidden_dim = 128
+    output_dim = 3  # Actions: SELL, HOLD, BUY.
+
+    model = TradingModel(input_dim, hidden_dim, output_dim)
+    optimizer = optim.Adam(model.parameters(), lr=1e-4)
+    replay_buffer = ReplayBuffer(capacity=10000)
+    rl_agent = ContinuousRLAgent(model, optimizer, replay_buffer, batch_size=32, gamma=0.99)
+
+    # Load best checkpoint if available.
+    load_best_checkpoint(model, BEST_DIR)
+
+    # Train the agent over the historical period.
+    num_epochs = 10  # Adjust as needed.
+    train_on_historical_data(env, rl_agent, num_epochs=num_epochs, epsilon=0.1)
+
+    # Run a final simulation (without exploration) to record trade history.
+    state = env.reset(clear_trade_history=True)
+    done = False
+    cumulative_reward = 0.0
+    while not done:
+        action = rl_agent.act(state, epsilon=0.0)
+        state, reward, next_state, done = env.step(action)
+        cumulative_reward += reward
+        state = next_state
+    print("Final simulation cumulative profit:", cumulative_reward)
+
+    # Evaluate trade performance.
+    trades = env.trade_history
+    num_trades = len(trades)
+    num_wins = sum(1 for trade in trades if trade["pnl"] > 0)
+    win_rate = (num_wins / num_trades * 100) if num_trades > 0 else 0.0
+    total_profit = sum(trade["pnl"] for trade in trades)
+    print(f"Total trades: {num_trades}, Wins: {num_wins}, Win rate: {win_rate:.2f}%, Total Profit: {total_profit:.4f}")
+
+    # Plot chart with buy/sell markers on the base timeframe ("1m").
+    plot_trade_history(candles_dict["1m"], trades)
+
+    await exchange.close()
+
+
+# -------------------------------------
+# Historical Data Fetching Function (for a given timeframe)
+# -------------------------------------
+async def fetch_historical_data(exchange, symbol, timeframe, since, end_time, batch_size=500):
+    candles = []
+    since_ms = since
+    while True:
+        try:
+            batch = await exchange.fetch_ohlcv(symbol, timeframe=timeframe, since=since_ms, limit=batch_size)
+        except Exception as e:
+            print(f"Error fetching historical data for {timeframe}:", e)
+            break
+        if not batch:
+            break
+        for c in batch:
+            candle_dict = {
+                'timestamp': c[0],
+                'open': c[1],
+                'high': c[2],
+                'low': c[3],
+                'close': c[4],
+                'volume': c[5]
+            }
+            candles.append(candle_dict)
+        last_timestamp = batch[-1][0]
+        if last_timestamp >= end_time:
+            break
+        since_ms = last_timestamp + 1
+    print(f"Fetched {len(candles)} candles for timeframe {timeframe}.")
+    return candles
+
+
+# -------------------------------------
+# Backtest Environment with Multi-Timeframe State
+# -------------------------------------
+class BacktestEnvironment:
+    def __init__(self, candles_dict, base_tf="1m", timeframes=None):
+        self.candles_dict = candles_dict  # dict of timeframe: candles_list
+        self.base_tf = base_tf
+        if timeframes is None:
+            self.timeframes = [base_tf]  # fallback to single timeframe
+        else:
+            self.timeframes = timeframes
+        self.trade_history = []  # record of closed trades
+        self.current_index = 0   # index on base_tf candles
+        self.position = None     # active position record
+
+    def reset(self, clear_trade_history=True):
+        self.current_index = 0
+        self.position = None
+        if clear_trade_history:
+            self.trade_history = []
+        return self.get_state(self.current_index)
+
+    def get_state(self, index):
+        """Construct the state as the concatenated features of all timeframes.
+        For each timeframe, find the aligned candle for the base timeframe’s timestamp."""
+        state_features = []
+        base_candle = self.candles_dict[self.base_tf][index]
+        base_ts = base_candle["timestamp"]
+        for tf in self.timeframes:
+            candles_list = self.candles_dict[tf]
+            # Get the candle from this timeframe that is closest to (and <=) base_ts.
+            aligned_index, _ = get_aligned_candle_with_index(candles_list, base_ts)
+            features = get_features_for_tf(candles_list, aligned_index, period=10)
+            state_features.extend(features)
+        return np.array(state_features, dtype=np.float32)
+
+    def step(self, action):
+        """
+        Simulate a trading step based on the base timeframe.
+        - If not in a position and action is BUY (2), record entry at next candle's open.
+        - If in a position and action is SELL (0), record exit at next candle's open, computing PnL.
+        Returns: (current_state, reward, next_state, done)
+        """
+        base_candles = self.candles_dict[self.base_tf]
+        if self.current_index >= len(base_candles) - 1:
+            return self.get_state(self.current_index), 0.0, None, True
+
+        current_state = self.get_state(self.current_index)
+        next_index = self.current_index + 1
+        next_state = self.get_state(next_index)
+        current_candle = base_candles[self.current_index]
+        next_candle = base_candles[next_index]
+        reward = 0.0
+
+        # Action mapping: 0 -> SELL, 1 -> HOLD, 2 -> BUY.
+        if self.position is None:
+            if action == 2:  # BUY signal: enter position at next candle's open.
+                entry_price = next_candle["open"]
+                self.position = {"entry_price": entry_price, "entry_index": self.current_index}
+        else:
+            if action == 0:  # SELL signal: close position at next candle's open.
+                exit_price = next_candle["open"]
+                reward = exit_price - self.position["entry_price"]
+                trade = {
+                    "entry_index": self.position["entry_index"],
+                    "entry_price": self.position["entry_price"],
+                    "exit_index": next_index,
+                    "exit_price": exit_price,
+                    "pnl": reward
+                }
+                self.trade_history.append(trade)
+                self.position = None
+
+        self.current_index = next_index
+        done = (self.current_index >= len(base_candles) - 1)
+        return current_state, reward, next_state, done
+
+
+# -------------------------------------
+# Chart Plotting: Trade History & PnL
+# -------------------------------------
+def plot_trade_history(candles, trade_history):
+    close_prices = [candle["close"] for candle in candles]
+    x = list(range(len(close_prices)))
+    plt.figure(figsize=(12, 6))
+    plt.plot(x, close_prices, label="Close Price", color="black", linewidth=1)
+
+    # Use these flags so that the label "BUY" or "SELL" is only shown once in the legend.
+    buy_label_added = False
+    sell_label_added = False
+
+    for trade in trade_history:
+        in_idx = trade["entry_index"]
+        out_idx = trade["exit_index"]
+        in_price = trade["entry_price"]
+        out_price = trade["exit_price"]
+        pnl = trade["pnl"]
+
+        # Plot BUY marker ("IN")
+        if not buy_label_added:
+            plt.plot(in_idx, in_price, marker="^", color="green", markersize=10, label="BUY (IN)")
+            buy_label_added = True
+        else:
+            plt.plot(in_idx, in_price, marker="^", color="green", markersize=10)
+        plt.text(in_idx, in_price, " IN", color="green", fontsize=8, verticalalignment="bottom")
+
+        # Plot SELL marker ("OUT")
+        if not sell_label_added:
+            plt.plot(out_idx, out_price, marker="v", color="red", markersize=10, label="SELL (OUT)")
+            sell_label_added = True
+        else:
+            plt.plot(out_idx, out_price, marker="v", color="red", markersize=10)
+        plt.text(out_idx, out_price, " OUT", color="red", fontsize=8, verticalalignment="top")
+
+        # Annotate the PnL near the SELL marker.
+        plt.text(out_idx, out_price, f" {pnl:+.2f}", color="blue", fontsize=8, verticalalignment="bottom")
+
+        # Choose line color based on profitability.
+        if pnl > 0:
+            line_color = "green"
+        elif pnl < 0:
+            line_color = "red"
+        else:
+            line_color = "gray"
+        # Draw a dotted line between the buy and sell points.
+        plt.plot([in_idx, out_idx], [in_price, out_price], linestyle="dotted", color=line_color)
+
+    plt.title("Trade History with PnL")
+    plt.xlabel("Base Candle Index (1m)")
+    plt.ylabel("Price")
+    plt.legend()
+    plt.grid(True)
+    plt.show()
+
 if __name__ == '__main__':
-    asyncio.run(main())
+    asyncio.run(main_backtest())
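Taken together, main_backtest wires a 35-dimensional observation (5 timeframes times 7 features each) into a 3-action head. A quick sanity-check sketch, illustrative only (the dummy state and constants below are invented, not repository data):

import numpy as np

timeframes = ["1m", "5m", "15m", "1h", "1d"]
features_per_tf = 7                 # open, high, low, close, volume, short EMA, long EMA
state_dim = len(timeframes) * features_per_tf
assert state_dim == 35              # matches input_dim passed to TradingModel

# Action indices used by the environment and agent:
ACTIONS = {0: "SELL", 1: "HOLD", 2: "BUY"}

# A state returned by BacktestEnvironment.get_state is the per-timeframe
# feature lists concatenated in the order of `timeframes`:
dummy_state = np.zeros(state_dim, dtype=np.float32)
print(dummy_state.shape)            # (35,)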
crypto/gogo/model/trading_model.py (new file, 17 lines)
@@ -0,0 +1,17 @@
+# model/trading_model.py
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+class TradingModel(nn.Module):
+    def __init__(self, input_dim, hidden_dim, output_dim):
+        super(TradingModel, self).__init__()
+        self.fc1 = nn.Linear(input_dim, hidden_dim)
+        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
+        self.fc3 = nn.Linear(hidden_dim, output_dim)
+
+    def forward(self, x):
+        x = F.relu(self.fc1(x))
+        x = F.relu(self.fc2(x))
+        x = self.fc3(x)
+        return x
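TradingModel is a plain three-layer MLP that maps the 35-feature state to one Q-value per action. A minimal forward-pass sketch (dimensions follow main_backtest; the random input is invented and the import assumes the repository package layout is on the path):

import torch
from model.trading_model import TradingModel

model = TradingModel(input_dim=35, hidden_dim=128, output_dim=3)
state = torch.randn(1, 35)              # one batched observation
q_values = model(state)                 # shape (1, 3): Q(SELL), Q(HOLD), Q(BUY)
action = torch.argmax(q_values, dim=1).item()
print(q_values.shape, action)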
crypto/gogo/training/rl_agent.py (new file, 49 lines)
@@ -0,0 +1,49 @@
+# training/rl_agent.py
+import random
+from collections import deque
+import numpy as np
+import torch
+
+class ContinuousRLAgent:
+    def __init__(self, model, optimizer, replay_buffer, batch_size=32, gamma=0.99):
+        self.model = model
+        self.optimizer = optimizer
+        self.replay_buffer = replay_buffer
+        self.batch_size = batch_size
+        self.gamma = gamma
+
+    def act(self, state, epsilon=0.0):
+        """
+        Select an action based on the state, using an epsilon-greedy policy.
+        """
+        if random.random() < epsilon:
+            # Exploration: choose a random action.
+            action = np.random.choice([0, 1, 2])  # SELL, HOLD, BUY
+        else:
+            # Exploitation: choose the action with the highest Q-value.
+            with torch.no_grad():
+                state_tensor = torch.tensor(state, dtype=torch.float32).unsqueeze(0)
+                q_values = self.model(state_tensor)
+                action = torch.argmax(q_values).item()
+        return action
+
+class ReplayBuffer:
+    def __init__(self, capacity):
+        self.buffer = deque(maxlen=capacity)
+
+    def push(self, state, action, reward, next_state, done):
+        """
+        Store an experience tuple into the replay buffer.
+        """
+        self.buffer.append((state, action, reward, next_state, done))
+
+    def sample(self, batch_size):
+        """
+        Randomly sample a batch of experiences from the replay buffer.
+        """
+        batch = random.sample(self.buffer, batch_size)
+        states, actions, rewards, next_states, dones = zip(*batch)
+        return states, actions, rewards, next_states, dones
+
+    def __len__(self):
+        return len(self.buffer)
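The agent's act method is standard epsilon-greedy over the model's Q-values, and the buffer is a bounded deque. A small end-to-end sketch of the intended interaction (the transitions are made up for illustration, and the imports assume the repository package layout is on the path):

import numpy as np
import torch.optim as optim
from model.trading_model import TradingModel
from training.rl_agent import ContinuousRLAgent, ReplayBuffer

model = TradingModel(input_dim=35, hidden_dim=128, output_dim=3)
agent = ContinuousRLAgent(model, optim.Adam(model.parameters(), lr=1e-4),
                          ReplayBuffer(capacity=1000), batch_size=4, gamma=0.99)

state = np.zeros(35, dtype=np.float32)
action = agent.act(state, epsilon=0.1)        # mostly greedy, 10% random

# Push a few fake transitions and sample a batch once enough are stored.
for _ in range(8):
    agent.replay_buffer.push(state, action, 0.0, state, False)
states, actions, rewards, next_states, dones = agent.replay_buffer.sample(4)
print(len(states), actions[0], dones[0])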
crypto/gogo/training/train_historical.py (new file, 194 lines)
@@ -0,0 +1,194 @@
+# training/train_historical.py
+import os
+import json
+import time
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.optim as optim
+from model.trading_model import TradingModel
+from data.data_utils import get_aligned_candle_with_index, get_features_for_tf
+
+# --- Directories for saving models ---
+LAST_DIR = os.path.join("models", "last")
+BEST_DIR = os.path.join("models", "best")
+os.makedirs(LAST_DIR, exist_ok=True)
+os.makedirs(BEST_DIR, exist_ok=True)
+
+# --- File for saving candles cache ---
+CACHE_FILE = "candles_cache.json"
+
+# -------------------------------------
+# Checkpoint Functions (same as before)
+# -------------------------------------
+def maintain_checkpoint_directory(directory, max_files=10):
+    files = os.listdir(directory)
+    if len(files) > max_files:
+        full_paths = [os.path.join(directory, f) for f in files]
+        full_paths.sort(key=lambda x: os.path.getmtime(x))
+        for f in full_paths[: len(files) - max_files]:
+            os.remove(f)
+
+def get_best_models(directory):
+    best_files = []
+    for file in os.listdir(directory):
+        parts = file.split("_")
+        try:
+            r = float(parts[1])
+            best_files.append((r, file))
+        except Exception:
+            continue
+    return best_files
+
+def save_checkpoint(model, epoch, total_loss, last_dir=LAST_DIR, best_dir=BEST_DIR):
+    timestamp = time.strftime("%Y%m%d_%H%M%S")
+    last_filename = f"model_last_epoch_{epoch}_{timestamp}.pt"
+    last_path = os.path.join(last_dir, last_filename)
+    torch.save({
+        "epoch": epoch,
+        "total_loss": total_loss,
+        "model_state_dict": model.state_dict()
+    }, last_path)
+    maintain_checkpoint_directory(last_dir, max_files=10)
+
+    best_models = get_best_models(best_dir)
+    add_to_best = False
+    if len(best_models) < 10:
+        add_to_best = True
+    else:
+        # Evict the worst (highest-loss) checkpoint if the new loss improves on it.
+        worst_loss, worst_file = max(best_models, key=lambda x: x[0])
+        if total_loss < worst_loss:
+            add_to_best = True
+            os.remove(os.path.join(best_dir, worst_file))
+    if add_to_best:
+        best_filename = f"best_{total_loss:.4f}_epoch_{epoch}_{timestamp}.pt"
+        best_path = os.path.join(best_dir, best_filename)
+        torch.save({
+            "epoch": epoch,
+            "total_loss": total_loss,
+            "model_state_dict": model.state_dict()
+        }, best_path)
+        maintain_checkpoint_directory(best_dir, max_files=10)
+    print(f"Saved checkpoint for epoch {epoch} with loss {total_loss:.4f}")
+
+def load_best_checkpoint(model, best_dir=BEST_DIR):
+    best_models = get_best_models(best_dir)
+    if not best_models:
+        return None
+    best_loss, best_file = min(best_models, key=lambda x: x[0])  # lowest loss is the best model
+    path = os.path.join(best_dir, best_file)
+    print(f"Loading best model from checkpoint: {best_file} with loss {best_loss:.4f}")
+    checkpoint = torch.load(path)
+    model.load_state_dict(checkpoint["model_state_dict"])
+    return checkpoint
+
+# -------------------------------------
+# Training Loop on Historical Data
+# -------------------------------------
+def train_on_historical_data(env, rl_agent, num_epochs=10, epsilon=0.1):
+    """
+    Train the RL agent on historical data using the backtest environment.
+    """
+    model = rl_agent.model
+    optimizer = rl_agent.optimizer
+    replay_buffer = rl_agent.replay_buffer
+    batch_size = rl_agent.batch_size
+    gamma = rl_agent.gamma
+
+    model.train()
+    criterion = nn.MSELoss()  # or another suitable loss
+
+    for epoch in range(num_epochs):
+        state = env.reset()
+        done = False
+        total_reward = 0
+        total_loss = 0
+
+        while not done:
+            # Agent takes action (with exploration).
+            action = rl_agent.act(state, epsilon=epsilon)
+            current_state, reward, next_state, done = env.step(action)
+            total_reward += reward
+
+            # Store experience in replay buffer (skip the terminal step, whose next_state is None).
+            if next_state is not None:
+                replay_buffer.push(state, action, reward, next_state, done)
+
+            # Train on a batch from the replay buffer.
+            if len(replay_buffer) > batch_size:
+                # Sample a batch from the replay buffer.
+                states, actions, rewards, next_states, dones = replay_buffer.sample(batch_size)
+
+                # Convert data to PyTorch tensors.
+                states = torch.tensor(states, dtype=torch.float32)
+                actions = torch.tensor(actions, dtype=torch.float32)
+                rewards = torch.tensor(rewards, dtype=torch.float32)
+                next_states = torch.tensor(next_states, dtype=torch.float32)
+                dones = torch.tensor(dones, dtype=torch.float32)
+
+                # Compute Q-values for current states.
+                q_values = model(states)
+
+                # Compute Q-values for next states (detached so the TD target is not backpropagated).
+                next_q_values = model(next_states).detach()
+
+                # Compute the TD target.
+                td_target = rewards + gamma * torch.max(next_q_values, dim=1)[0] * (1 - dones)
+
+                # Compute the loss.
+                loss = criterion(q_values.gather(1, actions.long().unsqueeze(1)).squeeze(), td_target)
+
+                # Optimize the model.
+                optimizer.zero_grad()
+                loss.backward()
+                optimizer.step()
+
+                total_loss += loss.item()
+
+            # Move to the next state.
+            state = next_state
+
+        print(f"Epoch {epoch + 1}/{num_epochs}, Total Reward: {total_reward:.4f}, Loss: {total_loss:.4f}")
+        save_checkpoint(model, epoch, total_loss, LAST_DIR, BEST_DIR)
+
+# -------------------------------------
+# Caching Functions (for candles data)
+# -------------------------------------
+def save_candles_cache(filename, candles_dict):
+    """
+    Save the candles data to a JSON file.
+    """
+    # Copy each candle into a plain dict of JSON-serializable scalars.
+    serializable_candles_dict = {}
+    for timeframe, candles in candles_dict.items():
+        serializable_candles = []
+        for candle in candles:
+            serializable_candle = {
+                'timestamp': candle['timestamp'],
+                'open': candle['open'],
+                'high': candle['high'],
+                'low': candle['low'],
+                'close': candle['close'],
+                'volume': candle['volume']
+            }
+            serializable_candles.append(serializable_candle)
+        serializable_candles_dict[timeframe] = serializable_candles
+
+    with open(filename, 'w') as f:
+        json.dump(serializable_candles_dict, f)
+
+def load_candles_cache(filename):
+    """
+    Load the candles data from a JSON file.
+    """
+    with open(filename, 'r') as f:
+        candles_dict = json.load(f)
+
+    # Coerce the numeric fields back to floats.
+    for timeframe, candles in candles_dict.items():
+        for candle in candles:
+            candle['open'] = float(candle['open'])
+            candle['high'] = float(candle['high'])
+            candle['low'] = float(candle['low'])
+            candle['close'] = float(candle['close'])
+            candle['volume'] = float(candle['volume'])
+    return candles_dict
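The update inside train_on_historical_data is a single-network DQN step: the target for the taken action is the reward plus gamma times the maximum next-state Q-value, masked by the done flag, and the loss is MSE against the current Q-value. A tiny numeric sketch of that target computation, with made-up values:

import torch

gamma = 0.99
rewards = torch.tensor([1.0, -0.5])              # invented rewards for two sampled transitions
dones = torch.tensor([0.0, 1.0])                 # the second transition ends the episode
next_q_values = torch.tensor([[0.2, 0.7, 0.1],
                              [0.3, 0.0, 0.4]])  # invented Q(s', a) rows

td_target = rewards + gamma * torch.max(next_q_values, dim=1)[0] * (1 - dones)
print(td_target)  # tensor([1.6930, -0.5000]): bootstrapped for the first, reward-only for the terminal one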