fix checkpoint model loading
parent 79c51c0d5d
commit 0ebf4e13bd
File diff suppressed because one or more lines are too long
@@ -47,7 +47,7 @@ def save_candles_cache(filename, candles_dict):
         print("Error saving cache file:", e)
 
 # -------------------------------------
-# Checkpoint Functions (same as before)
+# Checkpoint Functions
 # -------------------------------------
 def maintain_checkpoint_directory(directory, max_files=10):
     files = os.listdir(directory)
@@ -100,6 +100,8 @@ def save_checkpoint(model, epoch, reward, last_dir=LAST_DIR, best_dir=BEST_DIR):
     print(f"Saved checkpoint for epoch {epoch} with reward {reward:.4f}")
 
 def load_best_checkpoint(model, best_dir=BEST_DIR):
+    """Attempt to load the best checkpoint. If the architecture is different,
+    catch the RuntimeError and skip loading."""
     best_models = get_best_models(best_dir)
     if not best_models:
         return None
@@ -107,7 +109,13 @@ def load_best_checkpoint(model, best_dir=BEST_DIR):
     path = os.path.join(best_dir, best_file)
     print(f"Loading best model from checkpoint: {best_file} with reward {best_reward:.4f}")
     checkpoint = torch.load(path)
-    model.load_state_dict(checkpoint["model_state_dict"])
+    try:
+        model.load_state_dict(checkpoint["model_state_dict"])
+    except RuntimeError as e:
+        print("Warning: Failed to load best checkpoint due to:")
+        print(e)
+        print("This is likely due to a change in model architecture. Skipping checkpoint load.")
+        return None
     return checkpoint
 
 # -------------------------------------
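The motivation for the new try/except is easiest to see in isolation: load_state_dict raises RuntimeError when the saved tensors no longer match the model's shapes. A minimal, self-contained sketch of the failure mode this commit guards against (the 20- and 35-unit layers are illustrative, not taken from the repository):

# Illustration only: loading a state_dict saved from a different architecture
# raises RuntimeError, which the patched loader now catches and reports.
import torch
import torch.nn as nn

old_model = nn.Linear(20, 3)   # pretend this architecture wrote the checkpoint
new_model = nn.Linear(35, 3)   # the current code builds a wider input (5 timeframes x 7 features)

checkpoint = {"model_state_dict": old_model.state_dict()}
try:
    new_model.load_state_dict(checkpoint["model_state_dict"])
except RuntimeError as e:
    print("Warning: Failed to load best checkpoint due to:")
    print(e)
    print("This is likely due to a change in model architecture. Skipping checkpoint load.")

Before this commit, that RuntimeError propagated out of load_best_checkpoint and aborted the run; now the stale checkpoint is simply skipped and training starts from fresh weights.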
@@ -283,7 +291,6 @@ class BacktestEnvironment:
             base_ts = base_candle["timestamp"]
             for tf in self.timeframes:
                 candles_list = self.candles_dict[tf]
-                # Get the candle from this timeframe that is closest to (and <=) base_ts.
                 aligned_index, _ = get_aligned_candle_with_index(candles_list, base_ts)
                 features = get_features_for_tf(candles_list, aligned_index, period=10)
                 state_features.extend(features)
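The removed comment described what get_aligned_candle_with_index does: pick the latest candle in each timeframe whose timestamp is at or before base_ts. That helper is not part of this diff; a plausible sketch of the idea, assuming candles are dicts with a "timestamp" key sorted ascending (hypothetical reimplementation, shown only to explain the alignment step):

from bisect import bisect_right

def get_aligned_candle_with_index(candles_list, base_ts):
    """Return (index, candle) of the latest candle with timestamp <= base_ts."""
    timestamps = [c["timestamp"] for c in candles_list]
    idx = bisect_right(timestamps, base_ts) - 1
    idx = max(idx, 0)  # clamp if base_ts precedes the first candle
    return idx, candles_list[idx]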
@@ -392,15 +399,15 @@ def train_on_historical_data(env, rl_agent, num_epochs=10, epsilon=0.1):
 # -------------------------------------
 async def main_backtest():
     symbol = 'BTC/USDT'
-    # Define timeframes: we'll use 5 different ones.
+    # Define timeframes: 5 different ones.
     timeframes = ["1m", "5m", "15m", "1h", "1d"]
     now = int(time.time() * 1000)
-    # Use the base timeframe period of 1500 candles. For 1m, that is 1500 minutes.
+    # For base timeframe 1m, get 1500 candles (1500 minutes)
     period_ms = 1500 * 60 * 1000
     since = now - period_ms
     end_time = now
 
-    # Initialize exchange using MEXC (or your preferred exchange).
+    # Initialize exchange using MEXC
     mexc_api_key = os.environ.get('MEXC_API_KEY', 'YOUR_API_KEY')
     mexc_api_secret = os.environ.get('MEXC_API_SECRET', 'YOUR_SECRET_KEY')
     exchange = ccxt.mexc({
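The updated comment and the period_ms line encode a simple unit conversion; a quick sanity check of the arithmetic (values illustrative, not part of the patch):

period_ms = 1500 * 60 * 1000          # 1500 one-minute candles -> 90,000,000 ms
hours = period_ms / (60 * 60 * 1000)
print(hours)                          # 25.0, i.e. the backtest fetches the last 25 hours of 1m data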
@@ -409,6 +416,7 @@ async def main_backtest():
         'enableRateLimit': True,
     })
 
+    try:
         candles_dict = {}
         for tf in timeframes:
             print(f"Fetching historical data for timeframe {tf}...")
@@ -421,8 +429,8 @@ async def main_backtest():
         # Create the backtest environment using multi-timeframe data.
         env = BacktestEnvironment(candles_dict, base_tf="1m", timeframes=timeframes)
 
-        # Neural Network dimensions: each timeframe produces 7 features.
-        input_dim = len(timeframes) * 7  # 7 features * 5 timeframes = 35.
+        # Neural network dimensions: each timeframe produces 7 features.
+        input_dim = len(timeframes) * 7  # 7 features x 5 timeframes = 35.
         hidden_dim = 128
         output_dim = 3  # Actions: SELL, HOLD, BUY.
 
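The input size follows directly from the state construction in BacktestEnvironment: 7 features per timeframe, concatenated across the 5 timeframes. A tiny illustration with dummy feature values (not real market data):

timeframes = ["1m", "5m", "15m", "1h", "1d"]
state_features = []
for tf in timeframes:
    features = [0.0] * 7              # stand-in for get_features_for_tf(...)
    state_features.extend(features)
print(len(state_features))            # 35, matching input_dim = len(timeframes) * 7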
@@ -431,7 +439,7 @@ async def main_backtest():
         replay_buffer = ReplayBuffer(capacity=10000)
         rl_agent = ContinuousRLAgent(model, optimizer, replay_buffer, batch_size=32, gamma=0.99)
 
-        # Load best checkpoint if available.
+        # Load best checkpoint if available. (In case of architecture change, it will be skipped.)
         load_best_checkpoint(model, BEST_DIR)
 
         # Train the agent over the historical period.
@@ -459,7 +467,8 @@ async def main_backtest():
 
         # Plot chart with buy/sell markers on the base timeframe ("1m").
         plot_trade_history(candles_dict["1m"], trades)
+    finally:
+        # Ensure that exchange resources are released even if errors occur.
         await exchange.close()
 
 if __name__ == "__main__":
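The new try/finally follows the usual pattern for async ccxt clients: the exchange object owns an HTTP session that must be closed even when fetching or training raises. A minimal sketch of the same pattern, assuming ccxt's async variant (which is what the awaited exchange.close() implies); the symbol and limit are placeholders:

import asyncio
import ccxt.async_support as ccxt   # async variant; exchange.close() is a coroutine here

async def run():
    exchange = ccxt.mexc({'enableRateLimit': True})
    try:
        candles = await exchange.fetch_ohlcv('BTC/USDT', timeframe='1m', limit=10)
        print(f"Fetched {len(candles)} candles")
    finally:
        # Release the underlying HTTP session even if fetch_ohlcv raised.
        await exchange.close()

if __name__ == "__main__":
    asyncio.run(run())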