enhancements

This commit is contained in:
Dobromir Popov
2025-04-01 13:46:53 +03:00
parent a46b2c74f8
commit 73c5ecb0d2
17 changed files with 2279 additions and 736 deletions

88
main.py
View File

@@ -56,7 +56,7 @@ websocket_logger.setLevel(logging.INFO) # Change this from DEBUG to INFO
class WebSocketFilter(logging.Filter):
def filter(self, record):
# Filter out DEBUG messages from WebSocket-related modules
-if record.levelno == logging.DEBUG and ('websocket' in record.name or
+if record.levelno == logging.INFO and ('websocket' in record.name or
'protocol' in record.name or
'realtime' in record.name):
return False
@@ -331,7 +331,7 @@ def main():
"""Main function for the trading bot."""
# Parse command-line arguments
parser = argparse.ArgumentParser(description="Trading Bot with Neural Network Integration")
-parser.add_argument('--symbols', nargs='+', default=["BTC/USDT", "ETH/USDT"],
+parser.add_argument('--symbols', nargs='+', default=["ETH/USDT", "ETH/USDT"],
help='Trading symbols to monitor')
parser.add_argument('--timeframes', nargs='+', default=["1m", "5m", "1h", "4h", "1d"],
help='Timeframes to monitor')
@@ -692,11 +692,17 @@ if __name__ == "__main__":
"""Calculate reward for the given action with aggressive rewards for profitable trades and volume/price action signals"""
reward = 0
# Base reward for actions
if action == 0: # HOLD
reward = -0.05 # Increased penalty for doing nothing to encourage more trading
elif action == 1: # BUY/LONG
# Validate current price
if self.current_price <= 0 or self.current_price > 1000000: # Reasonable price range
logger.error(f"Invalid current price: {self.current_price}")
return -10.0 # Strong penalty for invalid price
# Validate position size
if self.position_size <= 0 or self.position_size > 1000000: # Reasonable position size range
logger.error(f"Invalid position size: {self.position_size}")
return -10.0 # Strong penalty for invalid position size
if action == 1: # BUY/LONG
if self.position == 'flat':
# Opening a long position
self.position = 'long'
@@ -706,12 +712,11 @@ if __name__ == "__main__":
self.stop_loss = self.entry_price * (1 - self.stop_loss_pct/100)
self.take_profit = self.entry_price * (1 + self.take_profit_pct/100)
-# Check if this is an optimal buy point (bottom)
-current_idx = len(self.features['price']) - 1
-if hasattr(self, 'optimal_bottoms') and current_idx in self.optimal_bottoms:
-reward += 3.0 # Increased bonus for buying at a bottom
+# Check if this is an optimal buy point
+if hasattr(self, 'optimal_bottoms') and self.entry_index in self.optimal_bottoms:
+reward += 2.0 # Bonus for buying at a bottom
-# Check for volume spike (indicating potential big movement)
+# Check for volume spike
if len(self.features['volume']) > 5:
avg_volume = np.mean(self.features['volume'][-5:-1])
current_volume = self.features['volume'][-1]
@@ -737,9 +742,20 @@ if __name__ == "__main__":
pnl_percent = (self.entry_price - self.current_price) / self.entry_price * 100
pnl_dollar = pnl_percent / 100 * self.position_size
# Validate PnL values
if abs(pnl_percent) > 100: # Max 100% loss/gain
logger.error(f"Invalid PnL percentage: {pnl_percent}")
pnl_percent = max(min(pnl_percent, 100), -100)
pnl_dollar = pnl_percent / 100 * self.position_size
# Apply fees
pnl_dollar -= self.calculate_fees(self.position_size)
# Update balance with validation
if abs(pnl_dollar) > self.balance * 2: # Max 200% of balance
logger.error(f"Invalid PnL dollar amount: {pnl_dollar}")
pnl_dollar = max(min(pnl_dollar, self.balance * 2), -self.balance * 2)
# Update balance
self.balance += pnl_dollar
self.total_pnl += pnl_dollar
@@ -758,11 +774,11 @@ if __name__ == "__main__":
# Reward based on PnL with stronger penalties for losses
if pnl_dollar > 0:
-reward += 1.0 + pnl_dollar / 10 # Positive reward for profit
+reward += 1.0 + min(pnl_dollar / 10, 5.0) # Cap positive reward at 5.0
self.win_count += 1
else:
-# Stronger penalty for losses, scaled by the size of the loss
-loss_penalty = 1.0 + abs(pnl_dollar) / 5
+# Stronger penalty for losses, scaled by the size of the loss but capped
+loss_penalty = min(1.0 + abs(pnl_dollar) / 5, 5.0)
reward -= loss_penalty
self.loss_count += 1
@@ -2115,11 +2131,17 @@ class TradingEnvironment:
"""Calculate reward for the given action with aggressive rewards for profitable trades and volume/price action signals"""
reward = 0
# Base reward for actions
if action == 0: # HOLD
reward = -0.05 # Increased penalty for doing nothing to encourage more trading
elif action == 1: # BUY/LONG
# Validate current price
if self.current_price <= 0 or self.current_price > 1000000: # Reasonable price range
logger.error(f"Invalid current price: {self.current_price}")
return -10.0 # Strong penalty for invalid price
# Validate position size
if self.position_size <= 0 or self.position_size > 1000000: # Reasonable position size range
logger.error(f"Invalid position size: {self.position_size}")
return -10.0 # Strong penalty for invalid position size
if action == 1: # BUY/LONG
if self.position == 'flat':
# Opening a long position
self.position = 'long'
@@ -2129,12 +2151,11 @@ class TradingEnvironment:
self.stop_loss = self.entry_price * (1 - self.stop_loss_pct/100)
self.take_profit = self.entry_price * (1 + self.take_profit_pct/100)
-# Check if this is an optimal buy point (bottom)
-current_idx = len(self.features['price']) - 1
-if hasattr(self, 'optimal_bottoms') and current_idx in self.optimal_bottoms:
-reward += 3.0 # Increased bonus for buying at a bottom
+# Check if this is an optimal buy point
+if hasattr(self, 'optimal_bottoms') and self.entry_index in self.optimal_bottoms:
+reward += 2.0 # Bonus for buying at a bottom
-# Check for volume spike (indicating potential big movement)
+# Check for volume spike
if len(self.features['volume']) > 5:
avg_volume = np.mean(self.features['volume'][-5:-1])
current_volume = self.features['volume'][-1]
@@ -2160,9 +2181,20 @@ class TradingEnvironment:
pnl_percent = (self.entry_price - self.current_price) / self.entry_price * 100
pnl_dollar = pnl_percent / 100 * self.position_size
# Validate PnL values
if abs(pnl_percent) > 100: # Max 100% loss/gain
logger.error(f"Invalid PnL percentage: {pnl_percent}")
pnl_percent = max(min(pnl_percent, 100), -100)
pnl_dollar = pnl_percent / 100 * self.position_size
# Apply fees
pnl_dollar -= self.calculate_fees(self.position_size)
# Update balance with validation
if abs(pnl_dollar) > self.balance * 2: # Max 200% of balance
logger.error(f"Invalid PnL dollar amount: {pnl_dollar}")
pnl_dollar = max(min(pnl_dollar, self.balance * 2), -self.balance * 2)
# Update balance
self.balance += pnl_dollar
self.total_pnl += pnl_dollar
@@ -2181,11 +2213,11 @@ class TradingEnvironment:
# Reward based on PnL with stronger penalties for losses
if pnl_dollar > 0:
-reward += 1.0 + pnl_dollar / 10 # Positive reward for profit
+reward += 1.0 + min(pnl_dollar / 10, 5.0) # Cap positive reward at 5.0
self.win_count += 1
else:
-# Stronger penalty for losses, scaled by the size of the loss
-loss_penalty = 1.0 + abs(pnl_dollar) / 5
+# Stronger penalty for losses, scaled by the size of the loss but capped
+loss_penalty = min(1.0 + abs(pnl_dollar) / 5, 5.0)
reward -= loss_penalty
self.loss_count += 1