This commit is contained in:
Dobromir Popov 2025-03-10 10:42:48 +02:00
parent 9b6d3f94ed
commit 4be322622e
3 changed files with 82 additions and 62 deletions

View File

@ -875,76 +875,83 @@ async def main():
parser.add_argument('--demo', action='store_true', help='Run in demo mode (no real trading)')
args = parser.parse_args()
# Initialize exchange
exchange = ccxt.mexc({
# Initialize exchange with async support
exchange_id = 'mexc'
exchange_class = getattr(ccxt.async_support, exchange_id)
exchange = exchange_class({
'apiKey': MEXC_API_KEY,
'secret': MEXC_SECRET_KEY,
'enableRateLimit': True,
})
# Create environment
env = TradingEnvironment(
exchange=exchange,
symbol="ETH/USDT",
timeframe="1m",
leverage=MAX_LEVERAGE,
initial_balance=INITIAL_BALANCE,
is_demo=args.demo or args.mode != 'live' # Only trade for real in live mode
)
# Fetch initial data
await env.fetch_initial_data()
# Create agent
agent = Agent(state_size=STATE_SIZE, action_size=env.action_space)
# Try to load existing model
model_loaded = agent.load()
if not model_loaded and args.mode in ['eval', 'live']:
logger.warning("No pre-trained model found. Consider training first!")
if args.mode == 'train':
# Training mode
logger.info("Starting training mode")
await train_agent(agent, env, num_episodes=args.episodes)
try:
# Create environment
env = TradingEnvironment(
exchange=exchange,
symbol="ETH/USDT",
timeframe="1m",
leverage=MAX_LEVERAGE,
initial_balance=INITIAL_BALANCE,
is_demo=args.demo or args.mode != 'live'
)
elif args.mode == 'eval':
# Evaluation mode
logger.info("Starting evaluation mode")
eval_reward, eval_profit, win_rate = evaluate_agent(agent, env, num_episodes=args.episodes)
# Fetch initial data
await env.fetch_initial_data()
elif args.mode == 'live':
# Live trading mode
logger.info("Starting live trading mode with real-time data")
logger.info(f"Demo mode: {args.demo}")
# Create agent
agent = Agent(state_size=STATE_SIZE, action_size=env.action_space)
# Live trading loop
async for candle in get_live_prices("ETH/USDT", "1m"):
# Update environment with new data
await env._update_with_new_data(candle)
# Try to load existing model
model_loaded = agent.load()
if not model_loaded and args.mode in ['eval', 'live']:
logger.warning("No pre-trained model found. Consider training first!")
if args.mode == 'train':
# Training mode
logger.info("Starting training mode")
await train_agent(agent, env, num_episodes=args.episodes)
# Only trade if we have enough data
if len(env.data) >= env.window_size:
# Get current state
state = env.get_state()
elif args.mode == 'eval':
# Evaluation mode
logger.info("Starting evaluation mode")
eval_reward, eval_profit, win_rate = evaluate_agent(agent, env, num_episodes=args.episodes)
elif args.mode == 'live':
# Live trading mode
logger.info("Starting live trading mode with real-time data")
logger.info(f"Demo mode: {args.demo}")
# Live trading loop
async for candle in get_live_prices("ETH/USDT", "1m"):
# Update environment with new data
await env._update_with_new_data(candle)
# Select action (no exploration in live trading)
action = agent.select_action(state, training=False)
# Convert action number to readable format
action_names = ["HOLD", "BUY", "SELL", "CLOSE"]
logger.info(f"Price: ${candle['close']:.2f} | Action: {action_names[action]}")
# Take action
_, reward, _ = env.step(action)
# Print statistics
if len(env.trades) > 0:
wins = sum(1 for trade in env.trades if trade.get('pnl_percent', 0) > 0)
win_rate = wins / len(env.trades) * 100
total_pnl = sum(trade.get('pnl_dollar', 0) for trade in env.trades)
logger.info(f"Balance: ${env.balance:.2f} | Trades: {len(env.trades)} | "
f"Win Rate: {win_rate:.1f}% | Total PnL: ${total_pnl:.2f}")
# Only trade if we have enough data
if len(env.data) >= env.window_size:
# Get current state
state = env.get_state()
# Select action (no exploration in live trading)
action = agent.select_action(state, training=False)
# Convert action number to readable format
action_names = ["HOLD", "BUY", "SELL", "CLOSE"]
logger.info(f"Price: ${candle['close']:.2f} | Action: {action_names[action]}")
# Take action
_, reward, _ = env.step(action)
# Print statistics
if len(env.trades) > 0:
wins = sum(1 for trade in env.trades if trade.get('pnl_percent', 0) > 0)
win_rate = wins / len(env.trades) * 100
total_pnl = sum(trade.get('pnl_dollar', 0) for trade in env.trades)
logger.info(f"Balance: ${env.balance:.2f} | Trades: {len(env.trades)} | "
f"Win Rate: {win_rate:.1f}% | Total PnL: ${total_pnl:.2f}")
finally:
# Clean up exchange connection
await exchange.close()
if __name__ == "__main__":
try:

View File

@ -66,12 +66,21 @@ python main.py --mode eval --episodes 10
### Live Trading Mode
```bash
# Demo mode (no real trades)
# Demo mode (simulated trading with real market data)
python main.py --mode live --demo
# Real trading
# Real trading (actual trades on MEXC)
python main.py --mode live
```
Demo mode simulates trading using real-time market data without executing any actual trades. It still:
- Logs all trading decisions and performance metrics
- Updates the model based on market data (if in training mode)
- Displays real-time analytics and position information
- Calculates theoretical profits/losses
- Saves performance data to TensorBoard
This makes it perfect for testing strategies without financial risk.
## Configuration

View File

@ -0,0 +1,4 @@
2025-03-10 10:31:19,097 - INFO - Fetching initial 60 candles for ETH/USDT...
2025-03-10 10:31:24,545 - ERROR - Error fetching initial data: object list can't be used in 'await' expression
2025-03-10 10:38:32,233 - INFO - Fetching initial 60 candles for ETH/USDT...
2025-03-10 10:38:38,055 - ERROR - Error fetching initial data: object list can't be used in 'await' expression