new_2
main_clean.py (344 changed lines)
@@ -2,15 +2,16 @@
 """
 Clean Trading System - Main Entry Point

-This is the new clean entry point that demonstrates the consolidated architecture:
-- Single configuration system
-- Clean data provider
-- Modular CNN and RL components
-- Centralized orchestrator
-- Simple web dashboard
+Unified entry point for the clean trading architecture with these modes:
+- test: Test data provider and orchestrator
+- cnn: Train CNN models only
+- rl: Train RL agents only
+- train: Train both CNN and RL models
+- trade: Live trading mode
+- web: Web dashboard with real-time charts

 Usage:
-    python main_clean.py --mode [train|trade|web] --symbol ETH/USDT
+    python main_clean.py --mode [test|cnn|rl|train|trade|web] --symbol ETH/USDT
 """

 import asyncio
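
The components named in the new docstring correspond to objects used later in this file. As a minimal orientation sketch (not part of the commit; the core.config and core.data_provider import paths are assumptions, only core.orchestrator.TradingOrchestrator is visible in this diff), the pieces wire together roughly like this:

# Illustrative wiring only; module paths other than core.orchestrator are assumed.
from core.config import get_config
from core.data_provider import DataProvider
from core.orchestrator import TradingOrchestrator

config = get_config()
data_provider = DataProvider(symbols=['ETH/USDT'], timeframes=['1m', '1h'])
orchestrator = TradingOrchestrator(data_provider)
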
@@ -32,15 +33,15 @@ from core.orchestrator import TradingOrchestrator
 logger = logging.getLogger(__name__)

 def run_data_test():
-    """Test the data provider functionality"""
+    """Test the enhanced data provider functionality"""
     try:
         config = get_config()
-        logger.info("Testing Data Provider...")
+        logger.info("Testing Enhanced Data Provider...")

-        # Test data provider
+        # Test data provider with multiple timeframes
         data_provider = DataProvider(
             symbols=['ETH/USDT'],
-            timeframes=['1h', '4h']
+            timeframes=['1s', '1m', '1h', '4h']  # Include 1s for scalping
         )

         # Test historical data
@@ -48,24 +49,32 @@ def run_data_test():
         df = data_provider.get_historical_data('ETH/USDT', '1h', limit=100)
         if df is not None:
             logger.info(f"[SUCCESS] Historical data: {len(df)} candles loaded")
-            logger.info(f"   Columns: {list(df.columns)}")
+            logger.info(f"   Columns: {len(df.columns)} total")
             logger.info(f"   Date range: {df['timestamp'].min()} to {df['timestamp'].max()}")
+
+            # Show indicator breakdown
+            basic_cols = ['timestamp', 'open', 'high', 'low', 'close', 'volume']
+            indicators = [col for col in df.columns if col not in basic_cols]
+            logger.info(f"   Technical indicators: {len(indicators)}")
         else:
             logger.error("[FAILED] Failed to load historical data")

-        # Test feature matrix
-        logger.info("Testing feature matrix...")
-        feature_matrix = data_provider.get_feature_matrix('ETH/USDT', ['1h'], window_size=20)
+        # Test multi-timeframe feature matrix
+        logger.info("Testing multi-timeframe feature matrix...")
+        feature_matrix = data_provider.get_feature_matrix('ETH/USDT', ['1h', '4h'], window_size=20)
         if feature_matrix is not None:
             logger.info(f"[SUCCESS] Feature matrix shape: {feature_matrix.shape}")
+            logger.info(f"   Timeframes: {feature_matrix.shape[0]}")
+            logger.info(f"   Window size: {feature_matrix.shape[1]}")
+            logger.info(f"   Features: {feature_matrix.shape[2]}")
         else:
             logger.error("[FAILED] Failed to create feature matrix")

         # Test health check
         health = data_provider.health_check()
-        logger.info(f"[SUCCESS] Data provider health: {health}")
+        logger.info(f"[SUCCESS] Data provider health check completed")

-        logger.info("Data provider test completed successfully!")
+        logger.info("Enhanced data provider test completed successfully!")

     except Exception as e:
         logger.error(f"Error in data test: {e}")
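
The feature matrix shape asserted above follows the layout (timeframes, window_size, features): one window of indicator rows per requested timeframe. A small sketch of how a consumer might unpack or flatten that array (dummy data; this is not the DataProvider implementation):

import numpy as np

# Dummy array with the layout logged above: (n_timeframes, window_size, n_features)
feature_matrix = np.random.rand(2, 20, 8)

n_timeframes, window_size, n_features = feature_matrix.shape
# A common consumption pattern: merge the timeframe and feature axes so each
# window position becomes one flat input vector.
flat = feature_matrix.transpose(1, 0, 2).reshape(window_size, n_timeframes * n_features)
print(flat.shape)  # (20, 16)
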
@@ -73,157 +82,161 @@ def run_data_test():
         logger.error(traceback.format_exc())
         raise

-def run_orchestrator_test():
-    """Test the modular orchestrator system"""
+def run_cnn_training():
+    """Train CNN models only"""
     try:
-        from models import get_model_registry, ModelInterface
-        import numpy as np
-        import torch
+        logger.info("Starting CNN Training Mode...")

-        logger.info("Testing Modular Orchestrator System...")
-
-        # Test model registry
-        registry = get_model_registry()
-        logger.info(f"[SUCCESS] Model registry initialized with {registry.total_memory_limit_mb}MB limit")
-
-        # Create a mock model for testing
-        class MockCNNModel(ModelInterface):
-            def __init__(self):
-                config = {'max_memory_mb': 500}  # 500MB limit
-                super().__init__('MockCNN', config)
-                self.model_params = torch.randn(1000, 100)  # Small mock model
-
-            def predict(self, features):
-                # Mock prediction: random but consistent
-                np.random.seed(42)
-                action_probs = np.random.dirichlet([1, 1, 1])  # Random probabilities that sum to 1
-                confidence = np.random.uniform(0.5, 0.9)
-                return action_probs, confidence
-
-            def get_memory_usage(self):
-                # Estimate memory usage
-                if hasattr(self, 'model_params'):
-                    return int(self.model_params.numel() * 4 / (1024*1024))  # 4 bytes per float, convert to MB
-                return 0
-
-        class MockRLAgent(ModelInterface):
-            def __init__(self):
-                config = {'max_memory_mb': 300}  # 300MB limit
-                super().__init__('MockRL', config)
-                self.q_network = torch.randn(500, 50)  # Smaller mock RL model
-
-            def predict(self, features):
-                # Mock RL prediction
-                np.random.seed(123)
-                action_probs = np.random.dirichlet([2, 1, 2])  # Favor BUY/SELL over HOLD
-                confidence = np.random.uniform(0.6, 0.8)
-                return action_probs, confidence
-
-            def act_with_confidence(self, state):
-                action_probs, confidence = self.predict(state)
-                action = np.argmax(action_probs)
-                return action, confidence
-
-            def get_memory_usage(self):
-                if hasattr(self, 'q_network'):
-                    return int(self.q_network.numel() * 4 / (1024*1024))
-                return 0
-
-            def act(self, state):
-                return self.act_with_confidence(state)[0]
-
-            def remember(self, state, action, reward, next_state, done):
-                pass  # Mock implementation
-
-            def replay(self):
-                return 0.0  # Mock implementation
-
-        # Test model registration
-        logger.info("Testing model registration...")
-        mock_cnn = MockCNNModel()
-        mock_rl = MockRLAgent()
-
-        success1 = registry.register_model(mock_cnn)
-        success2 = registry.register_model(mock_rl)
-
-        if success1 and success2:
-            logger.info("[SUCCESS] Both models registered successfully")
-        else:
-            logger.error(f"[FAILED] Model registration failed: CNN={success1}, RL={success2}")
-
-        # Test memory stats
-        memory_stats = registry.get_memory_stats()
-        logger.info(f"[SUCCESS] Memory stats: {memory_stats}")
-
-        # Test orchestrator
-        logger.info("Testing orchestrator integration...")
-        data_provider = DataProvider(symbols=['ETH/USDT'], timeframes=['1h'])
+        # Initialize components
+        data_provider = DataProvider(
+            symbols=['ETH/USDT', 'BTC/USDT'],
+            timeframes=['1s', '1m', '5m', '1h', '4h']
+        )
         orchestrator = TradingOrchestrator(data_provider)

-        # Register models with orchestrator
-        success1 = orchestrator.register_model(mock_cnn, weight=0.7)
-        success2 = orchestrator.register_model(mock_rl, weight=0.3)
+        logger.info("Creating CNN training data...")

-        if success1 and success2:
-            logger.info("[SUCCESS] Models registered with orchestrator")
-        else:
-            logger.error(f"[FAILED] Orchestrator registration failed")
+        # Prepare multi-timeframe, multi-symbol feature matrices
+        symbols = ['ETH/USDT', 'BTC/USDT']
+        timeframes = ['1m', '5m', '1h', '4h']

-        # Test orchestrator metrics
-        metrics = orchestrator.get_performance_metrics()
-        logger.info(f"[SUCCESS] Orchestrator metrics: {metrics}")
+        for symbol in symbols:
+            logger.info(f"Preparing CNN data for {symbol}...")
+
+            feature_matrix = data_provider.get_feature_matrix(
+                symbol, timeframes, window_size=50
+            )
+
+            if feature_matrix is not None:
+                logger.info(f"CNN training data ready for {symbol}: {feature_matrix.shape}")
+                # Here you would integrate with your CNN training module
+                # Example: cnn_model.train(feature_matrix, labels)
+            else:
+                logger.warning(f"Could not prepare CNN data for {symbol}")

-        logger.info("Modular orchestrator test completed successfully!")
+        logger.info("CNN training preparation completed!")
+        logger.info("Note: Integrate this with your actual CNN training module")

     except Exception as e:
-        logger.error(f"Error in orchestrator test: {e}")
-        import traceback
-        logger.error(traceback.format_exc())
+        logger.error(f"Error in CNN training: {e}")
         raise

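
The placeholder above ("Example: cnn_model.train(feature_matrix, labels)") leaves the actual hookup open. A hedged sketch of what that integration could look like with a small PyTorch 1D-CNN; TinyCNN, the dummy label, and the single optimizer step are illustrative assumptions, not the project's CNN module:

import numpy as np
import torch
import torch.nn as nn

class TinyCNN(nn.Module):
    """Hypothetical stand-in for the project's CNN model."""
    def __init__(self, n_channels: int, n_actions: int = 3):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv1d(n_channels, 16, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.AdaptiveAvgPool1d(1),
            nn.Flatten(),
            nn.Linear(16, n_actions),
        )

    def forward(self, x):  # x: (batch, channels, window)
        return self.net(x)

# feature_matrix as prepared above: (n_timeframes, window_size, n_features)
feature_matrix = np.random.rand(4, 50, 8).astype(np.float32)
n_tf, window, n_feat = feature_matrix.shape

# Treat timeframe*feature as channels and the window as the sequence axis.
x = torch.from_numpy(feature_matrix).permute(0, 2, 1).reshape(1, n_tf * n_feat, window)
y = torch.tensor([0])  # dummy BUY/SELL/HOLD label

model = TinyCNN(n_channels=n_tf * n_feat)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
loss = nn.CrossEntropyLoss()(model(x), y)
loss.backward()
optimizer.step()
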
+def run_rl_training():
+    """Train RL agents only"""
+    try:
+        logger.info("Starting RL Training Mode...")
+
+        # Initialize components for RL
+        data_provider = DataProvider(
+            symbols=['ETH/USDT'],
+            timeframes=['1s', '1m', '5m']  # Focus on short timeframes for RL
+        )
+        orchestrator = TradingOrchestrator(data_provider)
+
+        logger.info("Setting up RL environment...")
+
+        # Get scalping data for RL training
+        scalping_data = data_provider.get_latest_candles('ETH/USDT', '1s', limit=1000)
+
+        if not scalping_data.empty:
+            logger.info(f"RL training data ready: {len(scalping_data)} 1s candles")
+            logger.info(f"Price range: ${scalping_data['close'].min():.2f} - ${scalping_data['close'].max():.2f}")
+
+            # Here you would integrate with your RL training module
+            # Example: rl_agent.train(environment_data=scalping_data)
+        else:
+            logger.warning("No scalping data available for RL training")
+
+        logger.info("RL training preparation completed!")
+        logger.info("Note: Integrate this with your actual RL training module")
+
+    except Exception as e:
+        logger.error(f"Error in RL training: {e}")
+        raise
+
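
As with the CNN path, "rl_agent.train(environment_data=scalping_data)" is only a placeholder. Below is a minimal sketch of stepping an agent through 1s candles using the act/remember/replay interface that the removed mock RL agent exposed; the RandomAgent, the synthetic candles, and the naive next-tick reward are assumptions for illustration:

import numpy as np
import pandas as pd

# Synthetic 1s closes standing in for data_provider.get_latest_candles('ETH/USDT', '1s', ...)
candles = pd.DataFrame({'close': 3000 + np.cumsum(np.random.randn(1000))})

class RandomAgent:
    """Stand-in exposing the act/remember/replay methods used by the mocks above."""
    def act(self, state):
        return np.random.randint(3)  # 0=BUY, 1=SELL, 2=HOLD
    def remember(self, state, action, reward, next_state, done):
        pass
    def replay(self):
        return 0.0

agent = RandomAgent()
window = 10
for t in range(window, len(candles) - 1):
    state = candles['close'].iloc[t - window:t].values
    action = agent.act(state)
    # Naive reward: next-tick price change, signed by the action taken.
    change = candles['close'].iloc[t + 1] - candles['close'].iloc[t]
    reward = change if action == 0 else -change if action == 1 else 0.0
    next_state = candles['close'].iloc[t - window + 1:t + 1].values
    agent.remember(state, action, reward, next_state, done=False)
    agent.replay()
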
+def run_combined_training():
+    """Train both CNN and RL models"""
+    try:
+        logger.info("Starting Combined Training Mode...")
+
+        # Run CNN training first
+        logger.info("Phase 1: CNN Training")
+        run_cnn_training()
+
+        # Then RL training
+        logger.info("Phase 2: RL Training")
+        run_rl_training()
+
+        logger.info("Combined training completed!")
+
+    except Exception as e:
+        logger.error(f"Error in combined training: {e}")
+        raise
+
+def run_live_trading():
+    """Run live trading mode"""
+    try:
+        logger.info("Starting Live Trading Mode...")
+
+        # Initialize for live trading with 1s scalping focus
+        data_provider = DataProvider(
+            symbols=['ETH/USDT'],
+            timeframes=['1s', '1m', '5m', '15m']
+        )
+        orchestrator = TradingOrchestrator(data_provider)
+
+        # Start real-time data streaming
+        logger.info("Starting real-time data streaming...")
+
+        # This would integrate with your live trading logic
+        logger.info("Live trading mode ready!")
+        logger.info("Note: Integrate this with your actual trading execution")
+
+    except Exception as e:
+        logger.error(f"Error in live trading: {e}")
+        raise
+
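
run_live_trading is deliberately left as a stub. One plausible execution hookup is the decision-callback mechanism that run_web_dashboard registers further down; in this sketch the Decision dataclass and the print call stand in for core.orchestrator.TradingDecision and a real order-routing function, neither of which this commit implements:

import asyncio
from dataclasses import dataclass

@dataclass
class Decision:
    """Stand-in for core.orchestrator.TradingDecision (field names taken from this diff)."""
    action: str
    symbol: str
    price: float

async def on_decision(decision):
    # Hypothetical execution hook; real order routing is outside this commit.
    if decision.action in ('BUY', 'SELL'):
        print(f"execute {decision.action} {decision.symbol} @ {decision.price:.2f}")

# In main_clean.py this would be registered via orchestrator.add_decision_callback(on_decision).
asyncio.run(on_decision(Decision('BUY', 'ETH/USDT', 3001.50)))
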
 def run_web_dashboard(port: int = 8050, demo_mode: bool = True):
-    """Run the web dashboard"""
+    """Run the enhanced web dashboard"""
     try:
         from web.dashboard import TradingDashboard

-        logger.info("Starting Web Dashboard...")
+        logger.info("Starting Enhanced Web Dashboard...")

-        # Initialize components
-        data_provider = DataProvider(symbols=['ETH/USDT'], timeframes=['1h', '4h'])
+        # Initialize components with 1s scalping focus
+        data_provider = DataProvider(
+            symbols=['ETH/USDT'],
+            timeframes=['1s', '1m', '5m', '1h', '4h']
+        )
         orchestrator = TradingOrchestrator(data_provider)

         # Create dashboard
         dashboard = TradingDashboard(data_provider, orchestrator)

         # Add orchestrator callback to send decisions to dashboard
         async def decision_callback(decision):
             dashboard.add_trading_decision(decision)

         orchestrator.add_decision_callback(decision_callback)

         if demo_mode:
-            # Start demo mode with mock decisions
-            logger.info("Starting demo mode with simulated trading decisions...")
+            # Start demo mode with realistic scalping decisions
+            logger.info("Starting scalping demo mode...")

-            def demo_thread():
-                """Generate demo trading decisions"""
+            def scalping_demo_thread():
+                """Generate realistic scalping decisions"""
                 import random
                 import time
                 from datetime import datetime
                 from core.orchestrator import TradingDecision

                 actions = ['BUY', 'SELL', 'HOLD']
+                action_weights = [0.3, 0.3, 0.4]  # More holds in scalping
                 base_price = 3000.0

                 while True:
                     try:
-                        # Simulate price movement
-                        price_change = random.uniform(-50, 50)
+                        # Simulate small price movements for scalping
+                        price_change = random.uniform(-5, 5)  # Smaller movements
                         current_price = max(base_price + price_change, 1000)

-                        # Create mock decision
-                        action = random.choice(actions)
-                        confidence = random.uniform(0.6, 0.95)
+                        # Create scalping decision
+                        action = random.choices(actions, weights=action_weights)[0]
+                        confidence = random.uniform(0.7, 0.95)  # Higher confidence for scalping

                         decision = TradingDecision(
                             action=action,
@@ -231,36 +244,27 @@ def run_web_dashboard(port: int = 8050, demo_mode: bool = True):
                             symbol='ETH/USDT',
                             price=current_price,
                             timestamp=datetime.now(),
-                            reasoning={'demo_mode': True, 'random_decision': True},
+                            reasoning={'scalping_demo': True, 'timeframe': '1s'},
                             memory_usage={'demo': 0}
                         )

                         dashboard.add_trading_decision(decision)
-                        logger.info(f"Demo decision: {action} ETH/USDT @${current_price:.2f} (confidence: {confidence:.2f})")
+                        logger.info(f"Scalping: {action} ETH/USDT @${current_price:.2f} (conf: {confidence:.2f})")

                         # Update base price occasionally
-                        if random.random() < 0.1:
+                        if random.random() < 0.2:
                             base_price = current_price

-                        time.sleep(5)  # New decision every 5 seconds
+                        time.sleep(3)  # Faster decisions for scalping

                     except Exception as e:
-                        logger.error(f"Error in demo thread: {e}")
-                        time.sleep(10)
+                        logger.error(f"Error in scalping demo: {e}")
+                        time.sleep(5)

-            # Start demo thread
-            demo_thread_instance = Thread(target=demo_thread, daemon=True)
+            # Start scalping demo thread
+            demo_thread_instance = Thread(target=scalping_demo_thread, daemon=True)
             demo_thread_instance.start()

         # Start data streaming if available
         try:
             logger.info("Starting real-time data streaming...")
             # Don't use asyncio.run here as we're already in an event loop context
             # Just log that streaming would be started in a real deployment
             logger.info("Real-time streaming would be started in production deployment")
         except Exception as e:
             logger.warning(f"Could not start real-time streaming: {e}")

         # Run dashboard
         dashboard.run(port=port, debug=False)

@@ -271,17 +275,18 @@ def run_web_dashboard(port: int = 8050, demo_mode: bool = True):
         raise

 async def main():
-    """Main entry point"""
-    parser = argparse.ArgumentParser(description='Clean Trading System')
-    parser.add_argument('--mode', choices=['trade', 'train', 'web', 'test', 'orchestrator'],
-                       default='test', help='Mode to run the system in')
-    parser.add_argument('--symbol', type=str, help='Override default symbol')
-    parser.add_argument('--config', type=str, default='config.yaml',
-                       help='Configuration file path')
+    """Main entry point with clean mode selection"""
+    parser = argparse.ArgumentParser(description='Clean Trading System - Unified Entry Point')
+    parser.add_argument('--mode',
+                       choices=['test', 'cnn', 'rl', 'train', 'trade', 'web'],
+                       default='test',
+                       help='Operation mode')
+    parser.add_argument('--symbol', type=str, default='ETH/USDT',
+                       help='Trading symbol (default: ETH/USDT)')
     parser.add_argument('--port', type=int, default=8050,
-                       help='Port for web dashboard')
+                       help='Web dashboard port (default: 8050)')
     parser.add_argument('--demo', action='store_true',
-                       help='Run web dashboard in demo mode with simulated data')
+                       help='Run web dashboard in demo mode')

     args = parser.parse_args()

@@ -290,18 +295,26 @@ async def main():
     try:
         logger.info("=" * 60)
-        logger.info("CLEAN TRADING SYSTEM STARTING")
+        logger.info("CLEAN TRADING SYSTEM - UNIFIED LAUNCH")
         logger.info(f"Mode: {args.mode.upper()}")
+        logger.info(f"Symbol: {args.symbol}")
         logger.info("=" * 60)

-        # Run appropriate mode
+        # Route to appropriate mode
         if args.mode == 'test':
             run_data_test()
-        elif args.mode == 'orchestrator':
-            run_orchestrator_test()
+        elif args.mode == 'cnn':
+            run_cnn_training()
+        elif args.mode == 'rl':
+            run_rl_training()
+        elif args.mode == 'train':
+            run_combined_training()
+        elif args.mode == 'trade':
+            run_live_trading()
         elif args.mode == 'web':
             run_web_dashboard(port=args.port, demo_mode=args.demo)
         else:
             logger.info(f"Mode '{args.mode}' not yet implemented in clean architecture")

+        logger.info("Operation completed successfully!")

     except KeyboardInterrupt:
         logger.info("System shutdown requested by user")
@@ -311,7 +324,6 @@ async def main():
         logger.error(traceback.format_exc())
         return 1

-    logger.info("Clean Trading System finished")
     return 0

 if __name__ == "__main__":