#!/usr/bin/env python3
|
|
"""
|
|
Unified Test Runner for Trading System
|
|
|
|
This script provides a unified interface to run all tests in the system:
|
|
- Essential functionality tests
|
|
- Model persistence tests
|
|
- Training integration tests
|
|
- Indicators and signals tests
|
|
- Remaining individual test files
|
|
|
|
Usage:
|
|
python run_tests.py # Run all tests
|
|
python run_tests.py essential # Run essential tests only
|
|
python run_tests.py persistence # Run model persistence tests only
|
|
python run_tests.py training # Run training integration tests only
|
|
python run_tests.py indicators # Run indicators and signals tests only
|
|
python run_tests.py individual # Run individual test files only
|
|
"""
|
|
|
|
import sys
import os
import subprocess
import logging
from pathlib import Path

# Add the project root to sys.path so sibling packages (e.g. ``core``)
# resolve when this script is executed directly rather than as a module.
project_root = Path(__file__).parent
sys.path.insert(0, str(project_root))

# Imported after the path insertion above so the local package is found.
from core.config import setup_logging

# Module-level logger; configured by setup_logging() in main().
logger = logging.getLogger(__name__)
|
|
|
|
def run_test_module(module_path, test_type="all"):
    """Execute one test script in a subprocess and report the outcome.

    Args:
        module_path: Path to the test script to run.
        test_type: Optional sub-selection forwarded as a CLI argument;
            the default "all" passes no extra argument.

    Returns:
        True when the subprocess exits with status 0, False otherwise
        (including when launching the subprocess itself fails).
    """
    try:
        command = [sys.executable, str(module_path)]
        if test_type != "all":
            command.append(test_type)

        logger.info(f"Running: {' '.join(command)}")
        completed = subprocess.run(
            command, capture_output=True, text=True, cwd=project_root
        )

        # Non-zero exit: surface both streams at error level for triage.
        if completed.returncode != 0:
            logger.error(f"❌ {module_path.name} failed")
            if completed.stderr:
                logger.error(completed.stderr)
            if completed.stdout:
                logger.error(completed.stdout)
            return False

        logger.info(f"✅ {module_path.name} passed")
        if completed.stdout:
            logger.info(completed.stdout)
        return True

    except Exception as e:
        # Launch/run failure (e.g. interpreter missing) counts as a failure.
        logger.error(f"❌ Error running {module_path}: {e}")
        return False
|
|
|
|
def run_essential_tests():
    """Run the essential functionality test suite; return True on success."""
    logger.info("=== Running Essential Tests ===")
    target = project_root / "tests" / "test_essential.py"
    return run_test_module(target)
|
|
|
|
def run_persistence_tests():
    """Run the model persistence test suite; return True on success."""
    logger.info("=== Running Model Persistence Tests ===")
    target = project_root / "tests" / "test_model_persistence.py"
    return run_test_module(target)
|
|
|
|
def run_training_tests():
    """Run the training integration test suite; return True on success."""
    logger.info("=== Running Training Integration Tests ===")
    target = project_root / "tests" / "test_training_integration.py"
    return run_test_module(target)
|
|
|
|
def run_indicators_tests():
    """Run the indicators and signals test suite; return True on success."""
    logger.info("=== Running Indicators and Signals Tests ===")
    target = project_root / "tests" / "test_indicators_and_signals.py"
    return run_test_module(target)
|
|
|
|
def run_individual_tests():
    """Run the stand-alone test scripts located at the project root.

    A script that is missing counts as a failure (after a warning).

    Returns:
        True only when every listed script exists and exits successfully.
    """
    logger.info("=== Running Individual Test Files ===")

    standalone_scripts = (
        "test_positions.py",
        "test_tick_cache.py",
        "test_timestamps.py",
    )

    outcomes = []
    for script_name in standalone_scripts:
        script_path = project_root / script_name
        if not script_path.exists():
            logger.warning(f"Test file not found: {script_name}")
            outcomes.append(False)
            continue
        logger.info(f"Running {script_name}...")
        outcomes.append(run_test_module(script_path))

    return all(outcomes)
|
|
|
|
def run_all_tests():
    """Execute every test suite and log a consolidated summary.

    Each suite runs even if an earlier one fails or crashes; a crash in a
    suite function is caught and recorded as a failure.

    Returns:
        True when every suite passes, False otherwise.
    """
    logger.info("🧪 Running All Trading System Tests")
    logger.info("=" * 60)

    suites = (
        ("Essential Tests", run_essential_tests),
        ("Model Persistence Tests", run_persistence_tests),
        ("Training Integration Tests", run_training_tests),
        ("Indicators and Signals Tests", run_indicators_tests),
        ("Individual Tests", run_individual_tests),
    )

    outcomes = []
    for suite_name, suite_runner in suites:
        logger.info(f"\n📋 {suite_name}")
        logger.info("-" * 40)
        try:
            outcomes.append((suite_name, suite_runner()))
        except Exception as e:
            # A crashing suite is recorded as failed; remaining suites still run.
            logger.error(f"❌ {suite_name} crashed: {e}")
            outcomes.append((suite_name, False))

    # Consolidated pass/fail summary.
    logger.info("\n" + "=" * 60)
    logger.info("📊 TEST RESULTS SUMMARY")
    logger.info("=" * 60)

    for suite_name, ok in outcomes:
        status = "✅ PASS" if ok else "❌ FAIL"
        logger.info(f"{status}: {suite_name}")

    passed = sum(1 for _, ok in outcomes if ok)
    logger.info(f"\nPassed: {passed}/{len(outcomes)} test suites")

    if passed == len(outcomes):
        logger.info("🎉 All tests passed! Trading system is working correctly.")
        return True

    logger.warning(f"⚠️ {len(outcomes) - passed} test suite(s) failed. Please check the issues above.")
    return False
|
|
|
|
def main():
    """CLI entry point: select a test suite from argv and run it.

    With no argument all suites run; ``help``/``-h``/``--help`` prints the
    module docstring; an unknown selection prints usage and fails.

    Returns:
        0 on success, 1 on failure or unknown selection.
    """
    setup_logging()

    # No argument: run everything.
    if len(sys.argv) <= 1:
        return 0 if run_all_tests() else 1

    selection = sys.argv[1].lower()

    if selection in ("help", "-h", "--help"):
        print(__doc__)
        return 0

    # Map each CLI keyword to its suite runner.
    dispatch = {
        "essential": run_essential_tests,
        "persistence": run_persistence_tests,
        "training": run_training_tests,
        "indicators": run_indicators_tests,
        "individual": run_individual_tests,
    }

    suite_runner = dispatch.get(selection)
    if suite_runner is None:
        logger.error(f"Unknown test type: {selection}")
        print(__doc__)
        return 1

    return 0 if suite_runner() else 1
|
|
|
|
# Script entry point: process exit status mirrors the overall test outcome.
if __name__ == "__main__":
    sys.exit(main())