Massive cleanup
This commit is contained in:
230
run_tests.py
230
run_tests.py
@ -1,77 +1,181 @@
|
||||
#!/usr/bin/env python3
"""
Unified Test Runner for Trading System

This script provides a unified interface to run all tests in the system:
- Essential functionality tests
- Model persistence tests
- Training integration tests
- Indicators and signals tests
- Remaining individual test files

Usage:
    python run_tests.py [test_name]

Example:
    python run_tests.py              # Run all tests
    python run_tests.py essential    # Run essential tests only
    python run_tests.py persistence  # Run model persistence tests only
    python run_tests.py training     # Run training integration tests only
    python run_tests.py indicators   # Run indicators and signals tests only
    python run_tests.py individual   # Run individual test files only
"""

import logging
import subprocess
import sys
from pathlib import Path
|
||||
|
||||
# Add the project root to sys.path so local packages (core, tests, ...)
# resolve regardless of the directory this script is launched from.
project_root = Path(__file__).parent
sys.path.insert(0, str(project_root))

# Imported after the path tweak so the local package is found.
from core.config import setup_logging

logger = logging.getLogger(__name__)
|
||||
|
||||
def run_test_module(module_path, test_type="all"):
    """Run a specific test module in a subprocess.

    Args:
        module_path: Path to the test script to execute.
        test_type: Optional sub-selection forwarded to the script as its
            first argument; "all" (default) passes no extra argument.

    Returns:
        True if the subprocess exited with status 0, False on failure or
        if launching the subprocess raised an exception.
    """
    try:
        cmd = [sys.executable, str(module_path)]
        if test_type != "all":
            cmd.append(test_type)

        logger.info(f"Running: {' '.join(cmd)}")
        # Capture output so it can be routed through the logger instead of
        # interleaving raw subprocess output with our own log lines.
        result = subprocess.run(cmd, capture_output=True, text=True, cwd=project_root)

        if result.returncode == 0:
            logger.info(f"✅ {module_path.name} passed")
            if result.stdout:
                logger.info(result.stdout)
            return True
        else:
            logger.error(f"❌ {module_path.name} failed")
            if result.stderr:
                logger.error(result.stderr)
            if result.stdout:
                logger.error(result.stdout)
            return False

    except Exception as e:
        # Best-effort runner: report the failure but keep other suites going.
        logger.error(f"❌ Error running {module_path}: {e}")
        return False
|
||||
|
||||
def run_essential_tests():
    """Run essential functionality tests"""
    logger.info("=== Running Essential Tests ===")
    test_file = project_root / "tests" / "test_essential.py"
    return run_test_module(test_file)
|
||||
|
||||
def run_persistence_tests():
    """Run model persistence tests"""
    logger.info("=== Running Model Persistence Tests ===")
    test_file = project_root / "tests" / "test_model_persistence.py"
    return run_test_module(test_file)
|
||||
|
||||
def run_training_tests():
    """Run training integration tests"""
    logger.info("=== Running Training Integration Tests ===")
    test_file = project_root / "tests" / "test_training_integration.py"
    return run_test_module(test_file)
|
||||
|
||||
def run_indicators_tests():
    """Run indicators and signals tests"""
    logger.info("=== Running Indicators and Signals Tests ===")
    test_file = project_root / "tests" / "test_indicators_and_signals.py"
    return run_test_module(test_file)
|
||||
|
||||
def run_individual_tests():
    """Run remaining individual test files.

    Each file is executed via run_test_module; a missing file counts as a
    failure so the summary surfaces stale references.

    Returns:
        True only if every individual test file exists and passes.
    """
    logger.info("=== Running Individual Test Files ===")

    individual_tests = [
        "test_positions.py",
        "test_tick_cache.py",
        "test_timestamps.py",
    ]

    results = []
    for test_file in individual_tests:
        test_path = project_root / test_file
        if test_path.exists():
            logger.info(f"Running {test_file}...")
            result = run_test_module(test_path)
            results.append(result)
        else:
            logger.warning(f"Test file not found: {test_file}")
            results.append(False)

    return all(results)
|
||||
|
||||
def run_all_tests():
    """Run every test suite and log a pass/fail summary.

    A suite that raises is recorded as failed rather than aborting the
    remaining suites.

    Returns:
        True if every suite passed, False otherwise.
    """
    logger.info("🧪 Running All Trading System Tests")
    logger.info("=" * 60)

    test_suites = [
        ("Essential Tests", run_essential_tests),
        ("Model Persistence Tests", run_persistence_tests),
        ("Training Integration Tests", run_training_tests),
        ("Indicators and Signals Tests", run_indicators_tests),
        ("Individual Tests", run_individual_tests),
    ]

    results = []
    for suite_name, suite_func in test_suites:
        logger.info(f"\n📋 {suite_name}")
        logger.info("-" * 40)
        try:
            result = suite_func()
            results.append((suite_name, result))
        except Exception as e:
            # One crashing suite must not stop the rest of the run.
            logger.error(f"❌ {suite_name} crashed: {e}")
            results.append((suite_name, False))

    # Print summary
    logger.info("\n" + "=" * 60)
    logger.info("📊 TEST RESULTS SUMMARY")
    logger.info("=" * 60)

    passed = 0
    for suite_name, result in results:
        status = "✅ PASS" if result else "❌ FAIL"
        logger.info(f"{status}: {suite_name}")
        if result:
            passed += 1

    logger.info(f"\nPassed: {passed}/{len(results)} test suites")

    if passed == len(results):
        logger.info("🎉 All tests passed! Trading system is working correctly.")
        return True
    else:
        logger.warning(f"⚠️ {len(results) - passed} test suite(s) failed. Please check the issues above.")
        return False
|
||||
|
||||
def main():
    """Main test runner"""
    setup_logging()

    # No argument means run the full battery.
    if len(sys.argv) <= 1:
        return 0 if run_all_tests() else 1

    test_type = sys.argv[1].lower()

    if test_type in ["help", "-h", "--help"]:
        print(__doc__)
        return 0

    # Map each CLI keyword to its suite runner.
    runners = {
        "essential": run_essential_tests,
        "persistence": run_persistence_tests,
        "training": run_training_tests,
        "indicators": run_indicators_tests,
        "individual": run_individual_tests,
    }

    runner = runners.get(test_type)
    if runner is None:
        logger.error(f"Unknown test type: {test_type}")
        print(__doc__)
        return 1

    success = runner()
    return 0 if success else 1


if __name__ == "__main__":
    sys.exit(main())
|
Reference in New Issue
Block a user