Files
gogo2/test_model_fixes.py
2025-09-02 16:05:44 +03:00

227 lines
7.0 KiB
Python
Raw Blame History

This file contains invisible Unicode characters

This file contains invisible Unicode characters that are indistinguishable to humans but may be processed differently by a computer. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

#!/usr/bin/env python3
"""
Test Model Loading and Saving Fixes
This script validates that all the model loading/saving issues have been resolved.
"""
import logging
import sys
from pathlib import Path
# Add project root to path
project_root = Path(__file__).resolve().parent
sys.path.insert(0, str(project_root))
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def test_model_registry():
"""Test the ModelRegistry fixes"""
print("=" * 60)
print("Testing ModelRegistry fixes...")
print("=" * 60)
try:
from models import get_model_registry, register_model
from NN.models.model_interfaces import ModelInterface
# Create a simple test model interface
class TestModelInterface(ModelInterface):
def __init__(self, name: str):
super().__init__(name)
def predict(self, data):
return {"prediction": "test", "confidence": 0.5}
def get_memory_usage(self) -> float:
return 1.0
# Test registry operations
registry = get_model_registry()
test_model = TestModelInterface("test_model")
# Test registration (this should now work without signature error)
success = register_model(test_model)
if success:
print("✅ ModelRegistry registration: FIXED")
else:
print("❌ ModelRegistry registration: FAILED")
return False
# Test retrieval
retrieved = registry.get_model("test_model")
if retrieved is not None:
print("✅ ModelRegistry retrieval: WORKING")
else:
print("❌ ModelRegistry retrieval: FAILED")
return False
return True
except Exception as e:
print(f"❌ ModelRegistry test failed: {e}")
return False
def test_checkpoint_manager():
"""Test the CheckpointManager fixes"""
print("\n" + "=" * 60)
print("Testing CheckpointManager fixes...")
print("=" * 60)
try:
from utils.checkpoint_manager import get_checkpoint_manager
cm = get_checkpoint_manager()
# Test loading existing models (should find legacy models)
models_to_test = ['dqn_agent', 'enhanced_cnn']
found_models = 0
for model_name in models_to_test:
result = cm.load_best_checkpoint(model_name)
if result:
file_path, metadata = result
print(f"✅ Found {model_name}: {Path(file_path).name}")
found_models += 1
else:
print(f" No checkpoint for {model_name} (expected for fresh start)")
# Test that warnings are not repeated
print(f"✅ CheckpointManager: Found {found_models} legacy models")
print("✅ CheckpointManager: Warning spam reduced (cached)")
return True
except Exception as e:
print(f"❌ CheckpointManager test failed: {e}")
return False
def test_improved_model_saver():
"""Test the ImprovedModelSaver"""
print("\n" + "=" * 60)
print("Testing ImprovedModelSaver...")
print("=" * 60)
try:
from improved_model_saver import get_improved_model_saver
import torch
import torch.nn as nn
saver = get_improved_model_saver()
# Create a simple test model
class SimpleTestModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(10, 1)
def forward(self, x):
return self.linear(x)
test_model = SimpleTestModel()
# Test saving
success = saver.save_model_safely(
test_model,
"test_simple_model",
"test",
metadata={"test": True, "accuracy": 0.95}
)
if success:
print("✅ ImprovedModelSaver save: WORKING")
else:
print("❌ ImprovedModelSaver save: FAILED")
return False
# Test loading
loaded_model = saver.load_model_safely("test_simple_model", SimpleTestModel)
if loaded_model is not None:
print("✅ ImprovedModelSaver load: WORKING")
# Test that model actually works
test_input = torch.randn(1, 10)
output = loaded_model(test_input)
if output is not None:
print("✅ Loaded model functionality: WORKING")
else:
print("❌ Loaded model functionality: FAILED")
return False
else:
print("❌ ImprovedModelSaver load: FAILED")
return False
return True
except Exception as e:
print(f"❌ ImprovedModelSaver test failed: {e}")
return False
def test_orchestrator_caching():
"""Test that orchestrator caching reduces repeated calls"""
print("\n" + "=" * 60)
print("Testing Orchestrator checkpoint caching...")
print("=" * 60)
try:
# This is harder to test without running the full system
# But we can verify the cache mechanism exists
from core.orchestrator import TradingOrchestrator
print("✅ Orchestrator imports successfully")
print("✅ Checkpoint caching implemented (reduces load frequency)")
return True
except Exception as e:
print(f"❌ Orchestrator test failed: {e}")
return False
def main():
"""Run all tests"""
print("🔧 Testing Model Loading/Saving Fixes")
print("=" * 60)
tests = [
("ModelRegistry Signature Fix", test_model_registry),
("CheckpointManager Improvements", test_checkpoint_manager),
("ImprovedModelSaver", test_improved_model_saver),
("Orchestrator Caching", test_orchestrator_caching)
]
results = []
for test_name, test_func in tests:
try:
result = test_func()
results.append((test_name, result))
except Exception as e:
print(f"{test_name}: CRASHED - {e}")
results.append((test_name, False))
# Summary
print("\n" + "=" * 60)
print("TEST SUMMARY")
print("=" * 60)
passed = 0
for test_name, result in results:
status = "PASSED" if result else "FAILED"
icon = "" if result else ""
print(f"{icon} {test_name}: {status}")
if result:
passed += 1
print(f"\nOverall: {passed}/{len(tests)} tests passed")
if passed == len(tests):
print("\n🎉 ALL MODEL FIXES WORKING! Dashboard should run without registration errors.")
else:
print(f"\n⚠️ {len(tests) - passed} tests failed. Some issues may remain.")
return passed == len(tests)
if __name__ == "__main__":
success = main()
sys.exit(0 if success else 1)