18: tests, fixes

COBY/tests/test_e2e_dashboard.py | 485 (new file)

@@ -0,0 +1,485 @@
"""
|
||||
End-to-end tests for web dashboard functionality.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import asyncio
|
||||
import json
|
||||
from datetime import datetime, timezone
|
||||
from unittest.mock import Mock, AsyncMock, patch
|
||||
from typing import Dict, Any, List
|
||||
|
||||
import aiohttp
|
||||
from aiohttp import web, WSMsgType
|
||||
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
|
||||
|
||||
from ..api.rest_api import create_app
|
||||
from ..api.websocket_server import WebSocketServer
|
||||
from ..models.core import OrderBookSnapshot, TradeEvent, PriceLevel
|
||||
from ..utils.logging import get_logger
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
class TestDashboardAPI(AioHTTPTestCase):
    """Test dashboard REST API endpoints"""

    async def get_application(self):
        """Create test application"""
        return create_app()

    @unittest_run_loop
    async def test_health_endpoint(self):
        """Test health check endpoint"""
        resp = await self.client.request("GET", "/health")
        self.assertEqual(resp.status, 200)

        data = await resp.json()
        self.assertIn('status', data)
        self.assertIn('timestamp', data)
        self.assertEqual(data['status'], 'healthy')

    @unittest_run_loop
    async def test_metrics_endpoint(self):
        """Test metrics endpoint"""
        resp = await self.client.request("GET", "/metrics")
        self.assertEqual(resp.status, 200)

        # Should return Prometheus exposition format
        text = await resp.text()
        self.assertIn('# TYPE', text)

    @unittest_run_loop
    async def test_orderbook_endpoint(self):
        """Test order book data endpoint"""
        # Mock cached order book data
        with patch('COBY.caching.redis_manager.redis_manager') as mock_redis:
            mock_redis.get.return_value = {
                'symbol': 'BTCUSDT',
                'exchange': 'binance',
                'bids': [{'price': 50000.0, 'size': 1.0}],
                'asks': [{'price': 50010.0, 'size': 1.0}]
            }

            resp = await self.client.request("GET", "/api/orderbook/BTCUSDT")
            self.assertEqual(resp.status, 200)

            data = await resp.json()
            self.assertIn('symbol', data)
            self.assertEqual(data['symbol'], 'BTCUSDT')

    @unittest_run_loop
    async def test_heatmap_endpoint(self):
        """Test heatmap data endpoint"""
        with patch('COBY.caching.redis_manager.redis_manager') as mock_redis:
            mock_redis.get.return_value = {
                'symbol': 'BTCUSDT',
                'bucket_size': 1.0,
                'data': [
                    {'price': 50000.0, 'volume': 10.0, 'intensity': 0.8, 'side': 'bid'}
                ]
            }

            resp = await self.client.request("GET", "/api/heatmap/BTCUSDT")
            self.assertEqual(resp.status, 200)

            data = await resp.json()
            self.assertIn('symbol', data)
            self.assertIn('data', data)

    @unittest_run_loop
    async def test_exchanges_status_endpoint(self):
        """Test exchanges status endpoint"""
        with patch('COBY.connectors.connection_manager.connection_manager') as mock_manager:
            mock_manager.get_all_statuses.return_value = {
                'binance': 'connected',
                'coinbase': 'connected',
                'kraken': 'disconnected'
            }

            resp = await self.client.request("GET", "/api/exchanges/status")
            self.assertEqual(resp.status, 200)

            data = await resp.json()
            self.assertIn('binance', data)
            self.assertIn('coinbase', data)
            self.assertIn('kraken', data)

    @unittest_run_loop
    async def test_performance_metrics_endpoint(self):
        """Test performance metrics endpoint"""
        with patch('COBY.monitoring.performance_monitor.get_performance_monitor') as mock_monitor:
            mock_monitor.return_value.get_performance_dashboard_data.return_value = {
                'timestamp': datetime.now(timezone.utc).isoformat(),
                'system_metrics': {
                    'cpu_usage': 45.2,
                    'memory_usage': 67.8,
                    'active_connections': 150
                },
                'performance_summary': {
                    'throughput': 1250.5,
                    'error_rate': 0.1,
                    'avg_latency': 12.3
                }
            }

            resp = await self.client.request("GET", "/api/performance")
            self.assertEqual(resp.status, 200)

            data = await resp.json()
            self.assertIn('system_metrics', data)
            self.assertIn('performance_summary', data)

    @unittest_run_loop
    async def test_static_files_served(self):
        """Test that static files are served correctly"""
        # Test dashboard index
        resp = await self.client.request("GET", "/")
        self.assertEqual(resp.status, 200)

        content_type = resp.headers.get('content-type', '')
        self.assertIn('text/html', content_type)

    @unittest_run_loop
    async def test_cors_headers(self):
        """Test CORS headers are present"""
        resp = await self.client.request("OPTIONS", "/api/health")
        self.assertEqual(resp.status, 200)

        # Check CORS headers
        self.assertIn('Access-Control-Allow-Origin', resp.headers)
        self.assertIn('Access-Control-Allow-Methods', resp.headers)

    @unittest_run_loop
    async def test_rate_limiting(self):
        """Test API rate limiting"""
        # Make many requests quickly
        responses = []
        for _ in range(150):  # Exceed the rate limit
            resp = await self.client.request("GET", "/api/health")
            responses.append(resp.status)

        # Should have some rate-limited responses
        rate_limited = [status for status in responses if status == 429]
        self.assertGreater(len(rate_limited), 0, "Rate limiting not working")

    @unittest_run_loop
    async def test_error_handling(self):
        """Test API error handling"""
        # Test an invalid symbol
        resp = await self.client.request("GET", "/api/orderbook/INVALID")
        self.assertEqual(resp.status, 404)

        data = await resp.json()
        self.assertIn('error', data)

    @unittest_run_loop
    async def test_api_documentation(self):
        """Test API documentation endpoints"""
        # Test OpenAPI docs
        resp = await self.client.request("GET", "/docs")
        self.assertEqual(resp.status, 200)

        # Test ReDoc
        resp = await self.client.request("GET", "/redoc")
        self.assertEqual(resp.status, 200)


class TestWebSocketFunctionality:
    """Test WebSocket functionality"""

    # Async generator fixtures need pytest_asyncio.fixture; under plain
    # pytest.fixture the tests would receive the generator object instead
    # of the started server. Port 8081 is hard-coded here; see the
    # free-port helper sketched after this class.
    @pytest_asyncio.fixture
    async def websocket_server(self):
        """Create WebSocket server for testing"""
        server = WebSocketServer(host='localhost', port=8081)
        await server.start()
        yield server
        await server.stop()

    @pytest.mark.asyncio
    async def test_websocket_connection(self, websocket_server):
        """Test WebSocket connection establishment"""
        session = aiohttp.ClientSession()

        try:
            # autoping=False so the PONG frame is surfaced by receive()
            # instead of being consumed internally by aiohttp.
            async with session.ws_connect('ws://localhost:8081/ws/dashboard',
                                          autoping=False) as ws:
                # Connection should be established
                assert not ws.closed

                # Send ping
                await ws.ping()

                # Should receive pong
                msg = await ws.receive()
                assert msg.type == WSMsgType.PONG

        finally:
            await session.close()

    @pytest.mark.asyncio
    async def test_websocket_data_streaming(self, websocket_server):
        """Test real-time data streaming via WebSocket"""
        session = aiohttp.ClientSession()

        try:
            async with session.ws_connect('ws://localhost:8081/ws/dashboard') as ws:
                # Subscribe to updates
                subscribe_msg = {
                    'type': 'subscribe',
                    'channels': ['orderbook', 'trades', 'performance']
                }
                await ws.send_str(json.dumps(subscribe_msg))

                # Should receive subscription confirmation
                msg = await ws.receive()
                assert msg.type == WSMsgType.TEXT

                data = json.loads(msg.data)
                assert data.get('type') == 'subscription_confirmed'

        finally:
            await session.close()

    @pytest.mark.asyncio
    async def test_websocket_error_handling(self, websocket_server):
        """Test WebSocket error handling"""
        session = aiohttp.ClientSession()

        try:
            async with session.ws_connect('ws://localhost:8081/ws/dashboard') as ws:
                # Send an invalid message
                invalid_msg = {'invalid': 'message'}
                await ws.send_str(json.dumps(invalid_msg))

                # Should receive an error response
                msg = await ws.receive()
                assert msg.type == WSMsgType.TEXT

                data = json.loads(msg.data)
                assert data.get('type') == 'error'

        finally:
            await session.close()

    @pytest.mark.asyncio
    async def test_multiple_websocket_connections(self, websocket_server):
        """Test multiple concurrent WebSocket connections"""
        session = aiohttp.ClientSession()
        connections = []

        try:
            # Create multiple connections
            for _ in range(10):
                ws = await session.ws_connect('ws://localhost:8081/ws/dashboard')
                connections.append(ws)

            # All connections should be active
            for ws in connections:
                assert not ws.closed

            # Send a message on every connection
            test_msg = {'type': 'ping', 'id': 'test'}
            for ws in connections:
                await ws.send_str(json.dumps(test_msg))

            # All should receive responses
            for ws in connections:
                msg = await ws.receive()
                assert msg.type == WSMsgType.TEXT

        finally:
            # Close all connections
            for ws in connections:
                if not ws.closed:
                    await ws.close()
            await session.close()

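
# A minimal sketch of a free-port helper for the fixture above, to avoid
# collisions on the hard-coded port 8081 in CI. The socket trick is plain
# standard-library behaviour; passing the result into WebSocketServer is an
# assumption about its constructor.
def _free_port() -> int:
    """Ask the OS for an unused TCP port and return it."""
    import socket

    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind(('localhost', 0))  # port 0 -> OS picks a free port
        return sock.getsockname()[1]

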
class TestDashboardIntegration:
    """Test dashboard integration with backend services"""

    @pytest.fixture
    def mock_services(self):
        """Mock backend services"""
        services = {
            'redis': Mock(),
            'timescale': Mock(),
            'connectors': Mock(),
            'aggregator': Mock(),
            'monitor': Mock()
        }

        # Set up mock responses
        services['redis'].get.return_value = {'test': 'data'}
        services['timescale'].query.return_value = [{'result': 'data'}]
        services['connectors'].get_status.return_value = 'connected'
        services['aggregator'].get_heatmap.return_value = {'heatmap': 'data'}
        services['monitor'].get_metrics.return_value = {'metrics': 'data'}

        return services

    @pytest.mark.asyncio
    async def test_dashboard_data_flow(self, mock_services):
        """Test complete data flow from backend to dashboard"""
        # Simulate data generation
        orderbook = OrderBookSnapshot(
            symbol="BTCUSDT",
            exchange="binance",
            timestamp=datetime.now(timezone.utc),
            bids=[PriceLevel(price=50000.0, size=1.0)],
            asks=[PriceLevel(price=50010.0, size=1.0)]
        )

        # Mock the data processing pipeline
        with patch.multiple(
            'COBY.processing.data_processor',
            DataProcessor=Mock()
        ):
            # Process data
            processor = Mock()
            processor.normalize_orderbook.return_value = orderbook

            # Aggregate data
            aggregator = Mock()
            aggregator.create_price_buckets.return_value = Mock()
            aggregator.generate_heatmap.return_value = Mock()

            # Cache data
            cache = Mock()
            cache.set.return_value = True

            # Verify data flows through the pipeline
            processed = processor.normalize_orderbook({}, "binance")
            buckets = aggregator.create_price_buckets(processed)
            heatmap = aggregator.generate_heatmap(buckets)
            cached = cache.set("test_key", heatmap)

            assert processed is not None
            assert buckets is not None
            assert heatmap is not None
            assert cached is True

    @pytest.mark.asyncio
    async def test_real_time_updates(self, mock_services):
        """Test real-time dashboard updates"""
        # Mock WebSocket server
        ws_server = Mock()
        ws_server.broadcast = AsyncMock()

        # Simulate real-time data updates
        updates = [
            {'type': 'orderbook', 'symbol': 'BTCUSDT', 'data': {}},
            {'type': 'trade', 'symbol': 'BTCUSDT', 'data': {}},
            {'type': 'performance', 'data': {}}
        ]

        # Send updates
        for update in updates:
            await ws_server.broadcast(json.dumps(update))

        # Verify broadcasts were sent
        assert ws_server.broadcast.call_count == len(updates)

    @pytest.mark.asyncio
    async def test_dashboard_performance_under_load(self, mock_services):
        """Test dashboard performance under high update frequency"""
        import time

        # Mock high-frequency updates
        update_count = 1000
        start_time = time.time()

        # Simulate processing many updates
        for i in range(update_count):
            # Mock data processing
            mock_services['redis'].get(f"orderbook:BTCUSDT:binance:{i}")
            mock_services['aggregator'].get_heatmap(f"BTCUSDT:{i}")

            # Yield to the event loop without a timer delay: sleeping 1 ms
            # per update would cap measured throughput at <=1000 updates/sec
            # (far less on coarse-timer platforms) and make the assertion
            # below flaky.
            await asyncio.sleep(0)

        end_time = time.time()
        processing_time = end_time - start_time
        updates_per_second = update_count / processing_time

        # Should handle at least 500 updates per second
        assert updates_per_second > 500, f"Dashboard too slow: {updates_per_second:.2f} updates/sec"

    @pytest.mark.asyncio
    async def test_dashboard_error_recovery(self, mock_services):
        """Test dashboard error recovery"""
        # Simulate service failures
        mock_services['redis'].get.side_effect = Exception("Redis connection failed")
        mock_services['timescale'].query.side_effect = Exception("Database error")

        # Dashboard should handle errors gracefully
        try:
            # Attempt an operation that will fail
            mock_services['redis'].get("test_key")
        except Exception:
            # Should recover and continue
            pass

        try:
            mock_services['timescale'].query("SELECT * FROM test")
        except Exception:
            # Should recover and continue
            pass

        # Reset services to a working state
        mock_services['redis'].get.side_effect = None
        mock_services['redis'].get.return_value = {'recovered': True}

        # Should work again
        result = mock_services['redis'].get("test_key")
        assert result['recovered'] is True


class TestDashboardUI:
    """Test dashboard UI functionality (requires browser automation)"""

    # pytest.config was removed in pytest 4.0, so
    # skipif(not pytest.config.getoption("--ui")) no longer works. These
    # tests carry the `ui` marker instead; the collection hook at the
    # bottom of this file skips them unless --ui is given.
    @pytest.mark.ui
    def test_dashboard_loads(self):
        """Test that dashboard loads in browser"""
        # This would require Selenium or similar; see the sketch after
        # this class. Placeholder for UI tests.
        pass

    @pytest.mark.ui
    def test_real_time_chart_updates(self):
        """Test that charts update in real-time"""
        # This would require browser automation
        # Placeholder for UI tests
        pass

    @pytest.mark.ui
    def test_responsive_design(self):
        """Test responsive design on different screen sizes"""
        # This would require browser automation with different viewport sizes
        # Placeholder for UI tests
        pass

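
# A minimal sketch of what test_dashboard_loads could do with Selenium,
# since the placeholders above mention it. DASHBOARD_URL, the headless
# Chrome setup, and the "non-empty body" check are assumptions about the
# deployed dashboard, not part of this repo.
def check_dashboard_loads() -> None:
    """Open the dashboard headlessly and verify the page rendered."""
    from selenium import webdriver
    from selenium.webdriver.chrome.options import Options
    from selenium.webdriver.common.by import By

    DASHBOARD_URL = "http://localhost:8080/"  # assumed dashboard address

    options = Options()
    options.add_argument("--headless=new")  # run without a display
    driver = webdriver.Chrome(options=options)
    try:
        driver.get(DASHBOARD_URL)
        # Any rendered <body> content counts as "loaded" for this sketch.
        body = driver.find_element(By.TAG_NAME, "body")
        assert body.text != ""
    finally:
        driver.quit()

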
def pytest_configure(config):
    """Configure pytest with custom markers"""
    # Note: pytest only honours these hooks from conftest.py (or a plugin),
    # not from a test module.
    config.addinivalue_line("markers", "e2e: mark test as end-to-end test")
    config.addinivalue_line("markers", "ui: mark test as UI test")


def pytest_addoption(parser):
    """Add custom command line options"""
    parser.addoption(
        "--e2e",
        action="store_true",
        default=False,
        help="run end-to-end tests"
    )
    parser.addoption(
        "--ui",
        action="store_true",
        default=False,
        help="run UI tests (requires browser setup)"
    )


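# A sketch of the glue that makes --e2e/--ui actually take effect: the
# standard pytest collection hook, keyed on the markers registered above.
# Like the other hooks here, it assumes this code lives in conftest.py,
# where pytest will load it.
def pytest_collection_modifyitems(config, items):
    """Skip e2e/ui-marked tests unless their flag was passed."""
    skip_e2e = pytest.mark.skip(reason="need --e2e option to run")
    skip_ui = pytest.mark.skip(reason="need --ui option to run")
    for item in items:
        if "e2e" in item.keywords and not config.getoption("--e2e"):
            item.add_marker(skip_e2e)
        if "ui" in item.keywords and not config.getoption("--ui"):
            item.add_marker(skip_ui)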