"""
|
|
Performance benchmarks and regression tests.
|
|
"""
|
|
|
|
import pytest
|
|
import time
|
|
import statistics
|
|
import json
|
|
import os
|
|
from datetime import datetime, timezone
|
|
from typing import Dict, List, Any, Optional, Tuple
from dataclasses import dataclass
from pathlib import Path

from ..models.core import OrderBookSnapshot, TradeEvent, PriceLevel
from ..processing.data_processor import DataProcessor
from ..aggregation.aggregation_engine import AggregationEngine
from ..connectors.binance_connector import BinanceConnector
from ..storage.timescale_manager import TimescaleManager
from ..caching.redis_manager import RedisManager
from ..utils.logging import get_logger

logger = get_logger(__name__)

@dataclass
class BenchmarkResult:
    """Benchmark result data structure"""
    name: str
    duration_ms: float
    operations_per_second: float
    memory_usage_mb: float
    cpu_usage_percent: float
    timestamp: datetime
    metadata: Optional[Dict[str, Any]] = None


class BenchmarkRunner:
    """Benchmark execution and result management"""

    def __init__(self, results_file: str = "benchmark_results.json"):
        self.results_file = Path(results_file)
        self.results: List[BenchmarkResult] = []
        self.load_historical_results()

    def load_historical_results(self):
        """Load historical benchmark results"""
        if self.results_file.exists():
            try:
                with open(self.results_file, 'r') as f:
                    data = json.load(f)
                for item in data:
                    result = BenchmarkResult(
                        name=item['name'],
                        duration_ms=item['duration_ms'],
                        operations_per_second=item['operations_per_second'],
                        memory_usage_mb=item['memory_usage_mb'],
                        cpu_usage_percent=item['cpu_usage_percent'],
                        timestamp=datetime.fromisoformat(item['timestamp']),
                        metadata=item.get('metadata', {})
                    )
                    self.results.append(result)
            except Exception as e:
                logger.warning(f"Could not load historical results: {e}")

    def save_results(self):
        """Save benchmark results to file"""
        try:
            data = []
            for result in self.results:
                data.append({
                    'name': result.name,
                    'duration_ms': result.duration_ms,
                    'operations_per_second': result.operations_per_second,
                    'memory_usage_mb': result.memory_usage_mb,
                    'cpu_usage_percent': result.cpu_usage_percent,
                    'timestamp': result.timestamp.isoformat(),
                    'metadata': result.metadata or {}
                })

            with open(self.results_file, 'w') as f:
                json.dump(data, f, indent=2)

        except Exception as e:
            logger.error(f"Could not save benchmark results: {e}")

    def run_benchmark(self, name: str, func, iterations: int = 1000,
                      warmup: int = 100) -> BenchmarkResult:
        """Run a benchmark function"""
        import psutil

        process = psutil.Process()

        # Warmup
        for _ in range(warmup):
            func()

        # Collect baseline metrics
        initial_memory = process.memory_info().rss / 1024 / 1024
        initial_cpu = process.cpu_percent()

        # Run benchmark
        start_time = time.perf_counter()

        for _ in range(iterations):
            func()

        end_time = time.perf_counter()
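        # A minimal sketch of how the remaining measurement step could look,
        # assuming duration is taken from the perf_counter delta and the
        # post-run memory/CPU figures are read from psutil the same way as
        # the baseline above (the metadata keys are illustrative, not from
        # the original source):
        #
        #   elapsed_s = end_time - start_time
        #   result = BenchmarkResult(
        #       name=name,
        #       duration_ms=elapsed_s * 1000,
        #       operations_per_second=iterations / elapsed_s if elapsed_s else 0.0,
        #       memory_usage_mb=process.memory_info().rss / 1024 / 1024 - initial_memory,
        #       cpu_usage_percent=process.cpu_percent(),
        #       timestamp=datetime.now(timezone.utc),
        #       metadata={'iterations': iterations, 'warmup': warmup},
        #   )
        #   self.results.append(result)
        #   return result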