Merge commit 'd49a473ed6f4aef55bfdd47d6370e53582be6b7b' into cleanup
.aider.conf.yml
@@ -1,6 +1,7 @@
# Aider configuration file
# For more information, see: https://aider.chat/docs/config/aider_conf.html

<<<<<<< HEAD
# To use the custom OpenAI-compatible endpoint from hyperbolic.xyz
# Set the model and the API base URL.
# model: Qwen/Qwen3-Coder-480B-A35B-Instruct
@@ -17,3 +18,27 @@ model-metadata-file: .aider.model.metadata.json
# run the following command in PowerShell and then RESTART YOUR SHELL:
#
# setx OPENAI_API_KEY "your-api-key-from-the-curl-command"
=======
# Configure for Hyperbolic API (OpenAI-compatible endpoint)
# hyperbolic
model: openai/Qwen/Qwen3-Coder-480B-A35B-Instruct
openai-api-base: https://api.hyperbolic.xyz/v1
openai-api-key: "eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJkb2Jyb21pci5wb3BvdkB5YWhvby5jb20iLCJpYXQiOjE3NTMyMzE0MjZ9.fCbv2pUmDO9xxjVqfSKru4yz1vtrNvuGIXHibWZWInE"

# setx OPENAI_API_BASE https://api.hyperbolic.xyz/v1
# setx OPENAI_API_KEY eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJkb2Jyb21pci5wb3BvdkB5YWhvby5jb20iLCJpYXQiOjE3NTMyMzE0MjZ9.fCbv2pUmDO9xxjVqfSKru4yz1vtrNvuGIXHibWZWInE

# Environment variables for litellm to recognize Hyperbolic provider
set-env:
  #setx HYPERBOLIC_API_KEY eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJkb2Jyb21pci5wb3BvdkB5YWhvby5jb20iLCJpYXQiOjE3NTMyMzE0MjZ9.fCbv2pUmDO9xxjVqfSKru4yz1vtrNvuGIXHibWZWInE
  - HYPERBOLIC_API_KEY=eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJkb2Jyb21pci5wb3BvdkB5YWhvby5jb20iLCJpYXQiOjE3NTMyMzE0MjZ9.fCbv2pUmDO9xxjVqfSKru4yz1vtrNvuGIXHibWZWInE
  # - HYPERBOLIC_API_BASE=https://api.hyperbolic.xyz/v1

# Set encoding to UTF-8 (default)
encoding: utf-8

gitignore: false
# The metadata file is still needed to inform aider about the
# context window and costs for this custom model.
model-metadata-file: .aider.model.metadata.json
>>>>>>> d49a473ed6f4aef55bfdd47d6370e53582be6b7b

.aider.model.metadata.json
@@ -1,4 +1,5 @@
{
<<<<<<< HEAD
  "Qwen/Qwen3-Coder-480B-A35B-Instruct": {
    "context_window": 262144,
    "input_cost_per_token": 0.000002,
@@ -8,5 +9,11 @@
    "context_window": 106858,
    "input_cost_per_token": 0.00000015,
    "output_cost_per_token": 0.00000075
=======
  "hyperbolic/Qwen/Qwen3-Coder-480B-A35B-Instruct": {
    "context_window": 262144,
    "input_cost_per_token": 0.000002,
    "output_cost_per_token": 0.000002
>>>>>>> d49a473ed6f4aef55bfdd47d6370e53582be6b7b
  }
}
32 .ckpt_count.py Normal file
@@ -0,0 +1,32 @@
import sys, json, os, traceback
sys.path.insert(0, r'F:\projects\gogo2')
res = {}
try:
    from utils.database_manager import get_database_manager
    db = get_database_manager()
    def db_count(name):
        try:
            lst = db.list_checkpoints(name)
            return len(lst) if lst is not None else 0
        except Exception as e:
            print("DB error for %s: %s" % (name, str(e)))
            return -1
    res.setdefault('db', {})['dqn_agent'] = db_count('dqn_agent')
    res['db']['enhanced_cnn'] = db_count('enhanced_cnn')
except Exception as e:
    res['db'] = {'error': str(e)}
try:
    from utils.checkpoint_manager import get_checkpoint_manager
    cm = get_checkpoint_manager()
    def fs_count(name):
        try:
            lst = cm.get_all_checkpoints(name)
            return len(lst) if lst is not None else 0
        except Exception as e:
            print("FS error for %s: %s" % (name, str(e)))
            return -1
    res.setdefault('fs', {})['dqn_agent'] = fs_count('dqn_agent')
    res['fs']['enhanced_cnn'] = fs_count('enhanced_cnn')
except Exception as e:
    res['fs'] = {'error': str(e)}
print(json.dumps(res))
5 .cursor/rules/specs.mdc Normal file
@@ -0,0 +1,5 @@
---
description: Use the .kiro\specs content as project guidelines and specifications. They may change as the project develops, but they give you a good starting point and a broad understanding of the project we are working on. Also, when you find problems, proceed to fixing them without asking. We are discovering problems so we fix them :)
globs:
alwaysApply: false
---
4 .env
@@ -3,6 +3,10 @@
# MEXC API Configuration (Spot Trading)
MEXC_API_KEY=mx0vglhVPZeIJ32Qw1
MEXC_SECRET_KEY=3bfe4bd99d5541e4a1bca87ab257cc7e
DERBIT_API_CLIENTID=me1yf6K0
DERBIT_API_SECRET=PxdvEHmJ59FrguNVIt45-iUBj3lPXbmlA7OQUeINE9s
BYBIT_API_KEY=GQ50IkgZKkR3ljlbPx
BYBIT_API_SECRET=0GWpva5lYrhzsUqZCidQpO5TxYwaEmdiEDyc
#3bfe4bd99d5541e4a1bca87ab257cc7e 45d0b3c26f2644f19bfb98b07741b2f5

# BASE ENDPOINTS: https://api.mexc.com wss://wbs-api.mexc.com/ws !!! DO NOT CHANGE THIS
9 .gitignore vendored
@@ -16,7 +16,7 @@ models/trading_agent_final.pt.backup
*.pt
*.backup
logs/
-trade_logs/
+# trade_logs/
*.csv
cache/
realtime_chart.log
@@ -46,6 +46,7 @@ chrome_user_data/*
!.aider.model.metadata.json

.env
<<<<<<< HEAD
venv/*

wandb/
@@ -58,3 +59,9 @@ mcp_servers/*
data/prediction_snapshots/*
reports/backtest_*
data/prediction_snapshots/snapshots.db
=======
.env
training_data/*
data/trading_system.db
/data/trading_system.db
>>>>>>> d49a473ed6f4aef55bfdd47d6370e53582be6b7b
448 .kiro/specs/multi-exchange-data-aggregation/design.md Normal file
@@ -0,0 +1,448 @@
# Design Document

## Overview

The Multi-Exchange Data Aggregation System is a comprehensive data collection and processing subsystem designed to serve as the foundational data layer for the trading orchestrator. The system will collect real-time order book and OHLCV data from the top 10 cryptocurrency exchanges, aggregate it into standardized formats, store it in a TimescaleDB time-series database, and provide both live data feeds and historical replay capabilities.

The system follows a microservices architecture with containerized components, ensuring scalability, maintainability, and seamless integration with the existing trading infrastructure.

We implement it in the `.\COBY` subfolder for easy integration with the existing system.

## Architecture

### High-Level Architecture

```mermaid
graph TB
    subgraph "Exchange Connectors"
        E1[Binance WebSocket]
        E2[Coinbase WebSocket]
        E3[Kraken WebSocket]
        E4[Bybit WebSocket]
        E5[OKX WebSocket]
        E6[Huobi WebSocket]
        E7[KuCoin WebSocket]
        E8[Gate.io WebSocket]
        E9[Bitfinex WebSocket]
        E10[MEXC WebSocket]
    end

    subgraph "Data Processing Layer"
        DP[Data Processor]
        AGG[Aggregation Engine]
        NORM[Data Normalizer]
    end

    subgraph "Storage Layer"
        TSDB[(TimescaleDB)]
        CACHE[Redis Cache]
    end

    subgraph "API Layer"
        LIVE[Live Data API]
        REPLAY[Replay API]
        WEB[Web Dashboard]
    end

    subgraph "Integration Layer"
        ORCH[Orchestrator Interface]
        ADAPTER[Data Adapter]
    end

    E1 --> DP
    E2 --> DP
    E3 --> DP
    E4 --> DP
    E5 --> DP
    E6 --> DP
    E7 --> DP
    E8 --> DP
    E9 --> DP
    E10 --> DP

    DP --> NORM
    NORM --> AGG
    AGG --> TSDB
    AGG --> CACHE

    CACHE --> LIVE
    TSDB --> REPLAY
    LIVE --> WEB
    REPLAY --> WEB

    LIVE --> ADAPTER
    REPLAY --> ADAPTER
    ADAPTER --> ORCH
```

### Component Architecture

The system is organized into several key components:

1. **Exchange Connectors**: WebSocket clients for each exchange
2. **Data Processing Engine**: Normalizes and validates incoming data
3. **Aggregation Engine**: Creates price buckets and heatmaps
4. **Storage Layer**: TimescaleDB for persistence, Redis for caching
5. **API Layer**: REST and WebSocket APIs for data access
6. **Web Dashboard**: Real-time visualization interface
7. **Integration Layer**: Orchestrator-compatible interface

## Components and Interfaces

### Exchange Connector Interface

```python
class ExchangeConnector:
    """Base interface for exchange WebSocket connectors"""

    async def connect(self) -> bool
    async def disconnect(self) -> None
    async def subscribe_orderbook(self, symbol: str) -> None
    async def subscribe_trades(self, symbol: str) -> None
    def get_connection_status(self) -> ConnectionStatus
    def add_data_callback(self, callback: Callable) -> None
```

### Data Processing Interface

```python
class DataProcessor:
    """Processes and normalizes raw exchange data"""

    def normalize_orderbook(self, raw_data: Dict, exchange: str) -> OrderBookSnapshot
    def normalize_trade(self, raw_data: Dict, exchange: str) -> TradeEvent
    def validate_data(self, data: Union[OrderBookSnapshot, TradeEvent]) -> bool
    def calculate_metrics(self, orderbook: OrderBookSnapshot) -> OrderBookMetrics
```
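
As a concrete illustration of `normalize_orderbook`, here is a minimal sketch for a Binance-style depth payload, assuming the `OrderBookSnapshot` and `PriceLevel` dataclasses defined under Data Models below. The `"bids"`/`"asks"`/`"lastUpdateId"` field names follow Binance's public depth format; other exchanges would need their own mappings.

```python
# Sketch only: map a Binance-style depth payload onto the standard data models.
from datetime import datetime, timezone

def normalize_binance_orderbook(raw_data: dict, symbol: str) -> "OrderBookSnapshot":
    bids = [PriceLevel(price=float(p), size=float(s)) for p, s in raw_data.get("bids", [])]
    asks = [PriceLevel(price=float(p), size=float(s)) for p, s in raw_data.get("asks", [])]
    return OrderBookSnapshot(
        symbol=symbol,
        exchange="binance",
        timestamp=datetime.now(timezone.utc),
        bids=sorted(bids, key=lambda l: l.price, reverse=True),  # best bid first
        asks=sorted(asks, key=lambda l: l.price),                # best ask first
        sequence_id=raw_data.get("lastUpdateId"),
    )
```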

### Aggregation Engine Interface

```python
class AggregationEngine:
    """Aggregates data into price buckets and heatmaps"""

    def create_price_buckets(self, orderbook: OrderBookSnapshot, bucket_size: float) -> PriceBuckets
    def update_heatmap(self, symbol: str, buckets: PriceBuckets) -> HeatmapData
    def calculate_imbalances(self, orderbook: OrderBookSnapshot) -> ImbalanceMetrics
    def aggregate_across_exchanges(self, symbol: str) -> ConsolidatedOrderBook
```
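
A minimal sketch of how `create_price_buckets` could map order book levels into fixed-width USD buckets ($10 for BTC, $1 for ETH, per Requirement 2). The floor-based rounding rule is an illustrative assumption, not mandated by the spec.

```python
# Illustrative bucket aggregation: sum level sizes into fixed-width price bins.
import math
from collections import defaultdict
from datetime import datetime, timezone

def create_price_buckets(orderbook: "OrderBookSnapshot", bucket_size: float) -> "PriceBuckets":
    def bucket(price: float) -> float:
        return math.floor(price / bucket_size) * bucket_size   # assumed rounding rule

    bid_buckets = defaultdict(float)
    ask_buckets = defaultdict(float)
    for level in orderbook.bids:
        bid_buckets[bucket(level.price)] += level.size
    for level in orderbook.asks:
        ask_buckets[bucket(level.price)] += level.size

    return PriceBuckets(
        symbol=orderbook.symbol,
        timestamp=datetime.now(timezone.utc),
        bucket_size=bucket_size,
        bid_buckets=dict(bid_buckets),
        ask_buckets=dict(ask_buckets),
    )
```

Heatmap intensity (0.0 to 1.0) can then be derived by dividing each bucket's volume by the maximum bucket volume in the snapshot.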

### Storage Interface

```python
class StorageManager:
    """Manages data persistence and retrieval"""

    async def store_orderbook(self, data: OrderBookSnapshot) -> bool
    async def store_trade(self, data: TradeEvent) -> bool
    async def get_historical_data(self, symbol: str, start: datetime, end: datetime) -> List[Dict]
    async def get_latest_data(self, symbol: str) -> Dict
    def setup_database_schema(self) -> None
```

### Replay Interface

```python
class ReplayManager:
    """Provides historical data replay functionality"""

    def create_replay_session(self, start_time: datetime, end_time: datetime, speed: float) -> str
    async def start_replay(self, session_id: str) -> None
    async def pause_replay(self, session_id: str) -> None
    async def stop_replay(self, session_id: str) -> None
    def get_replay_status(self, session_id: str) -> ReplayStatus
```
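
To preserve the original timing relationships at a configurable playback speed (Requirement 5), the replay loop can sleep for the scaled gap between consecutive events. A minimal sketch, assuming events are dicts that carry a `timestamp` and that `callback` delivers them in the same shape as the live feed:

```python
# Sketch of the replay pacing loop; event shape and callback contract are assumptions.
import asyncio
from datetime import datetime
from typing import Awaitable, Callable, Iterable, Optional

async def replay_events(events: Iterable[dict],
                        callback: Callable[[dict], Awaitable[None]],
                        speed: float = 1.0) -> None:
    """Replay stored events in order, scaling inter-event gaps by 1/speed."""
    prev_ts: Optional[datetime] = None
    for event in events:                       # events ordered by timestamp
        ts = event["timestamp"]
        if prev_ts is not None:
            gap = (ts - prev_ts).total_seconds() / max(speed, 1e-9)
            if gap > 0:
                await asyncio.sleep(gap)
        await callback(event)                  # same format as the live feed
        prev_ts = ts
```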

## Data Models

### Core Data Structures

```python
@dataclass
class OrderBookSnapshot:
    """Standardized order book snapshot"""
    symbol: str
    exchange: str
    timestamp: datetime
    bids: List[PriceLevel]
    asks: List[PriceLevel]
    sequence_id: Optional[int] = None

@dataclass
class PriceLevel:
    """Individual price level in order book"""
    price: float
    size: float
    count: Optional[int] = None

@dataclass
class TradeEvent:
    """Standardized trade event"""
    symbol: str
    exchange: str
    timestamp: datetime
    price: float
    size: float
    side: str  # 'buy' or 'sell'
    trade_id: str

@dataclass
class PriceBuckets:
    """Aggregated price buckets for heatmap"""
    symbol: str
    timestamp: datetime
    bucket_size: float
    bid_buckets: Dict[float, float]  # price -> volume
    ask_buckets: Dict[float, float]  # price -> volume

@dataclass
class HeatmapData:
    """Heatmap visualization data"""
    symbol: str
    timestamp: datetime
    bucket_size: float
    data: List[HeatmapPoint]

@dataclass
class HeatmapPoint:
    """Individual heatmap data point"""
    price: float
    volume: float
    intensity: float  # 0.0 to 1.0
    side: str  # 'bid' or 'ask'
```

### Database Schema

#### TimescaleDB Tables

```sql
-- Order book snapshots table
CREATE TABLE order_book_snapshots (
    id BIGSERIAL,
    symbol VARCHAR(20) NOT NULL,
    exchange VARCHAR(20) NOT NULL,
    timestamp TIMESTAMPTZ NOT NULL,
    bids JSONB NOT NULL,
    asks JSONB NOT NULL,
    sequence_id BIGINT,
    mid_price DECIMAL(20,8),
    spread DECIMAL(20,8),
    bid_volume DECIMAL(30,8),
    ask_volume DECIMAL(30,8),
    PRIMARY KEY (timestamp, symbol, exchange)
);

-- Convert to hypertable
SELECT create_hypertable('order_book_snapshots', 'timestamp');

-- Trade events table
CREATE TABLE trade_events (
    id BIGSERIAL,
    symbol VARCHAR(20) NOT NULL,
    exchange VARCHAR(20) NOT NULL,
    timestamp TIMESTAMPTZ NOT NULL,
    price DECIMAL(20,8) NOT NULL,
    size DECIMAL(30,8) NOT NULL,
    side VARCHAR(4) NOT NULL,
    trade_id VARCHAR(100) NOT NULL,
    PRIMARY KEY (timestamp, symbol, exchange, trade_id)
);

-- Convert to hypertable
SELECT create_hypertable('trade_events', 'timestamp');

-- Aggregated heatmap data table
CREATE TABLE heatmap_data (
    symbol VARCHAR(20) NOT NULL,
    timestamp TIMESTAMPTZ NOT NULL,
    bucket_size DECIMAL(10,2) NOT NULL,
    price_bucket DECIMAL(20,8) NOT NULL,
    volume DECIMAL(30,8) NOT NULL,
    side VARCHAR(3) NOT NULL,
    exchange_count INTEGER NOT NULL,
    PRIMARY KEY (timestamp, symbol, bucket_size, price_bucket, side)
);

-- Convert to hypertable
SELECT create_hypertable('heatmap_data', 'timestamp');

-- OHLCV data table
CREATE TABLE ohlcv_data (
    symbol VARCHAR(20) NOT NULL,
    timestamp TIMESTAMPTZ NOT NULL,
    timeframe VARCHAR(10) NOT NULL,
    open_price DECIMAL(20,8) NOT NULL,
    high_price DECIMAL(20,8) NOT NULL,
    low_price DECIMAL(20,8) NOT NULL,
    close_price DECIMAL(20,8) NOT NULL,
    volume DECIMAL(30,8) NOT NULL,
    trade_count INTEGER,
    PRIMARY KEY (timestamp, symbol, timeframe)
);

-- Convert to hypertable
SELECT create_hypertable('ohlcv_data', 'timestamp');
```
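
Requirement 3.4 calls for batched writes. A minimal sketch of how the StorageManager could flush trade events to the `trade_events` hypertable in batches using asyncpg; the buffer size, pool handling, and conflict policy are illustrative assumptions:

```python
# Sketch: batched insert of TradeEvent rows into trade_events using asyncpg.
import asyncpg
from typing import List

class TradeWriter:
    def __init__(self, pool: asyncpg.Pool, batch_size: int = 1000):
        self.pool = pool
        self.batch_size = batch_size
        self._buffer: List["TradeEvent"] = []

    async def add(self, trade: "TradeEvent") -> None:
        self._buffer.append(trade)
        if len(self._buffer) >= self.batch_size:
            await self.flush()

    async def flush(self) -> None:
        if not self._buffer:
            return
        rows = [(t.symbol, t.exchange, t.timestamp, t.price, t.size, t.side, t.trade_id)
                for t in self._buffer]
        self._buffer.clear()
        async with self.pool.acquire() as conn:
            await conn.executemany(
                """INSERT INTO trade_events
                   (symbol, exchange, timestamp, price, size, side, trade_id)
                   VALUES ($1, $2, $3, $4, $5, $6, $7)
                   ON CONFLICT DO NOTHING""",   # duplicate detection at the DB level
                rows,
            )
```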

## Error Handling

### Connection Management

The system implements robust error handling for exchange connections:

1. **Exponential Backoff**: Failed connections retry with increasing delays (see the sketch below)
2. **Circuit Breaker**: Temporarily disable problematic exchanges
3. **Graceful Degradation**: Continue operation with available exchanges
4. **Health Monitoring**: Continuous monitoring of connection status
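
A minimal reconnect loop combining exponential backoff with a simple circuit-breaker cutoff. The delay cap, failure threshold, and the `wait_until_disconnected` helper are illustrative assumptions, not part of the connector interface above:

```python
# Sketch of a backoff + circuit-breaker supervision loop for one connector.
import asyncio
import random

async def run_with_reconnect(connector: "ExchangeConnector",
                             base_delay: float = 1.0,
                             max_delay: float = 60.0,
                             max_failures: int = 10) -> None:
    """Keep a connector alive; back off exponentially and trip a circuit breaker."""
    failures = 0
    while failures < max_failures:
        if await connector.connect():
            failures = 0                                   # healthy again, reset the breaker
            await connector.wait_until_disconnected()      # assumed helper on the connector
        failures += 1
        delay = min(max_delay, base_delay * 2 ** failures)
        await asyncio.sleep(delay * random.uniform(0.5, 1.5))  # jitter avoids thundering herd
    # breaker open: leave this exchange disabled until health monitoring re-enables it
```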
### Data Validation

All incoming data undergoes validation:

1. **Schema Validation**: Ensure data structure compliance
2. **Range Validation**: Check price and volume ranges
3. **Timestamp Validation**: Verify temporal consistency
4. **Duplicate Detection**: Prevent duplicate data storage

### Database Resilience

Database operations include comprehensive error handling:

1. **Connection Pooling**: Maintain multiple database connections
2. **Transaction Management**: Ensure data consistency
3. **Retry Logic**: Automatic retry for transient failures
4. **Backup Strategies**: Regular data backups and recovery procedures

## Testing Strategy

### Unit Testing

Each component will have comprehensive unit tests:

1. **Exchange Connectors**: Mock WebSocket responses
2. **Data Processing**: Test normalization and validation
3. **Aggregation Engine**: Verify bucket calculations
4. **Storage Layer**: Test database operations
5. **API Layer**: Test endpoint responses

### Integration Testing

End-to-end testing scenarios:

1. **Multi-Exchange Data Flow**: Test complete data pipeline
2. **Database Integration**: Verify TimescaleDB operations
3. **API Integration**: Test orchestrator interface compatibility
4. **Performance Testing**: Load testing with high-frequency data

### Performance Testing

Performance benchmarks and testing:

1. **Throughput Testing**: Measure data processing capacity
2. **Latency Testing**: Measure end-to-end data latency
3. **Memory Usage**: Monitor memory consumption patterns
4. **Database Performance**: Query performance optimization

### Monitoring and Observability

Comprehensive monitoring system:

1. **Metrics Collection**: Prometheus-compatible metrics (see the sketch below)
2. **Logging**: Structured logging with correlation IDs
3. **Alerting**: Real-time alerts for system issues
4. **Dashboards**: Grafana dashboards for system monitoring
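
As an illustration of Prometheus-compatible metrics collection, a minimal sketch using the `prometheus_client` library; the metric names and label sets are assumptions, not a fixed naming scheme:

```python
# Sketch: counters/histograms exposed for scraping; names and labels are illustrative.
from prometheus_client import Counter, Gauge, Histogram, start_http_server

ORDERBOOK_UPDATES = Counter(
    "coby_orderbook_updates_total", "Order book updates processed", ["exchange", "symbol"]
)
PROCESSING_LATENCY = Histogram(
    "coby_processing_latency_seconds", "End-to-end update processing latency"
)
EXCHANGE_CONNECTED = Gauge(
    "coby_exchange_connected", "1 if the exchange WebSocket is connected", ["exchange"]
)

def start_metrics_server(port: int = 9100) -> None:
    """Expose /metrics for Prometheus scraping."""
    start_http_server(port)

# Usage inside the data processor (illustrative):
#   with PROCESSING_LATENCY.time():
#       process_update(update)
#   ORDERBOOK_UPDATES.labels(exchange="binance", symbol="BTC/USDT").inc()
```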

## Deployment Architecture

### Docker Containerization

The system will be deployed using Docker containers:

```yaml
# docker-compose.yml
version: '3.8'
services:
  timescaledb:
    image: timescale/timescaledb:latest-pg14
    environment:
      POSTGRES_DB: market_data
      POSTGRES_USER: market_user
      POSTGRES_PASSWORD: ${DB_PASSWORD}
    volumes:
      - timescale_data:/var/lib/postgresql/data
    ports:
      - "5432:5432"

  redis:
    image: redis:7-alpine
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data

  data-aggregator:
    build: ./data-aggregator
    environment:
      - DB_HOST=timescaledb
      - REDIS_HOST=redis
      - LOG_LEVEL=INFO
    depends_on:
      - timescaledb
      - redis

  web-dashboard:
    build: ./web-dashboard
    ports:
      - "8080:8080"
    environment:
      - API_HOST=data-aggregator
    depends_on:
      - data-aggregator

volumes:
  timescale_data:
  redis_data:
```

### Configuration Management

Environment-based configuration:

```python
# config.py
@dataclass
class Config:
    # Database settings
    db_host: str = os.getenv('DB_HOST', 'localhost')
    db_port: int = int(os.getenv('DB_PORT', '5432'))
    db_name: str = os.getenv('DB_NAME', 'market_data')
    db_user: str = os.getenv('DB_USER', 'market_user')
    db_password: str = os.getenv('DB_PASSWORD', '')

    # Redis settings
    redis_host: str = os.getenv('REDIS_HOST', 'localhost')
    redis_port: int = int(os.getenv('REDIS_PORT', '6379'))

    # Exchange settings
    exchanges: List[str] = field(default_factory=lambda: [
        'binance', 'coinbase', 'kraken', 'bybit', 'okx',
        'huobi', 'kucoin', 'gateio', 'bitfinex', 'mexc'
    ])

    # Aggregation settings
    btc_bucket_size: float = 10.0  # $10 USD buckets for BTC
    eth_bucket_size: float = 1.0   # $1 USD buckets for ETH

    # Performance settings
    max_connections_per_exchange: int = 5
    data_buffer_size: int = 10000
    batch_write_size: int = 1000

    # API settings
    api_host: str = os.getenv('API_HOST', '0.0.0.0')
    api_port: int = int(os.getenv('API_PORT', '8080'))
    websocket_port: int = int(os.getenv('WS_PORT', '8081'))
```

This design provides a robust, scalable foundation for multi-exchange data aggregation that seamlessly integrates with the existing trading orchestrator while providing the flexibility for future enhancements and additional exchange integrations.
103 .kiro/specs/multi-exchange-data-aggregation/requirements.md Normal file
@@ -0,0 +1,103 @@
# Requirements Document

## Introduction

This document outlines the requirements for a comprehensive data collection and aggregation subsystem that will serve as a foundational component for the trading orchestrator. The system will collect, aggregate, and store real-time order book and OHLCV data from multiple cryptocurrency exchanges, providing both live data feeds and historical replay capabilities for model training and backtesting.

## Requirements

### Requirement 1

**User Story:** As a trading system developer, I want to collect real-time order book data from top 10 cryptocurrency exchanges, so that I can have comprehensive market data for analysis and trading decisions.

#### Acceptance Criteria

1. WHEN the system starts THEN it SHALL establish WebSocket connections to up to 10 major cryptocurrency exchanges
2. WHEN order book updates are received THEN the system SHALL process and store raw order book events in real-time
3. WHEN processing order book data THEN the system SHALL handle connection failures gracefully and automatically reconnect
4. WHEN multiple exchanges provide data THEN the system SHALL normalize data formats to a consistent structure
5. IF an exchange connection fails THEN the system SHALL log the failure and attempt reconnection with exponential backoff

### Requirement 2

**User Story:** As a trading analyst, I want order book data aggregated into price buckets with heatmap visualization, so that I can quickly identify market depth and liquidity patterns.

#### Acceptance Criteria

1. WHEN processing BTC order book data THEN the system SHALL aggregate orders into $10 USD price range buckets
2. WHEN processing ETH order book data THEN the system SHALL aggregate orders into $1 USD price range buckets
3. WHEN aggregating order data THEN the system SHALL maintain separate bid and ask heatmaps
4. WHEN building heatmaps THEN the system SHALL update distribution data at high frequency (sub-second)
5. WHEN displaying heatmaps THEN the system SHALL show volume intensity using color gradients or progress bars

### Requirement 3

**User Story:** As a system architect, I want all market data stored in a TimescaleDB database, so that I can efficiently query time-series data and maintain historical records.

#### Acceptance Criteria

1. WHEN the system initializes THEN it SHALL connect to a TimescaleDB instance running in a Docker container
2. WHEN storing order book events THEN the system SHALL use TimescaleDB's time-series optimized storage
3. WHEN storing OHLCV data THEN the system SHALL create appropriate time-series tables with proper indexing
4. WHEN writing to database THEN the system SHALL batch writes for optimal performance
5. IF database connection fails THEN the system SHALL queue data in memory and retry with backoff strategy

### Requirement 4

**User Story:** As a trading system operator, I want a web-based dashboard to monitor real-time order book heatmaps, so that I can visualize market conditions across multiple exchanges.

#### Acceptance Criteria

1. WHEN accessing the web dashboard THEN it SHALL display real-time order book heatmaps for BTC and ETH
2. WHEN viewing heatmaps THEN the dashboard SHALL show aggregated data from all connected exchanges
3. WHEN displaying progress bars THEN they SHALL always show aggregated values across price buckets
4. WHEN updating the display THEN the dashboard SHALL refresh data at least once per second
5. WHEN an exchange goes offline THEN the dashboard SHALL indicate the status change visually

### Requirement 5

**User Story:** As a model trainer, I want a replay interface that can provide historical data in the same format as live data, so that I can train models on past market events.

#### Acceptance Criteria

1. WHEN requesting historical data THEN the replay interface SHALL provide data in the same structure as live feeds
2. WHEN replaying data THEN the system SHALL maintain original timing relationships between events
3. WHEN using replay mode THEN the interface SHALL support configurable playback speeds
4. WHEN switching between live and replay modes THEN the orchestrator SHALL receive data through the same interface
5. IF replay data is requested for unavailable time periods THEN the system SHALL return appropriate error messages

### Requirement 6

**User Story:** As a trading system integrator, I want the data aggregation system to follow the same interface as the current orchestrator data provider, so that I can seamlessly integrate it into existing workflows.

#### Acceptance Criteria

1. WHEN the orchestrator requests data THEN the aggregation system SHALL provide data in the expected format
2. WHEN integrating with existing systems THEN the interface SHALL be compatible with current data provider contracts
3. WHEN providing aggregated data THEN the system SHALL include metadata about data sources and quality
4. WHEN the orchestrator switches data sources THEN it SHALL work without code changes
5. IF data quality issues are detected THEN the system SHALL provide quality indicators in the response

### Requirement 7

**User Story:** As a system administrator, I want the data collection system to be containerized and easily deployable, so that I can manage it alongside other system components.

#### Acceptance Criteria

1. WHEN deploying the system THEN it SHALL run in Docker containers with proper resource allocation
2. WHEN starting services THEN TimescaleDB SHALL be automatically provisioned in its own container
3. WHEN configuring the system THEN all settings SHALL be externalized through environment variables or config files
4. WHEN monitoring the system THEN it SHALL provide health check endpoints for container orchestration
5. IF containers need to be restarted THEN the system SHALL recover gracefully without data loss

### Requirement 8

**User Story:** As a performance engineer, I want the system to handle high-frequency data efficiently, so that it can process order book updates from multiple exchanges without latency issues.

#### Acceptance Criteria

1. WHEN processing order book updates THEN the system SHALL handle at least 10 updates per second per exchange
2. WHEN aggregating data THEN processing latency SHALL be less than 10 milliseconds per update
3. WHEN storing data THEN the system SHALL use efficient batching to minimize database overhead
4. WHEN memory usage grows THEN the system SHALL implement appropriate cleanup and garbage collection
5. IF processing falls behind THEN the system SHALL prioritize recent data and log performance warnings
230 .kiro/specs/multi-exchange-data-aggregation/tasks.md Normal file
@@ -0,0 +1,230 @@
# Implementation Plan

- [x] 1. Set up project structure and core interfaces
  - Create directory structure in `.\COBY` subfolder for the multi-exchange data aggregation system
  - Define base interfaces and data models for exchange connectors, data processing, and storage
  - Implement configuration management system with environment variable support
  - _Requirements: 1.1, 6.1, 7.3_

- [x] 2. Implement TimescaleDB integration and database schema
  - Create TimescaleDB connection manager with connection pooling
  - Implement database schema creation with hypertables for time-series optimization
  - Write database operations for storing order book snapshots and trade events
  - Create database migration system for schema updates
  - _Requirements: 3.1, 3.2, 3.3, 3.4_

- [x] 3. Create base exchange connector framework
  - Implement abstract base class for exchange WebSocket connectors
  - Create connection management with exponential backoff and circuit breaker patterns
  - Implement WebSocket message handling with proper error recovery
  - Add connection status monitoring and health checks
  - _Requirements: 1.1, 1.3, 1.4, 8.5_

- [x] 4. Implement Binance exchange connector
  - Create Binance-specific WebSocket connector extending the base framework
  - Implement order book depth stream subscription and processing
  - Add trade stream subscription for volume analysis
  - Implement data normalization from Binance format to standard format
  - Write unit tests for Binance connector functionality
  - _Requirements: 1.1, 1.2, 1.4, 6.2_

- [x] 5. Create data processing and normalization engine
  - Implement data processor for normalizing raw exchange data
  - Create validation logic for order book and trade data
  - Implement data quality checks and filtering
  - Add metrics calculation for order book statistics
  - Write comprehensive unit tests for data processing logic
  - _Requirements: 1.4, 6.3, 8.1_

- [x] 6. Implement price bucket aggregation system
  - Create aggregation engine for converting order book data to price buckets
  - Implement configurable bucket sizes ($10 for BTC, $1 for ETH)
  - Create heatmap data structure generation from price buckets
  - Implement real-time aggregation with high-frequency updates
  - Add volume-weighted aggregation calculations
  - _Requirements: 2.1, 2.2, 2.3, 2.4, 8.1, 8.2_

- [x] 7. Build Redis caching layer
  - Implement Redis connection manager with connection pooling
  - Create caching strategies for latest order book data and heatmaps
  - Implement cache invalidation and TTL management
  - Add cache performance monitoring and metrics
  - Write tests for caching functionality
  - _Requirements: 8.2, 8.3_

- [x] 8. Create live data API endpoints
  - Implement REST API for accessing current order book data
  - Create WebSocket API for real-time data streaming
  - Add endpoints for heatmap data retrieval
  - Implement API rate limiting and authentication
  - Create comprehensive API documentation
  - _Requirements: 4.1, 4.2, 4.4, 6.3_

- [ ] 9. Implement web dashboard for visualization
  - Create HTML/CSS/JavaScript dashboard for real-time heatmap visualization
  - Implement WebSocket client for receiving real-time updates
  - Create progress bar visualization for aggregated price buckets
  - Add exchange status indicators and connection monitoring
  - Implement responsive design for different screen sizes
  - _Requirements: 4.1, 4.2, 4.3, 4.5_

- [x] 10. Build historical data replay system
  - Create replay manager for historical data playback
  - Implement configurable playback speeds and time range selection
  - Create replay session management with start/pause/stop controls
  - Implement data streaming interface compatible with live data format
  - Add replay status monitoring and progress tracking
  - _Requirements: 5.1, 5.2, 5.3, 5.4, 5.5_

- [x] 11. Create orchestrator integration interface
  - Implement data adapter that matches existing orchestrator interface
  - Create compatibility layer for seamless integration with current data provider
  - Add data quality indicators and metadata in responses
  - Implement switching mechanism between live and replay modes
  - Write integration tests with existing orchestrator code
  - _Requirements: 6.1, 6.2, 6.3, 6.4, 6.5_

- [x] 12. Add additional exchange connectors (Coinbase, Kraken)
  - Implement Coinbase Pro WebSocket connector with proper authentication
  - Create Kraken WebSocket connector with their specific message format
  - Add exchange-specific data normalization for both exchanges
  - Implement proper error handling for each exchange's quirks
  - Write unit tests for both new exchange connectors
  - _Requirements: 1.1, 1.2, 1.4_

- [x] 13. Implement remaining exchange connectors (Bybit, OKX, Huobi)
  - Create Bybit WebSocket connector with unified trading account support
  - Implement OKX connector with their V5 API WebSocket streams
  - Add Huobi Global connector with proper symbol mapping
  - Ensure all connectors follow the same interface and error handling patterns
  - Write comprehensive tests for all three exchange connectors
  - _Requirements: 1.1, 1.2, 1.4_

- [x] 14. Complete exchange connector suite (KuCoin, Gate.io, Bitfinex, MEXC)
  - Implement KuCoin connector with proper token-based authentication
  - Create Gate.io connector with their WebSocket v4 API
  - Add Bitfinex connector with proper channel subscription management
  - Implement MEXC connector with their WebSocket streams
  - Ensure all 10 exchanges are properly integrated and tested
  - _Requirements: 1.1, 1.2, 1.4_

- [ ] 15. Implement cross-exchange data consolidation
  - Create consolidation engine that merges order book data from multiple exchanges
  - Implement weighted aggregation based on exchange liquidity and reliability
  - Add conflict resolution for price discrepancies between exchanges
  - Create consolidated heatmap that shows combined market depth
  - Write tests for multi-exchange aggregation scenarios
  - _Requirements: 2.5, 4.2_

- [ ] 16. Add performance monitoring and optimization
  - Implement comprehensive metrics collection for all system components
  - Create performance monitoring dashboard with key system metrics
  - Add latency tracking for end-to-end data processing
  - Implement memory usage monitoring and garbage collection optimization
  - Create alerting system for performance degradation
  - _Requirements: 8.1, 8.2, 8.3, 8.4, 8.5_

- [ ] 17. Create Docker containerization and deployment
  - Write Dockerfiles for all system components
  - Create docker-compose configuration for local development
  - Implement health check endpoints for container orchestration
  - Add environment variable configuration for all services
  - Create deployment scripts and documentation
  - _Requirements: 7.1, 7.2, 7.3, 7.4, 7.5_

- [ ] 18. Implement comprehensive testing suite
  - Create integration tests for complete data pipeline from exchanges to storage
  - Implement load testing for high-frequency data scenarios
  - Add end-to-end tests for web dashboard functionality
  - Create performance benchmarks and regression tests
  - Write documentation for running and maintaining tests
  - _Requirements: 8.1, 8.2, 8.3, 8.4_

- [ ] 19. Add system monitoring and alerting
  - Implement structured logging with correlation IDs across all components
  - Create Prometheus metrics exporters for system monitoring
  - Add Grafana dashboards for system visualization
  - Implement alerting rules for system failures and performance issues
  - Create runbook documentation for common operational scenarios
  - _Requirements: 7.4, 8.5_

- [ ] 20. Final integration and system testing
  - Integrate the complete system with existing trading orchestrator
  - Perform end-to-end testing with real market data
  - Validate replay functionality with historical data scenarios
  - Test failover scenarios and system resilience
  - Create user documentation and operational guides
  - _Requirements: 6.1, 6.2, 6.4, 5.1, 5.2_
713 .kiro/specs/multi-modal-trading-system/design.md Normal file
@@ -0,0 +1,713 @@
# Multi-Modal Trading System Design Document

## Overview

The Multi-Modal Trading System is designed as an advanced algorithmic trading platform that combines Convolutional Neural Networks (CNN) and Reinforcement Learning (RL) models orchestrated by a decision-making module. The system processes multi-timeframe and multi-symbol market data (primarily ETH and BTC) to generate trading actions.

This design document outlines the architecture, components, data flow, and implementation details for the system based on the requirements and existing codebase.

## Architecture

The system follows a modular architecture with clear separation of concerns:

```mermaid
graph TD
    A[Data Provider] --> B["Data Processor (calculates pivot points)"]
    B --> C[CNN Model]
    B --> D["RL (DQN) Model"]
    C --> E[Orchestrator]
    D --> E
    E --> F[Trading Executor]
    E --> G[Dashboard]
    F --> G
    H[Risk Manager] --> F
    H --> G
```

### Key Components

1. **Data Provider**: Centralized component responsible for collecting, processing, and distributing market data from multiple sources.
2. **Data Processor**: Processes raw market data, calculates technical indicators, and identifies pivot points.
3. **CNN Model**: Analyzes patterns in market data and predicts pivot points across multiple timeframes.
4. **RL Model**: Learns optimal trading strategies based on market data and CNN predictions.
5. **Orchestrator**: Makes final trading decisions based on inputs from both CNN and RL models.
6. **Trading Executor**: Executes trading actions through brokerage APIs.
7. **Risk Manager**: Implements risk management features like stop-loss and position sizing.
8. **Dashboard**: Provides a user interface for monitoring and controlling the system.

## Components and Interfaces

### 1. Data Provider

The Data Provider is the foundation of the system, responsible for collecting, processing, and distributing market data to all other components.

#### Key Classes and Interfaces

- **DataProvider**: Central class that manages data collection, processing, and distribution.
- **MarketTick**: Data structure for standardized market tick data.
- **DataSubscriber**: Interface for components that subscribe to market data.
- **PivotBounds**: Data structure for pivot-based normalization bounds.

#### Implementation Details

The DataProvider class will:
- Collect data from multiple sources (Binance, MEXC)
- Support multiple timeframes (1s, 1m, 1h, 1d)
- Support multiple symbols (ETH, BTC)
- Calculate technical indicators
- Identify pivot points
- Normalize data
- Distribute data to subscribers
- Perform any other algorithmic manipulations/calculations on the data
- Cache up to 3x the model inputs (300 ticks of OHLCV, etc.) so we can do proper backtesting up to 2x further into the future

Based on the existing implementation in `core/data_provider.py`, we'll enhance it to:
- Improve pivot point calculation using the recursive Williams Market Structure
- Optimize data caching for better performance
- Enhance real-time data streaming
- Implement better error handling and fallback mechanisms

### BASE FOR ALL MODELS ###

- ***INPUTS***: COB + OHLCV data frame as described:
  - OHLCV: 300 frames of (1s, 1m, 1h, 1d) ETH + 300 seconds of 1s BTC
  - COB: for each 1s OHLCV frame we have ±20 buckets of COB amounts in USD
  - 1, 5, 15 and 60 s moving averages of the COB imbalance, counted over the ±5 COB buckets
- ***OUTPUTS***:
  - suggested trade action (BUY/SELL/HOLD), paired with a confidence score
  - immediate price movement direction vector (-1: vertical down, 1: vertical up, 0: horizontal), linear, with its own confidence

**Standardized input for all models** (a sketch of a helper that assembles and validates this structure follows below):

{
    'primary_symbol': 'ETH/USDT',
    'reference_symbol': 'BTC/USDT',
    'eth_data': {'ETH_1s': df, 'ETH_1m': df, 'ETH_1h': df, 'ETH_1d': df},
    'btc_data': {'BTC_1s': df},
    'current_prices': {'ETH': price, 'BTC': price},
    'data_completeness': {...}
}
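To make the standardized structure above concrete, here is a minimal sketch of a helper that assembles and sanity-checks it. The `build_base_input` and `get_ohlcv` names and the completeness rule (300 rows per frame) are illustrative assumptions, not part of the existing `core/data_provider.py` API.

```python
# Sketch: assemble the standardized model input from a DataProvider-like source.
from typing import Any, Dict

REQUIRED_ETH_FRAMES = ('ETH_1s', 'ETH_1m', 'ETH_1h', 'ETH_1d')
FRAME_LENGTH = 300  # 300 candles per timeframe, as specified above

def build_base_input(data_provider: Any) -> Dict[str, Any]:
    eth_data = {name: data_provider.get_ohlcv(name, limit=FRAME_LENGTH)   # assumed method
                for name in REQUIRED_ETH_FRAMES}
    btc_data = {'BTC_1s': data_provider.get_ohlcv('BTC_1s', limit=FRAME_LENGTH)}

    # Completeness flags let models (and the orchestrator) refuse to infer on partial data.
    completeness = {name: len(df) == FRAME_LENGTH
                    for name, df in {**eth_data, **btc_data}.items()}

    return {
        'primary_symbol': 'ETH/USDT',
        'reference_symbol': 'BTC/USDT',
        'eth_data': eth_data,
        'btc_data': btc_data,
        'current_prices': {
            'ETH': float(eth_data['ETH_1s']['close'].iloc[-1]),
            'BTC': float(btc_data['BTC_1s']['close'].iloc[-1]),
        },
        'data_completeness': completeness,
    }
```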
### 2. CNN Model

The CNN Model is responsible for analyzing patterns in market data and predicting pivot points across multiple timeframes.

#### Key Classes and Interfaces

- **CNNModel**: Main class for the CNN model.
- **PivotPointPredictor**: Interface for predicting pivot points.
- **CNNTrainer**: Class for training the CNN model.
- ***INPUTS***: COB + OHLCV + old pivots (5 levels of pivots)
- ***OUTPUTS***: next pivot point for each level as a price-time vector (can be plotted as a trend line) + suggested trade action (BUY/SELL)

#### Implementation Details

The CNN Model will:
- Accept multi-timeframe and multi-symbol data as input
- Output predicted pivot points for each timeframe (1s, 1m, 1h, 1d)
- Provide confidence scores for each prediction
- Make hidden layer states available for the RL model

Architecture (a compact sketch follows below):
- Input layer: Multi-channel input for different timeframes and symbols
- Convolutional layers: Extract patterns from time series data
- LSTM/GRU layers: Capture temporal dependencies
- Attention mechanism: Focus on relevant parts of the input
- Output layer: Predict pivot points and confidence scores

Training:
- Use programmatically calculated pivot points as ground truth
- Train on historical data
- Update the model when new pivot points are detected
- Use backpropagation to optimize weights
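A compact PyTorch sketch of the architecture described above, assuming the multi-timeframe input has already been flattened into a `(batch, channels, sequence)` tensor. Layer sizes, channel counts, and the pivot head layout are illustrative assumptions, not the final model.

```python
# Illustrative CNN + GRU + attention sketch; all sizes are assumptions.
import torch
import torch.nn as nn

class CNNPivotModel(nn.Module):
    def __init__(self, in_channels: int = 8, hidden: int = 128, pivot_levels: int = 5):
        super().__init__()
        self.pivot_levels = pivot_levels
        self.conv = nn.Sequential(              # extract local patterns from the time series
            nn.Conv1d(in_channels, 64, kernel_size=5, padding=2), nn.ReLU(),
            nn.Conv1d(64, hidden, kernel_size=3, padding=1), nn.ReLU(),
        )
        self.gru = nn.GRU(hidden, hidden, batch_first=True)           # temporal dependencies
        self.attn = nn.MultiheadAttention(hidden, num_heads=4, batch_first=True)
        self.pivot_head = nn.Linear(hidden, pivot_levels * 2)         # (price, time) per level
        self.conf_head = nn.Linear(hidden, pivot_levels)              # confidence per level
        self.action_head = nn.Linear(hidden, 2)                       # BUY / SELL logits

    def forward(self, x: torch.Tensor) -> dict:
        # x: (batch, channels, sequence)
        feats = self.conv(x).transpose(1, 2)        # -> (batch, seq, hidden)
        seq, _ = self.gru(feats)
        attended, _ = self.attn(seq, seq, seq)
        hidden_state = attended[:, -1, :]           # exposed for cross-feeding into the RL model
        return {
            "pivots": self.pivot_head(hidden_state).view(-1, self.pivot_levels, 2),
            "pivot_confidence": torch.sigmoid(self.conf_head(hidden_state)),
            "action_logits": self.action_head(hidden_state),
            "hidden_state": hidden_state,
        }
```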
### 3. RL Model

The RL Model is responsible for learning optimal trading strategies based on market data and CNN predictions.

#### Key Classes and Interfaces

- **RLModel**: Main class for the RL model.
- **TradingActionGenerator**: Interface for generating trading actions.
- **RLTrainer**: Class for training the RL model.

#### Implementation Details

The RL Model will:
- Accept market data, CNN model predictions (output), and CNN hidden layer states as input
- Output trading action recommendations (buy/sell)
- Provide confidence scores for each action
- Learn from past experiences to adapt to the current market environment

Architecture:
- State representation: Market data, CNN model predictions (output), CNN hidden layer states
- Action space: Buy, Sell
- Reward function: PnL, risk-adjusted returns
- Policy network: Deep neural network
- Value network: Estimate expected returns

Training:
- Use reinforcement learning algorithms (DQN, PPO, A3C)
- Train on historical data
- Update the model based on trading outcomes
- Use experience replay to improve sample efficiency

### 4. Orchestrator

The Orchestrator serves as the central coordination hub of the multi-modal trading system, responsible for data subscription management, model inference coordination, output storage, training pipeline orchestration, and inference-training feedback loop management.

#### Key Classes and Interfaces

- **Orchestrator**: Main class for the orchestrator.
- **DataSubscriptionManager**: Manages subscriptions to multiple data streams with different refresh rates.
- **ModelInferenceCoordinator**: Coordinates inference across all models.
- **ModelOutputStore**: Stores and manages model outputs for cross-model feeding.
- **TrainingPipelineManager**: Manages training pipelines for all models.
- **DecisionMaker**: Interface for making trading decisions.
- **MoEGateway**: Mixture of Experts gateway for model integration.

#### Core Responsibilities

##### 1. Data Subscription and Management

The Orchestrator subscribes to the Data Provider and manages multiple data streams with varying refresh rates:

- **10Hz COB (Cumulative Order Book) Data**: High-frequency order book updates for real-time market depth analysis
- **OHLCV Data**: Traditional candlestick data at multiple timeframes (1s, 1m, 1h, 1d)
- **Market Tick Data**: Individual trade executions and price movements
- **Technical Indicators**: Calculated indicators that update at different frequencies
- **Pivot Points**: Market structure analysis data

**Data Stream Management**:
- Maintains separate buffers for each data type with appropriate retention policies
- Ensures thread-safe access to data streams from multiple models
- Implements intelligent caching to serve "last updated" data efficiently
- Maintains a full base dataframe that stays current for any model requesting data
- Handles data synchronization across different refresh rates

**Enhanced 1s Timeseries Data Combination**:
- Combines OHLCV data with COB (Cumulative Order Book) data for 1s timeframes
- Implements price bucket aggregation: ±20 buckets around the current price (see the sketch after this list)
- ETH: $1 bucket size (e.g., a $3000-$3040 range = 40 buckets when the current price is 3020)
- BTC: $10 bucket size (e.g., a $50000-$50400 range = 40 buckets when the price is 50200)
- Creates a unified base data input that includes:
  - Traditional OHLCV metrics (Open, High, Low, Close, Volume)
  - Order book depth and liquidity at each price level
  - Bid/ask imbalances for the ±5 buckets, with moving averages over 5, 15, and 60 s
  - Volume-weighted average prices within buckets
  - Order flow dynamics and market microstructure data
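A minimal sketch of the ±20-bucket COB aggregation and the ±5-bucket imbalance with rolling moving averages. The bucket rounding rule, function names, and the way early samples are averaged are illustrative choices, not the orchestrator's actual implementation.

```python
# Illustrative: aggregate COB liquidity into +-20 price buckets around the current
# price and track the +-5 bucket imbalance with 5/15/60-sample moving averages.
import math
from collections import deque
from statistics import fmean
from typing import Dict, List, Tuple

def cob_buckets(levels: List[Tuple[float, float]], mid: float, bucket: float,
                n_buckets: int = 20) -> Dict[int, float]:
    """levels: (price, usd_amount) pairs; returns bucket offset -> summed USD within +-n_buckets."""
    out: Dict[int, float] = {}
    for price, usd in levels:
        offset = math.floor((price - mid) / bucket)
        if -n_buckets <= offset < n_buckets:
            out[offset] = out.get(offset, 0.0) + usd
    return out

def imbalance(bids: Dict[int, float], asks: Dict[int, float], k: int = 5) -> float:
    """Signed bid/ask imbalance over the +-k buckets nearest the mid price."""
    bid_usd = sum(v for o, v in bids.items() if -k <= o < 0)
    ask_usd = sum(v for o, v in asks.items() if 0 <= o < k)
    total = bid_usd + ask_usd
    return (bid_usd - ask_usd) / total if total else 0.0

class ImbalanceMA:
    """Rolling means of the 1s imbalance over the last 5, 15 and 60 samples."""
    def __init__(self) -> None:
        self.history: deque = deque(maxlen=60)

    def update(self, value: float) -> Dict[str, float]:
        self.history.append(value)
        h = list(self.history)
        # Early in the stream the windows simply use however many samples exist.
        return {f"ma_{n}": fmean(h[-n:]) for n in (5, 15, 60)}
```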
##### 2. Model Inference Coordination

The Orchestrator coordinates inference across all models in the system:

**Inference Pipeline**:
- Triggers model inference when relevant data updates occur
- Manages inference scheduling based on data availability and model requirements
- Coordinates parallel inference execution for independent models
- Handles model dependencies (e.g., the RL model waiting for CNN hidden states)

**Model Input Management**:
- Assembles appropriate input data for each model based on their requirements
- Ensures models receive the most current data available at inference time
- Manages feature engineering and data preprocessing for each model
- Handles different input formats and requirements across models

##### 3. Model Output Storage and Cross-Feeding

The Orchestrator maintains a centralized store for all model outputs and manages cross-model data feeding:

**Output Storage**:
- Stores CNN predictions, confidence scores, and hidden layer states
- Stores RL action recommendations and value estimates
- Stores outputs from all models in an extensible format supporting future models (LSTM, Transformer, etc.)
- Maintains historical output sequences for temporal analysis
- Implements efficient retrieval mechanisms for real-time access
- Uses a standardized ModelOutput format for easy extension and cross-model compatibility (see the sketch after this list)

**Cross-Model Feeding**:
- Feeds CNN hidden layer states into RL model inputs
- Provides CNN predictions as context for RL decision-making
- Includes "last predictions" from each available model as part of the base data input
- Stores model outputs that become inputs for subsequent inference cycles
- Manages circular dependencies and feedback loops between models
- Supports dynamic model addition without requiring system architecture changes
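A minimal sketch of the standardized ModelOutput record and store. Field names and the bounded-history policy are illustrative assumptions, meant only to show how new model types could be added without schema changes.

```python
# Illustrative ModelOutput record and store; field names are assumptions, not a fixed schema.
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple

@dataclass
class ModelOutput:
    model_name: str                      # e.g. "enhanced_cnn", "dqn_agent", a future "transformer"
    model_type: str                      # "cnn" | "rl" | "lstm" | ...
    symbol: str
    timestamp: datetime
    predictions: Dict[str, Any]          # model-specific payload (pivots, action, direction, ...)
    confidence: float
    hidden_state: Optional[Any] = None   # cross-fed into downstream models when present
    metadata: Dict[str, Any] = field(default_factory=dict)

class ModelOutputStore:
    """Keeps the latest output per (model, symbol) plus a bounded history."""
    def __init__(self, max_history: int = 1000):
        self.latest: Dict[Tuple[str, str], ModelOutput] = {}
        self.history: List[ModelOutput] = []
        self.max_history = max_history

    def put(self, out: ModelOutput) -> None:
        self.latest[(out.model_name, out.symbol)] = out
        self.history.append(out)
        del self.history[:-self.max_history]     # keep only the most recent entries

    def last(self, model_name: str, symbol: str) -> Optional[ModelOutput]:
        return self.latest.get((model_name, symbol))
```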
##### 4. Training Pipeline Management

The Orchestrator coordinates training for all models by managing the prediction-result feedback loop:

**Training Coordination**:
- Calls each model's training pipeline when new inference results are available
- Provides previous predictions alongside new results for supervised learning
- Manages training data collection and labeling
- Coordinates online learning updates based on real-time performance

**Training Data Management**:
- Maintains training datasets with prediction-result pairs
- Implements data quality checks and filtering
- Manages training data retention and archival policies
- Provides training data statistics and monitoring

**Performance Tracking**:
- Tracks prediction accuracy for each model over time
- Monitors model performance degradation and triggers retraining
- Maintains performance metrics for model comparison and selection

**Training Progress and Checkpoint Persistence**:
- Uses the checkpoint manager to store checkpoints for each model as training progresses and performance improves
- The checkpoint manager keeps only the top 5-10 checkpoints per model, deleting the least performant ones; performance metadata is stored alongside each checkpoint so they can be ranked
- Automatically loads the best stored checkpoint for each model at startup (see the retention sketch below)
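A minimal sketch of the best-K retention policy (the file layout and scoring metric are assumptions, not the project's actual checkpoint manager):

```python
import heapq
import os
from dataclasses import dataclass, field
from typing import List

@dataclass(order=True)
class _Checkpoint:
    score: float                     # higher is better, e.g. validation accuracy
    path: str = field(compare=False)

class CheckpointManager:
    """Keep only the K best checkpoints, ranked by a performance score."""

    def __init__(self, keep_best: int = 10):
        self.keep_best = keep_best
        self._heap: List[_Checkpoint] = []   # min-heap on score

    def save(self, path: str, score: float) -> None:
        # The checkpoint file at `path` is assumed to be written by the training loop.
        heapq.heappush(self._heap, _Checkpoint(score, path))
        if len(self._heap) > self.keep_best:
            worst = heapq.heappop(self._heap)      # drop the least performant
            if os.path.exists(worst.path):
                os.remove(worst.path)

    def best(self) -> str:
        # Loaded at startup when stored checkpoints exist.
        return max(self._heap).path
```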
##### 5. Inference Data Validation and Storage

The Orchestrator implements comprehensive inference data validation and persistent storage:

**Input Data Validation**:
- Validates complete OHLCV dataframes for all required timeframes before inference
- Checks input data dimensions against model requirements
- Logs missing components and prevents prediction on incomplete data
- Raises validation errors with specific details about expected vs actual dimensions

**Inference History Storage**:
- Stores complete input data packages with each prediction in persistent storage
- Includes timestamp, symbol, input features, prediction outputs, confidence scores, and model internal states
- Maintains compressed storage to minimize footprint while preserving accessibility
- Implements efficient query mechanisms by symbol, timeframe, and date range

**Storage Management**:
- Applies configurable retention policies to manage storage limits
- Archives or removes oldest entries when limits are reached
- Prioritizes keeping most recent and valuable training examples during storage pressure
- Provides data completeness metrics and validation results in logs
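A validation sketch under the data-spec assumptions stated elsewhere in this document (300 frames per timeframe, each timeframe stored as a list of bars):

```python
from typing import Dict, List

REQUIRED_TIMEFRAMES = ("1s", "1m", "1h", "1d")  # per the data collection spec
REQUIRED_FRAMES = 300

class InferenceValidationError(ValueError):
    pass

def validate_ohlcv(ohlcv_data: Dict[str, List]) -> None:
    """Reject inference when a timeframe is missing or its frame count
    differs from what the models expect, with specific details."""
    missing = [tf for tf in REQUIRED_TIMEFRAMES if tf not in ohlcv_data]
    if missing:
        raise InferenceValidationError(f"missing OHLCV timeframes: {missing}")
    for tf in REQUIRED_TIMEFRAMES:
        actual = len(ohlcv_data[tf])
        if actual != REQUIRED_FRAMES:
            raise InferenceValidationError(
                f"{tf}: expected {REQUIRED_FRAMES} frames, got {actual}")
```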
##### 6. Inference-Training Feedback Loop

The Orchestrator manages the continuous learning cycle through inference-training feedback:

**Prediction Outcome Evaluation**:
- Evaluates prediction accuracy against actual price movements after sufficient time has passed
- Creates training examples using stored inference data paired with actual market outcomes
- Feeds prediction-result pairs back to respective models for learning

**Adaptive Learning Signals**:
- Provides positive reinforcement signals for accurate predictions
- Delivers corrective training signals for inaccurate predictions to help models learn from mistakes
- Retrieves last inference data for each model to compare predictions against actual outcomes

**Continuous Improvement Tracking**:
- Tracks and reports accuracy improvements or degradations over time
- Monitors model learning progress through the feedback loop
- Alerts administrators when data flow issues are detected with specific error details and remediation suggestions
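The evaluation step might look like the following sketch (the inference-record layout and the 5-minute horizon are assumptions):

```python
from datetime import datetime, timedelta
from typing import Optional

def evaluate_prediction(record: dict, actual_price: float,
                        horizon: timedelta = timedelta(minutes=5)) -> Optional[dict]:
    """Turn a stored inference record into a training example with a
    positive or corrective signal once the evaluation horizon has passed."""
    if datetime.utcnow() - record["timestamp"] < horizon:
        return None                            # not enough time has passed yet
    predicted_up = record["prediction"]["action"] == "buy"
    actually_up = actual_price > record["price_at_prediction"]
    correct = predicted_up == actually_up
    return {
        "inputs": record["inputs"],            # complete stored input package
        "target": record["prediction"],
        "outcome": actual_price,
        "signal": +1.0 if correct else -1.0,   # reinforcement vs corrective
    }
```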
##### 7. Decision Making and Trading Actions

Beyond coordination, the Orchestrator makes final trading decisions:

**Decision Integration**:
- Combines outputs from CNN and RL models using a Mixture of Experts approach
- Applies confidence-based filtering to avoid uncertain trades
- Implements configurable thresholds for buy/sell decisions
- Considers market conditions and risk parameters
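A confidence-weighted combination with a no-trade band could look like this sketch (the weighting rule and thresholds are illustrative, not the tuned production logic):

```python
from typing import Dict, Optional

def combine_decisions(outputs: Dict[str, dict],
                      buy_threshold: float = 0.6,
                      sell_threshold: float = 0.6) -> Optional[str]:
    """Confidence-weighted vote over model outputs such as {"cnn": ..., "rl": ...}."""
    score = 0.0
    total_conf = 0.0
    for out in outputs.values():
        direction = +1.0 if out["action"] == "buy" else -1.0
        score += direction * out["confidence"]
        total_conf += out["confidence"]
    if total_conf == 0:
        return None
    score /= total_conf                     # normalized into [-1, 1]
    if score >= buy_threshold:
        return "buy"
    if score <= -sell_threshold:
        return "sell"
    return None                             # too uncertain: stay out of the market
```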
#### Implementation Details

**Architecture**:
```python
class Orchestrator:
    def __init__(self):
        self.data_subscription_manager = DataSubscriptionManager()
        self.model_inference_coordinator = ModelInferenceCoordinator()
        self.model_output_store = ModelOutputStore()
        self.training_pipeline_manager = TrainingPipelineManager()
        self.decision_maker = DecisionMaker()
        self.moe_gateway = MoEGateway()

    async def run(self):
        # Subscribe to data streams
        await self.data_subscription_manager.subscribe_to_data_provider()

        # Start inference coordination loop
        await self.model_inference_coordinator.start()

        # Start training pipeline management
        await self.training_pipeline_manager.start()
```

**Data Flow Management**:
- Implements event-driven architecture for data updates
- Uses async/await patterns for non-blocking operations
- Maintains data freshness timestamps for each stream
- Implements backpressure handling for high-frequency data (see the buffering sketch below)

**Model Coordination**:
- Manages model lifecycle (loading, inference, training, updating)
- Implements model versioning and rollback capabilities
- Handles model failures and fallback mechanisms
- Provides model performance monitoring and alerting

**Training Integration**:
- Implements incremental learning strategies
- Manages training batch composition and scheduling
- Provides training progress monitoring and control
- Handles training failures and recovery
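One way to realize the backpressure handling is a bounded buffer that drops the stalest update so consumers always see fresh data, sketched here (the queue size is an assumption):

```python
import asyncio

class DataStreamBuffer:
    """Bounded buffer for high-frequency streams with drop-oldest backpressure."""

    def __init__(self, maxsize: int = 1000):
        self._queue: asyncio.Queue = asyncio.Queue(maxsize=maxsize)

    async def publish(self, update) -> None:
        if self._queue.full():
            _ = self._queue.get_nowait()     # drop the stalest update
        await self._queue.put(update)

    async def consume(self):
        # Async generator yielding updates as they arrive.
        while True:
            update = await self._queue.get()
            yield update
```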
### 5. Trading Executor

The Trading Executor is responsible for executing trading actions through brokerage APIs.

#### Key Classes and Interfaces

- **TradingExecutor**: Main class for the trading executor.
- **BrokerageAPI**: Interface for interacting with brokerages.
- **OrderManager**: Class for managing orders.

#### Implementation Details

The Trading Executor will:
- Accept trading actions from the orchestrator
- Execute orders through brokerage APIs
- Manage order lifecycle
- Handle errors and retries
- Provide feedback on order execution

Supported brokerages:
- MEXC
- Binance
- Bybit (future extension)

Order types:
- Market orders
- Limit orders
- Stop-loss orders
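A sketch of the brokerage interface (method names and signatures are assumptions; concrete MEXC and Binance adapters would implement them against each exchange's REST/WebSocket APIs):

```python
from abc import ABC, abstractmethod
from typing import Optional

class BrokerageAPI(ABC):
    """Common interface the TradingExecutor programs against."""

    @abstractmethod
    def place_order(self, symbol: str, side: str, order_type: str,
                    quantity: float, price: Optional[float] = None) -> str:
        """Submit an order and return the exchange order id."""

    @abstractmethod
    def cancel_order(self, symbol: str, order_id: str) -> bool:
        """Cancel an open order; return True on success."""

    @abstractmethod
    def get_order_status(self, symbol: str, order_id: str) -> dict:
        """Return the current order state (filled quantity, status, ...)."""
```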
### 6. Risk Manager

The Risk Manager is responsible for implementing risk management features like stop-loss and position sizing.

#### Key Classes and Interfaces

- **RiskManager**: Main class for the risk manager.
- **StopLossManager**: Class for managing stop-loss orders.
- **PositionSizer**: Class for determining position sizes.

#### Implementation Details

The Risk Manager will:
- Implement configurable stop-loss functionality
- Implement configurable position sizing based on risk parameters
- Implement configurable maximum drawdown limits
- Provide real-time risk metrics
- Provide alerts for high-risk situations

Risk parameters:
- Maximum position size
- Maximum drawdown
- Risk per trade
- Maximum leverage
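The PositionSizer could follow the standard fixed-fractional rule sketched below (parameter names are illustrative):

```python
def position_size(account_equity: float, risk_per_trade: float,
                  entry_price: float, stop_price: float,
                  max_position_size: float) -> float:
    """Risk a fixed fraction of equity per trade, capped by the configured maximum.

    Example: 10_000 equity, 1% risk, entry 3000, stop 2970 ->
    risk budget 100 / 30 per-unit risk = 3.33 units (before the cap)."""
    per_unit_risk = abs(entry_price - stop_price)
    if per_unit_risk == 0:
        return 0.0
    size = (account_equity * risk_per_trade) / per_unit_risk
    return min(size, max_position_size)
```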
### 7. Dashboard

The Dashboard provides a user interface for monitoring and controlling the system.

#### Key Classes and Interfaces

- **Dashboard**: Main class for the dashboard.
- **ChartManager**: Class for managing charts.
- **ControlPanel**: Class for managing controls.

#### Implementation Details

The Dashboard will:
- Display real-time market data for all symbols and timeframes
- Display OHLCV charts for all timeframes
- Display CNN pivot point predictions and confidence levels
- Display RL and orchestrator trading actions and confidence levels
- Display system status and model performance metrics
- Provide start/stop toggles for all system processes
- Provide sliders to adjust buy/sell thresholds for the orchestrator

Implementation:
- Web-based dashboard using Flask/Dash
- Real-time updates using WebSockets
- Interactive charts using Plotly
- Server-side processing for all models
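A minimal Dash skeleton for a chart plus threshold slider is sketched below; it polls with `dcc.Interval` as a placeholder (the design calls for WebSocket pushes), and the layout and component IDs are assumptions:

```python
from dash import Dash, dcc, html, Input, Output
import plotly.graph_objects as go

app = Dash(__name__)
app.layout = html.Div([
    dcc.Graph(id="ohlcv-chart"),
    dcc.Slider(id="buy-threshold", min=0.0, max=1.0, step=0.05, value=0.6),
    dcc.Interval(id="refresh", interval=1000),   # poll server-side state every 1s
])

@app.callback(Output("ohlcv-chart", "figure"),
              Input("refresh", "n_intervals"),
              Input("buy-threshold", "value"))
def update_chart(_, buy_threshold):
    # The real dashboard would read the latest bars from the data provider;
    # a single placeholder candle keeps this sketch runnable.
    fig = go.Figure(go.Candlestick(x=[0], open=[1], high=[2], low=[0.5], close=[1.5]))
    fig.update_layout(title=f"ETH/USDT (buy threshold {buy_threshold:.2f})")
    return fig

if __name__ == "__main__":
    app.run(debug=False)
```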
## Data Models

### Market Data

```python
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Dict, List, Optional

@dataclass
class MarketTick:
    symbol: str
    timestamp: datetime
    price: float
    volume: float
    quantity: float
    side: str  # 'buy' or 'sell'
    trade_id: str
    is_buyer_maker: bool
    raw_data: Dict[str, Any] = field(default_factory=dict)
```

### OHLCV Data

```python
@dataclass
class OHLCVBar:
    symbol: str
    timestamp: datetime
    open: float
    high: float
    low: float
    close: float
    volume: float
    timeframe: str
    indicators: Dict[str, float] = field(default_factory=dict)
```

### Pivot Points

```python
@dataclass
class PivotPoint:
    symbol: str
    timestamp: datetime
    price: float
    type: str  # 'high' or 'low'
    level: int  # Pivot level (1, 2, 3, etc.)
    confidence: float = 1.0
```

### Trading Actions

```python
@dataclass
class TradingAction:
    symbol: str
    timestamp: datetime
    action: str  # 'buy' or 'sell'
    confidence: float
    source: str  # 'rl', 'cnn', 'orchestrator'
    price: Optional[float] = None
    quantity: Optional[float] = None
    reason: Optional[str] = None
```

### Model Predictions

```python
@dataclass
class ModelOutput:
    """Extensible model output format supporting all model types"""
    model_type: str  # 'cnn', 'rl', 'lstm', 'transformer', 'orchestrator'
    model_name: str  # Specific model identifier
    symbol: str
    timestamp: datetime
    confidence: float
    predictions: Dict[str, Any]  # Model-specific predictions
    hidden_states: Optional[Dict[str, Any]] = None  # For cross-model feeding
    metadata: Dict[str, Any] = field(default_factory=dict)  # Additional info
```

```python
@dataclass
class CNNPrediction:
    symbol: str
    timestamp: datetime
    pivot_points: List[PivotPoint]
    hidden_states: Dict[str, Any]
    confidence: float
```

```python
@dataclass
class RLPrediction:
    symbol: str
    timestamp: datetime
    action: str  # 'buy' or 'sell'
    confidence: float
    expected_reward: float
```

### Enhanced Base Data Input

```python
@dataclass
class BaseDataInput:
    """Unified base data input for all models"""
    symbol: str
    timestamp: datetime
    ohlcv_data: Dict[str, OHLCVBar]  # Multi-timeframe OHLCV
    cob_data: Optional[Dict[str, float]] = None  # COB buckets for 1s timeframe
    technical_indicators: Dict[str, float] = field(default_factory=dict)
    pivot_points: List[PivotPoint] = field(default_factory=list)
    last_predictions: Dict[str, ModelOutput] = field(default_factory=dict)  # From all models
    market_microstructure: Dict[str, Any] = field(default_factory=dict)  # Order flow, etc.
```

### COB Data Structure

```python
@dataclass
class COBData:
    """Cumulative Order Book data for price buckets"""
    symbol: str
    timestamp: datetime
    current_price: float
    bucket_size: float  # $1 for ETH, $10 for BTC
    price_buckets: Dict[float, Dict[str, float]]  # price -> {bid_volume, ask_volume, etc.}
    bid_ask_imbalance: Dict[float, float]  # price -> imbalance ratio
    volume_weighted_prices: Dict[float, float]  # price -> VWAP within bucket
    order_flow_metrics: Dict[str, float]  # Various order flow indicators
```
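For illustration, the `bid_ask_imbalance` field could be derived from `price_buckets` as in this sketch (the bucket key names follow the comments above and are otherwise assumptions):

```python
def bucket_imbalance(price_buckets: dict) -> dict:
    """Compute per-bucket imbalance in [-1, 1]: +1 = all bids, -1 = all asks."""
    imbalance = {}
    for price, volumes in price_buckets.items():
        bid = volumes.get("bid_volume", 0.0)
        ask = volumes.get("ask_volume", 0.0)
        total = bid + ask
        imbalance[price] = (bid - ask) / total if total > 0 else 0.0
    return imbalance

# Example with $1 ETH buckets around a 3000 mid price:
# bucket_imbalance({2999.0: {"bid_volume": 12.0, "ask_volume": 4.0}}) -> {2999.0: 0.5}
```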
## Error Handling

### Data Collection Errors

- Implement retry mechanisms for API failures (see the backoff sketch below)
- Use fallback data sources when primary sources are unavailable
- Log all errors with detailed information
- Notify users through the dashboard

### Model Errors

- Implement model validation before deployment
- Use fallback models when primary models fail
- Log all errors with detailed information
- Notify users through the dashboard

### Trading Errors

- Implement order validation before submission
- Use retry mechanisms for order failures
- Implement circuit breakers for extreme market conditions
- Log all errors with detailed information
- Notify users through the dashboard
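A sketch of the retry policy for data collection failures (backoff constants are illustrative); on final failure the caller can switch to a fallback source:

```python
import logging
import random
import time

logger = logging.getLogger(__name__)

def fetch_with_retry(fetch, max_attempts: int = 5, base_delay: float = 0.5):
    """Exponential backoff with jitter and detailed logging."""
    for attempt in range(1, max_attempts + 1):
        try:
            return fetch()
        except Exception as exc:  # broad on purpose: log, back off, retry
            delay = base_delay * (2 ** (attempt - 1)) + random.uniform(0, 0.1)
            logger.warning("fetch failed (attempt %d/%d): %s; retrying in %.2fs",
                           attempt, max_attempts, exc, delay)
            if attempt == max_attempts:
                raise
            time.sleep(delay)
```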
## Testing Strategy

### Unit Testing

- Test individual components in isolation
- Use mock objects for dependencies
- Focus on edge cases and error handling

### Integration Testing

- Test interactions between components
- Use real data for testing
- Focus on data flow and error propagation

### System Testing

- Test the entire system end-to-end
- Use real data for testing
- Focus on performance and reliability

### Backtesting

- Test trading strategies on historical data
- Measure performance metrics (PnL, Sharpe ratio, etc.; see the sketch below)
- Compare against benchmarks
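A possible metric implementation for backtests (a zero risk-free rate is assumed; the annualization factor depends on the bar frequency):

```python
import math
from typing import Sequence

def sharpe_ratio(returns: Sequence[float], periods_per_year: int = 365) -> float:
    """Annualized Sharpe ratio over per-period returns."""
    n = len(returns)
    if n < 2:
        return 0.0
    mean = sum(returns) / n
    var = sum((r - mean) ** 2 for r in returns) / (n - 1)
    std = math.sqrt(var)
    if std == 0:
        return 0.0
    return (mean / std) * math.sqrt(periods_per_year)

# Example: daily returns of +1%, -0.5%, +0.7% -> sharpe_ratio([0.01, -0.005, 0.007])
```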
### Live Testing

- Test the system in a live environment with small position sizes
- Monitor performance and stability
- Gradually increase position sizes as confidence grows
## Implementation Plan

The implementation will follow a phased approach:

1. **Phase 1: Data Provider**
   - Implement the enhanced data provider
   - Implement pivot point calculation
   - Implement technical indicator calculation
   - Implement data normalization

2. **Phase 2: CNN Model**
   - Implement the CNN model architecture
   - Implement the training pipeline
   - Implement the inference pipeline
   - Implement the pivot point prediction

3. **Phase 3: RL Model**
   - Implement the RL model architecture
   - Implement the training pipeline
   - Implement the inference pipeline
   - Implement the trading action generation

4. **Phase 4: Orchestrator**
   - Implement the orchestrator architecture
   - Implement the decision-making logic
   - Implement the MoE gateway
   - Implement the confidence-based filtering

5. **Phase 5: Trading Executor**
   - Implement the trading executor
   - Implement the brokerage API integrations
   - Implement the order management
   - Implement the error handling

6. **Phase 6: Risk Manager**
   - Implement the risk manager
   - Implement the stop-loss functionality
   - Implement the position sizing
   - Implement the risk metrics

7. **Phase 7: Dashboard**
   - Implement the dashboard UI
   - Implement the chart management
   - Implement the control panel
   - Implement the real-time updates

8. **Phase 8: Integration and Testing**
   - Integrate all components
   - Implement comprehensive testing
   - Fix bugs and optimize performance
   - Deploy to production
## Monitoring and Visualization

### TensorBoard Integration (Future Enhancement)

A comprehensive TensorBoard integration has been designed to provide detailed training visualization and monitoring capabilities:

#### Features
- **Training Metrics Visualization**: Real-time tracking of model losses, rewards, and performance metrics
- **Feature Distribution Analysis**: Histograms and statistics of input features to validate data quality
- **State Quality Monitoring**: Tracking of comprehensive state building (13,400 features) success rates
- **Reward Component Analysis**: Detailed breakdown of reward calculations including PnL, confidence, volatility, and order flow
- **Model Performance Comparison**: Side-by-side comparison of CNN, RL, and orchestrator performance

#### Implementation Status
- **Completed**: TensorBoardLogger utility class with comprehensive logging methods
- **Completed**: Integration points in enhanced_rl_training_integration.py
- **Completed**: Enhanced run_tensorboard.py with improved visualization options
- **Status**: Ready for deployment when system stability is achieved

#### Usage
```bash
# Start TensorBoard dashboard
python run_tensorboard.py

# Access at http://localhost:6006
# View training metrics, feature distributions, and model performance
```
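The logging utility can be a thin wrapper over `torch.utils.tensorboard`, as in this sketch (the project's actual TensorBoardLogger may expose different method names):

```python
from torch.utils.tensorboard import SummaryWriter

class TensorBoardLogger:
    """Minimal logging wrapper for training metrics and feature distributions."""

    def __init__(self, log_dir: str = "runs/trading"):
        self.writer = SummaryWriter(log_dir=log_dir)

    def log_scalar(self, tag: str, value: float, step: int) -> None:
        self.writer.add_scalar(tag, value, step)        # e.g. "rl/reward"

    def log_features(self, tag: str, features, step: int) -> None:
        self.writer.add_histogram(tag, features, step)  # feature distributions

# Usage during training (illustrative):
# tb = TensorBoardLogger()
# tb.log_scalar("cnn/loss", loss, global_step)
```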
#### Benefits
- Real-time validation of the training process
- Early detection of training issues
- Feature importance analysis
- Model performance comparison
- Historical training progress tracking

**Note**: TensorBoard integration is currently deprioritized in favor of system stability and core model improvements. It will be activated once the core training system is stable and performing optimally.

## Conclusion

This design document outlines the architecture, components, data flow, and implementation details for the Multi-Modal Trading System. The system is designed to be modular, extensible, and robust, with a focus on performance, reliability, and user experience.

The implementation will follow a phased approach, with each phase building on the previous one. The system will be thoroughly tested at each phase to ensure that it meets the requirements and performs as expected.

The final system will provide traders with a powerful tool for analyzing market data, identifying trading opportunities, and executing trades with confidence.
.kiro/specs/multi-modal-trading-system/requirements.md (new file, 175 lines)
@@ -0,0 +1,175 @@
# Requirements Document
|
||||||
|
|
||||||
|
## Introduction
|
||||||
|
|
||||||
|
The Multi-Modal Trading System is an advanced algorithmic trading platform that combines Convolutional Neural Networks (CNN) and Reinforcement Learning (RL) models orchestrated by a decision-making module. The system processes multi-timeframe and multi-symbol market data (primarily ETH and BTC) to generate trading actions. The system is designed to adapt to current market conditions through continuous learning from past experiences, with the CNN module trained on historical data to predict pivot points and the RL module optimizing trading decisions based on these predictions and market data.
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
### Requirement 1: Data Collection and Processing
|
||||||
|
|
||||||
|
**User Story:** As a trader, I want the system to collect and process multi-timeframe and multi-symbol market data, so that the models have comprehensive market information for making accurate trading decisions.
|
||||||
|
|
||||||
|
#### Acceptance Criteria
|
||||||
|
|
||||||
|
0. NEVER USE GENERATED/SYNTHETIC DATA, mock implementations, or mock UI. If something is not implemented yet, that SHALL be obvious.
|
||||||
|
1. WHEN the system starts THEN it SHALL collect and process data for both ETH and BTC symbols.
|
||||||
|
2. WHEN collecting data THEN the system SHALL store the following for the primary symbol (ETH):
|
||||||
|
- 300 seconds of raw tick data: price and COB snapshot for all prices within ±1% of the current price, on fine-resolution buckets ($1 for ETH, $10 for BTC)
|
||||||
|
- 300 seconds of 1-second OHLCV data + 1s aggregated COB data
|
||||||
|
- 300 bars of OHLCV + indicators for each timeframe (1s, 1m, 1h, 1d)
|
||||||
|
3. WHEN collecting data THEN the system SHALL store similar data for the reference symbol (BTC).
|
||||||
|
4. WHEN processing data THEN the system SHALL calculate standard technical indicators for all timeframes.
|
||||||
|
5. WHEN processing data THEN the system SHALL calculate pivot points for all timeframes according to the specified methodology.
|
||||||
|
6. WHEN new data arrives THEN the system SHALL update its data cache in real-time.
|
||||||
|
7. IF tick data is not available THEN the system SHALL substitute with the lowest available timeframe data.
|
||||||
|
8. WHEN normalizing data THEN the system SHALL normalize to the max and min of the highest timeframe to maintain relationships between different timeframes.
|
||||||
|
9. Data SHALL be cached for longer than the model inputs require (initially double, i.e. 600 bars) to support backtesting once the outcomes of current predictions are known, so that test cases can be generated.
|
||||||
|
10. In general, all models SHALL have access to all collected data through a central data provider implementation; only some models are specialized. All models SHALL also take as input the last output of every other model (also cached in the data provider). The model input format SHALL leave room for adding more models, so the system can be extended without losing existing models and their trained weights and biases.
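Criterion 8 above (normalize to the max and min of the highest timeframe) can be read as the following sketch (pandas dataframes per timeframe and `1d` as the highest timeframe are assumptions):

```python
import pandas as pd

def normalize_to_highest_timeframe(frames: dict) -> dict:
    """Scale every timeframe's OHLC columns by the 1d min/max so price
    relationships across timeframes are preserved."""
    ref = frames["1d"]
    lo = ref[["open", "high", "low", "close"]].min().min()
    hi = ref[["open", "high", "low", "close"]].max().max()
    span = hi - lo if hi > lo else 1.0
    out = {}
    for tf, df in frames.items():
        scaled = df.copy()
        cols = ["open", "high", "low", "close"]
        scaled[cols] = (scaled[cols] - lo) / span
        out[tf] = scaled
    return out
```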
|
||||||
|
|
||||||
|
### Requirement 2: CNN Model Implementation
|
||||||
|
|
||||||
|
**User Story:** As a trader, I want the system to implement a CNN model that can identify patterns and predict pivot points across multiple timeframes, so that I can anticipate market direction changes.
|
||||||
|
|
||||||
|
#### Acceptance Criteria
|
||||||
|
|
||||||
|
1. WHEN the CNN model is initialized THEN it SHALL accept multi-timeframe and multi-symbol data as input.
|
||||||
|
2. WHEN processing input data THEN the CNN model SHALL output predicted pivot points for each timeframe (1s, 1m, 1h, 1d).
|
||||||
|
3. WHEN predicting pivot points THEN the CNN model SHALL provide both the predicted pivot point value and the timestamp when it is expected to occur.
|
||||||
|
4. WHEN a pivot point is detected THEN the system SHALL trigger a training round for the CNN model using historical data.
|
||||||
|
5. WHEN training the CNN model THEN the system SHALL use programmatically calculated pivot points from historical data as ground truth.
|
||||||
|
6. WHEN outputting predictions THEN the CNN model SHALL include a confidence score for each prediction.
|
||||||
|
7. WHEN calculating pivot points THEN the system SHALL implement both standard pivot points and the recursive Williams market structure pivot points as described.
|
||||||
|
8. WHEN processing data THEN the CNN model SHALL make available its hidden layer states for use by the RL model.
|
||||||
|
|
||||||
|
### Requirement 3: RL Model Implementation
|
||||||
|
|
||||||
|
**User Story:** As a trader, I want the system to implement an RL model that can learn optimal trading strategies based on market data and CNN predictions, so that the system can adapt to changing market conditions.
|
||||||
|
|
||||||
|
#### Acceptance Criteria
|
||||||
|
|
||||||
|
1. WHEN the RL model is initialized THEN it SHALL accept market data, CNN predictions, and CNN hidden layer states as input.
|
||||||
|
2. WHEN processing input data THEN the RL model SHALL output trading action recommendations (buy/sell).
|
||||||
|
3. WHEN evaluating trading actions THEN the RL model SHALL learn from past experiences to adapt to the current market environment.
|
||||||
|
4. WHEN making decisions THEN the RL model SHALL consider the confidence levels of CNN predictions.
|
||||||
|
5. WHEN uncertain about market direction THEN the RL model SHALL learn to avoid entering positions.
|
||||||
|
6. WHEN training the RL model THEN the system SHALL use a reward function that incentivizes high risk/reward setups.
|
||||||
|
7. WHEN outputting trading actions THEN the RL model SHALL provide a confidence score for each action.
|
||||||
|
8. WHEN a trading action is executed THEN the system SHALL store the input data for future training.
|
||||||
|
|
||||||
|
### Requirement 4: Orchestrator Implementation
|
||||||
|
|
||||||
|
**User Story:** As a trader, I want the system to implement an orchestrator that can make final trading decisions based on inputs from both CNN and RL models, so that the system can make more balanced and informed trading decisions.
|
||||||
|
|
||||||
|
#### Acceptance Criteria
|
||||||
|
|
||||||
|
1. WHEN the orchestrator is initialized THEN it SHALL accept inputs from both CNN and RL models.
|
||||||
|
2. WHEN processing model inputs THEN the orchestrator SHALL output final trading actions (buy/sell).
|
||||||
|
3. WHEN making decisions THEN the orchestrator SHALL consider the confidence levels of both CNN and RL models.
|
||||||
|
4. WHEN uncertain about market direction THEN the orchestrator SHALL learn to avoid entering positions.
|
||||||
|
5. WHEN implementing the orchestrator THEN the system SHALL use a Mixture of Experts (MoE) approach to allow for future model integration.
|
||||||
|
6. WHEN outputting trading actions THEN the orchestrator SHALL provide a confidence score for each action.
|
||||||
|
7. WHEN a trading action is executed THEN the system SHALL store the input data for future training.
|
||||||
|
8. WHEN implementing the orchestrator THEN the system SHALL allow for configurable thresholds for entering and exiting positions.
|
||||||
|
|
||||||
|
### Requirement 5: Training Pipeline
|
||||||
|
|
||||||
|
**User Story:** As a developer, I want the system to implement a unified training pipeline for both CNN and RL models, so that the models can be trained efficiently and consistently.
|
||||||
|
|
||||||
|
#### Acceptance Criteria
|
||||||
|
|
||||||
|
1. WHEN training models THEN the system SHALL use a unified data provider to prepare data for all models.
|
||||||
|
2. WHEN a pivot point is detected THEN the system SHALL trigger a training round for the CNN model.
|
||||||
|
3. WHEN training the CNN model THEN the system SHALL use programmatically calculated pivot points from historical data as ground truth.
|
||||||
|
4. WHEN training the RL model THEN the system SHALL use a reward function that incentivizes high risk/reward setups.
|
||||||
|
5. WHEN training models THEN the system SHALL run the training process on the server without requiring the dashboard to be open.
|
||||||
|
6. WHEN training models THEN the system SHALL provide real-time feedback on training progress through the dashboard.
|
||||||
|
7. WHEN training models THEN the system SHALL store model checkpoints for future use.
|
||||||
|
8. WHEN training models THEN the system SHALL provide metrics on model performance.
|
||||||
|
|
||||||
|
### Requirement 6: Dashboard Implementation
|
||||||
|
|
||||||
|
**User Story:** As a trader, I want the system to implement a comprehensive dashboard that displays real-time data, model predictions, and trading actions, so that I can monitor the system's performance and make informed decisions.
|
||||||
|
|
||||||
|
#### Acceptance Criteria
|
||||||
|
|
||||||
|
1. WHEN the dashboard is initialized THEN it SHALL display real-time market data for all symbols and timeframes.
|
||||||
|
2. WHEN displaying market data THEN the dashboard SHALL show OHLCV charts for all timeframes.
|
||||||
|
3. WHEN displaying model predictions THEN the dashboard SHALL show CNN pivot point predictions and confidence levels.
|
||||||
|
4. WHEN displaying trading actions THEN the dashboard SHALL show RL and orchestrator trading actions and confidence levels.
|
||||||
|
5. WHEN displaying system status THEN the dashboard SHALL show training progress and model performance metrics.
|
||||||
|
6. WHEN implementing controls THEN the dashboard SHALL provide start/stop toggles for all system processes.
|
||||||
|
7. WHEN implementing controls THEN the dashboard SHALL provide sliders to adjust buy/sell thresholds for the orchestrator.
|
||||||
|
8. WHEN implementing the dashboard THEN the system SHALL ensure all processes run on the server without requiring the dashboard to be open.
|
||||||
|
|
||||||
|
### Requirement 7: Risk Management
|
||||||
|
|
||||||
|
**User Story:** As a trader, I want the system to implement risk management features, so that I can protect my capital from significant losses.
|
||||||
|
|
||||||
|
#### Acceptance Criteria
|
||||||
|
|
||||||
|
1. WHEN implementing risk management THEN the system SHALL provide configurable stop-loss functionality.
|
||||||
|
2. WHEN a stop-loss is triggered THEN the system SHALL automatically close the position.
|
||||||
|
3. WHEN implementing risk management THEN the system SHALL provide configurable position sizing based on risk parameters.
|
||||||
|
4. WHEN implementing risk management THEN the system SHALL provide configurable maximum drawdown limits.
|
||||||
|
5. WHEN maximum drawdown limits are reached THEN the system SHALL automatically stop trading.
|
||||||
|
6. WHEN implementing risk management THEN the system SHALL provide real-time risk metrics through the dashboard.
|
||||||
|
7. WHEN implementing risk management THEN the system SHALL allow for different risk parameters for different market conditions.
|
||||||
|
8. WHEN implementing risk management THEN the system SHALL provide alerts for high-risk situations.
|
||||||
|
|
||||||
|
### Requirement 8: System Architecture and Integration
|
||||||
|
|
||||||
|
**User Story:** As a developer, I want the system to implement a clean and modular architecture, so that the system is easy to maintain and extend.
|
||||||
|
|
||||||
|
#### Acceptance Criteria
|
||||||
|
|
||||||
|
1. WHEN implementing the system architecture THEN the system SHALL use a unified data provider to prepare data for all models.
|
||||||
|
2. WHEN implementing the system architecture THEN the system SHALL use a modular approach to allow for easy extension.
|
||||||
|
3. WHEN implementing the system architecture THEN the system SHALL use a clean separation of concerns between data collection, model training, and trading execution.
|
||||||
|
4. WHEN implementing the system architecture THEN the system SHALL use a unified interface for all models.
|
||||||
|
5. WHEN implementing the system architecture THEN the system SHALL use a unified interface for all data providers.
|
||||||
|
6. WHEN implementing the system architecture THEN the system SHALL use a unified interface for all trading executors.
|
||||||
|
7. WHEN implementing the system architecture THEN the system SHALL use a unified interface for all risk management components.
|
||||||
|
8. WHEN implementing the system architecture THEN the system SHALL use a unified interface for all dashboard components.
|
||||||
|
|
||||||
|
### Requirement 9: Model Inference Data Validation and Storage
|
||||||
|
|
||||||
|
**User Story:** As a trading system developer, I want to ensure that all model predictions include complete input data validation and persistent storage, so that I can verify models receive correct inputs and track their performance over time.
|
||||||
|
|
||||||
|
#### Acceptance Criteria
|
||||||
|
|
||||||
|
1. WHEN a model makes a prediction THEN the system SHALL validate that the input data contains complete OHLCV dataframes for all required timeframes
|
||||||
|
2. WHEN input data is incomplete THEN the system SHALL log the missing components and SHALL NOT proceed with prediction
|
||||||
|
3. WHEN input validation passes THEN the system SHALL store the complete input data package with the prediction in persistent storage
|
||||||
|
4. IF input data dimensions are incorrect THEN the system SHALL raise a validation error with specific details about expected vs actual dimensions
|
||||||
|
5. WHEN a model completes inference THEN the system SHALL store the complete input data, model outputs, confidence scores, and metadata in a persistent inference history
|
||||||
|
6. WHEN storing inference data THEN the system SHALL include timestamp, symbol, input features, prediction outputs, and model internal states
|
||||||
|
7. IF inference history storage fails THEN the system SHALL log the error and continue operation without breaking the prediction flow
|
||||||
|
|
||||||
|
### Requirement 10: Inference-Training Feedback Loop
|
||||||
|
|
||||||
|
**User Story:** As a machine learning engineer, I want the system to automatically train models using their previous inference data compared to actual market outcomes, so that models continuously improve their accuracy through real-world feedback.
|
||||||
|
|
||||||
|
#### Acceptance Criteria
|
||||||
|
|
||||||
|
1. WHEN sufficient time has passed after a prediction THEN the system SHALL evaluate the prediction accuracy against actual price movements
|
||||||
|
2. WHEN a prediction outcome is determined THEN the system SHALL create a training example using the stored inference data and actual outcome
|
||||||
|
3. WHEN training examples are created THEN the system SHALL feed them back to the respective models for learning
|
||||||
|
4. IF the prediction was accurate THEN the system SHALL reinforce the model's decision pathway through positive training signals
|
||||||
|
5. IF the prediction was inaccurate THEN the system SHALL provide corrective training signals to help the model learn from mistakes
|
||||||
|
6. WHEN the system needs training data THEN it SHALL retrieve the last inference data for each model to compare predictions against actual market outcomes
|
||||||
|
7. WHEN models are trained on inference feedback THEN the system SHALL track and report accuracy improvements or degradations over time
|
||||||
|
|
||||||
|
### Requirement 11: Inference History Management and Monitoring
|
||||||
|
|
||||||
|
**User Story:** As a system administrator, I want comprehensive logging and monitoring of the inference-training feedback loop with configurable retention policies, so that I can track model learning progress and manage storage efficiently.
|
||||||
|
|
||||||
|
#### Acceptance Criteria
|
||||||
|
|
||||||
|
1. WHEN inference data is stored THEN the system SHALL log the storage operation with data completeness metrics and validation results
|
||||||
|
2. WHEN training occurs based on previous inference THEN the system SHALL log the training outcome and model performance changes
|
||||||
|
3. WHEN the system detects data flow issues THEN it SHALL alert administrators with specific error details and suggested remediation
|
||||||
|
4. WHEN inference history reaches configured limits THEN the system SHALL archive or remove oldest entries based on retention policy
|
||||||
|
5. WHEN storing inference data THEN the system SHALL compress data to minimize storage footprint while maintaining accessibility
|
||||||
|
6. WHEN retrieving historical inference data THEN the system SHALL provide efficient query mechanisms by symbol, timeframe, and date range
|
||||||
|
7. IF storage space is critically low THEN the system SHALL prioritize keeping the most recent and most valuable training examples
|
||||||
.kiro/specs/multi-modal-trading-system/tasks.md (new file, 382 lines)
@@ -0,0 +1,382 @@
|
|||||||
|
# Implementation Plan
|
||||||
|
|
||||||
|
## Enhanced Data Provider and COB Integration
|
||||||
|
|
||||||
|
- [ ] 1. Enhance the existing DataProvider class with standardized model inputs
|
||||||
|
- Extend the current implementation in core/data_provider.py
|
||||||
|
- Implement standardized COB+OHLCV data frame for all models
|
||||||
|
- Create unified input format: 300 frames OHLCV (1s, 1m, 1h, 1d) ETH + 300s of 1s BTC
|
||||||
|
- Integrate with existing multi_exchange_cob_provider.py for COB data
|
||||||
|
- _Requirements: 1.1, 1.2, 1.3, 1.6_
|
||||||
|
|
||||||
|
- [ ] 1.1. Implement standardized COB+OHLCV data frame for all models
|
||||||
|
- Create BaseDataInput class with standardized format for all models
|
||||||
|
- Implement OHLCV: 300 frames of (1s, 1m, 1h, 1d) ETH + 300s of 1s BTC
|
||||||
|
- Add COB: ±20 buckets of COB amounts in USD for each 1s OHLCV
|
||||||
|
- Include 1s, 5s, 15s, and 60s MA of COB imbalance counting ±5 COB buckets
|
||||||
|
- Ensure all models receive identical input format for consistency
|
||||||
|
- _Requirements: 1.2, 1.3, 8.1_
|
||||||
|
|
||||||
|
- [ ] 1.2. Implement extensible model output storage
|
||||||
|
- Create standardized ModelOutput data structure
|
||||||
|
- Support CNN, RL, LSTM, Transformer, and future model types
|
||||||
|
- Include model-specific predictions and cross-model hidden states
|
||||||
|
- Add metadata support for extensible model information
|
||||||
|
- _Requirements: 1.10, 8.2_
|
||||||
|
|
||||||
|
- [ ] 1.3. Enhance Williams Market Structure pivot point calculation
|
||||||
|
- Extend existing williams_market_structure.py implementation
|
||||||
|
- Improve recursive pivot point calculation accuracy
|
||||||
|
- Add unit tests to verify pivot point detection
|
||||||
|
- Integrate with COB data for enhanced pivot detection
|
||||||
|
- _Requirements: 1.5, 2.7_
|
||||||
|
|
||||||
|
- [-] 1.4. Optimize real-time data streaming with COB integration
|
||||||
|
- Enhance existing WebSocket connections in enhanced_cob_websocket.py
|
||||||
|
- Implement 10Hz COB data streaming alongside OHLCV data
|
||||||
|
- Add data synchronization across different refresh rates
|
||||||
|
- Ensure thread-safe access to multi-rate data streams
|
||||||
|
- _Requirements: 1.6, 8.5_
|
||||||
|
|
||||||
|
- [ ] 1.5. Fix WebSocket COB data processing errors
|
||||||
|
- Fix 'NoneType' object has no attribute 'append' errors in COB data processing
|
||||||
|
- Ensure proper initialization of data structures in MultiExchangeCOBProvider
|
||||||
|
- Add validation and defensive checks before accessing data structures
|
||||||
|
- Implement proper error handling for WebSocket data processing
|
||||||
|
- _Requirements: 1.1, 1.6, 8.5_
|
||||||
|
|
||||||
|
- [ ] 1.6. Enhance error handling in COB data processing
|
||||||
|
- Add validation for incoming WebSocket data
|
||||||
|
- Implement reconnection logic with exponential backoff
|
||||||
|
- Add detailed logging for debugging COB data issues
|
||||||
|
- Ensure system continues operation with last valid data during failures
|
||||||
|
- _Requirements: 1.6, 8.5_
|
||||||
|
|
||||||
|
## Enhanced CNN Model Implementation
|
||||||
|
|
||||||
|
- [ ] 2. Enhance the existing CNN model with standardized inputs/outputs
|
||||||
|
- Extend the current implementation in NN/models/enhanced_cnn.py
|
||||||
|
- Accept standardized COB+OHLCV data frame: 300 frames (1s,1m,1h,1d) ETH + 300s 1s BTC
|
||||||
|
- Include COB ±20 buckets and MA (1s,5s,15s,60s) of COB imbalance ±5 buckets
|
||||||
|
- Output BUY/SELL trading action with confidence scores - _Requirements: 2.1, 2.2, 2.8, 1.10_
|
||||||
|
|
||||||
|
- [x] 2.1. Implement CNN inference with standardized input format
|
||||||
|
- Accept BaseDataInput with standardized COB+OHLCV format
|
||||||
|
- Process 300 frames of multi-timeframe data with COB buckets
|
||||||
|
- Output BUY/SELL recommendations with confidence scores
|
||||||
|
- Make hidden layer states available for cross-model feeding
|
||||||
|
- Optimize inference performance for real-time processing
|
||||||
|
- _Requirements: 2.2, 2.6, 2.8, 4.3_
|
||||||
|
|
||||||
|
- [x] 2.2. Enhance CNN training pipeline with checkpoint management
|
||||||
|
- Integrate with checkpoint manager for training progress persistence
|
||||||
|
- Store top 5-10 best checkpoints based on performance metrics
|
||||||
|
- Automatically load best checkpoint at startup
|
||||||
|
- Implement training triggers based on orchestrator feedback
|
||||||
|
- Store metadata with checkpoints for performance tracking
|
||||||
|
- _Requirements: 2.4, 2.5, 5.2, 5.3, 5.7_
|
||||||
|
|
||||||
|
- [ ] 2.3. Implement CNN model evaluation and checkpoint optimization
|
||||||
|
- Create evaluation methods using standardized input/output format
|
||||||
|
- Implement performance metrics for checkpoint ranking
|
||||||
|
- Add validation against historical trading outcomes
|
||||||
|
- Support automatic checkpoint cleanup (keep only top performers)
|
||||||
|
- Track model improvement over time through checkpoint metadata
|
||||||
|
- _Requirements: 2.5, 5.8, 4.4_
|
||||||
|
|
||||||
|
## Enhanced RL Model Implementation
|
||||||
|
|
||||||
|
- [ ] 3. Enhance the existing RL model with standardized inputs/outputs
|
||||||
|
- Extend the current implementation in NN/models/dqn_agent.py
|
||||||
|
- Accept standardized COB+OHLCV data frame: 300 frames (1s,1m,1h,1d) ETH + 300s 1s BTC
|
||||||
|
- Include COB ±20 buckets and MA (1s,5s,15s,60s) of COB imbalance ±5 buckets
|
||||||
|
- Output BUY/SELL trading action with confidence scores
|
||||||
|
- _Requirements: 3.1, 3.2, 3.7, 1.10_
|
||||||
|
|
||||||
|
- [ ] 3.1. Implement RL inference with standardized input format
|
||||||
|
- Accept BaseDataInput with standardized COB+OHLCV format
|
||||||
|
- Process CNN hidden states and predictions as part of state input
|
||||||
|
- Output BUY/SELL recommendations with confidence scores
|
||||||
|
- Include expected rewards and value estimates in output
|
||||||
|
- Optimize inference performance for real-time processing
|
||||||
|
- _Requirements: 3.2, 3.7, 4.3_
|
||||||
|
|
||||||
|
- [ ] 3.2. Enhance RL training pipeline with checkpoint management
|
||||||
|
- Integrate with checkpoint manager for training progress persistence
|
||||||
|
- Store top 5-10 best checkpoints based on trading performance metrics
|
||||||
|
- Automatically load best checkpoint at startup
|
||||||
|
- Implement experience replay with profitability-based prioritization
|
||||||
|
- Store metadata with checkpoints for performance tracking
|
||||||
|
- _Requirements: 3.3, 3.5, 5.4, 5.7, 4.4_
|
||||||
|
|
||||||
|
- [ ] 3.3. Implement RL model evaluation and checkpoint optimization
|
||||||
|
- Create evaluation methods using standardized input/output format
|
||||||
|
- Implement trading performance metrics for checkpoint ranking
|
||||||
|
- Add validation against historical trading opportunities
|
||||||
|
- Support automatic checkpoint cleanup (keep only top performers)
|
||||||
|
- Track model improvement over time through checkpoint metadata
|
||||||
|
- _Requirements: 3.3, 5.8, 4.4_
|
||||||
|
|
||||||
|
## Enhanced Orchestrator Implementation
|
||||||
|
|
||||||
|
- [ ] 4. Enhance the existing orchestrator with centralized coordination
|
||||||
|
- Extend the current implementation in core/orchestrator.py
|
||||||
|
- Implement DataSubscriptionManager for multi-rate data streams
|
||||||
|
- Add ModelInferenceCoordinator for cross-model coordination
|
||||||
|
- Create ModelOutputStore for extensible model output management
|
||||||
|
- Add TrainingPipelineManager for continuous learning coordination
|
||||||
|
- _Requirements: 4.1, 4.2, 4.5, 8.1_
|
||||||
|
|
||||||
|
- [ ] 4.1. Implement data subscription and management system
|
||||||
|
- Create DataSubscriptionManager class
|
||||||
|
- Subscribe to 10Hz COB data, OHLCV, market ticks, and technical indicators
|
||||||
|
- Implement intelligent caching for "last updated" data serving
|
||||||
|
- Maintain synchronized base dataframe across different refresh rates
|
||||||
|
- Add thread-safe access to multi-rate data streams
|
||||||
|
- _Requirements: 4.1, 1.6, 8.5_
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
- [ ] 4.2. Implement model inference coordination
|
||||||
|
- Create ModelInferenceCoordinator class
|
||||||
|
- Trigger model inference based on data availability and requirements
|
||||||
|
- Coordinate parallel inference execution for independent models
|
||||||
|
- Handle model dependencies (e.g., RL waiting for CNN hidden states)
|
||||||
|
- Assemble appropriate input data for each model type
|
||||||
|
- _Requirements: 4.2, 3.1, 2.1_
|
||||||
|
|
||||||
|
- [ ] 4.3. Implement model output storage and cross-feeding
|
||||||
|
- Create ModelOutputStore class using standardized ModelOutput format
|
||||||
|
- Store CNN predictions, confidence scores, and hidden layer states
|
||||||
|
- Store RL action recommendations and value estimates
|
||||||
|
- Support extensible storage for LSTM, Transformer, and future models
|
||||||
|
- Implement cross-model feeding of hidden states and predictions
|
||||||
|
- Include "last predictions" from all models in base data input
|
||||||
|
- _Requirements: 4.3, 1.10, 8.2_
|
||||||
|
|
||||||
|
- [ ] 4.4. Implement training pipeline management
|
||||||
|
- Create TrainingPipelineManager class
|
||||||
|
- Call each model's training pipeline with prediction-result pairs
|
||||||
|
- Manage training data collection and labeling
|
||||||
|
- Coordinate online learning updates based on real-time performance
|
||||||
|
- Track prediction accuracy and trigger retraining when needed
|
||||||
|
- _Requirements: 4.4, 5.2, 5.4, 5.7_
|
||||||
|
|
||||||
|
- [ ] 4.5. Implement enhanced decision-making with MoE
|
||||||
|
- Create enhanced DecisionMaker class
|
||||||
|
- Implement Mixture of Experts approach for model integration
|
||||||
|
- Apply confidence-based filtering to avoid uncertain trades
|
||||||
|
- Support configurable thresholds for buy/sell decisions
|
||||||
|
- Consider market conditions and risk parameters in decisions
|
||||||
|
- _Requirements: 4.5, 4.8, 6.7_
|
||||||
|
|
||||||
|
- [ ] 4.6. Implement extensible model integration architecture
|
||||||
|
- Create MoEGateway class supporting dynamic model addition
|
||||||
|
- Support CNN, RL, LSTM, Transformer model types without architecture changes
|
||||||
|
- Implement model versioning and rollback capabilities
|
||||||
|
- Handle model failures and fallback mechanisms
|
||||||
|
- Provide model performance monitoring and alerting
|
||||||
|
- _Requirements: 4.6, 8.2, 8.3_
|
||||||
|
|
||||||
|
## Model Inference Data Validation and Storage
|
||||||
|
|
||||||
|
- [x] 5. Implement comprehensive inference data validation system
|
||||||
|
|
||||||
|
- Create InferenceDataValidator class for input validation
|
||||||
|
- Validate complete OHLCV dataframes for all required timeframes
|
||||||
|
- Check input data dimensions against model requirements
|
||||||
|
- Log missing components and prevent prediction on incomplete data
|
||||||
|
- _Requirements: 9.1, 9.2, 9.3, 9.4_
|
||||||
|
|
||||||
|
- [ ] 5.1. Implement input data validation for all models
|
||||||
|
- Create validation methods for CNN, RL, and future model inputs
|
||||||
|
- Validate OHLCV data completeness (300 frames for 1s, 1m, 1h, 1d)
|
||||||
|
- Validate COB data structure (±20 buckets, MA calculations)
|
||||||
|
- Raise specific validation errors with expected vs actual dimensions
|
||||||
|
- Ensure validation occurs before any model inference
|
||||||
|
- _Requirements: 9.1, 9.4_
|
||||||
|
|
||||||
|
- [x] 5.2. Implement persistent inference history storage
|
||||||
|
|
||||||
|
|
||||||
|
- Create InferenceHistoryStore class for persistent storage
|
||||||
|
- Store complete input data packages with each prediction
|
||||||
|
- Include timestamp, symbol, input features, prediction outputs, confidence scores
|
||||||
|
- Store model internal states for cross-model feeding
|
||||||
|
- Implement compressed storage to minimize footprint
|
||||||
|
- _Requirements: 9.5, 9.6_
|
||||||
|
|
||||||
|
- [x] 5.3. Implement inference history query and retrieval system
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
- Create efficient query mechanisms by symbol, timeframe, and date range
|
||||||
|
- Implement data retrieval for training pipeline consumption
|
||||||
|
- Add data completeness metrics and validation results in storage
|
||||||
|
- Handle storage failures gracefully without breaking prediction flow
|
||||||
|
- _Requirements: 9.7, 11.6_
|
||||||
|
|
||||||
|
## Inference-Training Feedback Loop Implementation
|
||||||
|
|
||||||
|
- [ ] 6. Implement prediction outcome evaluation system
|
||||||
|
- Create PredictionOutcomeEvaluator class
|
||||||
|
- Evaluate prediction accuracy against actual price movements
|
||||||
|
- Create training examples using stored inference data and actual outcomes
|
||||||
|
- Feed prediction-result pairs back to respective models
|
||||||
|
- _Requirements: 10.1, 10.2, 10.3_
|
||||||
|
|
||||||
|
- [ ] 6.1. Implement adaptive learning signal generation
|
||||||
|
- Create positive reinforcement signals for accurate predictions
|
||||||
|
- Generate corrective training signals for inaccurate predictions
|
||||||
|
- Retrieve last inference data for each model for outcome comparison
|
||||||
|
- Implement model-specific learning signal formats
|
||||||
|
- _Requirements: 10.4, 10.5, 10.6_
|
||||||
|
|
||||||
|
- [ ] 6.2. Implement continuous improvement tracking
|
||||||
|
- Track and report accuracy improvements/degradations over time
|
||||||
|
- Monitor model learning progress through feedback loop
|
||||||
|
- Create performance metrics for inference-training effectiveness
|
||||||
|
- Generate alerts for learning regression or stagnation
|
||||||
|
- _Requirements: 10.7_
|
||||||
|
|
||||||
|
## Inference History Management and Monitoring
|
||||||
|
|
||||||
|
- [ ] 7. Implement comprehensive inference logging and monitoring
|
||||||
|
- Create InferenceMonitor class for logging and alerting
|
||||||
|
- Log inference data storage operations with completeness metrics
|
||||||
|
- Log training outcomes and model performance changes
|
||||||
|
- Alert administrators on data flow issues with specific error details
|
||||||
|
- _Requirements: 11.1, 11.2, 11.3_
|
||||||
|
|
||||||
|
- [ ] 7.1. Implement configurable retention policies
|
||||||
|
- Create RetentionPolicyManager class
|
||||||
|
- Archive or remove oldest entries when limits are reached
|
||||||
|
- Prioritize keeping most recent and valuable training examples
|
||||||
|
- Implement storage space monitoring and alerts
|
||||||
|
- _Requirements: 11.4, 11.7_
|
||||||
|
|
||||||
|
- [ ] 7.2. Implement efficient historical data management
|
||||||
|
- Compress inference data to minimize storage footprint
|
||||||
|
- Maintain accessibility for training and analysis
|
||||||
|
- Implement efficient query mechanisms for historical analysis
|
||||||
|
- Add data archival and restoration capabilities
|
||||||
|
- _Requirements: 11.5, 11.6_
|
||||||
|
|
||||||
|
## Trading Executor Implementation
|
||||||
|
|
||||||
|
- [ ] 5. Design and implement the trading executor
|
||||||
|
- Create a TradingExecutor class that accepts trading actions from the orchestrator
|
||||||
|
- Implement order execution through brokerage APIs
|
||||||
|
- Add order lifecycle management
|
||||||
|
- _Requirements: 7.1, 7.2, 8.6_
|
||||||
|
|
||||||
|
- [ ] 5.1. Implement brokerage API integrations
|
||||||
|
- Create a BrokerageAPI interface
|
||||||
|
- Implement concrete classes for MEXC and Binance
|
||||||
|
- Add error handling and retry mechanisms
|
||||||
|
- _Requirements: 7.1, 7.2, 8.6_
|
||||||
|
|
||||||
|
- [ ] 5.2. Implement order management
|
||||||
|
- Create an OrderManager class
|
||||||
|
- Implement methods for creating, updating, and canceling orders
|
||||||
|
- Add order tracking and status updates
|
||||||
|
- _Requirements: 7.1, 7.2, 8.6_
|
||||||
|
|
||||||
|
- [ ] 5.3. Implement error handling
|
||||||
|
- Add comprehensive error handling for API failures
|
||||||
|
- Implement circuit breakers for extreme market conditions
|
||||||
|
- Add logging and notification mechanisms
|
||||||
|
- _Requirements: 7.1, 7.2, 8.6_
|
||||||
|
|
||||||
|
## Risk Manager Implementation
|
||||||
|
|
||||||
|
- [ ] 6. Design and implement the risk manager
|
||||||
|
- Create a RiskManager class
|
||||||
|
- Implement risk parameter management
|
||||||
|
- Add risk metric calculation
|
||||||
|
- _Requirements: 7.1, 7.3, 7.4_
|
||||||
|
|
||||||
|
- [ ] 6.1. Implement stop-loss functionality
|
||||||
|
- Create a StopLossManager class
|
||||||
|
- Implement methods for creating and managing stop-loss orders
|
||||||
|
- Add mechanisms to automatically close positions when stop-loss is triggered
|
||||||
|
- _Requirements: 7.1, 7.2_
|
||||||
|
|
||||||
|
- [ ] 6.2. Implement position sizing
|
||||||
|
- Create a PositionSizer class
|
||||||
|
- Implement methods for calculating position sizes based on risk parameters
|
||||||
|
- Add validation to ensure position sizes are within limits
|
||||||
|
- _Requirements: 7.3, 7.7_
|
||||||
|
|
||||||
|
- [ ] 6.3. Implement risk metrics
|
||||||
|
- Add methods to calculate risk metrics (drawdown, VaR, etc.)
|
||||||
|
- Implement real-time risk monitoring
|
||||||
|
- Add alerts for high-risk situations
|
||||||
|
- _Requirements: 7.4, 7.5, 7.6, 7.8_
|
||||||
|
|
||||||
|
## Dashboard Implementation

- [ ] 7. Design and implement the dashboard UI
  - Create a Dashboard class
  - Implement the web-based UI using Flask/Dash
  - Add real-time updates using WebSockets
  - _Requirements: 6.1, 6.8_

- [ ] 7.1. Implement chart management
  - Create a ChartManager class
  - Implement methods for creating and updating charts
  - Add interactive features (zoom, pan, etc.)
  - _Requirements: 6.1, 6.2_

- [ ] 7.2. Implement control panel
  - Create a ControlPanel class
  - Implement start/stop toggles for system processes
  - Add sliders for adjusting buy/sell thresholds
  - _Requirements: 6.6, 6.7_

- [ ] 7.3. Implement system status display
  - Add methods to display training progress
  - Implement model performance metrics visualization
  - Add real-time system status updates
  - _Requirements: 6.5, 5.6_

- [ ] 7.4. Implement server-side processing
  - Ensure all processes run on the server without requiring the dashboard to be open
  - Implement background tasks for model training and inference
  - Add mechanisms to persist system state
  - _Requirements: 6.8, 5.5_

## Integration and Testing

- [ ] 8. Integrate all components
  - Connect the data provider to the CNN and RL models
  - Connect the CNN and RL models to the orchestrator
  - Connect the orchestrator to the trading executor
  - _Requirements: 8.1, 8.2, 8.3_

- [ ] 8.1. Implement comprehensive unit tests
  - Create unit tests for each component
  - Implement test fixtures and mocks
  - Add test coverage reporting
  - _Requirements: 8.1, 8.2, 8.3_

- [ ] 8.2. Implement integration tests
  - Create tests for component interactions
  - Implement end-to-end tests
  - Add performance benchmarks
  - _Requirements: 8.1, 8.2, 8.3_

- [ ] 8.3. Implement backtesting framework
  - Create a backtesting environment
  - Implement methods to replay historical data
  - Add performance metrics calculation
  - _Requirements: 5.8, 8.1_

- [ ] 8.4. Optimize performance
  - Profile the system to identify bottlenecks
  - Implement optimizations for critical paths
  - Add caching and parallelization where appropriate
  - _Requirements: 8.1, 8.2, 8.3_

350  .kiro/specs/ui-stability-fix/design.md  Normal file
@@ -0,0 +1,350 @@

# Design Document

## Overview

The UI Stability Fix implements a comprehensive solution to resolve critical stability issues between the dashboard UI and training processes. The design focuses on complete process isolation, proper async/await handling, resource conflict resolution, and robust error handling. The solution ensures that the dashboard can operate independently without affecting training system stability.

## Architecture

### High-Level Architecture

```mermaid
graph TB
    subgraph "Training Process"
        TP[Training Process]
        TM[Training Models]
        TD[Training Data]
        TL[Training Logs]
    end

    subgraph "Dashboard Process"
        DP[Dashboard Process]
        DU[Dashboard UI]
        DC[Dashboard Cache]
        DL[Dashboard Logs]
    end

    subgraph "Shared Resources"
        SF[Shared Files]
        SC[Shared Config]
        SM[Shared Models]
        SD[Shared Data]
    end

    TP --> SF
    DP --> SF
    TP --> SC
    DP --> SC
    TP --> SM
    DP --> SM
    TP --> SD
    DP --> SD

    TP -.->|No Direct Connection| DP
```

### Process Isolation Design

The system will implement complete process isolation using:

1. **Separate Python Processes**: Dashboard and training run as independent processes
2. **Inter-Process Communication**: File-based communication for status and data sharing
3. **Resource Partitioning**: Separate resource allocation for each process
4. **Independent Lifecycle Management**: Each process can start, stop, and restart independently

### Async/Await Error Resolution

The design addresses async issues through:

1. **Proper Event Loop Management**: Single event loop per process with proper lifecycle
2. **Async Context Isolation**: Separate async contexts for different components
3. **Coroutine Handling**: Proper awaiting of all async operations
4. **Exception Propagation**: Proper async exception handling and propagation

## Components and Interfaces

### 1. Process Manager

**Purpose**: Manages the lifecycle of both dashboard and training processes

**Interface**:
```python
class ProcessManager:
    def start_training_process(self) -> bool
    def start_dashboard_process(self, port: int = 8050) -> bool
    def stop_training_process(self) -> bool
    def stop_dashboard_process(self) -> bool
    def get_process_status(self) -> Dict[str, str]
    def restart_process(self, process_name: str) -> bool
```

**Implementation Details**:
- Uses subprocess.Popen for process creation
- Monitors process health with periodic checks
- Handles process output logging and error capture
- Implements graceful shutdown with timeout handling (see the sketch below)

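The bullets above name subprocess.Popen, health checks, and graceful shutdown with a timeout, but do not show the wiring. The following is a hedged sketch of how those pieces could fit together; the entry-point scripts (`run_training.py`, `run_dashboard.py`) and the timeout values are illustrative assumptions, not the project's actual entry points.

```python
import subprocess
import sys
from typing import Dict, List, Optional

class ProcessManager:
    """Sketch only: manages two independent OS processes via subprocess.Popen."""

    def __init__(self) -> None:
        self._procs: Dict[str, Optional[subprocess.Popen]] = {"training": None, "dashboard": None}

    def _start(self, name: str, args: List[str]) -> bool:
        if self._procs.get(name) is not None and self._procs[name].poll() is None:
            return True  # already running
        self._procs[name] = subprocess.Popen(
            [sys.executable, *args],
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,  # capture output for logging
        )
        return True

    def start_training_process(self) -> bool:
        return self._start("training", ["run_training.py"])  # assumed entry point

    def start_dashboard_process(self, port: int = 8050) -> bool:
        return self._start("dashboard", ["run_dashboard.py", "--port", str(port)])  # assumed entry point

    def get_process_status(self) -> Dict[str, str]:
        # poll() is None while the child is alive; otherwise it holds the exit code
        return {n: ("running" if p is not None and p.poll() is None else "stopped")
                for n, p in self._procs.items()}

    def _stop(self, name: str, timeout: float = 10.0) -> bool:
        proc = self._procs.get(name)
        if proc is None or proc.poll() is not None:
            return True
        proc.terminate()                 # ask for a graceful shutdown first
        try:
            proc.wait(timeout=timeout)
        except subprocess.TimeoutExpired:
            proc.kill()                  # escalate only after the timeout
        return True

    def stop_training_process(self) -> bool:
        return self._stop("training")

    def stop_dashboard_process(self) -> bool:
        return self._stop("dashboard")

    def restart_process(self, process_name: str) -> bool:
        self._stop(process_name)
        starter = {"training": self.start_training_process, "dashboard": self.start_dashboard_process}
        return starter[process_name]()
```
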
### 2. Isolated Dashboard

**Purpose**: Provides a completely isolated dashboard that doesn't interfere with training

**Interface**:
```python
class IsolatedDashboard:
    def __init__(self, config: Dict[str, Any])
    def start_server(self, host: str, port: int) -> None
    def stop_server(self) -> None
    def update_data_from_files(self) -> None
    def get_training_status(self) -> Dict[str, Any]
```

**Implementation Details**:
- Runs in a separate process with its own event loop
- Reads data from shared files instead of direct memory access
- Uses file-based communication for training status
- Implements proper async/await patterns for all operations

### 3. Isolated Training Process

**Purpose**: Runs training completely isolated from UI components

**Interface**:
```python
class IsolatedTrainingProcess:
    def __init__(self, config: Dict[str, Any])
    def start_training(self) -> None
    def stop_training(self) -> None
    def get_training_metrics(self) -> Dict[str, Any]
    def save_status_to_file(self) -> None
```

**Implementation Details**:
- No UI dependencies or imports
- Writes status and metrics to shared files
- Implements proper resource cleanup
- Uses separate logging configuration

### 4. Shared Data Manager

**Purpose**: Manages data sharing between processes through files

**Interface**:
```python
class SharedDataManager:
    def write_training_status(self, status: Dict[str, Any]) -> None
    def read_training_status(self) -> Dict[str, Any]
    def write_market_data(self, data: Dict[str, Any]) -> None
    def read_market_data(self) -> Dict[str, Any]
    def write_model_metrics(self, metrics: Dict[str, Any]) -> None
    def read_model_metrics(self) -> Dict[str, Any]
```

**Implementation Details**:
- Uses JSON files for structured data
- Implements file locking to prevent corruption
- Provides atomic write operations (see the sketch below)
- Includes data validation and error handling

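The atomic-write and file-locking bullets are the crux of safe inter-process sharing. A minimal sketch of one way to do it on POSIX systems follows: write to a temporary file, fsync, then `os.replace` so readers never observe a half-written file, with an advisory `fcntl` lock serializing writers. The file paths and the use of `fcntl` (POSIX-only) are assumptions; on Windows a different primitive such as `msvcrt.locking` or a lock-file scheme would be needed.

```python
import fcntl
import json
import os
import tempfile
from pathlib import Path
from typing import Any, Dict, Optional

def write_json_atomic(path: Path, payload: Dict[str, Any]) -> None:
    """Sketch: atomically replace `path` with the serialized payload."""
    path.parent.mkdir(parents=True, exist_ok=True)
    lock_path = path.with_suffix(path.suffix + ".lock")
    with open(lock_path, "w") as lock:
        fcntl.flock(lock, fcntl.LOCK_EX)          # serialize concurrent writers
        fd, tmp = tempfile.mkstemp(dir=path.parent)
        try:
            with os.fdopen(fd, "w") as f:
                json.dump(payload, f)
                f.flush()
                os.fsync(f.fileno())              # make sure the bytes hit disk
            os.replace(tmp, path)                  # atomic rename on the same filesystem
        finally:
            if os.path.exists(tmp):
                os.unlink(tmp)
            fcntl.flock(lock, fcntl.LOCK_UN)

def read_json(path: Path, default: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Readers only ever see a complete file thanks to os.replace."""
    try:
        return json.loads(path.read_text())
    except (FileNotFoundError, json.JSONDecodeError):
        return default or {}

# Usage sketch: training publishes status, the dashboard polls it
write_json_atomic(Path("shared/training_status.json"), {"is_running": True, "epoch": 12})
print(read_json(Path("shared/training_status.json")))
```
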
### 5. Resource Manager

**Purpose**: Manages resource allocation and prevents conflicts

**Interface**:
```python
class ResourceManager:
    def allocate_gpu_resources(self, process_name: str) -> bool
    def release_gpu_resources(self, process_name: str) -> None
    def check_memory_usage(self) -> Dict[str, float]
    def enforce_resource_limits(self) -> None
```

**Implementation Details**:
- Monitors GPU memory usage per process
- Implements resource quotas and limits
- Provides resource conflict detection
- Includes automatic resource cleanup

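As a hedged illustration of the monitoring bullet, `check_memory_usage` could be backed by `psutil` for per-process resident memory and, when PyTorch with CUDA is present, `torch.cuda.memory_allocated` for GPU usage. Both dependencies and the PID-keyed signature are assumptions for this sketch; the real implementation may track resources differently.

```python
from typing import Dict

import psutil  # assumed to be available in the environment

def check_memory_usage(pids: Dict[str, int]) -> Dict[str, float]:
    """Return resident memory in MB for each named process (sketch only)."""
    usage: Dict[str, float] = {}
    for name, pid in pids.items():
        try:
            rss = psutil.Process(pid).memory_info().rss
            usage[name] = rss / (1024 * 1024)
        except psutil.NoSuchProcess:
            usage[name] = 0.0
    # GPU memory allocated by the current process, if torch with CUDA is installed
    try:
        import torch
        if torch.cuda.is_available():
            usage["gpu_allocated_mb"] = torch.cuda.memory_allocated() / (1024 * 1024)
    except ImportError:
        pass
    return usage

print(check_memory_usage({"dashboard": 1234, "training": 5678}))  # illustrative PIDs
```
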
### 6. Async Handler

**Purpose**: Properly handles all async operations in the dashboard

**Interface**:
```python
class AsyncHandler:
    def __init__(self, loop: asyncio.AbstractEventLoop)
    async def handle_orchestrator_connection(self) -> None
    async def handle_cob_integration(self) -> None
    async def handle_trading_decisions(self, decision: Dict) -> None
    def run_async_safely(self, coro: Coroutine) -> Any
```

**Implementation Details**:
- Manages a single event loop per process
- Provides proper exception handling for async operations
- Implements timeout handling for long-running operations (see the sketch below)
- Includes async context management

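`run_async_safely` is where the "asyncio.Future, a coroutine or an awaitable is required" class of errors gets contained. One way to realize the single-event-loop rule is to run the loop on a dedicated thread and bridge synchronous callers through `asyncio.run_coroutine_threadsafe` with a timeout. This is a sketch under those assumptions, not the project's actual handler; to stay self-contained it creates and owns its loop rather than receiving one in `__init__`.

```python
import asyncio
import logging
import threading
from typing import Any, Coroutine

logger = logging.getLogger(__name__)

class AsyncHandler:
    """Sketch: one event loop per process, driven by a background thread."""

    def __init__(self) -> None:
        self._loop = asyncio.new_event_loop()
        self._thread = threading.Thread(target=self._loop.run_forever, daemon=True)
        self._thread.start()

    def run_async_safely(self, coro: Coroutine, timeout: float = 30.0) -> Any:
        """Run a coroutine on the single loop from synchronous code, with a timeout."""
        if not asyncio.iscoroutine(coro):
            raise TypeError("run_async_safely expects a coroutine")
        future = asyncio.run_coroutine_threadsafe(coro, self._loop)
        try:
            return future.result(timeout=timeout)
        except Exception:
            logger.exception("Async operation failed")
            future.cancel()
            return None

    def shutdown(self) -> None:
        self._loop.call_soon_threadsafe(self._loop.stop)
        self._thread.join(timeout=5.0)

# Usage sketch
async def fetch_status() -> dict:
    await asyncio.sleep(0.1)
    return {"ok": True}

handler = AsyncHandler()
print(handler.run_async_safely(fetch_status()))
handler.shutdown()
```
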
## Data Models

### Process Status Model
```python
@dataclass
class ProcessStatus:
    name: str
    pid: int
    status: str  # 'running', 'stopped', 'error'
    start_time: datetime
    last_heartbeat: datetime
    memory_usage: float
    cpu_usage: float
    error_message: Optional[str] = None
```

### Training Status Model
```python
@dataclass
class TrainingStatus:
    is_running: bool
    current_epoch: int
    total_epochs: int
    loss: float
    accuracy: float
    last_update: datetime
    model_path: str
    error_message: Optional[str] = None
```

### Dashboard State Model
```python
@dataclass
class DashboardState:
    is_connected: bool
    last_data_update: datetime
    active_connections: int
    error_count: int
    performance_metrics: Dict[str, float]
```

## Error Handling

### Exception Hierarchy
```python
class UIStabilityError(Exception):
    """Base exception for UI stability issues"""
    pass

class ProcessCommunicationError(UIStabilityError):
    """Error in inter-process communication"""
    pass

class AsyncOperationError(UIStabilityError):
    """Error in async operation handling"""
    pass

class ResourceConflictError(UIStabilityError):
    """Error due to resource conflicts"""
    pass
```

### Error Recovery Strategies

1. **Automatic Retry**: For transient network and file I/O errors (a backoff sketch follows this list)
2. **Graceful Degradation**: Fall back to basic functionality when components fail
3. **Process Restart**: Automatically restart failed processes
4. **Circuit Breaker**: Temporarily disable failing components
5. **Rollback**: Revert to the last known good state

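Strategy 1 pairs naturally with the exponential backoff called for in Requirement 5.2 of this spec. A small, generic sketch is shown below; the decorator name and the retried exception types are illustrative assumptions rather than existing project code.

```python
import functools
import logging
import random
import time

logger = logging.getLogger(__name__)

def retry_with_backoff(attempts: int = 5, base_delay: float = 1.0, max_delay: float = 60.0,
                       retry_on: tuple = (OSError, ConnectionError)):
    """Sketch: retry transient failures with exponential backoff plus jitter."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            delay = base_delay
            for attempt in range(1, attempts + 1):
                try:
                    return func(*args, **kwargs)
                except retry_on as exc:
                    if attempt == attempts:
                        raise                           # exhausted: propagate for graceful degradation
                    sleep_for = min(delay, max_delay) + random.uniform(0, delay / 2)
                    logger.warning("Attempt %d/%d failed (%s); retrying in %.1fs",
                                   attempt, attempts, exc, sleep_for)
                    time.sleep(sleep_for)
                    delay *= 2
        return wrapper
    return decorator

@retry_with_backoff(attempts=3)
def read_shared_status() -> str:
    # Placeholder for a transient file or network read
    return "ok"

print(read_shared_status())
```
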
### Error Monitoring

- Centralized error logging with structured format
- Real-time error rate monitoring
- Automatic alerting for critical errors
- Error trend analysis and reporting

## Testing Strategy

### Unit Tests
- Test each component in isolation
- Mock external dependencies
- Verify error handling paths
- Test async operation handling

### Integration Tests
- Test inter-process communication
- Verify resource sharing mechanisms
- Test process lifecycle management
- Validate error recovery scenarios

### System Tests
- End-to-end stability testing
- Load testing with concurrent processes
- Failure injection testing
- Performance regression testing

### Monitoring Tests
- Health check endpoint testing
- Metrics collection validation
- Alert system testing
- Dashboard functionality testing

## Performance Considerations

### Resource Optimization
- Minimize memory footprint of each process
- Optimize file I/O operations for data sharing
- Implement efficient data serialization
- Use connection pooling for external services

### Scalability
- Support multiple dashboard instances
- Handle increased data volume gracefully
- Implement efficient caching strategies
- Optimize for high-frequency updates

### Monitoring
- Real-time performance metrics collection
- Resource usage tracking per process
- Response time monitoring
- Throughput measurement

## Security Considerations

### Process Isolation
- Separate user contexts for processes
- Limited file system access permissions
- Network access restrictions
- Resource usage limits

### Data Protection
- Secure file sharing mechanisms
- Data validation and sanitization
- Access control for shared resources
- Audit logging for sensitive operations

### Communication Security
- Encrypted inter-process communication
- Authentication for API endpoints
- Input validation for all interfaces
- Rate limiting for external requests

## Deployment Strategy

### Development Environment
- Local process management scripts
- Development-specific configuration
- Enhanced logging and debugging
- Hot-reload capabilities

### Production Environment
- Systemd service management
- Production configuration templates
- Log rotation and archiving
- Monitoring and alerting setup

### Migration Plan
1. Deploy new process management components
2. Update configuration files
3. Test process isolation functionality
4. Gradually migrate existing deployments
5. Monitor stability improvements
6. Remove legacy components

111  .kiro/specs/ui-stability-fix/requirements.md  Normal file
@@ -0,0 +1,111 @@

# Requirements Document

## Introduction

The UI Stability Fix addresses critical issues where loading the dashboard UI crashes the training process and causes unhandled exceptions. The system currently suffers from async/await handling problems, threading conflicts, resource contention, and improper separation of concerns between the UI and training processes. This fix will ensure the dashboard can run independently without affecting the training system's stability.

## Requirements

### Requirement 1: Async/Await Error Resolution

**User Story:** As a developer, I want the dashboard to properly handle async operations, so that unhandled exceptions don't crash the entire system.

#### Acceptance Criteria

1. WHEN the dashboard initializes THEN it SHALL properly handle all async operations without throwing "An asyncio.Future, a coroutine or an awaitable is required" errors.
2. WHEN connecting to the orchestrator THEN the system SHALL use proper async/await patterns for all coroutine calls.
3. WHEN starting COB integration THEN the system SHALL properly manage event loops without conflicts.
4. WHEN handling trading decisions THEN async callbacks SHALL be properly awaited and handled.
5. WHEN the dashboard starts THEN it SHALL not create multiple conflicting event loops.
6. WHEN async operations fail THEN the system SHALL handle exceptions gracefully without crashing.

### Requirement 2: Process Isolation

**User Story:** As a user, I want the dashboard and training processes to run independently, so that UI issues don't affect training stability.

#### Acceptance Criteria

1. WHEN the dashboard starts THEN it SHALL run in a completely separate process from the training system.
2. WHEN the dashboard crashes THEN the training process SHALL continue running unaffected.
3. WHEN the training process encounters issues THEN the dashboard SHALL remain functional.
4. WHEN both processes are running THEN they SHALL communicate only through well-defined interfaces (files, APIs, or message queues).
5. WHEN either process restarts THEN the other process SHALL continue operating normally.
6. WHEN resources are accessed THEN there SHALL be no direct shared memory or threading conflicts between processes.

### Requirement 3: Resource Contention Resolution

**User Story:** As a system administrator, I want to eliminate resource conflicts between UI and training, so that both can operate efficiently without interference.

#### Acceptance Criteria

1. WHEN both dashboard and training are running THEN they SHALL not compete for the same GPU resources.
2. WHEN accessing data files THEN proper file locking SHALL prevent corruption or access conflicts.
3. WHEN using network resources THEN rate limiting SHALL prevent API conflicts between processes.
4. WHEN accessing model files THEN proper synchronization SHALL prevent read/write conflicts.
5. WHEN logging THEN separate log files SHALL be used to prevent write conflicts.
6. WHEN using temporary files THEN separate directories SHALL be used for each process.

### Requirement 4: Threading Safety

**User Story:** As a developer, I want all threading operations to be safe and properly managed, so that race conditions and deadlocks don't occur.

#### Acceptance Criteria

1. WHEN the dashboard uses threads THEN all shared data SHALL be properly synchronized.
2. WHEN background updates run THEN they SHALL not interfere with main UI thread operations.
3. WHEN stopping threads THEN proper cleanup SHALL occur without hanging or deadlocks.
4. WHEN accessing shared resources THEN proper locking mechanisms SHALL be used.
5. WHEN threads encounter exceptions THEN they SHALL be handled without crashing the main process.
6. WHEN the dashboard shuts down THEN all threads SHALL be properly terminated.

### Requirement 5: Error Handling and Recovery

**User Story:** As a user, I want the system to handle errors gracefully and recover automatically, so that temporary issues don't cause permanent failures.

#### Acceptance Criteria

1. WHEN unhandled exceptions occur THEN they SHALL be caught and logged without crashing the process.
2. WHEN network connections fail THEN the system SHALL retry with exponential backoff.
3. WHEN data sources are unavailable THEN fallback mechanisms SHALL provide basic functionality.
4. WHEN memory issues occur THEN the system SHALL free resources and continue operating.
5. WHEN critical errors happen THEN the system SHALL attempt automatic recovery.
6. WHEN recovery fails THEN the system SHALL provide clear error messages and graceful degradation.

### Requirement 6: Monitoring and Diagnostics

**User Story:** As a developer, I want comprehensive monitoring and diagnostics, so that I can quickly identify and resolve stability issues.

#### Acceptance Criteria

1. WHEN the system runs THEN it SHALL provide real-time health monitoring for all components.
2. WHEN errors occur THEN detailed diagnostic information SHALL be logged with timestamps and context.
3. WHEN performance issues arise THEN resource usage metrics SHALL be available.
4. WHEN processes communicate THEN message flow SHALL be traceable for debugging.
5. WHEN the system starts THEN startup diagnostics SHALL verify all components are working correctly.
6. WHEN stability issues occur THEN automated alerts SHALL notify administrators.

### Requirement 7: Configuration and Control

**User Story:** As a system administrator, I want flexible configuration options, so that I can optimize system behavior for different environments.

#### Acceptance Criteria

1. WHEN configuring the system THEN separate configuration files SHALL be used for dashboard and training processes.
2. WHEN adjusting resource limits THEN configuration SHALL allow tuning memory, CPU, and GPU usage.
3. WHEN setting update intervals THEN dashboard refresh rates SHALL be configurable.
4. WHEN enabling features THEN individual components SHALL be independently controllable.
5. WHEN debugging THEN log levels SHALL be adjustable without restarting processes.
6. WHEN deploying THEN environment-specific configurations SHALL be supported.

### Requirement 8: Backward Compatibility

**User Story:** As a user, I want the stability fixes to maintain existing functionality, so that current workflows continue to work.

#### Acceptance Criteria

1. WHEN the fixes are applied THEN all existing dashboard features SHALL continue to work.
2. WHEN training processes run THEN they SHALL maintain the same interfaces and outputs.
3. WHEN data is accessed THEN existing data formats SHALL remain compatible.
4. WHEN APIs are used THEN existing endpoints SHALL continue to function.
5. WHEN configurations are loaded THEN existing config files SHALL remain valid.
6. WHEN the system upgrades THEN migration paths SHALL preserve user settings and data.

79  .kiro/specs/ui-stability-fix/tasks.md  Normal file
@@ -0,0 +1,79 @@

# Implementation Plan

- [x] 1. Create Shared Data Manager for inter-process communication
  - Implement JSON-based file sharing with atomic writes and file locking
  - Create data models for training status, dashboard state, and process status
  - Add validation and error handling for all data operations
  - _Requirements: 2.4, 3.4, 5.2_

- [ ] 2. Implement Async Handler for proper async/await management
  - Create centralized async operation handler with single event loop management
  - Fix all async/await patterns in dashboard code
  - Add proper exception handling for async operations with timeout support
  - _Requirements: 1.1, 1.2, 1.3, 1.6_

- [ ] 3. Create Isolated Training Process
  - Extract training logic into standalone process without UI dependencies
  - Implement file-based status reporting and metrics sharing
  - Add proper resource cleanup and error handling
  - _Requirements: 2.1, 2.2, 3.1, 4.5_

- [ ] 4. Create Isolated Dashboard Process
  - Refactor dashboard to run independently with file-based data access
  - Remove direct memory sharing and threading conflicts with training
  - Implement proper process lifecycle management
  - _Requirements: 2.1, 2.3, 4.1, 4.2_

- [ ] 5. Implement Process Manager
  - Create process lifecycle management with subprocess handling
  - Add process monitoring, health checks, and automatic restart capabilities
  - Implement graceful shutdown with proper cleanup
  - _Requirements: 2.5, 5.5, 6.1, 6.6_

- [ ] 6. Create Resource Manager
  - Implement GPU resource allocation and conflict prevention
  - Add memory usage monitoring and resource limits enforcement
  - Create separate logging and temporary file management
  - _Requirements: 3.1, 3.2, 3.5, 3.6_

- [ ] 7. Fix Threading Safety Issues
  - Audit and fix all shared data access with proper synchronization
  - Implement proper thread cleanup and exception handling
  - Remove race conditions and deadlock potential
  - _Requirements: 4.1, 4.2, 4.3, 4.6_

- [ ] 8. Implement Error Handling and Recovery
  - Add comprehensive exception handling with proper logging
  - Create automatic retry mechanisms with exponential backoff
  - Implement fallback mechanisms and graceful degradation
  - _Requirements: 5.1, 5.2, 5.3, 5.6_

- [ ] 9. Create System Launcher and Configuration
  - Build unified launcher script for both processes
  - Create separate configuration files for dashboard and training
  - Add environment-specific configuration support
  - _Requirements: 7.1, 7.2, 7.4, 7.6_

- [ ] 10. Add Monitoring and Diagnostics
  - Implement real-time health monitoring for all components
  - Create detailed diagnostic logging with structured format
  - Add performance metrics collection and resource usage tracking
  - _Requirements: 6.1, 6.2, 6.3, 6.5_

- [ ] 11. Create Integration Tests
  - Write tests for inter-process communication and data sharing
  - Test process lifecycle management and error recovery
  - Validate resource conflict resolution and stability improvements
  - _Requirements: 5.4, 5.5, 6.4, 8.1_

- [ ] 12. Update Documentation and Migration Guide
  - Document new architecture and deployment procedures
  - Create migration guide from existing system
  - Add troubleshooting guide for common stability issues
  - _Requirements: 8.2, 8.5, 8.6_

293  .kiro/specs/websocket-cob-data-fix/design.md  Normal file
@@ -0,0 +1,293 @@

# WebSocket COB Data Fix Design Document

## Overview

This design document outlines the approach to fix the WebSocket COB (Consolidated Order Book) data processing issue in the trading system. The current implementation is failing with `'NoneType' object has no attribute 'append'` errors for both BTC/USDT and ETH/USDT pairs, which indicates that a data structure expected to be a list is actually None. This issue is preventing the dashboard from functioning properly and needs to be addressed to ensure reliable real-time market data processing.

## Architecture

The COB data processing pipeline involves several components:

1. **MultiExchangeCOBProvider**: Collects order book data from exchanges via WebSockets
2. **StandardizedDataProvider**: Extends DataProvider with standardized BaseDataInput functionality
3. **Dashboard Components**: Display COB data in the UI

The error occurs during WebSocket data processing, specifically when trying to append data to a collection that hasn't been properly initialized. The fix will focus on ensuring proper initialization of data structures and implementing robust error handling.

## Components and Interfaces

### 1. MultiExchangeCOBProvider

The `MultiExchangeCOBProvider` class is responsible for collecting order book data from exchanges and distributing it to subscribers. The issue appears to be in the WebSocket data processing logic, where data structures may not be properly initialized before use.

#### Key Issues to Address

1. **Data Structure Initialization**: Ensure all data structures (particularly collections that will have `append` called on them) are properly initialized during object creation.
2. **Subscriber Notification**: Fix the `_notify_cob_subscribers` method to handle edge cases and ensure data is properly formatted before notification.
3. **WebSocket Processing**: Enhance error handling in WebSocket processing methods to prevent cascading failures.

#### Implementation Details

```python
class MultiExchangeCOBProvider:
    def __init__(self, symbols: List[str], exchange_configs: Dict[str, ExchangeConfig]):
        # Existing initialization code...

        # Ensure all data structures are properly initialized
        self.cob_data_cache = {}   # Cache for COB data
        self.cob_subscribers = []  # List of callback functions
        self.exchange_order_books = {}
        self.session_trades = {}
        self.svp_cache = {}

        # Initialize data structures for each symbol
        for symbol in symbols:
            self.cob_data_cache[symbol] = {}
            self.exchange_order_books[symbol] = {}
            self.session_trades[symbol] = []
            self.svp_cache[symbol] = {}

            # Initialize exchange-specific data structures
            for exchange_name in self.active_exchanges:
                self.exchange_order_books[symbol][exchange_name] = {
                    'bids': {},
                    'asks': {},
                    'deep_bids': {},
                    'deep_asks': {},
                    'timestamp': datetime.now(),
                    'deep_timestamp': datetime.now(),
                    'connected': False,
                    'last_update_id': 0
                }

        logger.info(f"Multi-exchange COB provider initialized for symbols: {symbols}")

    async def _notify_cob_subscribers(self, symbol: str, cob_snapshot: Dict):
        """Notify all subscribers of COB data updates with improved error handling"""
        try:
            if not cob_snapshot:
                logger.warning(f"Attempted to notify subscribers with empty COB snapshot for {symbol}")
                return

            for callback in self.cob_subscribers:
                try:
                    if asyncio.iscoroutinefunction(callback):
                        await callback(symbol, cob_snapshot)
                    else:
                        callback(symbol, cob_snapshot)
                except Exception as e:
                    logger.error(f"Error in COB subscriber callback: {e}", exc_info=True)
        except Exception as e:
            logger.error(f"Error notifying COB subscribers: {e}", exc_info=True)
```

### 2. StandardizedDataProvider

The `StandardizedDataProvider` class extends the base `DataProvider` with standardized data input functionality. It needs to properly handle COB data and ensure all data structures are initialized.

#### Key Issues to Address

1. **COB Data Handling**: Ensure proper initialization and validation of COB data structures.
2. **Error Handling**: Improve error handling when processing COB data.
3. **Data Structure Consistency**: Maintain consistent data structures throughout the processing pipeline.

#### Implementation Details

```python
class StandardizedDataProvider(DataProvider):
    def __init__(self, symbols: List[str] = None, timeframes: List[str] = None):
        """Initialize the standardized data provider with proper data structure initialization"""
        super().__init__(symbols, timeframes)

        # Standardized data storage
        self.base_data_cache = {}  # {symbol: BaseDataInput}
        self.cob_data_cache = {}   # {symbol: COBData}

        # Model output management with extensible storage
        self.model_output_manager = ModelOutputManager(
            cache_dir=str(self.cache_dir / "model_outputs"),
            max_history=1000
        )

        # COB moving averages calculation
        self.cob_imbalance_history = {}  # {symbol: deque of (timestamp, imbalance_data)}
        self.ma_calculation_lock = Lock()

        # Initialize caches for each symbol
        for symbol in self.symbols:
            self.base_data_cache[symbol] = None
            self.cob_data_cache[symbol] = None
            self.cob_imbalance_history[symbol] = deque(maxlen=300)  # 5 minutes of 1s data

        # COB provider integration
        self.cob_provider = None
        self._initialize_cob_provider()

        logger.info("StandardizedDataProvider initialized with BaseDataInput support")

    def _process_cob_data(self, symbol: str, cob_snapshot: Dict):
        """Process COB data with improved error handling"""
        try:
            if not cob_snapshot:
                logger.warning(f"Received empty COB snapshot for {symbol}")
                return

            # Process COB data and update caches
            # ...

        except Exception as e:
            logger.error(f"Error processing COB data for {symbol}: {e}", exc_info=True)
```

### 3. WebSocket COB Data Processing

The WebSocket COB data processing logic needs to be enhanced to handle edge cases and ensure proper data structure initialization.

#### Key Issues to Address

1. **WebSocket Connection Management**: Improve connection management to handle disconnections gracefully.
2. **Data Processing**: Ensure data is properly validated before processing.
3. **Error Recovery**: Implement recovery mechanisms for WebSocket failures.

#### Implementation Details

```python
async def _stream_binance_orderbook(self, symbol: str, config: ExchangeConfig):
    """Stream order book data from Binance with improved error handling"""
    reconnect_delay = 1       # Start with 1 second delay
    max_reconnect_delay = 60  # Maximum delay of 60 seconds

    while self.is_streaming:
        try:
            ws_url = f"{config.websocket_url}{config.symbols_mapping[symbol].lower()}@depth20@100ms"
            logger.info(f"Connecting to Binance WebSocket: {ws_url}")

            if websockets is None or websockets_connect is None:
                raise ImportError("websockets module not available")

            async with websockets_connect(ws_url) as websocket:
                # Ensure data structures are initialized
                if symbol not in self.exchange_order_books:
                    self.exchange_order_books[symbol] = {}

                if 'binance' not in self.exchange_order_books[symbol]:
                    self.exchange_order_books[symbol]['binance'] = {
                        'bids': {},
                        'asks': {},
                        'deep_bids': {},
                        'deep_asks': {},
                        'timestamp': datetime.now(),
                        'deep_timestamp': datetime.now(),
                        'connected': False,
                        'last_update_id': 0
                    }

                self.exchange_order_books[symbol]['binance']['connected'] = True
                logger.info(f"Connected to Binance order book stream for {symbol}")

                # Reset reconnect delay on successful connection
                reconnect_delay = 1

                async for message in websocket:
                    if not self.is_streaming:
                        break

                    try:
                        data = json.loads(message)
                        await self._process_binance_orderbook(symbol, data)

                    except json.JSONDecodeError as e:
                        logger.error(f"Error parsing Binance message: {e}")
                    except Exception as e:
                        logger.error(f"Error processing Binance data: {e}", exc_info=True)

        except Exception as e:
            logger.error(f"Binance WebSocket error for {symbol}: {e}", exc_info=True)

            # Mark as disconnected
            if symbol in self.exchange_order_books and 'binance' in self.exchange_order_books[symbol]:
                self.exchange_order_books[symbol]['binance']['connected'] = False

            # Implement exponential backoff for reconnection
            logger.info(f"Reconnecting to Binance WebSocket for {symbol} in {reconnect_delay}s")
            await asyncio.sleep(reconnect_delay)
            reconnect_delay = min(reconnect_delay * 2, max_reconnect_delay)
```

## Data Models

The data models remain unchanged, but we need to ensure they are properly initialized and validated throughout the system.

### COBSnapshot

```python
@dataclass
class COBSnapshot:
    """Complete Consolidated Order Book snapshot"""
    symbol: str
    timestamp: datetime
    consolidated_bids: List[ConsolidatedOrderBookLevel]
    consolidated_asks: List[ConsolidatedOrderBookLevel]
    exchanges_active: List[str]
    volume_weighted_mid: float
    total_bid_liquidity: float
    total_ask_liquidity: float
    spread_bps: float
    liquidity_imbalance: float
    price_buckets: Dict[str, Dict[str, float]]  # Fine-grain volume buckets
```

## Error Handling

### WebSocket Connection Errors

- Implement exponential backoff for reconnection attempts
- Log detailed error information
- Maintain system operation with last valid data

### Data Processing Errors

- Validate data before processing
- Handle edge cases gracefully
- Log detailed error information
- Continue operation with last valid data (a small fallback-cache sketch follows)

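One way to realize "continue operation with last valid data" is a small per-symbol cache that the processing path updates on success and the dashboard falls back to when a fresh snapshot fails validation. The class below is an illustrative sketch, not existing code in the provider; the staleness bound is an assumed value.

```python
import time
from typing import Any, Dict, Optional, Tuple

class LastValidCache:
    """Sketch: keep the most recent valid COB snapshot per symbol, with a staleness bound."""

    def __init__(self, max_age_seconds: float = 30.0) -> None:
        self._data: Dict[str, Tuple[float, Dict[str, Any]]] = {}
        self._max_age = max_age_seconds

    def update(self, symbol: str, snapshot: Dict[str, Any]) -> None:
        if snapshot:                               # only cache non-empty snapshots
            self._data[symbol] = (time.monotonic(), snapshot)

    def get(self, symbol: str) -> Optional[Dict[str, Any]]:
        entry = self._data.get(symbol)
        if not entry:
            return None
        ts, snapshot = entry
        if time.monotonic() - ts > self._max_age:  # too old to be useful for display
            return None
        return snapshot

# Usage sketch inside the processing path:
#   cache.update(symbol, cob_snapshot) on success
#   display_data = cache.get(symbol) when a fresh snapshot fails validation
```
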
### Subscriber Notification Errors

- Catch and log errors in subscriber callbacks
- Prevent errors in one subscriber from affecting others
- Ensure data is properly formatted before notification

## Testing Strategy

### Unit Testing

- Test data structure initialization
- Test error handling in WebSocket processing
- Test subscriber notification with various edge cases

### Integration Testing

- Test end-to-end COB data flow
- Test recovery from WebSocket disconnections
- Test handling of malformed data

### System Testing

- Test dashboard operation with COB data
- Test system stability under high load
- Test recovery from various failure scenarios

## Implementation Plan

1. Fix data structure initialization in `MultiExchangeCOBProvider`
2. Enhance error handling in WebSocket processing
3. Improve subscriber notification logic
4. Update `StandardizedDataProvider` to properly handle COB data
5. Add comprehensive logging for debugging
6. Implement recovery mechanisms for WebSocket failures
7. Test all changes thoroughly

## Conclusion

This design addresses the WebSocket COB data processing issue by ensuring proper initialization of data structures, implementing robust error handling, and adding recovery mechanisms for WebSocket failures. These changes will improve the reliability and stability of the trading system, allowing traders to monitor market data in real-time without interruptions.

43  .kiro/specs/websocket-cob-data-fix/requirements.md  Normal file
@@ -0,0 +1,43 @@

# Requirements Document

## Introduction

The WebSocket COB Data Fix is needed to address a critical issue in the trading system where WebSocket COB (Consolidated Order Book) data processing is failing with the error `'NoneType' object has no attribute 'append'`. This error is occurring for both BTC/USDT and ETH/USDT pairs and is preventing the dashboard from functioning properly. The fix will ensure proper initialization and handling of data structures in the COB data processing pipeline.

## Requirements

### Requirement 1: Fix WebSocket COB Data Processing

**User Story:** As a trader, I want the WebSocket COB data processing to work reliably without errors, so that I can monitor market data in real-time and make informed trading decisions.

#### Acceptance Criteria

1. WHEN WebSocket COB data is received for any trading pair THEN the system SHALL process it without throwing 'NoneType' object has no attribute 'append' errors
2. WHEN the dashboard is started THEN all data structures for COB processing SHALL be properly initialized
3. WHEN COB data is processed THEN the system SHALL handle edge cases such as missing or incomplete data gracefully
4. WHEN a WebSocket connection is established THEN the system SHALL verify that all required data structures are initialized before processing data
5. WHEN COB data is being processed THEN the system SHALL log appropriate debug information to help diagnose any issues

### Requirement 2: Ensure Data Structure Consistency

**User Story:** As a system administrator, I want consistent data structures throughout the COB processing pipeline, so that data can flow smoothly between components without errors.

#### Acceptance Criteria

1. WHEN the multi_exchange_cob_provider initializes THEN it SHALL properly initialize all required data structures
2. WHEN the standardized_data_provider receives COB data THEN it SHALL validate the data structure before processing
3. WHEN COB data is passed between components THEN the system SHALL ensure type consistency
4. WHEN new COB data arrives THEN the system SHALL update the data structures atomically to prevent race conditions
5. WHEN a component subscribes to COB updates THEN the system SHALL verify the subscriber can handle the data format

### Requirement 3: Improve Error Handling and Recovery

**User Story:** As a system operator, I want robust error handling and recovery mechanisms in the COB data processing pipeline, so that temporary failures don't cause the entire system to crash.

#### Acceptance Criteria

1. WHEN an error occurs in COB data processing THEN the system SHALL log detailed error information
2. WHEN a WebSocket connection fails THEN the system SHALL attempt to reconnect automatically
3. WHEN data processing fails THEN the system SHALL continue operation with the last valid data
4. WHEN the system recovers from an error THEN it SHALL restore normal operation without manual intervention
5. WHEN multiple consecutive errors occur THEN the system SHALL implement exponential backoff to prevent overwhelming the system

115  .kiro/specs/websocket-cob-data-fix/tasks.md  Normal file
@@ -0,0 +1,115 @@

# Implementation Plan

- [ ] 1. Fix data structure initialization in MultiExchangeCOBProvider
  - Ensure all collections are properly initialized during object creation
  - Add defensive checks before accessing data structures
  - Implement proper initialization for symbol-specific data structures
  - _Requirements: 1.1, 1.2, 2.1_

- [ ] 1.1. Update MultiExchangeCOBProvider constructor
  - Modify __init__ method to properly initialize all data structures
  - Ensure exchange_order_books is initialized for each symbol and exchange
  - Initialize session_trades and svp_cache for each symbol
  - Add defensive checks to prevent NoneType errors
  - _Requirements: 1.2, 2.1_

- [ ] 1.2. Fix _notify_cob_subscribers method
  - Add validation to ensure cob_snapshot is not None before processing
  - Add defensive checks before accessing cob_snapshot attributes
  - Improve error handling for subscriber callbacks
  - Add detailed logging for debugging
  - _Requirements: 1.1, 1.5, 2.3_

- [ ] 2. Enhance WebSocket data processing in MultiExchangeCOBProvider
  - Improve error handling in WebSocket connection methods
  - Add validation for incoming data
  - Implement reconnection logic with exponential backoff
  - _Requirements: 1.3, 1.4, 3.1, 3.2_

- [ ] 2.1. Update _stream_binance_orderbook method
  - Add data structure initialization checks
  - Implement exponential backoff for reconnection attempts
  - Add detailed error logging
  - Ensure proper cleanup on disconnection
  - _Requirements: 1.4, 3.2, 3.4_

- [ ] 2.2. Fix _process_binance_orderbook method
  - Add validation for incoming data
  - Ensure data structures exist before updating
  - Add defensive checks to prevent NoneType errors
  - Improve error handling and logging
  - _Requirements: 1.1, 1.3, 3.1_

- [ ] 3. Update StandardizedDataProvider to handle COB data properly
  - Improve initialization of COB-related data structures
  - Add validation for COB data
  - Enhance error handling for COB data processing
  - _Requirements: 1.3, 2.2, 2.3_

- [ ] 3.1. Fix _get_cob_data method
  - Add validation for COB provider availability
  - Ensure proper initialization of COB data structures
  - Add defensive checks to prevent NoneType errors
  - Improve error handling and logging
  - _Requirements: 1.3, 2.2, 3.3_

- [ ] 3.2. Update _calculate_cob_moving_averages method
  - Add validation for input data
  - Ensure proper initialization of moving average data structures
  - Add defensive checks to prevent NoneType errors
  - Improve error handling for edge cases
  - _Requirements: 1.3, 2.2, 3.3_

- [ ] 4. Implement recovery mechanisms for WebSocket failures
  - Add state tracking for WebSocket connections
  - Implement automatic reconnection with exponential backoff
  - Add fallback mechanisms for temporary failures
  - _Requirements: 3.2, 3.3, 3.4_

- [ ] 4.1. Add connection state management
  - Track connection state for each WebSocket (see the sketch below)
  - Implement health check mechanism
  - Add reconnection logic based on connection state
  - _Requirements: 3.2, 3.4_

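A compact sketch of what task 4.1's connection state tracking could look like is shown here; the dataclass name, its fields, and the 10-second staleness threshold are assumptions for illustration, not existing project code.

```python
import time
from dataclasses import dataclass, field

@dataclass
class ConnectionState:
    """Sketch: per-WebSocket health record used by a periodic health check."""
    exchange: str
    symbol: str
    connected: bool = False
    last_message_time: float = field(default_factory=time.monotonic)
    consecutive_errors: int = 0

    def record_message(self) -> None:
        self.connected = True
        self.consecutive_errors = 0
        self.last_message_time = time.monotonic()

    def record_error(self) -> None:
        self.consecutive_errors += 1

    def is_healthy(self, max_silence_seconds: float = 10.0) -> bool:
        """Healthy = connected and a message arrived within the staleness window."""
        return self.connected and (time.monotonic() - self.last_message_time) <= max_silence_seconds

state = ConnectionState(exchange="binance", symbol="ETH/USDT")
state.record_message()
print(state.is_healthy())  # True right after a message
```
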
- [ ] 4.2. Implement data recovery mechanisms
  - Add caching for last valid data
  - Implement fallback to cached data during connection issues
  - Add mechanism to rebuild state after reconnection
  - _Requirements: 3.3, 3.4_

- [ ] 5. Add comprehensive logging for debugging
  - Add detailed logging throughout the COB processing pipeline
  - Include context information in log messages
  - Add performance metrics logging
  - _Requirements: 1.5, 3.1_

- [ ] 5.1. Enhance logging in MultiExchangeCOBProvider
  - Add detailed logging for WebSocket connections
  - Log data processing steps and outcomes
  - Add performance metrics for data processing
  - _Requirements: 1.5, 3.1_

- [ ] 5.2. Add logging in StandardizedDataProvider
  - Log COB data processing steps
  - Add validation logging
  - Include performance metrics for data processing
  - _Requirements: 1.5, 3.1_

- [ ] 6. Test all changes thoroughly
  - Write unit tests for fixed components
  - Test integration between components
  - Verify dashboard operation with COB data
  - _Requirements: 1.1, 2.3, 3.4_

- [ ] 6.1. Write unit tests for MultiExchangeCOBProvider
  - Test data structure initialization
  - Test WebSocket processing with mock data
  - Test error handling and recovery
  - _Requirements: 1.1, 1.3, 3.1_

- [ ] 6.2. Test integration with dashboard
  - Verify COB data display in dashboard
  - Test system stability under load
  - Verify recovery from failures
  - _Requirements: 1.1, 3.3, 3.4_

4  .kiro/steering/focus.md  Normal file
@@ -0,0 +1,4 @@
---
inclusion: manual
---
Focus only on web\dashboard.py and its dependencies, besides the usual support files (.env, launch.json, etc.). We're developing this dashboard as the project's main entry point and interaction surface.

3  .kiro/steering/specs.md  Normal file
@@ -0,0 +1,3 @@
---
inclusion: manual
---

90  .vscode/launch.json  vendored
@@ -1,6 +1,7 @@
{
    "version": "0.2.0",
    "configurations": [
<<<<<<< HEAD
        {
            "name": "📊 Dashboard (Real-time + Training)",
            "type": "python",
@@ -20,10 +21,14 @@
            "preLaunchTask": "Kill Stale Processes"
        },

=======

>>>>>>> d49a473ed6f4aef55bfdd47d6370e53582be6b7b
        {
            "name": "🔬 Backtest Training (30 days)",
            "type": "python",
            "request": "launch",
<<<<<<< HEAD
            "program": "main_backtest.py",
            "args": [
                "--start",
@@ -52,10 +57,14 @@
                "--symbol",
                "ETH/USDT"
            ],
=======
            "program": "run_clean_dashboard.py",
>>>>>>> d49a473ed6f4aef55bfdd47d6370e53582be6b7b
            "console": "integratedTerminal",
            "justMyCode": false,
            "env": {
                "PYTHONUNBUFFERED": "1",
<<<<<<< HEAD
                "CUDA_VISIBLE_DEVICES": "0"
            }
        },
@@ -80,6 +89,12 @@
                "PYTHONUNBUFFERED": "1"
            }
        },
=======
                "ENABLE_REALTIME_CHARTS": "1"
            }
        },

>>>>>>> d49a473ed6f4aef55bfdd47d6370e53582be6b7b
        {
            "name": "🏗️ Python Debugger: Current File",
            "type": "debugpy",
@@ -105,6 +120,7 @@
            },
            "preLaunchTask": "Kill Stale Processes"
        },
<<<<<<< HEAD
        {
            "name": "🔥 Real-time RL COB Trader",
            "type": "python",
@@ -137,6 +153,9 @@
            },
            "preLaunchTask": "Kill Stale Processes"
        },
=======

>>>>>>> d49a473ed6f4aef55bfdd47d6370e53582be6b7b
        {
            "name": "🧪 Run Tests",
            "type": "python",
@@ -158,7 +177,38 @@
            "env": {
                "PYTHONUNBUFFERED": "1"
            }
        },

        {
            "name": "🔧 COBY Development Mode (Auto-reload) - main",
            "type": "python",
            "request": "launch",
            "program": "COBY/main.py",
            "args": [
                "--debug",
                "--reload"
            ],
            "console": "integratedTerminal",
            "justMyCode": false,
            "env": {
                "PYTHONUNBUFFERED": "1",
                "COBY_API_HOST": "localhost",
                "COBY_API_PORT": "8080",
                "COBY_WEBSOCKET_PORT": "8081",
                "COBY_LOG_LEVEL": "DEBUG"
            },
            "preLaunchTask": "Kill Stale Processes",
            "presentation": {
                "hidden": false,
                "group": "COBY System",
                "order": 3
            }
        }
<<<<<<< HEAD
=======


>>>>>>> d49a473ed6f4aef55bfdd47d6370e53582be6b7b
    ],
    "compounds": [
        {
@@ -175,17 +225,57 @@
            }
        },
        {
<<<<<<< HEAD
            "name": "🔥 COB Trading System",
            "configurations": [
                "📈 COB Data Provider Dashboard",
                "🔥 Real-time RL COB Trader"
=======
            "name": "💹 Live Trading System (Dashboard + Monitor)",
            "configurations": [
                "💹 Live Scalping Dashboard (500x Leverage)",
                "🌙 Overnight Training Monitor (504M Model)"
>>>>>>> d49a473ed6f4aef55bfdd47d6370e53582be6b7b
            ],
            "stopAll": true,
            "presentation": {
                "hidden": false,
<<<<<<< HEAD
                "group": "COB",
                "order": 2
            }
        }
=======
                "group": "Trading",
                "order": 2
            }
        },

        {
            "name": "🌐 COBY Multi-Exchange System (Full Stack)",
            "configurations": [
                "🌐 COBY Multi-Exchange Data Aggregation"
            ],
            "stopAll": true,
            "presentation": {
                "hidden": false,
                "group": "COBY System",
                "order": 6
            }
        },
        {
            "name": "🔧 COBY Development Environment",
            "configurations": [
                "🔧 COBY Development Mode (Auto-reload)"
            ],
            "stopAll": true,
            "presentation": {
                "hidden": false,
                "group": "COBY System",
                "order": 7
            }
        }

>>>>>>> d49a473ed6f4aef55bfdd47d6370e53582be6b7b
    ]
}

2  .vscode/settings.json  vendored  Normal file
@@ -0,0 +1,2 @@
{
}

7  .vscode/tasks.json  vendored
@@ -6,7 +6,14 @@
            "type": "shell",
            "command": "python",
            "args": [
<<<<<<< HEAD
                "kill_dashboard.py"
=======
                "-ExecutionPolicy",
                "Bypass",
                "-File",
                "scripts/kill_stale_processes.ps1"
>>>>>>> d49a473ed6f4aef55bfdd47d6370e53582be6b7b
            ],
            "group": "build",
            "presentation": {

56  CLEANUP_TODO.md  Normal file
@@ -0,0 +1,56 @@
Cleanup run summary:
- Deleted files: 183
  - NN\__init__.py
  - NN\models\__init__.py
  - NN\models\cnn_model.py
  - NN\models\transformer_model.py
  - NN\start_tensorboard.py
  - NN\training\enhanced_rl_training_integration.py
  - NN\training\example_checkpoint_usage.py
  - NN\training\integrate_checkpoint_management.py
  - NN\utils\__init__.py
  - NN\utils\data_interface.py
  - NN\utils\multi_data_interface.py
  - NN\utils\realtime_analyzer.py
  - NN\utils\signal_interpreter.py
  - NN\utils\trading_env.py
  - _dev\cleanup_models_now.py
  - _tools\build_keep_set.py
  - apply_trading_fixes.py
  - apply_trading_fixes_to_main.py
  - audit_training_system.py
  - balance_trading_signals.py
  - check_live_trading.py
  - check_mexc_symbols.py
  - cleanup_checkpoint_db.py
  - cleanup_checkpoints.py
  - core\__init__.py
  - core\api_rate_limiter.py
  - core\async_handler.py
  - core\bookmap_data_provider.py
  - core\bookmap_integration.py
  - core\cnn_monitor.py
  - core\cnn_training_pipeline.py
  - core\config_sync.py
  - core\enhanced_cnn_adapter.py
  - core\enhanced_cob_websocket.py
  - core\enhanced_orchestrator.py
  - core\enhanced_training_integration.py
  - core\exchanges\__init__.py
  - core\exchanges\binance_interface.py
  - core\exchanges\bybit\debug\test_bybit_balance.py
  - core\exchanges\bybit_interface.py
  - core\exchanges\bybit_rest_client.py
  - core\exchanges\deribit_interface.py
  - core\exchanges\mexc\debug\final_mexc_order_test.py
  - core\exchanges\mexc\debug\fix_mexc_orders.py
  - core\exchanges\mexc\debug\fix_mexc_orders_v2.py
  - core\exchanges\mexc\debug\fix_mexc_orders_v3.py
  - core\exchanges\mexc\debug\test_mexc_interface_debug.py
  - core\exchanges\mexc\debug\test_mexc_order_signature.py
  - core\exchanges\mexc\debug\test_mexc_order_signature_v2.py
  - core\exchanges\mexc\debug\test_mexc_signature_debug.py
  ... and 133 more
- Removed test directories: 1
  - tests
- Kept (excluded): 1

83 COBY/Dockerfile Normal file
@@ -0,0 +1,83 @@
# Multi-stage Docker build for COBY Multi-Exchange Data Aggregation System
FROM python:3.11-slim as base

# Set environment variables
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    PYTHONPATH=/app \
    PIP_NO_CACHE_DIR=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1

# Install system dependencies
RUN apt-get update && apt-get install -y \
    gcc \
    g++ \
    libpq-dev \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Create app user
RUN groupadd -r coby && useradd -r -g coby coby

# Set work directory
WORKDIR /app

# Copy requirements first for better caching
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY . .

# Create necessary directories
RUN mkdir -p logs data && \
    chown -R coby:coby /app

# Switch to non-root user
USER coby

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD python -c "import requests; requests.get('http://localhost:8080/health', timeout=5)" || exit 1

# Default command
CMD ["python", "-m", "COBY.main"]

# Development stage
FROM base as development

USER root

# Install development dependencies
RUN pip install --no-cache-dir pytest pytest-asyncio pytest-cov black flake8 mypy

# Install debugging tools
RUN apt-get update && apt-get install -y \
    vim \
    htop \
    net-tools \
    && rm -rf /var/lib/apt/lists/*

USER coby

# Override command for development
CMD ["python", "-m", "COBY.main", "--debug"]

# Production stage
FROM base as production

# Copy only necessary files for production
COPY --from=base /app /app

# Set production environment
ENV ENVIRONMENT=production \
    DEBUG=false \
    LOG_LEVEL=INFO

# Expose ports
EXPOSE 8080 8081

# Use production command
CMD ["python", "-m", "COBY.main"]
264 COBY/PORTAINER_DEPLOYMENT.md Normal file
@@ -0,0 +1,264 @@
# COBY Portainer Deployment Guide

This guide explains how to deploy the COBY Multi-Exchange Data Aggregation System using Portainer with Git repository integration.

## Prerequisites

- Portainer CE/EE installed and running
- Docker Swarm or Docker Compose environment
- Access to the Git repository containing the COBY project
- Minimum system requirements:
  - 4GB RAM
  - 2 CPU cores
  - 20GB disk space

## Deployment Steps

### 1. Access Portainer

1. Open your Portainer web interface
2. Navigate to your environment (local Docker or Docker Swarm)

### 2. Create Stack from Git Repository

1. Go to **Stacks** in the left sidebar
2. Click **Add stack**
3. Choose **Repository** as the build method
4. Configure the repository settings:

**Repository Configuration:**
- **Repository URL**: `https://github.com/your-username/your-repo.git`
- **Repository reference**: `main` (or your preferred branch)
- **Compose path**: `COBY/docker-compose.portainer.yml`
- **Additional files**: Leave empty (all configs are embedded)

### 3. Configure Environment Variables

In the **Environment variables** section, add the following variables (optional customizations):

```bash
# Database Configuration
DB_PASSWORD=your_secure_database_password
REDIS_PASSWORD=your_secure_redis_password

# API Configuration
API_PORT=8080
WS_PORT=8081

# Monitoring (if using monitoring profile)
PROMETHEUS_PORT=9090
GRAFANA_PORT=3001
GRAFANA_PASSWORD=your_grafana_password

# Performance Tuning
MAX_CONNECTIONS_PER_EXCHANGE=5
DATA_BUFFER_SIZE=10000
BATCH_WRITE_SIZE=1000
```

### 4. Deploy the Stack

1. **Stack name**: Enter `coby-system` (or your preferred name)
2. **Environment variables**: Configure as needed (see above)
3. **Access control**: Set appropriate permissions
4. Click **Deploy the stack**

### 5. Monitor Deployment

1. Watch the deployment logs in Portainer
2. Check that all services start successfully:
   - `coby-timescaledb` (Database)
   - `coby-redis` (Cache)
   - `coby-app` (Main application)
   - `coby-dashboard` (Web interface)

### 6. Verify Installation

Once deployed, verify the installation:

1. **Health Checks**: All services should show as "healthy" in Portainer
2. **Web Dashboard**: Access `http://your-server:8080/` (served by your reverse proxy)
3. **API Endpoint**: Check `http://your-server:8080/health`
4. **Logs**: Review logs for any errors

**Reverse Proxy Configuration**: Configure your reverse proxy to forward requests to the COBY app on port 8080. The application serves both the API and web dashboard from the same port.

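For a quick scripted version of step 6, a minimal Python sketch along these lines can poll the health endpoint and the dashboard root. The host name and port are assumptions taken from the defaults above, not values read from the stack itself:

```python
"""Hypothetical post-deployment smoke test for a COBY stack (assumes the defaults above)."""
import sys

import requests

BASE_URL = "http://your-server:8080"  # assumption: default API/dashboard port from this guide


def check(path: str, expect_status: int = 200) -> bool:
    """Fetch BASE_URL + path and report whether the expected status code came back."""
    url = f"{BASE_URL}{path}"
    try:
        response = requests.get(url, timeout=5)
        ok = response.status_code == expect_status
        print(f"{url} -> {response.status_code} ({'OK' if ok else 'unexpected'})")
        return ok
    except requests.RequestException as exc:
        print(f"{url} -> request failed: {exc}")
        return False


if __name__ == "__main__":
    healthy = check("/health") and check("/")  # health endpoint and dashboard root
    sys.exit(0 if healthy else 1)
```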
## Service Ports

The following ports will be exposed:

- **8080**: REST API + Web Dashboard (served by FastAPI)
- **8081**: WebSocket API
- **5432**: TimescaleDB (optional external access)
- **6379**: Redis (optional external access)

**Note**: The web dashboard is now served directly by the FastAPI application at port 8080, eliminating the need for a separate nginx container since you have a reverse proxy.

## Optional Monitoring Stack

To enable Prometheus and Grafana monitoring:

1. In the stack configuration, add the profile: `monitoring`
2. Additional ports will be exposed:
   - **9090**: Prometheus
   - **3001**: Grafana
   - **9100**: Node Exporter

## Configuration Options

### Resource Limits

The stack includes resource limits for each service:

- **COBY App**: 2GB RAM, 2 CPU cores (includes web dashboard)
- **TimescaleDB**: 1GB RAM, 1 CPU core
- **Redis**: 512MB RAM, 0.5 CPU cores

### Persistent Data

The following volumes are created for persistent data:

- `timescale_data`: Database storage
- `redis_data`: Redis persistence
- `coby_logs`: Application logs
- `coby_data`: Application data
- `prometheus_data`: Metrics storage (if monitoring enabled)
- `grafana_data`: Grafana dashboards (if monitoring enabled)

### Network Configuration

- **Network**: `coby-network` (172.20.0.0/16)
- **Internal communication**: All services communicate via Docker network
- **External access**: Only specified ports are exposed

## Troubleshooting

### Common Issues

1. **Services not starting**:
   - Check resource availability
   - Review service logs in Portainer
   - Verify environment variables

2. **Database connection issues**:
   - Ensure TimescaleDB is healthy
   - Check database credentials
   - Verify network connectivity

3. **Web dashboard not accessible**:
   - Confirm port 8080 is accessible through your reverse proxy
   - Check that coby-app is running and healthy
   - Verify static files are being served at the root path

### Log Access

Access logs through Portainer:

1. Go to **Containers**
2. Click on the container name
3. Select **Logs** tab
4. Use filters to find specific issues

### Health Checks

Monitor service health:

1. **Portainer Dashboard**: Shows health status
2. **API Health**: `GET /health` endpoint
3. **Database**: `pg_isready` command
4. **Redis**: `redis-cli ping` command

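The database and cache checks can also be run from outside the containers. The sketch below is an assumption-based Python equivalent of `pg_isready` and `redis-cli ping`, using the third-party `psycopg2` and `redis` clients; the host and credentials are placeholders, not values taken from this stack:

```python
"""Hypothetical DB/Redis health probe mirroring pg_isready and redis-cli ping (placeholder credentials)."""
import psycopg2
import redis


def timescaledb_ready(host="your-server", port=5432, dbname="market_data",
                      user="market_user", password="your_password") -> bool:
    """Open a connection and run a trivial query, roughly pg_isready plus SELECT 1."""
    try:
        with psycopg2.connect(host=host, port=port, dbname=dbname,
                              user=user, password=password, connect_timeout=5) as conn:
            with conn.cursor() as cur:
                cur.execute("SELECT 1")
                return cur.fetchone() == (1,)
    except psycopg2.Error:
        return False


def redis_ready(host="your-server", port=6379, password="your_password") -> bool:
    """Send a PING, like redis-cli ping."""
    try:
        return redis.Redis(host=host, port=port, password=password,
                           socket_timeout=5).ping()
    except redis.RedisError:
        return False


if __name__ == "__main__":
    print("TimescaleDB ready:", timescaledb_ready())
    print("Redis ready:", redis_ready())
```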
## Scaling and Updates

### Horizontal Scaling

To scale the main application:

1. Go to the stack in Portainer
2. Edit the stack
3. Modify the `coby-app` service replicas
4. Redeploy the stack

### Updates

To update the system:

1. **Git-based updates**: Portainer will pull latest changes
2. **Manual updates**: Edit stack configuration
3. **Rolling updates**: Use Docker Swarm mode for zero-downtime updates

### Backup

Regular backups should include:

- **Database**: TimescaleDB data volume
- **Configuration**: Stack configuration in Portainer
- **Logs**: Application logs for troubleshooting

## Security Considerations

1. **Change default passwords** for database and Redis
2. **Use environment variables** for sensitive data
3. **Limit network exposure** to required ports only
4. **Regular updates** of base images
5. **Monitor logs** for security events

## Performance Tuning

### Database Optimization

- Adjust `shared_buffers` in TimescaleDB
- Configure connection pooling
- Monitor query performance

### Application Tuning

- Adjust `DATA_BUFFER_SIZE` for throughput
- Configure `BATCH_WRITE_SIZE` for database writes
- Monitor memory usage and adjust limits

### Network Optimization

- Use Docker overlay networks for multi-host deployments
- Configure load balancing for high availability
- Monitor network latency between services

## Support

For issues and support:

1. Check the application logs
2. Review Portainer container status
3. Consult the main project documentation
4. Submit issues to the project repository

## Example Stack Configuration

Here's a complete example of environment variables for production:

```bash
# Production Configuration
ENVIRONMENT=production
DEBUG=false
LOG_LEVEL=INFO

# Security
DB_PASSWORD=prod_secure_db_pass_2024
REDIS_PASSWORD=prod_secure_redis_pass_2024

# Performance
MAX_CONNECTIONS_PER_EXCHANGE=10
DATA_BUFFER_SIZE=20000
BATCH_WRITE_SIZE=2000

# Monitoring
PROMETHEUS_PORT=9090
GRAFANA_PORT=3001
GRAFANA_PASSWORD=secure_grafana_pass

# Exchange Configuration
EXCHANGES=binance,coinbase,kraken,bybit,okx,huobi,kucoin,gateio,bitfinex,mexc
SYMBOLS=BTCUSDT,ETHUSDT,ADAUSDT,DOTUSDT
```

This configuration provides a robust production deployment suitable for high-throughput cryptocurrency data aggregation.
280 COBY/README.md Normal file
@@ -0,0 +1,280 @@
# COBY - Multi-Exchange Data Aggregation System

COBY (Cryptocurrency Order Book Yielder) is a comprehensive data collection and aggregation subsystem designed to serve as the foundational data layer for trading systems. It collects real-time order book and OHLCV data from multiple cryptocurrency exchanges, aggregates it into standardized formats, and provides both live data feeds and historical replay capabilities.

## Kickstart

🌐 Web Dashboard Access:
URL: http://localhost:8080/ (same port as the API)

The FastAPI application serves both:

- API endpoints at http://localhost:8080/api/...
- Web dashboard at http://localhost:8080/ (root path)

📁 Dashboard Files:
The dashboard is served from static files located at:

- HTML: COBY/web/static/index.html
- Static assets: COBY/web/static/ directory

🔧 How it's configured:
In COBY/api/rest_api.py, the FastAPI app mounts static files:

# Mount static files for web dashboard (since we removed nginx)
static_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "web", "static")
if os.path.exists(static_path):
    app.mount("/static", StaticFiles(directory=static_path), name="static")
    # Serve index.html at root for dashboard
    app.mount("/", StaticFiles(directory=static_path, html=True), name="dashboard")

🚀 To access the dashboard:

1. Start the application: python COBY/main.py --debug
2. Open browser: Navigate to http://localhost:8080/
3. API health check: http://localhost:8080/health

📊 Dashboard Features:
The dashboard (COBY/web/static/index.html) includes:

- System status monitoring
- Exchange connection status
- Performance metrics (CPU, memory, throughput, latency)
- Real-time updates via WebSocket
- Responsive design

🔌 WebSocket Connection:
The dashboard connects to WebSocket on port 8081 for real-time updates:

- WebSocket URL: ws://localhost:8081/dashboard

So to summarize:

- Web Dashboard: http://localhost:8080/
- API: http://localhost:8080/api/...
- WebSocket: ws://localhost:8081/

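For completeness, a minimal client for that WebSocket feed might look like the sketch below. It assumes the `ws://localhost:8081/dashboard` URL quoted above and the third-party `websockets` package; the message format is not documented in this excerpt, so the handler simply prints whatever arrives.

```python
"""Minimal sketch of a dashboard WebSocket consumer (assumes the websockets package)."""
import asyncio

import websockets

DASHBOARD_WS_URL = "ws://localhost:8081/dashboard"  # URL quoted in the Kickstart notes above


async def consume_dashboard_updates() -> None:
    """Connect to the dashboard feed and print incoming messages as they arrive."""
    async with websockets.connect(DASHBOARD_WS_URL) as ws:
        async for message in ws:
            # Message schema is not specified here; treat payloads as opaque text/JSON.
            print("dashboard update:", message)


if __name__ == "__main__":
    asyncio.run(consume_dashboard_updates())
```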
## 🏗️ Architecture

The system follows a modular architecture with clear separation of concerns:

```
COBY/
├── config.py              # Configuration management
├── models/                # Data models and structures
│   ├── __init__.py
│   └── core.py            # Core data models
├── interfaces/            # Abstract interfaces
│   ├── __init__.py
│   ├── exchange_connector.py
│   ├── data_processor.py
│   ├── aggregation_engine.py
│   ├── storage_manager.py
│   └── replay_manager.py
├── utils/                 # Utility functions
│   ├── __init__.py
│   ├── exceptions.py
│   ├── logging.py
│   ├── validation.py
│   └── timing.py
└── README.md
```

## 🚀 Features

- **Multi-Exchange Support**: Connect to 10+ major cryptocurrency exchanges
- **Real-Time Data**: High-frequency order book and trade data collection
- **Price Bucket Aggregation**: Configurable price buckets ($10 for BTC, $1 for ETH)
- **Heatmap Visualization**: Real-time market depth heatmaps
- **Historical Replay**: Replay past market events for model training
- **TimescaleDB Storage**: Optimized time-series data storage
- **Redis Caching**: High-performance data caching layer
- **Orchestrator Integration**: Compatible with existing trading systems

## 📊 Data Models

### Core Models

- **OrderBookSnapshot**: Standardized order book data
- **TradeEvent**: Individual trade events
- **PriceBuckets**: Aggregated price bucket data
- **HeatmapData**: Visualization-ready heatmap data
- **ConnectionStatus**: Exchange connection monitoring
- **ReplaySession**: Historical data replay management

### Key Features

- Automatic data validation and normalization
- Configurable price bucket sizes per symbol
- Real-time metrics calculation
- Cross-exchange data consolidation
- Quality scoring and anomaly detection

## ⚙️ Configuration

The system uses environment variables for configuration:

```python
# Database settings
DB_HOST=192.168.0.10
DB_PORT=5432
DB_NAME=market_data
DB_USER=market_user
DB_PASSWORD=your_password

# Redis settings
REDIS_HOST=192.168.0.10
REDIS_PORT=6379
REDIS_PASSWORD=your_password

# Aggregation settings
BTC_BUCKET_SIZE=10.0
ETH_BUCKET_SIZE=1.0
HEATMAP_DEPTH=50
UPDATE_FREQUENCY=0.5

# Performance settings
DATA_BUFFER_SIZE=10000
BATCH_WRITE_SIZE=1000
MAX_MEMORY_USAGE=2048
```

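As a sketch of how these variables might be pulled together in one place, the snippet below reads them with `os.environ` defaults matching the values listed above. The dataclass name, field set, and `database_url()` helper are illustrative assumptions; the project's actual `COBY/config.py` is not reproduced in this diff excerpt.

```python
"""Illustrative env-var loader for the settings listed above (not the real COBY/config.py)."""
import os
from dataclasses import dataclass


@dataclass
class CobySettings:
    # Defaults mirror the example values shown in the Configuration section.
    db_host: str = os.environ.get("DB_HOST", "192.168.0.10")
    db_port: int = int(os.environ.get("DB_PORT", "5432"))
    db_name: str = os.environ.get("DB_NAME", "market_data")
    db_user: str = os.environ.get("DB_USER", "market_user")
    db_password: str = os.environ.get("DB_PASSWORD", "")
    redis_host: str = os.environ.get("REDIS_HOST", "192.168.0.10")
    redis_port: int = int(os.environ.get("REDIS_PORT", "6379"))
    btc_bucket_size: float = float(os.environ.get("BTC_BUCKET_SIZE", "10.0"))
    eth_bucket_size: float = float(os.environ.get("ETH_BUCKET_SIZE", "1.0"))
    data_buffer_size: int = int(os.environ.get("DATA_BUFFER_SIZE", "10000"))
    batch_write_size: int = int(os.environ.get("BATCH_WRITE_SIZE", "1000"))

    def database_url(self) -> str:
        """Build a PostgreSQL/TimescaleDB connection URL from the pieces above."""
        return (f"postgresql://{self.db_user}:{self.db_password}"
                f"@{self.db_host}:{self.db_port}/{self.db_name}")


settings = CobySettings()
```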
## 🔌 Interfaces

### ExchangeConnector

Abstract base class for exchange WebSocket connectors (a rough sketch follows this list) with:

- Connection management with auto-reconnect
- Order book and trade subscriptions
- Data normalization callbacks
- Health monitoring

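To illustrate the shape such an interface typically takes, here is a deliberately small sketch. The class and method names below are illustrative assumptions only; the real definitions live in `COBY/interfaces/exchange_connector.py`, which is not reproduced in this excerpt.

```python
"""Illustrative shape of an exchange connector interface (names are assumptions, not the real API)."""
from abc import ABC, abstractmethod
from typing import Callable


class ExampleExchangeConnector(ABC):
    """Hypothetical sketch: connection management, subscriptions, and normalization callbacks."""

    def __init__(self, exchange_name: str) -> None:
        self.exchange_name = exchange_name
        self._orderbook_callbacks: list[Callable] = []

    @abstractmethod
    async def connect(self) -> None:
        """Open the WebSocket session and start the auto-reconnect loop."""

    @abstractmethod
    async def subscribe_orderbook(self, symbol: str) -> None:
        """Subscribe to order book updates for a symbol."""

    @abstractmethod
    async def subscribe_trades(self, symbol: str) -> None:
        """Subscribe to trade events for a symbol."""

    def on_orderbook(self, callback: Callable) -> None:
        """Register a callback that receives normalized order book snapshots."""
        self._orderbook_callbacks.append(callback)

    @abstractmethod
    def is_healthy(self) -> bool:
        """Report whether the connection is currently considered healthy."""
```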
### DataProcessor

Interface for data processing and validation:

- Raw data normalization
- Quality validation
- Metrics calculation
- Anomaly detection

### AggregationEngine

Interface for data aggregation:

- Price bucket creation
- Heatmap generation
- Cross-exchange consolidation
- Imbalance calculations

### StorageManager

Interface for data persistence:

- TimescaleDB operations
- Batch processing
- Historical data retrieval
- Storage optimization

### ReplayManager

Interface for historical data replay:

- Session management
- Configurable playback speeds
- Time-based seeking
- Real-time compatibility

## 🛠️ Utilities

### Logging

- Structured logging with correlation IDs
- Configurable log levels and outputs
- Rotating file handlers
- Context-aware logging

### Validation

- Symbol format validation
- Price and volume validation
- Configuration validation
- Data quality checks

### Timing

- UTC timestamp handling
- Performance measurement
- Time-based operations
- Interval calculations

### Exceptions

- Custom exception hierarchy
- Error code management
- Detailed error context
- Structured error responses

## 🔧 Usage

### Basic Configuration

```python
from COBY.config import config

# Access configuration
db_url = config.get_database_url()
bucket_size = config.get_bucket_size('BTCUSDT')
```

### Data Models

```python
from datetime import datetime, timezone

from COBY.models import OrderBookSnapshot, PriceLevel

# Create order book snapshot
orderbook = OrderBookSnapshot(
    symbol='BTCUSDT',
    exchange='binance',
    timestamp=datetime.now(timezone.utc),
    bids=[PriceLevel(50000.0, 1.5)],
    asks=[PriceLevel(50100.0, 2.0)]
)

# Access calculated properties
mid_price = orderbook.mid_price
spread = orderbook.spread
```

### Logging

```python
from COBY.utils import setup_logging, get_logger, set_correlation_id

# Setup logging
setup_logging(level='INFO', log_file='logs/coby.log')

# Get logger
logger = get_logger(__name__)

# Use correlation ID
set_correlation_id('req-123')
logger.info("Processing order book data")
```

## 🏃 Next Steps

This is the foundational structure for the COBY system. The next implementation tasks will build upon these interfaces and models to create:

1. TimescaleDB integration
2. Exchange connector implementations
3. Data processing engines
4. Aggregation algorithms
5. Web dashboard
6. API endpoints
7. Replay functionality

Each component will implement the defined interfaces, ensuring consistency and maintainability across the entire system.

## 📝 Development Guidelines

- All components must implement the defined interfaces
- Use the provided data models for consistency
- Follow the logging and error handling patterns
- Validate all input data using the utility functions
- Maintain backward compatibility with the orchestrator interface
- Write comprehensive tests for all functionality

## 🔍 Monitoring

The system provides comprehensive monitoring through:

- Structured logging with correlation IDs
- Performance metrics collection
- Health check endpoints
- Connection status monitoring
- Data quality indicators
- System resource tracking
274
COBY/REVERSE_PROXY_CONFIG.md
Normal file
274
COBY/REVERSE_PROXY_CONFIG.md
Normal file
@@ -0,0 +1,274 @@
|
|||||||
|
# Reverse Proxy Configuration for COBY
|
||||||
|
|
||||||
|
Since COBY now serves both the API and web dashboard from port 8080, here are configuration examples for common reverse proxies.
|
||||||
|
|
||||||
|
## Nginx Reverse Proxy
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
# COBY upstream
|
||||||
|
upstream coby_backend {
|
||||||
|
server coby-app:8080;
|
||||||
|
# Add more servers for load balancing if needed
|
||||||
|
# server coby-app-2:8080;
|
||||||
|
}
|
||||||
|
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
server_name coby.yourdomain.com;
|
||||||
|
|
||||||
|
# Optional: Redirect HTTP to HTTPS
|
||||||
|
# return 301 https://$server_name$request_uri;
|
||||||
|
|
||||||
|
# Main application proxy
|
||||||
|
location / {
|
||||||
|
proxy_pass http://coby_backend;
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
proxy_set_header Upgrade $http_upgrade;
|
||||||
|
proxy_set_header Connection 'upgrade';
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
proxy_cache_bypass $http_upgrade;
|
||||||
|
proxy_read_timeout 86400;
|
||||||
|
|
||||||
|
# CORS headers (if needed)
|
||||||
|
add_header Access-Control-Allow-Origin *;
|
||||||
|
add_header Access-Control-Allow-Methods "GET, POST, OPTIONS";
|
||||||
|
add_header Access-Control-Allow-Headers "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range";
|
||||||
|
}
|
||||||
|
|
||||||
|
# WebSocket specific configuration (if needed separately)
|
||||||
|
location /ws/ {
|
||||||
|
proxy_pass http://coby_backend;
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
proxy_set_header Upgrade $http_upgrade;
|
||||||
|
proxy_set_header Connection "upgrade";
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
proxy_read_timeout 86400;
|
||||||
|
}
|
||||||
|
|
||||||
|
# Health check endpoint
|
||||||
|
location /health {
|
||||||
|
proxy_pass http://coby_backend;
|
||||||
|
access_log off;
|
||||||
|
}
|
||||||
|
|
||||||
|
# Optional: Serve static files with caching
|
||||||
|
location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg)$ {
|
||||||
|
proxy_pass http://coby_backend;
|
||||||
|
expires 1y;
|
||||||
|
add_header Cache-Control "public, immutable";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# HTTPS configuration (recommended)
|
||||||
|
server {
|
||||||
|
listen 443 ssl http2;
|
||||||
|
server_name coby.yourdomain.com;
|
||||||
|
|
||||||
|
# SSL configuration
|
||||||
|
ssl_certificate /path/to/your/certificate.crt;
|
||||||
|
ssl_certificate_key /path/to/your/private.key;
|
||||||
|
ssl_protocols TLSv1.2 TLSv1.3;
|
||||||
|
ssl_ciphers ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384;
|
||||||
|
ssl_prefer_server_ciphers off;
|
||||||
|
|
||||||
|
# Same location blocks as above
|
||||||
|
location / {
|
||||||
|
proxy_pass http://coby_backend;
|
||||||
|
proxy_http_version 1.1;
|
||||||
|
proxy_set_header Upgrade $http_upgrade;
|
||||||
|
proxy_set_header Connection 'upgrade';
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
proxy_cache_bypass $http_upgrade;
|
||||||
|
proxy_read_timeout 86400;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Apache Reverse Proxy
|
||||||
|
|
||||||
|
```apache
|
||||||
|
<VirtualHost *:80>
|
||||||
|
ServerName coby.yourdomain.com
|
||||||
|
|
||||||
|
# Enable required modules
|
||||||
|
# a2enmod proxy proxy_http proxy_wstunnel rewrite
|
||||||
|
|
||||||
|
# Proxy configuration
|
||||||
|
ProxyPreserveHost On
|
||||||
|
ProxyRequests Off
|
||||||
|
|
||||||
|
# Main application
|
||||||
|
ProxyPass / http://coby-app:8080/
|
||||||
|
ProxyPassReverse / http://coby-app:8080/
|
||||||
|
|
||||||
|
# WebSocket support
|
||||||
|
RewriteEngine On
|
||||||
|
RewriteCond %{HTTP:Upgrade} websocket [NC]
|
||||||
|
RewriteCond %{HTTP:Connection} upgrade [NC]
|
||||||
|
RewriteRule ^/?(.*) "ws://coby-app:8080/$1" [P,L]
|
||||||
|
|
||||||
|
# Headers
|
||||||
|
ProxyPassReverse / http://coby-app:8080/
|
||||||
|
ProxyPassReverseMatch ^(/.*) http://coby-app:8080$1
|
||||||
|
|
||||||
|
# Optional: Logging
|
||||||
|
ErrorLog ${APACHE_LOG_DIR}/coby_error.log
|
||||||
|
CustomLog ${APACHE_LOG_DIR}/coby_access.log combined
|
||||||
|
</VirtualHost>
|
||||||
|
|
||||||
|
# HTTPS version
|
||||||
|
<VirtualHost *:443>
|
||||||
|
ServerName coby.yourdomain.com
|
||||||
|
|
||||||
|
# SSL configuration
|
||||||
|
SSLEngine on
|
||||||
|
SSLCertificateFile /path/to/your/certificate.crt
|
||||||
|
SSLCertificateKeyFile /path/to/your/private.key
|
||||||
|
|
||||||
|
# Same proxy configuration as above
|
||||||
|
ProxyPreserveHost On
|
||||||
|
ProxyRequests Off
|
||||||
|
ProxyPass / http://coby-app:8080/
|
||||||
|
ProxyPassReverse / http://coby-app:8080/
|
||||||
|
|
||||||
|
# WebSocket support
|
||||||
|
RewriteEngine On
|
||||||
|
RewriteCond %{HTTP:Upgrade} websocket [NC]
|
||||||
|
RewriteCond %{HTTP:Connection} upgrade [NC]
|
||||||
|
RewriteRule ^/?(.*) "ws://coby-app:8080/$1" [P,L]
|
||||||
|
</VirtualHost>
|
||||||
|
```
|
||||||
|
|
||||||
|
## Traefik (Docker Labels)
|
||||||
|
|
||||||
|
If you're using Traefik, add these labels to your COBY app service in docker-compose:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
coby-app:
|
||||||
|
# ... other configuration
|
||||||
|
labels:
|
||||||
|
- "traefik.enable=true"
|
||||||
|
- "traefik.http.routers.coby.rule=Host(`coby.yourdomain.com`)"
|
||||||
|
- "traefik.http.routers.coby.entrypoints=websecure"
|
||||||
|
- "traefik.http.routers.coby.tls.certresolver=letsencrypt"
|
||||||
|
- "traefik.http.services.coby.loadbalancer.server.port=8080"
|
||||||
|
|
||||||
|
# WebSocket support
|
||||||
|
- "traefik.http.routers.coby-ws.rule=Host(`coby.yourdomain.com`) && PathPrefix(`/ws`)"
|
||||||
|
- "traefik.http.routers.coby-ws.entrypoints=websecure"
|
||||||
|
- "traefik.http.routers.coby-ws.tls.certresolver=letsencrypt"
|
||||||
|
- "traefik.http.services.coby-ws.loadbalancer.server.port=8081"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Caddy
|
||||||
|
|
||||||
|
```caddy
|
||||||
|
coby.yourdomain.com {
|
||||||
|
reverse_proxy coby-app:8080
|
||||||
|
|
||||||
|
# WebSocket support is automatic in Caddy
|
||||||
|
|
||||||
|
# Optional: Custom headers
|
||||||
|
header {
|
||||||
|
# Security headers
|
||||||
|
X-Frame-Options "SAMEORIGIN"
|
||||||
|
X-XSS-Protection "1; mode=block"
|
||||||
|
X-Content-Type-Options "nosniff"
|
||||||
|
Referrer-Policy "no-referrer-when-downgrade"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Optional: Logging
|
||||||
|
log {
|
||||||
|
output file /var/log/caddy/coby.log
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## HAProxy
|
||||||
|
|
||||||
|
```haproxy
|
||||||
|
global
|
||||||
|
daemon
|
||||||
|
|
||||||
|
defaults
|
||||||
|
mode http
|
||||||
|
timeout connect 5000ms
|
||||||
|
timeout client 50000ms
|
||||||
|
timeout server 50000ms
|
||||||
|
|
||||||
|
frontend coby_frontend
|
||||||
|
bind *:80
|
||||||
|
bind *:443 ssl crt /path/to/your/certificate.pem
|
||||||
|
redirect scheme https if !{ ssl_fc }
|
||||||
|
|
||||||
|
# WebSocket detection
|
||||||
|
acl is_websocket hdr(Upgrade) -i websocket
|
||||||
|
acl is_websocket_path path_beg /ws
|
||||||
|
|
||||||
|
use_backend coby_websocket if is_websocket or is_websocket_path
|
||||||
|
default_backend coby_backend
|
||||||
|
|
||||||
|
backend coby_backend
|
||||||
|
balance roundrobin
|
||||||
|
option httpchk GET /health
|
||||||
|
server coby1 coby-app:8080 check
|
||||||
|
|
||||||
|
backend coby_websocket
|
||||||
|
balance roundrobin
|
||||||
|
server coby1 coby-app:8081 check
|
||||||
|
```
|
||||||
|
|
||||||
|
## Docker Compose with Reverse Proxy
|
||||||
|
|
||||||
|
Here's an example of how to integrate with an existing reverse proxy network:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# Add to your docker-compose.portainer.yml
|
||||||
|
networks:
|
||||||
|
coby-network:
|
||||||
|
driver: bridge
|
||||||
|
reverse-proxy:
|
||||||
|
external: true # Your existing reverse proxy network
|
||||||
|
|
||||||
|
services:
|
||||||
|
coby-app:
|
||||||
|
# ... existing configuration
|
||||||
|
networks:
|
||||||
|
- coby-network
|
||||||
|
- reverse-proxy # Connect to reverse proxy network
|
||||||
|
# Remove port mappings if using reverse proxy
|
||||||
|
# ports:
|
||||||
|
# - "8080:8080"
|
||||||
|
# - "8081:8081"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Important Notes
|
||||||
|
|
||||||
|
1. **WebSocket Support**: Ensure your reverse proxy supports WebSocket upgrades for real-time features
|
||||||
|
2. **Health Checks**: Configure health checks to use `/health` endpoint
|
||||||
|
3. **Timeouts**: Set appropriate timeouts for long-running WebSocket connections
|
||||||
|
4. **SSL/TLS**: Always use HTTPS in production
|
||||||
|
5. **Rate Limiting**: Consider implementing rate limiting at the reverse proxy level
|
||||||
|
6. **Caching**: Static assets can be cached at the reverse proxy level
|
||||||
|
7. **Load Balancing**: If scaling horizontally, configure load balancing appropriately
|
||||||
|
|
||||||
|
## Testing Your Configuration
|
||||||
|
|
||||||
|
After configuring your reverse proxy:
|
||||||
|
|
||||||
|
1. **Basic connectivity**: `curl http://your-domain/health`
|
||||||
|
2. **Web dashboard**: Visit `http://your-domain/` in browser
|
||||||
|
3. **API endpoints**: Test `http://your-domain/api/` endpoints
|
||||||
|
4. **WebSocket**: Test WebSocket connections to `/ws/` path
|
||||||
|
5. **SSL**: Verify HTTPS is working if configured
|
||||||
|
|
||||||
|
The COBY application will handle all routing internally, so your reverse proxy just needs to forward all traffic to port 8080.
|
||||||
9 COBY/__init__.py Normal file
@@ -0,0 +1,9 @@
"""
Multi-Exchange Data Aggregation System (COBY)

A comprehensive data collection and aggregation subsystem for cryptocurrency exchanges.
Provides real-time order book data, heatmap visualization, and historical replay capabilities.
"""

__version__ = "1.0.0"
__author__ = "Trading System Team"
15 COBY/aggregation/__init__.py Normal file
@@ -0,0 +1,15 @@
"""
Data aggregation components for the COBY system.
"""

from .aggregation_engine import StandardAggregationEngine
from .price_bucketer import PriceBucketer
from .heatmap_generator import HeatmapGenerator
from .cross_exchange_aggregator import CrossExchangeAggregator

__all__ = [
    'StandardAggregationEngine',
    'PriceBucketer',
    'HeatmapGenerator',
    'CrossExchangeAggregator'
]
338
COBY/aggregation/aggregation_engine.py
Normal file
338
COBY/aggregation/aggregation_engine.py
Normal file
@@ -0,0 +1,338 @@
|
|||||||
|
"""
|
||||||
|
Main aggregation engine implementation.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from typing import Dict, List
|
||||||
|
from ..interfaces.aggregation_engine import AggregationEngine
|
||||||
|
from ..models.core import (
|
||||||
|
OrderBookSnapshot, PriceBuckets, HeatmapData,
|
||||||
|
ImbalanceMetrics, ConsolidatedOrderBook
|
||||||
|
)
|
||||||
|
from ..utils.logging import get_logger, set_correlation_id
|
||||||
|
from ..utils.exceptions import AggregationError
|
||||||
|
from .price_bucketer import PriceBucketer
|
||||||
|
from .heatmap_generator import HeatmapGenerator
|
||||||
|
from .cross_exchange_aggregator import CrossExchangeAggregator
|
||||||
|
from ..processing.metrics_calculator import MetricsCalculator
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class StandardAggregationEngine(AggregationEngine):
|
||||||
|
"""
|
||||||
|
Standard implementation of aggregation engine interface.
|
||||||
|
|
||||||
|
Provides:
|
||||||
|
- Price bucket creation with $1 USD buckets
|
||||||
|
- Heatmap generation
|
||||||
|
- Cross-exchange aggregation
|
||||||
|
- Imbalance calculations
|
||||||
|
- Support/resistance detection
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
"""Initialize aggregation engine with components"""
|
||||||
|
self.price_bucketer = PriceBucketer()
|
||||||
|
self.heatmap_generator = HeatmapGenerator()
|
||||||
|
self.cross_exchange_aggregator = CrossExchangeAggregator()
|
||||||
|
self.metrics_calculator = MetricsCalculator()
|
||||||
|
|
||||||
|
# Processing statistics
|
||||||
|
self.buckets_created = 0
|
||||||
|
self.heatmaps_generated = 0
|
||||||
|
self.consolidations_performed = 0
|
||||||
|
|
||||||
|
logger.info("Standard aggregation engine initialized")
|
||||||
|
|
||||||
|
def create_price_buckets(self, orderbook: OrderBookSnapshot,
|
||||||
|
bucket_size: float = None) -> PriceBuckets:
|
||||||
|
"""
|
||||||
|
Convert order book data to price buckets.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
orderbook: Order book snapshot
|
||||||
|
bucket_size: Size of each price bucket (uses $1 default)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
PriceBuckets: Aggregated price bucket data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
# Use provided bucket size or default $1
|
||||||
|
if bucket_size:
|
||||||
|
bucketer = PriceBucketer(bucket_size)
|
||||||
|
else:
|
||||||
|
bucketer = self.price_bucketer
|
||||||
|
|
||||||
|
buckets = bucketer.create_price_buckets(orderbook)
|
||||||
|
self.buckets_created += 1
|
||||||
|
|
||||||
|
logger.debug(f"Created price buckets for {orderbook.symbol}@{orderbook.exchange}")
|
||||||
|
return buckets
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error creating price buckets: {e}")
|
||||||
|
raise AggregationError(f"Price bucket creation failed: {e}", "BUCKET_ERROR")
|
||||||
|
|
||||||
|
def update_heatmap(self, symbol: str, buckets: PriceBuckets) -> HeatmapData:
|
||||||
|
"""
|
||||||
|
Update heatmap data with new price buckets.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
buckets: Price bucket data
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
HeatmapData: Updated heatmap visualization data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
heatmap = self.heatmap_generator.generate_heatmap(buckets)
|
||||||
|
self.heatmaps_generated += 1
|
||||||
|
|
||||||
|
logger.debug(f"Generated heatmap for {symbol}: {len(heatmap.data)} points")
|
||||||
|
return heatmap
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error updating heatmap: {e}")
|
||||||
|
raise AggregationError(f"Heatmap update failed: {e}", "HEATMAP_ERROR")
|
||||||
|
|
||||||
|
def calculate_imbalances(self, orderbook: OrderBookSnapshot) -> ImbalanceMetrics:
|
||||||
|
"""
|
||||||
|
Calculate order book imbalance metrics.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
orderbook: Order book snapshot
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
ImbalanceMetrics: Calculated imbalance metrics
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
return self.metrics_calculator.calculate_imbalance_metrics(orderbook)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error calculating imbalances: {e}")
|
||||||
|
raise AggregationError(f"Imbalance calculation failed: {e}", "IMBALANCE_ERROR")
|
||||||
|
|
||||||
|
def aggregate_across_exchanges(self, symbol: str,
|
||||||
|
orderbooks: List[OrderBookSnapshot]) -> ConsolidatedOrderBook:
|
||||||
|
"""
|
||||||
|
Aggregate order book data from multiple exchanges.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
orderbooks: List of order book snapshots from different exchanges
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
ConsolidatedOrderBook: Consolidated order book data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
consolidated = self.cross_exchange_aggregator.aggregate_across_exchanges(
|
||||||
|
symbol, orderbooks
|
||||||
|
)
|
||||||
|
self.consolidations_performed += 1
|
||||||
|
|
||||||
|
logger.debug(f"Consolidated {len(orderbooks)} order books for {symbol}")
|
||||||
|
return consolidated
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error aggregating across exchanges: {e}")
|
||||||
|
raise AggregationError(f"Cross-exchange aggregation failed: {e}", "CONSOLIDATION_ERROR")
|
||||||
|
|
||||||
|
def calculate_volume_weighted_price(self, orderbooks: List[OrderBookSnapshot]) -> float:
|
||||||
|
"""
|
||||||
|
Calculate volume-weighted average price across exchanges.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
orderbooks: List of order book snapshots
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
float: Volume-weighted average price
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
return self.cross_exchange_aggregator._calculate_weighted_mid_price(orderbooks)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error calculating volume weighted price: {e}")
|
||||||
|
raise AggregationError(f"VWAP calculation failed: {e}", "VWAP_ERROR")
|
||||||
|
|
||||||
|
def get_market_depth(self, orderbook: OrderBookSnapshot,
|
||||||
|
depth_levels: List[float]) -> Dict[float, Dict[str, float]]:
|
||||||
|
"""
|
||||||
|
Calculate market depth at different price levels.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
orderbook: Order book snapshot
|
||||||
|
depth_levels: List of depth percentages (e.g., [0.1, 0.5, 1.0])
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict: Market depth data {level: {'bid_volume': x, 'ask_volume': y}}
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
depth_data = {}
|
||||||
|
|
||||||
|
if not orderbook.mid_price:
|
||||||
|
return depth_data
|
||||||
|
|
||||||
|
for level_pct in depth_levels:
|
||||||
|
# Calculate price range for this depth level
|
||||||
|
price_range = orderbook.mid_price * (level_pct / 100.0)
|
||||||
|
min_bid_price = orderbook.mid_price - price_range
|
||||||
|
max_ask_price = orderbook.mid_price + price_range
|
||||||
|
|
||||||
|
# Calculate volumes within this range
|
||||||
|
bid_volume = sum(
|
||||||
|
bid.size for bid in orderbook.bids
|
||||||
|
if bid.price >= min_bid_price
|
||||||
|
)
|
||||||
|
|
||||||
|
ask_volume = sum(
|
||||||
|
ask.size for ask in orderbook.asks
|
||||||
|
if ask.price <= max_ask_price
|
||||||
|
)
|
||||||
|
|
||||||
|
depth_data[level_pct] = {
|
||||||
|
'bid_volume': bid_volume,
|
||||||
|
'ask_volume': ask_volume,
|
||||||
|
'total_volume': bid_volume + ask_volume
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.debug(f"Calculated market depth for {len(depth_levels)} levels")
|
||||||
|
return depth_data
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error calculating market depth: {e}")
|
||||||
|
return {}
|
||||||
|
|
||||||
|
def smooth_heatmap(self, heatmap: HeatmapData, smoothing_factor: float) -> HeatmapData:
|
||||||
|
"""
|
||||||
|
Apply smoothing to heatmap data to reduce noise.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
heatmap: Raw heatmap data
|
||||||
|
smoothing_factor: Smoothing factor (0.0 to 1.0)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
HeatmapData: Smoothed heatmap data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
return self.heatmap_generator.apply_smoothing(heatmap, smoothing_factor)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error smoothing heatmap: {e}")
|
||||||
|
return heatmap # Return original on error
|
||||||
|
|
||||||
|
def calculate_liquidity_score(self, orderbook: OrderBookSnapshot) -> float:
|
||||||
|
"""
|
||||||
|
Calculate liquidity score for an order book.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
orderbook: Order book snapshot
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
float: Liquidity score (0.0 to 1.0)
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
return self.metrics_calculator.calculate_liquidity_score(orderbook)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error calculating liquidity score: {e}")
|
||||||
|
return 0.0
|
||||||
|
|
||||||
|
def detect_support_resistance(self, heatmap: HeatmapData) -> Dict[str, List[float]]:
|
||||||
|
"""
|
||||||
|
Detect support and resistance levels from heatmap data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
heatmap: Heatmap data
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict: {'support': [prices], 'resistance': [prices]}
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
return self.heatmap_generator.calculate_support_resistance(heatmap)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error detecting support/resistance: {e}")
|
||||||
|
return {'support': [], 'resistance': []}
|
||||||
|
|
||||||
|
def create_consolidated_heatmap(self, symbol: str,
|
||||||
|
orderbooks: List[OrderBookSnapshot]) -> HeatmapData:
|
||||||
|
"""
|
||||||
|
Create consolidated heatmap from multiple exchanges.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
orderbooks: List of order book snapshots
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
HeatmapData: Consolidated heatmap data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
return self.cross_exchange_aggregator.create_consolidated_heatmap(
|
||||||
|
symbol, orderbooks
|
||||||
|
)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error creating consolidated heatmap: {e}")
|
||||||
|
raise AggregationError(f"Consolidated heatmap creation failed: {e}", "CONSOLIDATED_HEATMAP_ERROR")
|
||||||
|
|
||||||
|
def detect_arbitrage_opportunities(self, orderbooks: List[OrderBookSnapshot]) -> List[Dict]:
|
||||||
|
"""
|
||||||
|
Detect arbitrage opportunities between exchanges.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
orderbooks: List of order book snapshots
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[Dict]: Arbitrage opportunities
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
return self.cross_exchange_aggregator.detect_arbitrage_opportunities(orderbooks)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error detecting arbitrage opportunities: {e}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
def get_processing_stats(self) -> Dict[str, any]:
|
||||||
|
"""Get processing statistics"""
|
||||||
|
return {
|
||||||
|
'buckets_created': self.buckets_created,
|
||||||
|
'heatmaps_generated': self.heatmaps_generated,
|
||||||
|
'consolidations_performed': self.consolidations_performed,
|
||||||
|
'price_bucketer_stats': self.price_bucketer.get_processing_stats(),
|
||||||
|
'heatmap_generator_stats': self.heatmap_generator.get_processing_stats(),
|
||||||
|
'cross_exchange_stats': self.cross_exchange_aggregator.get_processing_stats()
|
||||||
|
}
|
||||||
|
|
||||||
|
def reset_stats(self) -> None:
|
||||||
|
"""Reset processing statistics"""
|
||||||
|
self.buckets_created = 0
|
||||||
|
self.heatmaps_generated = 0
|
||||||
|
self.consolidations_performed = 0
|
||||||
|
|
||||||
|
self.price_bucketer.reset_stats()
|
||||||
|
self.heatmap_generator.reset_stats()
|
||||||
|
self.cross_exchange_aggregator.reset_stats()
|
||||||
|
|
||||||
|
logger.info("Aggregation engine statistics reset")
|
||||||
390
COBY/aggregation/cross_exchange_aggregator.py
Normal file
390
COBY/aggregation/cross_exchange_aggregator.py
Normal file
@@ -0,0 +1,390 @@
|
|||||||
|
"""
|
||||||
|
Cross-exchange data aggregation and consolidation.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from typing import List, Dict, Optional
|
||||||
|
from collections import defaultdict
|
||||||
|
from datetime import datetime
|
||||||
|
from ..models.core import (
|
||||||
|
OrderBookSnapshot, ConsolidatedOrderBook, PriceLevel,
|
||||||
|
PriceBuckets, HeatmapData, HeatmapPoint
|
||||||
|
)
|
||||||
|
from ..utils.logging import get_logger
|
||||||
|
from ..utils.timing import get_current_timestamp
|
||||||
|
from .price_bucketer import PriceBucketer
|
||||||
|
from .heatmap_generator import HeatmapGenerator
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class CrossExchangeAggregator:
|
||||||
|
"""
|
||||||
|
Aggregates data across multiple exchanges.
|
||||||
|
|
||||||
|
Provides consolidated order books and cross-exchange heatmaps.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
"""Initialize cross-exchange aggregator"""
|
||||||
|
self.price_bucketer = PriceBucketer()
|
||||||
|
self.heatmap_generator = HeatmapGenerator()
|
||||||
|
|
||||||
|
# Exchange weights for aggregation
|
||||||
|
self.exchange_weights = {
|
||||||
|
'binance': 1.0,
|
||||||
|
'coinbase': 0.9,
|
||||||
|
'kraken': 0.8,
|
||||||
|
'bybit': 0.7,
|
||||||
|
'okx': 0.7,
|
||||||
|
'huobi': 0.6,
|
||||||
|
'kucoin': 0.6,
|
||||||
|
'gateio': 0.5,
|
||||||
|
'bitfinex': 0.5,
|
||||||
|
'mexc': 0.4
|
||||||
|
}
|
||||||
|
|
||||||
|
# Statistics
|
||||||
|
self.consolidations_performed = 0
|
||||||
|
self.exchanges_processed = set()
|
||||||
|
|
||||||
|
logger.info("Cross-exchange aggregator initialized")
|
||||||
|
|
||||||
|
def aggregate_across_exchanges(self, symbol: str,
|
||||||
|
orderbooks: List[OrderBookSnapshot]) -> ConsolidatedOrderBook:
|
||||||
|
"""
|
||||||
|
Aggregate order book data from multiple exchanges.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
orderbooks: List of order book snapshots from different exchanges
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
ConsolidatedOrderBook: Consolidated order book data
|
||||||
|
"""
|
||||||
|
if not orderbooks:
|
||||||
|
raise ValueError("Cannot aggregate empty orderbook list")
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Track exchanges
|
||||||
|
exchanges = [ob.exchange for ob in orderbooks]
|
||||||
|
self.exchanges_processed.update(exchanges)
|
||||||
|
|
||||||
|
# Calculate weighted mid price
|
||||||
|
weighted_mid_price = self._calculate_weighted_mid_price(orderbooks)
|
||||||
|
|
||||||
|
# Consolidate bids and asks
|
||||||
|
consolidated_bids = self._consolidate_price_levels(
|
||||||
|
[ob.bids for ob in orderbooks],
|
||||||
|
[ob.exchange for ob in orderbooks],
|
||||||
|
'bid'
|
||||||
|
)
|
||||||
|
|
||||||
|
consolidated_asks = self._consolidate_price_levels(
|
||||||
|
[ob.asks for ob in orderbooks],
|
||||||
|
[ob.exchange for ob in orderbooks],
|
||||||
|
'ask'
|
||||||
|
)
|
||||||
|
|
||||||
|
# Calculate total volumes
|
||||||
|
total_bid_volume = sum(level.size for level in consolidated_bids)
|
||||||
|
total_ask_volume = sum(level.size for level in consolidated_asks)
|
||||||
|
|
||||||
|
# Create consolidated order book
|
||||||
|
consolidated = ConsolidatedOrderBook(
|
||||||
|
symbol=symbol,
|
||||||
|
timestamp=get_current_timestamp(),
|
||||||
|
exchanges=exchanges,
|
||||||
|
bids=consolidated_bids,
|
||||||
|
asks=consolidated_asks,
|
||||||
|
weighted_mid_price=weighted_mid_price,
|
||||||
|
total_bid_volume=total_bid_volume,
|
||||||
|
total_ask_volume=total_ask_volume,
|
||||||
|
exchange_weights={ex: self.exchange_weights.get(ex, 0.5) for ex in exchanges}
|
||||||
|
)
|
||||||
|
|
||||||
|
self.consolidations_performed += 1
|
||||||
|
|
||||||
|
logger.debug(
|
||||||
|
f"Consolidated {len(orderbooks)} order books for {symbol}: "
|
||||||
|
f"{len(consolidated_bids)} bids, {len(consolidated_asks)} asks"
|
||||||
|
)
|
||||||
|
|
||||||
|
return consolidated
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error aggregating across exchanges: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
def create_consolidated_heatmap(self, symbol: str,
|
||||||
|
orderbooks: List[OrderBookSnapshot]) -> HeatmapData:
|
||||||
|
"""
|
||||||
|
Create consolidated heatmap from multiple exchanges.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
orderbooks: List of order book snapshots
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
HeatmapData: Consolidated heatmap data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
# Create price buckets for each exchange
|
||||||
|
all_buckets = []
|
||||||
|
for orderbook in orderbooks:
|
||||||
|
buckets = self.price_bucketer.create_price_buckets(orderbook)
|
||||||
|
all_buckets.append(buckets)
|
||||||
|
|
||||||
|
# Aggregate all buckets
|
||||||
|
if len(all_buckets) == 1:
|
||||||
|
consolidated_buckets = all_buckets[0]
|
||||||
|
else:
|
||||||
|
consolidated_buckets = self.price_bucketer.aggregate_buckets(all_buckets)
|
||||||
|
|
||||||
|
# Generate heatmap from consolidated buckets
|
||||||
|
heatmap = self.heatmap_generator.generate_heatmap(consolidated_buckets)
|
||||||
|
|
||||||
|
# Add exchange metadata to heatmap points
|
||||||
|
self._add_exchange_metadata(heatmap, orderbooks)
|
||||||
|
|
||||||
|
logger.debug(f"Created consolidated heatmap for {symbol} from {len(orderbooks)} exchanges")
|
||||||
|
return heatmap
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error creating consolidated heatmap: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
def _calculate_weighted_mid_price(self, orderbooks: List[OrderBookSnapshot]) -> float:
|
||||||
|
"""Calculate volume-weighted mid price across exchanges"""
|
||||||
|
total_weight = 0.0
|
||||||
|
weighted_sum = 0.0
|
||||||
|
|
||||||
|
for orderbook in orderbooks:
|
||||||
|
if orderbook.mid_price:
|
||||||
|
# Use total volume as weight
|
||||||
|
volume_weight = orderbook.bid_volume + orderbook.ask_volume
|
||||||
|
exchange_weight = self.exchange_weights.get(orderbook.exchange, 0.5)
|
||||||
|
|
||||||
|
# Combined weight
|
||||||
|
weight = volume_weight * exchange_weight
|
||||||
|
|
||||||
|
weighted_sum += orderbook.mid_price * weight
|
||||||
|
total_weight += weight
|
||||||
|
|
||||||
|
return weighted_sum / total_weight if total_weight > 0 else 0.0
|
||||||
|
|
||||||
|
def _consolidate_price_levels(self, level_lists: List[List[PriceLevel]],
|
||||||
|
exchanges: List[str], side: str) -> List[PriceLevel]:
|
||||||
|
"""Consolidate price levels from multiple exchanges"""
|
||||||
|
# Group levels by price bucket
|
||||||
|
price_groups = defaultdict(lambda: {'size': 0.0, 'count': 0, 'exchanges': set()})
|
||||||
|
|
||||||
|
for levels, exchange in zip(level_lists, exchanges):
|
||||||
|
exchange_weight = self.exchange_weights.get(exchange, 0.5)
|
||||||
|
|
||||||
|
for level in levels:
|
||||||
|
# Round price to bucket
|
||||||
|
bucket_price = self.price_bucketer.get_bucket_price(level.price)
|
||||||
|
|
||||||
|
# Add weighted volume
|
||||||
|
weighted_size = level.size * exchange_weight
|
||||||
|
price_groups[bucket_price]['size'] += weighted_size
|
||||||
|
price_groups[bucket_price]['count'] += level.count or 1
|
||||||
|
price_groups[bucket_price]['exchanges'].add(exchange)
|
||||||
|
|
||||||
|
# Create consolidated price levels
|
||||||
|
consolidated_levels = []
|
||||||
|
for price, data in price_groups.items():
|
||||||
|
if data['size'] > 0: # Only include non-zero volumes
|
||||||
|
level = PriceLevel(
|
||||||
|
price=price,
|
||||||
|
size=data['size'],
|
||||||
|
count=data['count']
|
||||||
|
)
|
||||||
|
consolidated_levels.append(level)
|
||||||
|
|
||||||
|
# Sort levels appropriately
|
||||||
|
if side == 'bid':
|
||||||
|
consolidated_levels.sort(key=lambda x: x.price, reverse=True)
|
||||||
|
else:
|
||||||
|
consolidated_levels.sort(key=lambda x: x.price)
|
||||||
|
|
||||||
|
return consolidated_levels
|
||||||
|
|
||||||
|
def _add_exchange_metadata(self, heatmap: HeatmapData,
|
||||||
|
orderbooks: List[OrderBookSnapshot]) -> None:
|
||||||
|
"""Add exchange metadata to heatmap points"""
|
||||||
|
# Create exchange mapping by price bucket
|
||||||
|
exchange_map = defaultdict(set)
|
||||||
|
|
||||||
|
for orderbook in orderbooks:
|
||||||
|
# Map bid prices to exchanges
|
||||||
|
for bid in orderbook.bids:
|
||||||
|
bucket_price = self.price_bucketer.get_bucket_price(bid.price)
|
||||||
|
exchange_map[bucket_price].add(orderbook.exchange)
|
||||||
|
|
||||||
|
# Map ask prices to exchanges
|
||||||
|
for ask in orderbook.asks:
|
||||||
|
bucket_price = self.price_bucketer.get_bucket_price(ask.price)
|
||||||
|
exchange_map[bucket_price].add(orderbook.exchange)
|
||||||
|
|
||||||
|
# Add exchange information to heatmap points
|
||||||
|
for point in heatmap.data:
|
||||||
|
bucket_price = self.price_bucketer.get_bucket_price(point.price)
|
||||||
|
# Store exchange info in a custom attribute (would need to extend HeatmapPoint)
|
||||||
|
# For now, we'll log it
|
||||||
|
exchanges_at_price = exchange_map.get(bucket_price, set())
|
||||||
|
if len(exchanges_at_price) > 1:
|
||||||
|
logger.debug(f"Price {point.price} has data from {len(exchanges_at_price)} exchanges")
|
||||||
|
|
||||||
|
def calculate_exchange_dominance(self, orderbooks: List[OrderBookSnapshot]) -> Dict[str, float]:
|
||||||
|
"""
|
||||||
|
Calculate which exchanges dominate at different price levels.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
orderbooks: List of order book snapshots
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict[str, float]: Exchange dominance scores
|
||||||
|
"""
|
||||||
|
exchange_volumes = defaultdict(float)
|
||||||
|
total_volume = 0.0
|
||||||
|
|
||||||
|
for orderbook in orderbooks:
|
||||||
|
volume = orderbook.bid_volume + orderbook.ask_volume
|
||||||
|
exchange_volumes[orderbook.exchange] += volume
|
||||||
|
total_volume += volume
|
||||||
|
|
||||||
|
# Calculate dominance percentages
|
||||||
|
dominance = {}
|
||||||
|
for exchange, volume in exchange_volumes.items():
|
||||||
|
dominance[exchange] = (volume / total_volume * 100) if total_volume > 0 else 0.0
|
||||||
|
|
||||||
|
return dominance
|
||||||
|
|
||||||
|
def detect_arbitrage_opportunities(self, orderbooks: List[OrderBookSnapshot],
|
||||||
|
min_spread_pct: float = 0.1) -> List[Dict]:
|
||||||
|
"""
|
||||||
|
Detect potential arbitrage opportunities between exchanges.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
orderbooks: List of order book snapshots
|
||||||
|
min_spread_pct: Minimum spread percentage to consider
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[Dict]: Arbitrage opportunities
|
||||||
|
"""
|
||||||
|
opportunities = []
|
||||||
|
|
||||||
|
if len(orderbooks) < 2:
|
||||||
|
return opportunities
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Find best bid and ask across exchanges
|
||||||
|
best_bids = []
|
||||||
|
best_asks = []
|
||||||
|
|
||||||
|
for orderbook in orderbooks:
|
||||||
|
if orderbook.bids and orderbook.asks:
|
||||||
|
best_bids.append({
|
||||||
|
'exchange': orderbook.exchange,
|
||||||
|
'price': orderbook.bids[0].price,
|
||||||
|
'size': orderbook.bids[0].size
|
||||||
|
})
|
||||||
|
best_asks.append({
|
||||||
|
'exchange': orderbook.exchange,
|
||||||
|
'price': orderbook.asks[0].price,
|
||||||
|
'size': orderbook.asks[0].size
|
||||||
|
})
|
||||||
|
|
||||||
|
# Sort to find best opportunities
|
||||||
|
best_bids.sort(key=lambda x: x['price'], reverse=True)
|
||||||
|
best_asks.sort(key=lambda x: x['price'])
|
||||||
|
|
||||||
|
# Check for arbitrage opportunities
|
||||||
|
for bid in best_bids:
|
||||||
|
for ask in best_asks:
|
||||||
|
if bid['exchange'] != ask['exchange'] and bid['price'] > ask['price']:
|
||||||
|
spread = bid['price'] - ask['price']
|
||||||
|
spread_pct = (spread / ask['price']) * 100
|
||||||
|
|
||||||
|
if spread_pct >= min_spread_pct:
|
||||||
|
opportunities.append({
|
||||||
|
'buy_exchange': ask['exchange'],
|
||||||
|
'sell_exchange': bid['exchange'],
|
||||||
|
'buy_price': ask['price'],
|
||||||
|
'sell_price': bid['price'],
|
||||||
|
'spread': spread,
|
||||||
|
'spread_percentage': spread_pct,
|
||||||
|
'max_size': min(bid['size'], ask['size'])
|
||||||
|
})
|
||||||
|
|
||||||
|
# Sort by spread percentage
|
||||||
|
opportunities.sort(key=lambda x: x['spread_percentage'], reverse=True)
|
||||||
|
|
||||||
|
if opportunities:
|
||||||
|
logger.info(f"Found {len(opportunities)} arbitrage opportunities")
|
||||||
|
|
||||||
|
return opportunities
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error detecting arbitrage opportunities: {e}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
def get_exchange_correlation(self, orderbooks: List[OrderBookSnapshot]) -> Dict[str, Dict[str, float]]:
|
||||||
|
"""
|
||||||
|
Calculate price correlation between exchanges.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
orderbooks: List of order book snapshots
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict: Correlation matrix between exchanges
|
||||||
|
"""
|
||||||
|
correlations = {}
|
||||||
|
|
||||||
|
# Extract mid prices by exchange
|
||||||
|
exchange_prices = {}
|
||||||
|
for orderbook in orderbooks:
|
||||||
|
if orderbook.mid_price:
|
||||||
|
exchange_prices[orderbook.exchange] = orderbook.mid_price
|
||||||
|
|
||||||
|
# Calculate simple correlation (would need historical data for proper correlation)
|
||||||
|
exchanges = list(exchange_prices.keys())
|
||||||
|
for i, exchange1 in enumerate(exchanges):
|
||||||
|
correlations[exchange1] = {}
|
||||||
|
for j, exchange2 in enumerate(exchanges):
|
||||||
|
if i == j:
|
||||||
|
correlations[exchange1][exchange2] = 1.0
|
||||||
|
else:
|
||||||
|
# Simple price difference as correlation proxy
|
||||||
|
price1 = exchange_prices[exchange1]
|
||||||
|
price2 = exchange_prices[exchange2]
|
||||||
|
diff_pct = abs(price1 - price2) / max(price1, price2) * 100
|
||||||
|
# Convert to correlation-like score (lower difference = higher correlation)
|
||||||
|
correlation = max(0.0, 1.0 - (diff_pct / 10.0))
|
||||||
|
correlations[exchange1][exchange2] = correlation
|
||||||
|
|
||||||
|
return correlations
|
||||||
|
|
||||||
|
def get_processing_stats(self) -> Dict[str, int]:
|
||||||
|
"""Get processing statistics"""
|
||||||
|
return {
|
||||||
|
'consolidations_performed': self.consolidations_performed,
|
||||||
|
'unique_exchanges_processed': len(self.exchanges_processed),
|
||||||
|
'exchanges_processed': list(self.exchanges_processed),
|
||||||
|
'bucketer_stats': self.price_bucketer.get_processing_stats(),
|
||||||
|
'heatmap_stats': self.heatmap_generator.get_processing_stats()
|
||||||
|
}
|
||||||
|
|
||||||
|
def update_exchange_weights(self, new_weights: Dict[str, float]) -> None:
|
||||||
|
"""Update exchange weights for aggregation"""
|
||||||
|
self.exchange_weights.update(new_weights)
|
||||||
|
logger.info(f"Updated exchange weights: {new_weights}")
|
||||||
|
|
||||||
|
def reset_stats(self) -> None:
|
||||||
|
"""Reset processing statistics"""
|
||||||
|
self.consolidations_performed = 0
|
||||||
|
self.exchanges_processed.clear()
|
||||||
|
self.price_bucketer.reset_stats()
|
||||||
|
self.heatmap_generator.reset_stats()
|
||||||
|
logger.info("Cross-exchange aggregator statistics reset")
|
||||||
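The spread check in detect_arbitrage_opportunities above reduces to a few lines of arithmetic. A minimal, self-contained sketch with made-up exchange names and prices (the dict fields mirror the entries built in that method):

best_bid = {'exchange': 'binance', 'price': 50060.0, 'size': 0.8}    # highest bid found across exchanges
best_ask = {'exchange': 'coinbase', 'price': 49990.0, 'size': 0.5}   # lowest ask found across exchanges

spread = best_bid['price'] - best_ask['price']                        # 70.0
spread_pct = (spread / best_ask['price']) * 100                       # ~0.14%
if spread_pct >= 0.1:                                                 # default min_spread_pct
    max_size = min(best_bid['size'], best_ask['size'])
    print(f"buy on {best_ask['exchange']}, sell on {best_bid['exchange']}, size {max_size}")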
376
COBY/aggregation/heatmap_generator.py
Normal file
@@ -0,0 +1,376 @@
"""
Heatmap data generation from price buckets.
"""

from typing import List, Dict, Optional, Tuple
from ..models.core import PriceBuckets, HeatmapData, HeatmapPoint
from ..config import config
from ..utils.logging import get_logger

logger = get_logger(__name__)


class HeatmapGenerator:
    """
    Generates heatmap visualization data from price buckets.

    Creates intensity-based heatmap points for visualization.
    """

    def __init__(self):
        """Initialize heatmap generator"""
        self.heatmaps_generated = 0
        self.total_points_created = 0

        logger.info("Heatmap generator initialized")

    def generate_heatmap(self, buckets: PriceBuckets,
                         max_points: Optional[int] = None) -> HeatmapData:
        """
        Generate heatmap data from price buckets.

        Args:
            buckets: Price buckets to convert
            max_points: Maximum number of points to include (None = all)

        Returns:
            HeatmapData: Heatmap visualization data
        """
        try:
            heatmap = HeatmapData(
                symbol=buckets.symbol,
                timestamp=buckets.timestamp,
                bucket_size=buckets.bucket_size
            )

            # Calculate maximum volume for intensity normalization
            all_volumes = list(buckets.bid_buckets.values()) + list(buckets.ask_buckets.values())
            max_volume = max(all_volumes) if all_volumes else 1.0

            # Generate bid points
            bid_points = self._create_heatmap_points(
                buckets.bid_buckets, 'bid', max_volume
            )

            # Generate ask points
            ask_points = self._create_heatmap_points(
                buckets.ask_buckets, 'ask', max_volume
            )

            # Combine all points
            all_points = bid_points + ask_points

            # Limit points if requested
            if max_points and len(all_points) > max_points:
                # Sort by volume and take top points
                all_points.sort(key=lambda p: p.volume, reverse=True)
                all_points = all_points[:max_points]

            heatmap.data = all_points

            self.heatmaps_generated += 1
            self.total_points_created += len(all_points)

            logger.debug(
                f"Generated heatmap for {buckets.symbol}: {len(all_points)} points "
                f"(max_volume: {max_volume:.6f})"
            )

            return heatmap

        except Exception as e:
            logger.error(f"Error generating heatmap: {e}")
            raise

    def _create_heatmap_points(self, bucket_dict: Dict[float, float],
                               side: str, max_volume: float) -> List[HeatmapPoint]:
        """
        Create heatmap points from bucket dictionary.

        Args:
            bucket_dict: Dictionary of price -> volume
            side: 'bid' or 'ask'
            max_volume: Maximum volume for intensity calculation

        Returns:
            List[HeatmapPoint]: List of heatmap points
        """
        points = []

        for price, volume in bucket_dict.items():
            if volume > 0:  # Only include non-zero volumes
                intensity = min(volume / max_volume, 1.0) if max_volume > 0 else 0.0

                point = HeatmapPoint(
                    price=price,
                    volume=volume,
                    intensity=intensity,
                    side=side
                )
                points.append(point)

        return points

    def apply_smoothing(self, heatmap: HeatmapData,
                        smoothing_factor: float = 0.3) -> HeatmapData:
        """
        Apply smoothing to heatmap data to reduce noise.

        Args:
            heatmap: Original heatmap data
            smoothing_factor: Smoothing factor (0.0 = no smoothing, 1.0 = maximum)

        Returns:
            HeatmapData: Smoothed heatmap data
        """
        if smoothing_factor <= 0:
            return heatmap

        try:
            smoothed = HeatmapData(
                symbol=heatmap.symbol,
                timestamp=heatmap.timestamp,
                bucket_size=heatmap.bucket_size
            )

            # Separate bids and asks
            bids = [p for p in heatmap.data if p.side == 'bid']
            asks = [p for p in heatmap.data if p.side == 'ask']

            # Apply smoothing to each side
            smoothed_bids = self._smooth_points(bids, smoothing_factor)
            smoothed_asks = self._smooth_points(asks, smoothing_factor)

            smoothed.data = smoothed_bids + smoothed_asks

            logger.debug(f"Applied smoothing with factor {smoothing_factor}")
            return smoothed

        except Exception as e:
            logger.error(f"Error applying smoothing: {e}")
            return heatmap  # Return original on error

    def _smooth_points(self, points: List[HeatmapPoint],
                       smoothing_factor: float) -> List[HeatmapPoint]:
        """
        Apply smoothing to a list of heatmap points.

        Args:
            points: Points to smooth
            smoothing_factor: Smoothing factor

        Returns:
            List[HeatmapPoint]: Smoothed points
        """
        if len(points) < 3:
            return points

        # Sort points by price
        sorted_points = sorted(points, key=lambda p: p.price)
        smoothed_points = []

        for i, point in enumerate(sorted_points):
            # Calculate weighted average with neighbors
            total_weight = 1.0
            weighted_volume = point.volume
            weighted_intensity = point.intensity

            # Add left neighbor
            if i > 0:
                left_point = sorted_points[i - 1]
                weight = smoothing_factor
                total_weight += weight
                weighted_volume += left_point.volume * weight
                weighted_intensity += left_point.intensity * weight

            # Add right neighbor
            if i < len(sorted_points) - 1:
                right_point = sorted_points[i + 1]
                weight = smoothing_factor
                total_weight += weight
                weighted_volume += right_point.volume * weight
                weighted_intensity += right_point.intensity * weight

            # Create smoothed point
            smoothed_point = HeatmapPoint(
                price=point.price,
                volume=weighted_volume / total_weight,
                intensity=min(weighted_intensity / total_weight, 1.0),
                side=point.side
            )
            smoothed_points.append(smoothed_point)

        return smoothed_points

    def filter_by_intensity(self, heatmap: HeatmapData,
                            min_intensity: float = 0.1) -> HeatmapData:
        """
        Filter heatmap points by minimum intensity.

        Args:
            heatmap: Original heatmap data
            min_intensity: Minimum intensity threshold

        Returns:
            HeatmapData: Filtered heatmap data
        """
        filtered = HeatmapData(
            symbol=heatmap.symbol,
            timestamp=heatmap.timestamp,
            bucket_size=heatmap.bucket_size
        )

        # Filter points by intensity
        filtered.data = [
            point for point in heatmap.data
            if point.intensity >= min_intensity
        ]

        logger.debug(
            f"Filtered heatmap: {len(heatmap.data)} -> {len(filtered.data)} points "
            f"(min_intensity: {min_intensity})"
        )

        return filtered

    def get_price_levels(self, heatmap: HeatmapData,
                         side: str = None) -> List[float]:
        """
        Get sorted list of price levels from heatmap.

        Args:
            heatmap: Heatmap data
            side: 'bid', 'ask', or None for both

        Returns:
            List[float]: Sorted price levels
        """
        if side:
            points = [p for p in heatmap.data if p.side == side]
        else:
            points = heatmap.data

        prices = [p.price for p in points]
        return sorted(prices)

    def get_volume_profile(self, heatmap: HeatmapData) -> Dict[str, List[Tuple[float, float]]]:
        """
        Get volume profile from heatmap data.

        Args:
            heatmap: Heatmap data

        Returns:
            Dict: Volume profile with 'bids' and 'asks' as (price, volume) tuples
        """
        profile = {'bids': [], 'asks': []}

        # Extract bid profile
        bid_points = [p for p in heatmap.data if p.side == 'bid']
        profile['bids'] = [(p.price, p.volume) for p in bid_points]
        profile['bids'].sort(key=lambda x: x[0], reverse=True)  # Highest price first

        # Extract ask profile
        ask_points = [p for p in heatmap.data if p.side == 'ask']
        profile['asks'] = [(p.price, p.volume) for p in ask_points]
        profile['asks'].sort(key=lambda x: x[0])  # Lowest price first

        return profile

    def calculate_support_resistance(self, heatmap: HeatmapData,
                                     threshold: float = 0.7) -> Dict[str, List[float]]:
        """
        Identify potential support and resistance levels from heatmap.

        Args:
            heatmap: Heatmap data
            threshold: Intensity threshold for significant levels

        Returns:
            Dict: Support and resistance levels
        """
        levels = {'support': [], 'resistance': []}

        # Find high-intensity bid levels (potential support)
        bid_points = [p for p in heatmap.data if p.side == 'bid' and p.intensity >= threshold]
        levels['support'] = sorted([p.price for p in bid_points], reverse=True)

        # Find high-intensity ask levels (potential resistance)
        ask_points = [p for p in heatmap.data if p.side == 'ask' and p.intensity >= threshold]
        levels['resistance'] = sorted([p.price for p in ask_points])

        logger.debug(
            f"Identified {len(levels['support'])} support and "
            f"{len(levels['resistance'])} resistance levels"
        )

        return levels

    def get_heatmap_summary(self, heatmap: HeatmapData) -> Dict[str, float]:
        """
        Get summary statistics for heatmap data.

        Args:
            heatmap: Heatmap data

        Returns:
            Dict: Summary statistics
        """
        if not heatmap.data:
            return {}

        # Separate bids and asks
        bids = [p for p in heatmap.data if p.side == 'bid']
        asks = [p for p in heatmap.data if p.side == 'ask']

        summary = {
            'total_points': len(heatmap.data),
            'bid_points': len(bids),
            'ask_points': len(asks),
            'total_volume': sum(p.volume for p in heatmap.data),
            'bid_volume': sum(p.volume for p in bids),
            'ask_volume': sum(p.volume for p in asks),
            'max_intensity': max(p.intensity for p in heatmap.data),
            'avg_intensity': sum(p.intensity for p in heatmap.data) / len(heatmap.data),
            'price_range': 0.0,
            'best_bid': 0.0,
            'best_ask': 0.0
        }

        # Calculate price range
        all_prices = [p.price for p in heatmap.data]
        if all_prices:
            summary['price_range'] = max(all_prices) - min(all_prices)

        # Calculate best bid and ask
        if bids:
            summary['best_bid'] = max(p.price for p in bids)
        if asks:
            summary['best_ask'] = min(p.price for p in asks)

        # Calculate volume imbalance
        total_volume = summary['total_volume']
        if total_volume > 0:
            summary['volume_imbalance'] = (
                (summary['bid_volume'] - summary['ask_volume']) / total_volume
            )
        else:
            summary['volume_imbalance'] = 0.0

        return summary

    def get_processing_stats(self) -> Dict[str, int]:
        """Get processing statistics"""
        return {
            'heatmaps_generated': self.heatmaps_generated,
            'total_points_created': self.total_points_created,
            'avg_points_per_heatmap': (
                self.total_points_created // max(self.heatmaps_generated, 1)
            )
        }

    def reset_stats(self) -> None:
        """Reset processing statistics"""
        self.heatmaps_generated = 0
        self.total_points_created = 0
        logger.info("Heatmap generator statistics reset")
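For reference, the neighbour-weighted average in _smooth_points works out as follows; a self-contained sketch with made-up volumes:

volumes = [10.0, 40.0, 20.0]      # three adjacent heatmap points, made-up numbers
factor = 0.3                      # smoothing_factor
# middle point: own volume at weight 1.0 plus each neighbour at weight 0.3
smoothed_mid = (40.0 + factor * 10.0 + factor * 20.0) / (1.0 + 2 * factor)
print(smoothed_mid)               # 30.625 -> the 40.0 spike is pulled toward its neighbours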
353
COBY/aggregation/price_bucketer.py
Normal file
@@ -0,0 +1,353 @@
"""
Price bucketing system for order book aggregation.
"""

import math
from typing import Dict, List, Tuple, Optional
from collections import defaultdict
from ..models.core import OrderBookSnapshot, PriceBuckets, PriceLevel
from ..config import config
from ..utils.logging import get_logger
from ..utils.validation import validate_price, validate_volume

logger = get_logger(__name__)


class PriceBucketer:
    """
    Converts order book data into price buckets for heatmap visualization.

    Uses universal $1 USD buckets for all symbols to simplify logic.
    """

    def __init__(self, bucket_size: float = None):
        """
        Initialize price bucketer.

        Args:
            bucket_size: Size of price buckets in USD (defaults to config value)
        """
        self.bucket_size = bucket_size or config.get_bucket_size()

        # Statistics
        self.buckets_created = 0
        self.total_volume_processed = 0.0

        logger.info(f"Price bucketer initialized with ${self.bucket_size} buckets")

    def create_price_buckets(self, orderbook: OrderBookSnapshot) -> PriceBuckets:
        """
        Convert order book data to price buckets.

        Args:
            orderbook: Order book snapshot

        Returns:
            PriceBuckets: Aggregated price bucket data
        """
        try:
            # Create price buckets object
            buckets = PriceBuckets(
                symbol=orderbook.symbol,
                timestamp=orderbook.timestamp,
                bucket_size=self.bucket_size
            )

            # Process bids (aggregate into buckets)
            for bid in orderbook.bids:
                if validate_price(bid.price) and validate_volume(bid.size):
                    buckets.add_bid(bid.price, bid.size)
                    self.total_volume_processed += bid.size

            # Process asks (aggregate into buckets)
            for ask in orderbook.asks:
                if validate_price(ask.price) and validate_volume(ask.size):
                    buckets.add_ask(ask.price, ask.size)
                    self.total_volume_processed += ask.size

            self.buckets_created += 1

            logger.debug(
                f"Created price buckets for {orderbook.symbol}: "
                f"{len(buckets.bid_buckets)} bid buckets, {len(buckets.ask_buckets)} ask buckets"
            )

            return buckets

        except Exception as e:
            logger.error(f"Error creating price buckets: {e}")
            raise

    def aggregate_buckets(self, bucket_list: List[PriceBuckets]) -> PriceBuckets:
        """
        Aggregate multiple price buckets into a single bucket set.

        Args:
            bucket_list: List of price buckets to aggregate

        Returns:
            PriceBuckets: Aggregated buckets
        """
        if not bucket_list:
            raise ValueError("Cannot aggregate empty bucket list")

        # Use first bucket as template
        first_bucket = bucket_list[0]
        aggregated = PriceBuckets(
            symbol=first_bucket.symbol,
            timestamp=first_bucket.timestamp,
            bucket_size=self.bucket_size
        )

        # Aggregate all bid buckets
        for buckets in bucket_list:
            for price, volume in buckets.bid_buckets.items():
                bucket_price = aggregated.get_bucket_price(price)
                aggregated.bid_buckets[bucket_price] = (
                    aggregated.bid_buckets.get(bucket_price, 0) + volume
                )

        # Aggregate all ask buckets
        for buckets in bucket_list:
            for price, volume in buckets.ask_buckets.items():
                bucket_price = aggregated.get_bucket_price(price)
                aggregated.ask_buckets[bucket_price] = (
                    aggregated.ask_buckets.get(bucket_price, 0) + volume
                )

        logger.debug(f"Aggregated {len(bucket_list)} bucket sets")
        return aggregated

    def get_bucket_price(self, price: float) -> float:
        """
        Get the bucket price for a given price.

        Args:
            price: Original price

        Returns:
            float: Bucket price (rounded to bucket boundaries)
        """
        return math.floor(price / self.bucket_size) * self.bucket_size

    def get_bucket_range(self, center_price: float, depth: int) -> Tuple[float, float]:
        """
        Get price range for buckets around a center price.

        Args:
            center_price: Center price for the range
            depth: Number of buckets on each side

        Returns:
            Tuple[float, float]: (min_price, max_price)
        """
        half_range = depth * self.bucket_size
        min_price = center_price - half_range
        max_price = center_price + half_range

        return (max(0, min_price), max_price)

    def filter_buckets_by_range(self, buckets: PriceBuckets,
                                min_price: float, max_price: float) -> PriceBuckets:
        """
        Filter buckets to only include those within a price range.

        Args:
            buckets: Original price buckets
            min_price: Minimum price to include
            max_price: Maximum price to include

        Returns:
            PriceBuckets: Filtered buckets
        """
        filtered = PriceBuckets(
            symbol=buckets.symbol,
            timestamp=buckets.timestamp,
            bucket_size=buckets.bucket_size
        )

        # Filter bid buckets
        for price, volume in buckets.bid_buckets.items():
            if min_price <= price <= max_price:
                filtered.bid_buckets[price] = volume

        # Filter ask buckets
        for price, volume in buckets.ask_buckets.items():
            if min_price <= price <= max_price:
                filtered.ask_buckets[price] = volume

        return filtered

    def get_top_buckets(self, buckets: PriceBuckets, count: int) -> PriceBuckets:
        """
        Get top N buckets by volume.

        Args:
            buckets: Original price buckets
            count: Number of top buckets to return

        Returns:
            PriceBuckets: Top buckets by volume
        """
        top_buckets = PriceBuckets(
            symbol=buckets.symbol,
            timestamp=buckets.timestamp,
            bucket_size=buckets.bucket_size
        )

        # Get top bid buckets
        top_bids = sorted(
            buckets.bid_buckets.items(),
            key=lambda x: x[1],  # Sort by volume
            reverse=True
        )[:count]

        for price, volume in top_bids:
            top_buckets.bid_buckets[price] = volume

        # Get top ask buckets
        top_asks = sorted(
            buckets.ask_buckets.items(),
            key=lambda x: x[1],  # Sort by volume
            reverse=True
        )[:count]

        for price, volume in top_asks:
            top_buckets.ask_buckets[price] = volume

        return top_buckets

    def calculate_bucket_statistics(self, buckets: PriceBuckets) -> Dict[str, float]:
        """
        Calculate statistics for price buckets.

        Args:
            buckets: Price buckets to analyze

        Returns:
            Dict[str, float]: Bucket statistics
        """
        stats = {
            'total_bid_buckets': len(buckets.bid_buckets),
            'total_ask_buckets': len(buckets.ask_buckets),
            'total_bid_volume': sum(buckets.bid_buckets.values()),
            'total_ask_volume': sum(buckets.ask_buckets.values()),
            'bid_price_range': 0.0,
            'ask_price_range': 0.0,
            'max_bid_volume': 0.0,
            'max_ask_volume': 0.0,
            'avg_bid_volume': 0.0,
            'avg_ask_volume': 0.0
        }

        # Calculate bid statistics
        if buckets.bid_buckets:
            bid_prices = list(buckets.bid_buckets.keys())
            bid_volumes = list(buckets.bid_buckets.values())

            stats['bid_price_range'] = max(bid_prices) - min(bid_prices)
            stats['max_bid_volume'] = max(bid_volumes)
            stats['avg_bid_volume'] = sum(bid_volumes) / len(bid_volumes)

        # Calculate ask statistics
        if buckets.ask_buckets:
            ask_prices = list(buckets.ask_buckets.keys())
            ask_volumes = list(buckets.ask_buckets.values())

            stats['ask_price_range'] = max(ask_prices) - min(ask_prices)
            stats['max_ask_volume'] = max(ask_volumes)
            stats['avg_ask_volume'] = sum(ask_volumes) / len(ask_volumes)

        # Calculate combined statistics
        stats['total_volume'] = stats['total_bid_volume'] + stats['total_ask_volume']
        stats['volume_imbalance'] = (
            (stats['total_bid_volume'] - stats['total_ask_volume']) /
            max(stats['total_volume'], 1e-10)
        )

        return stats

    def merge_adjacent_buckets(self, buckets: PriceBuckets, merge_factor: int = 2) -> PriceBuckets:
        """
        Merge adjacent buckets to create larger bucket sizes.

        Args:
            buckets: Original price buckets
            merge_factor: Number of adjacent buckets to merge

        Returns:
            PriceBuckets: Merged buckets with larger bucket size
        """
        merged = PriceBuckets(
            symbol=buckets.symbol,
            timestamp=buckets.timestamp,
            bucket_size=buckets.bucket_size * merge_factor
        )

        # Merge bid buckets
        bid_groups = defaultdict(float)
        for price, volume in buckets.bid_buckets.items():
            # Calculate new bucket price
            new_bucket_price = merged.get_bucket_price(price)
            bid_groups[new_bucket_price] += volume

        merged.bid_buckets = dict(bid_groups)

        # Merge ask buckets
        ask_groups = defaultdict(float)
        for price, volume in buckets.ask_buckets.items():
            # Calculate new bucket price
            new_bucket_price = merged.get_bucket_price(price)
            ask_groups[new_bucket_price] += volume

        merged.ask_buckets = dict(ask_groups)

        logger.debug(f"Merged buckets with factor {merge_factor}")
        return merged

    def get_bucket_depth_profile(self, buckets: PriceBuckets,
                                 center_price: float) -> Dict[str, List[Tuple[float, float]]]:
        """
        Get depth profile showing volume at different distances from center price.

        Args:
            buckets: Price buckets
            center_price: Center price for depth calculation

        Returns:
            Dict: Depth profile with 'bids' and 'asks' lists of (distance, volume) tuples
        """
        profile = {'bids': [], 'asks': []}

        # Calculate bid depth profile
        for price, volume in buckets.bid_buckets.items():
            distance = abs(center_price - price)
            profile['bids'].append((distance, volume))

        # Calculate ask depth profile
        for price, volume in buckets.ask_buckets.items():
            distance = abs(price - center_price)
            profile['asks'].append((distance, volume))

        # Sort by distance
        profile['bids'].sort(key=lambda x: x[0])
        profile['asks'].sort(key=lambda x: x[0])

        return profile

    def get_processing_stats(self) -> Dict[str, float]:
        """Get processing statistics"""
        return {
            'bucket_size': self.bucket_size,
            'buckets_created': self.buckets_created,
            'total_volume_processed': self.total_volume_processed,
            'avg_volume_per_bucket': (
                self.total_volume_processed / max(self.buckets_created, 1)
            )
        }

    def reset_stats(self) -> None:
        """Reset processing statistics"""
        self.buckets_created = 0
        self.total_volume_processed = 0.0
        logger.info("Price bucketer statistics reset")
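The $1 bucketing in get_bucket_price is plain floor arithmetic; a quick self-contained sketch with made-up prices:

import math

bucket_size = 1.0                                   # the universal $1 bucket described above
for price in (50012.37, 50012.99, 50013.01):
    print(math.floor(price / bucket_size) * bucket_size)
# 50012.0, 50012.0, 50013.0 -> both 50012.xx quotes fall into the same bucket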
9
COBY/api/__init__.py
Normal file
@@ -0,0 +1,9 @@
"""
API layer for the COBY system.
"""

from .rest_api import create_app

__all__ = [
    'create_app'
]
97
COBY/api/rate_limiter.py
Normal file
@@ -0,0 +1,97 @@
"""
Simple rate limiter for API requests.
"""

import time
from collections import defaultdict
from typing import Dict


class RateLimiter:
    """Simple rate limiter implementation"""

    def __init__(self, requests_per_minute: int = 100, burst_size: int = 20):
        self.requests_per_minute = requests_per_minute
        self.burst_size = burst_size
        self.requests: Dict[str, list] = defaultdict(list)

    def is_allowed(self, client_id: str) -> bool:
        """Check if request is allowed for client"""
        now = time.time()
        minute_ago = now - 60

        # Clean old requests
        self.requests[client_id] = [
            req_time for req_time in self.requests[client_id]
            if req_time > minute_ago
        ]

        # Check rate limit
        if len(self.requests[client_id]) >= self.requests_per_minute:
            return False

        # Add current request
        self.requests[client_id].append(now)
        return True

    def get_client_stats(self, client_id: str) -> Dict:
        """Get rate limiting stats for a specific client"""
        now = time.time()
        minute_ago = now - 60

        # Clean old requests
        self.requests[client_id] = [
            req_time for req_time in self.requests[client_id]
            if req_time > minute_ago
        ]

        current_requests = len(self.requests[client_id])
        remaining_tokens = max(0, self.requests_per_minute - current_requests)

        # Calculate reset time (next minute boundary)
        reset_time = int(now) + (60 - int(now) % 60)

        return {
            'client_id': client_id,
            'current_requests': current_requests,
            'remaining_tokens': remaining_tokens,
            'requests_per_minute': self.requests_per_minute,
            'reset_time': reset_time,
            'window_start': minute_ago,
            'window_end': now
        }

    def get_global_stats(self) -> Dict:
        """Get global rate limiting statistics"""
        now = time.time()
        minute_ago = now - 60

        total_clients = len(self.requests)
        total_requests = 0
        active_clients = 0

        for client_id in list(self.requests.keys()):
            # Clean old requests
            self.requests[client_id] = [
                req_time for req_time in self.requests[client_id]
                if req_time > minute_ago
            ]

            client_requests = len(self.requests[client_id])
            total_requests += client_requests

            if client_requests > 0:
                active_clients += 1

            # Remove clients with no recent requests
            if client_requests == 0:
                del self.requests[client_id]

        return {
            'total_clients': total_clients,
            'active_clients': active_clients,
            'total_requests_last_minute': total_requests,
            'requests_per_minute_limit': self.requests_per_minute,
            'burst_size': self.burst_size,
            'window_duration': 60
        }
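A minimal usage sketch for the RateLimiter above (the client id and limits are arbitrary):

limiter = RateLimiter(requests_per_minute=5, burst_size=5)
results = [limiter.is_allowed("client-1") for _ in range(6)]
print(results)                                                    # [True, True, True, True, True, False]
print(limiter.get_client_stats("client-1")["remaining_tokens"])   # 0 until the 60-second window rolls over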
306
COBY/api/replay_api.py
Normal file
@@ -0,0 +1,306 @@
"""
REST API endpoints for historical data replay functionality.
"""

from fastapi import APIRouter, HTTPException, Query, Path
from typing import Optional, List, Dict, Any
from datetime import datetime
from pydantic import BaseModel, Field

from ..replay.replay_manager import HistoricalReplayManager
from ..models.core import ReplayStatus
from ..utils.logging import get_logger, set_correlation_id
from ..utils.exceptions import ReplayError, ValidationError

logger = get_logger(__name__)


class CreateReplayRequest(BaseModel):
    """Request model for creating replay session"""
    start_time: datetime = Field(..., description="Replay start time")
    end_time: datetime = Field(..., description="Replay end time")
    speed: float = Field(1.0, gt=0, le=100, description="Playback speed multiplier")
    symbols: Optional[List[str]] = Field(None, description="Symbols to replay")
    exchanges: Optional[List[str]] = Field(None, description="Exchanges to replay")


class ReplayControlRequest(BaseModel):
    """Request model for replay control operations"""
    action: str = Field(..., description="Control action: start, pause, resume, stop")


class SeekRequest(BaseModel):
    """Request model for seeking in replay"""
    timestamp: datetime = Field(..., description="Target timestamp")


class SpeedRequest(BaseModel):
    """Request model for changing replay speed"""
    speed: float = Field(..., gt=0, le=100, description="New playback speed")


def create_replay_router(replay_manager: HistoricalReplayManager) -> APIRouter:
    """Create replay API router with endpoints"""

    router = APIRouter(prefix="/replay", tags=["replay"])

    @router.post("/sessions", response_model=Dict[str, str])
    async def create_replay_session(request: CreateReplayRequest):
        """Create a new replay session"""
        try:
            set_correlation_id()

            session_id = replay_manager.create_replay_session(
                start_time=request.start_time,
                end_time=request.end_time,
                speed=request.speed,
                symbols=request.symbols,
                exchanges=request.exchanges
            )

            logger.info(f"Created replay session {session_id}")

            return {
                "session_id": session_id,
                "status": "created",
                "message": "Replay session created successfully"
            }

        except ValidationError as e:
            logger.warning(f"Invalid replay request: {e}")
            raise HTTPException(status_code=400, detail=str(e))
        except ReplayError as e:
            logger.error(f"Replay creation failed: {e}")
            raise HTTPException(status_code=500, detail=str(e))
        except Exception as e:
            logger.error(f"Unexpected error creating replay session: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @router.get("/sessions", response_model=List[Dict[str, Any]])
    async def list_replay_sessions():
        """List all replay sessions"""
        try:
            sessions = replay_manager.list_replay_sessions()

            return [
                {
                    "session_id": session.session_id,
                    "start_time": session.start_time.isoformat(),
                    "end_time": session.end_time.isoformat(),
                    "current_time": session.current_time.isoformat(),
                    "speed": session.speed,
                    "status": session.status.value,
                    "symbols": session.symbols,
                    "exchanges": session.exchanges,
                    "progress": session.progress,
                    "events_replayed": session.events_replayed,
                    "total_events": session.total_events,
                    "created_at": session.created_at.isoformat(),
                    "started_at": session.started_at.isoformat() if session.started_at else None,
                    "error_message": getattr(session, 'error_message', None)
                }
                for session in sessions
            ]

        except Exception as e:
            logger.error(f"Error listing replay sessions: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @router.get("/sessions/{session_id}", response_model=Dict[str, Any])
    async def get_replay_session(session_id: str = Path(..., description="Session ID")):
        """Get replay session details"""
        try:
            session = replay_manager.get_replay_status(session_id)

            if not session:
                raise HTTPException(status_code=404, detail="Session not found")

            return {
                "session_id": session.session_id,
                "start_time": session.start_time.isoformat(),
                "end_time": session.end_time.isoformat(),
                "current_time": session.current_time.isoformat(),
                "speed": session.speed,
                "status": session.status.value,
                "symbols": session.symbols,
                "exchanges": session.exchanges,
                "progress": session.progress,
                "events_replayed": session.events_replayed,
                "total_events": session.total_events,
                "created_at": session.created_at.isoformat(),
                "started_at": session.started_at.isoformat() if session.started_at else None,
                "paused_at": session.paused_at.isoformat() if session.paused_at else None,
                "stopped_at": session.stopped_at.isoformat() if session.stopped_at else None,
                "completed_at": session.completed_at.isoformat() if session.completed_at else None,
                "error_message": getattr(session, 'error_message', None)
            }

        except HTTPException:
            raise
        except Exception as e:
            logger.error(f"Error getting replay session {session_id}: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @router.post("/sessions/{session_id}/control", response_model=Dict[str, str])
    async def control_replay_session(
        session_id: str = Path(..., description="Session ID"),
        request: ReplayControlRequest = None
    ):
        """Control replay session (start, pause, resume, stop)"""
        try:
            set_correlation_id()

            if not request:
                raise HTTPException(status_code=400, detail="Control action required")

            action = request.action.lower()

            if action == "start":
                await replay_manager.start_replay(session_id)
                message = "Replay started"
            elif action == "pause":
                await replay_manager.pause_replay(session_id)
                message = "Replay paused"
            elif action == "resume":
                await replay_manager.resume_replay(session_id)
                message = "Replay resumed"
            elif action == "stop":
                await replay_manager.stop_replay(session_id)
                message = "Replay stopped"
            else:
                raise HTTPException(status_code=400, detail="Invalid action")

            logger.info(f"Replay session {session_id} action: {action}")

            return {
                "session_id": session_id,
                "action": action,
                "message": message
            }

        except ReplayError as e:
            logger.error(f"Replay control failed for {session_id}: {e}")
            raise HTTPException(status_code=400, detail=str(e))
        except HTTPException:
            raise
        except Exception as e:
            logger.error(f"Unexpected error controlling replay {session_id}: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @router.post("/sessions/{session_id}/seek", response_model=Dict[str, str])
    async def seek_replay_session(
        session_id: str = Path(..., description="Session ID"),
        request: SeekRequest = None
    ):
        """Seek to specific timestamp in replay"""
        try:
            if not request:
                raise HTTPException(status_code=400, detail="Timestamp required")

            success = replay_manager.seek_replay(session_id, request.timestamp)

            if not success:
                raise HTTPException(status_code=400, detail="Seek failed")

            logger.info(f"Seeked replay session {session_id} to {request.timestamp}")

            return {
                "session_id": session_id,
                "timestamp": request.timestamp.isoformat(),
                "message": "Seek successful"
            }

        except HTTPException:
            raise
        except Exception as e:
            logger.error(f"Error seeking replay session {session_id}: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @router.post("/sessions/{session_id}/speed", response_model=Dict[str, Any])
    async def set_replay_speed(
        session_id: str = Path(..., description="Session ID"),
        request: SpeedRequest = None
    ):
        """Change replay speed"""
        try:
            if not request:
                raise HTTPException(status_code=400, detail="Speed required")

            success = replay_manager.set_replay_speed(session_id, request.speed)

            if not success:
                raise HTTPException(status_code=400, detail="Speed change failed")

            logger.info(f"Set replay speed to {request.speed}x for session {session_id}")

            return {
                "session_id": session_id,
                "speed": request.speed,
                "message": "Speed changed successfully"
            }

        except HTTPException:
            raise
        except Exception as e:
            logger.error(f"Error setting replay speed for {session_id}: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @router.delete("/sessions/{session_id}", response_model=Dict[str, str])
    async def delete_replay_session(session_id: str = Path(..., description="Session ID")):
        """Delete replay session"""
        try:
            success = replay_manager.delete_replay_session(session_id)

            if not success:
                raise HTTPException(status_code=404, detail="Session not found")

            logger.info(f"Deleted replay session {session_id}")

            return {
                "session_id": session_id,
                "message": "Session deleted successfully"
            }

        except HTTPException:
            raise
        except Exception as e:
            logger.error(f"Error deleting replay session {session_id}: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @router.get("/data-range/{symbol}", response_model=Dict[str, Any])
    async def get_data_range(
        symbol: str = Path(..., description="Trading symbol"),
        exchange: Optional[str] = Query(None, description="Exchange name")
    ):
        """Get available data time range for a symbol"""
        try:
            data_range = await replay_manager.get_available_data_range(symbol, exchange)

            if not data_range:
                raise HTTPException(status_code=404, detail="No data available for symbol")

            return {
                "symbol": symbol,
                "exchange": exchange,
                "start_time": data_range['start'].isoformat(),
                "end_time": data_range['end'].isoformat(),
                "duration_days": (data_range['end'] - data_range['start']).days
            }

        except HTTPException:
            raise
        except Exception as e:
            logger.error(f"Error getting data range for {symbol}: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @router.get("/stats", response_model=Dict[str, Any])
    async def get_replay_stats():
        """Get replay system statistics"""
        try:
            return replay_manager.get_stats()

        except Exception as e:
            logger.error(f"Error getting replay stats: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    return router
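A minimal wiring sketch for the router factory above; how the COBY application actually assembles its FastAPI app is not shown here, and the HistoricalReplayManager constructor arguments are assumed:

from fastapi import FastAPI

app = FastAPI()
replay_manager = HistoricalReplayManager()                 # constructor arguments assumed
app.include_router(create_replay_router(replay_manager))
# exposes POST /replay/sessions, GET /replay/sessions/{session_id}, POST /replay/sessions/{session_id}/control, etc.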
435
COBY/api/replay_websocket.py
Normal file
@@ -0,0 +1,435 @@
"""
WebSocket server for real-time replay data streaming.
"""

import asyncio
import json
import logging
from typing import Dict, Set, Optional, Any
from fastapi import WebSocket, WebSocketDisconnect
from datetime import datetime

from ..replay.replay_manager import HistoricalReplayManager
from ..models.core import OrderBookSnapshot, TradeEvent, ReplayStatus
from ..utils.logging import get_logger, set_correlation_id
from ..utils.exceptions import ReplayError

logger = get_logger(__name__)


class ReplayWebSocketManager:
    """
    WebSocket manager for replay data streaming.

    Provides:
    - Real-time replay data streaming
    - Session-based connections
    - Automatic cleanup on disconnect
    - Status updates
    """

    def __init__(self, replay_manager: HistoricalReplayManager):
        """
        Initialize WebSocket manager.

        Args:
            replay_manager: Replay manager instance
        """
        self.replay_manager = replay_manager

        # Connection management
        self.connections: Dict[str, Set[WebSocket]] = {}  # session_id -> websockets
        self.websocket_sessions: Dict[WebSocket, str] = {}  # websocket -> session_id

        # Statistics
        self.stats = {
            'active_connections': 0,
            'total_connections': 0,
            'messages_sent': 0,
            'connection_errors': 0
        }

        logger.info("Replay WebSocket manager initialized")

    async def connect_to_session(self, websocket: WebSocket, session_id: str) -> bool:
        """
        Connect WebSocket to a replay session.

        Args:
            websocket: WebSocket connection
            session_id: Replay session ID

        Returns:
            bool: True if connected successfully, False otherwise
        """
        try:
            set_correlation_id()

            # Check if session exists
            session = self.replay_manager.get_replay_status(session_id)
            if not session:
                await websocket.send_json({
                    "type": "error",
                    "message": f"Session {session_id} not found"
                })
                return False

            # Accept WebSocket connection
            await websocket.accept()

            # Add to connection tracking
            if session_id not in self.connections:
                self.connections[session_id] = set()

            self.connections[session_id].add(websocket)
            self.websocket_sessions[websocket] = session_id

            # Update statistics
            self.stats['active_connections'] += 1
            self.stats['total_connections'] += 1

            # Add callbacks to replay session
            self.replay_manager.add_data_callback(session_id, self._data_callback)
            self.replay_manager.add_status_callback(session_id, self._status_callback)

            # Send initial session status
            await self._send_session_status(websocket, session)

            logger.info(f"WebSocket connected to replay session {session_id}")
            return True

        except Exception as e:
            logger.error(f"Failed to connect WebSocket to session {session_id}: {e}")
            self.stats['connection_errors'] += 1
            return False

    async def disconnect(self, websocket: WebSocket) -> None:
        """
        Disconnect WebSocket and cleanup.

        Args:
            websocket: WebSocket connection to disconnect
        """
        try:
            session_id = self.websocket_sessions.get(websocket)

            if session_id:
                # Remove from connection tracking
                if session_id in self.connections:
                    self.connections[session_id].discard(websocket)

                    # Clean up empty session connections
                    if not self.connections[session_id]:
                        del self.connections[session_id]

                del self.websocket_sessions[websocket]

                # Update statistics
                self.stats['active_connections'] -= 1

                logger.info(f"WebSocket disconnected from replay session {session_id}")

        except Exception as e:
            logger.error(f"Error during WebSocket disconnect: {e}")

    async def handle_websocket_messages(self, websocket: WebSocket) -> None:
        """
        Handle incoming WebSocket messages.

        Args:
            websocket: WebSocket connection
        """
        try:
            while True:
                # Receive message
                message = await websocket.receive_json()

                # Process message
                await self._process_websocket_message(websocket, message)

        except WebSocketDisconnect:
            logger.info("WebSocket disconnected")
        except Exception as e:
            logger.error(f"WebSocket message handling error: {e}")
            await websocket.send_json({
                "type": "error",
                "message": "Message processing error"
            })

    async def _process_websocket_message(self, websocket: WebSocket, message: Dict[str, Any]) -> None:
        """
        Process incoming WebSocket message.

        Args:
            websocket: WebSocket connection
            message: Received message
        """
        try:
            message_type = message.get('type')
            session_id = self.websocket_sessions.get(websocket)

            if not session_id:
                await websocket.send_json({
                    "type": "error",
                    "message": "Not connected to any session"
                })
                return

            if message_type == "control":
                await self._handle_control_message(websocket, session_id, message)
            elif message_type == "seek":
                await self._handle_seek_message(websocket, session_id, message)
            elif message_type == "speed":
                await self._handle_speed_message(websocket, session_id, message)
            elif message_type == "status":
                await self._handle_status_request(websocket, session_id)
            else:
                await websocket.send_json({
                    "type": "error",
                    "message": f"Unknown message type: {message_type}"
                })

        except Exception as e:
            logger.error(f"Error processing WebSocket message: {e}")
            await websocket.send_json({
                "type": "error",
                "message": "Message processing failed"
            })

    async def _handle_control_message(self, websocket: WebSocket, session_id: str,
                                      message: Dict[str, Any]) -> None:
        """Handle replay control messages."""
        try:
            action = message.get('action')

            if action == "start":
                await self.replay_manager.start_replay(session_id)
            elif action == "pause":
                await self.replay_manager.pause_replay(session_id)
            elif action == "resume":
                await self.replay_manager.resume_replay(session_id)
            elif action == "stop":
                await self.replay_manager.stop_replay(session_id)
            else:
                await websocket.send_json({
                    "type": "error",
                    "message": f"Invalid control action: {action}"
                })
                return

            await websocket.send_json({
                "type": "control_response",
                "action": action,
                "status": "success"
            })

        except ReplayError as e:
            await websocket.send_json({
                "type": "error",
                "message": str(e)
            })
        except Exception as e:
            logger.error(f"Control message error: {e}")
            await websocket.send_json({
                "type": "error",
                "message": "Control action failed"
            })

    async def _handle_seek_message(self, websocket: WebSocket, session_id: str,
                                   message: Dict[str, Any]) -> None:
        """Handle seek messages."""
        try:
            timestamp_str = message.get('timestamp')
            if not timestamp_str:
                await websocket.send_json({
                    "type": "error",
                    "message": "Timestamp required for seek"
                })
                return

            timestamp = datetime.fromisoformat(timestamp_str.replace('Z', '+00:00'))
            success = self.replay_manager.seek_replay(session_id, timestamp)

            await websocket.send_json({
                "type": "seek_response",
                "timestamp": timestamp_str,
                "status": "success" if success else "failed"
            })

        except Exception as e:
            logger.error(f"Seek message error: {e}")
|
await websocket.send_json({
|
||||||
|
"type": "error",
|
||||||
|
"message": "Seek failed"
|
||||||
|
})
|
||||||
|
|
||||||
|
async def _handle_speed_message(self, websocket: WebSocket, session_id: str,
|
||||||
|
message: Dict[str, Any]) -> None:
|
||||||
|
"""Handle speed change messages."""
|
||||||
|
try:
|
||||||
|
speed = message.get('speed')
|
||||||
|
if not speed or speed <= 0:
|
||||||
|
await websocket.send_json({
|
||||||
|
"type": "error",
|
||||||
|
"message": "Valid speed required"
|
||||||
|
})
|
||||||
|
return
|
||||||
|
|
||||||
|
success = self.replay_manager.set_replay_speed(session_id, speed)
|
||||||
|
|
||||||
|
await websocket.send_json({
|
||||||
|
"type": "speed_response",
|
||||||
|
"speed": speed,
|
||||||
|
"status": "success" if success else "failed"
|
||||||
|
})
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Speed message error: {e}")
|
||||||
|
await websocket.send_json({
|
||||||
|
"type": "error",
|
||||||
|
"message": "Speed change failed"
|
||||||
|
})
|
||||||
|
|
||||||
|
async def _handle_status_request(self, websocket: WebSocket, session_id: str) -> None:
|
||||||
|
"""Handle status request messages."""
|
||||||
|
try:
|
||||||
|
session = self.replay_manager.get_replay_status(session_id)
|
||||||
|
if session:
|
||||||
|
await self._send_session_status(websocket, session)
|
||||||
|
else:
|
||||||
|
await websocket.send_json({
|
||||||
|
"type": "error",
|
||||||
|
"message": "Session not found"
|
||||||
|
})
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Status request error: {e}")
|
||||||
|
await websocket.send_json({
|
||||||
|
"type": "error",
|
||||||
|
"message": "Status request failed"
|
||||||
|
})
|
||||||
|
|
||||||
|
async def _data_callback(self, data) -> None:
|
||||||
|
"""Callback for replay data - broadcasts to all connected WebSockets."""
|
||||||
|
try:
|
||||||
|
# Determine which session this data belongs to
|
||||||
|
# This is a simplified approach - in practice, you'd need to track
|
||||||
|
# which session generated this callback
|
||||||
|
|
||||||
|
# Serialize data
|
||||||
|
if isinstance(data, OrderBookSnapshot):
|
||||||
|
message = {
|
||||||
|
"type": "orderbook",
|
||||||
|
"data": {
|
||||||
|
"symbol": data.symbol,
|
||||||
|
"exchange": data.exchange,
|
||||||
|
"timestamp": data.timestamp.isoformat(),
|
||||||
|
"bids": [{"price": b.price, "size": b.size} for b in data.bids[:10]],
|
||||||
|
"asks": [{"price": a.price, "size": a.size} for a in data.asks[:10]],
|
||||||
|
"sequence_id": data.sequence_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
elif isinstance(data, TradeEvent):
|
||||||
|
message = {
|
||||||
|
"type": "trade",
|
||||||
|
"data": {
|
||||||
|
"symbol": data.symbol,
|
||||||
|
"exchange": data.exchange,
|
||||||
|
"timestamp": data.timestamp.isoformat(),
|
||||||
|
"price": data.price,
|
||||||
|
"size": data.size,
|
||||||
|
"side": data.side,
|
||||||
|
"trade_id": data.trade_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else:
|
||||||
|
return
|
||||||
|
|
||||||
|
# Broadcast to all connections
|
||||||
|
await self._broadcast_message(message)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Data callback error: {e}")
|
||||||
|
|
||||||
|
async def _status_callback(self, session_id: str, status: ReplayStatus) -> None:
|
||||||
|
"""Callback for replay status changes."""
|
||||||
|
try:
|
||||||
|
message = {
|
||||||
|
"type": "status",
|
||||||
|
"session_id": session_id,
|
||||||
|
"status": status.value,
|
||||||
|
"timestamp": datetime.utcnow().isoformat()
|
||||||
|
}
|
||||||
|
|
||||||
|
# Send to connections for this session
|
||||||
|
if session_id in self.connections:
|
||||||
|
await self._broadcast_to_session(session_id, message)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Status callback error: {e}")
|
||||||
|
|
||||||
|
async def _send_session_status(self, websocket: WebSocket, session) -> None:
|
||||||
|
"""Send session status to WebSocket."""
|
||||||
|
try:
|
||||||
|
message = {
|
||||||
|
"type": "session_status",
|
||||||
|
"data": {
|
||||||
|
"session_id": session.session_id,
|
||||||
|
"status": session.status.value,
|
||||||
|
"progress": session.progress,
|
||||||
|
"current_time": session.current_time.isoformat(),
|
||||||
|
"speed": session.speed,
|
||||||
|
"events_replayed": session.events_replayed,
|
||||||
|
"total_events": session.total_events
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
await websocket.send_json(message)
|
||||||
|
self.stats['messages_sent'] += 1
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error sending session status: {e}")
|
||||||
|
|
||||||
|
async def _broadcast_message(self, message: Dict[str, Any]) -> None:
|
||||||
|
"""Broadcast message to all connected WebSockets."""
|
||||||
|
disconnected = []
|
||||||
|
|
||||||
|
for session_id, websockets in self.connections.items():
|
||||||
|
for websocket in websockets.copy():
|
||||||
|
try:
|
||||||
|
await websocket.send_json(message)
|
||||||
|
self.stats['messages_sent'] += 1
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Failed to send message to WebSocket: {e}")
|
||||||
|
disconnected.append((session_id, websocket))
|
||||||
|
|
||||||
|
# Clean up disconnected WebSockets
|
||||||
|
for session_id, websocket in disconnected:
|
||||||
|
await self.disconnect(websocket)
|
||||||
|
|
||||||
|
async def _broadcast_to_session(self, session_id: str, message: Dict[str, Any]) -> None:
|
||||||
|
"""Broadcast message to WebSockets connected to a specific session."""
|
||||||
|
if session_id not in self.connections:
|
||||||
|
return
|
||||||
|
|
||||||
|
disconnected = []
|
||||||
|
|
||||||
|
for websocket in self.connections[session_id].copy():
|
||||||
|
try:
|
||||||
|
await websocket.send_json(message)
|
||||||
|
self.stats['messages_sent'] += 1
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Failed to send message to WebSocket: {e}")
|
||||||
|
disconnected.append(websocket)
|
||||||
|
|
||||||
|
# Clean up disconnected WebSockets
|
||||||
|
for websocket in disconnected:
|
||||||
|
await self.disconnect(websocket)
|
||||||
|
|
||||||
|
def get_stats(self) -> Dict[str, Any]:
|
||||||
|
"""Get WebSocket manager statistics."""
|
||||||
|
return {
|
||||||
|
**self.stats,
|
||||||
|
'sessions_with_connections': len(self.connections),
|
||||||
|
'total_websockets': sum(len(ws_set) for ws_set in self.connections.values())
|
||||||
|
}
|
||||||
172
COBY/api/response_formatter.py
Normal file
172
COBY/api/response_formatter.py
Normal file
@@ -0,0 +1,172 @@
|
|||||||
|
"""
|
||||||
|
Response formatter for API responses.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from typing import Any, Dict, Optional
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
|
|
||||||
|
class ResponseFormatter:
|
||||||
|
"""Format API responses consistently"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.stats = {
|
||||||
|
'responses_formatted': 0,
|
||||||
|
'errors_formatted': 0,
|
||||||
|
'success_responses': 0,
|
||||||
|
'created_at': datetime.utcnow().isoformat()
|
||||||
|
}
|
||||||
|
|
||||||
|
def success(self, data: Any, message: str = "Success", metadata: Optional[Dict] = None) -> Dict[str, Any]:
|
||||||
|
"""Format success response"""
|
||||||
|
self.stats['responses_formatted'] += 1
|
||||||
|
self.stats['success_responses'] += 1
|
||||||
|
|
||||||
|
response = {
|
||||||
|
"status": "success",
|
||||||
|
"message": message,
|
||||||
|
"data": data,
|
||||||
|
"timestamp": datetime.utcnow().isoformat()
|
||||||
|
}
|
||||||
|
|
||||||
|
if metadata:
|
||||||
|
response["metadata"] = metadata
|
||||||
|
|
||||||
|
return response
|
||||||
|
|
||||||
|
def error(self, message: str, code: str = "ERROR", details: Optional[Dict] = None) -> Dict[str, Any]:
|
||||||
|
"""Format error response"""
|
||||||
|
self.stats['responses_formatted'] += 1
|
||||||
|
self.stats['errors_formatted'] += 1
|
||||||
|
|
||||||
|
response = {
|
||||||
|
"status": "error",
|
||||||
|
"message": message,
|
||||||
|
"code": code,
|
||||||
|
"timestamp": datetime.utcnow().isoformat()
|
||||||
|
}
|
||||||
|
|
||||||
|
if details:
|
||||||
|
response["details"] = details
|
||||||
|
|
||||||
|
return response
|
||||||
|
|
||||||
|
def health(self, healthy: bool = True, components: Optional[Dict] = None) -> Dict[str, Any]:
|
||||||
|
"""Format health check response"""
|
||||||
|
self.stats['responses_formatted'] += 1
|
||||||
|
|
||||||
|
return {
|
||||||
|
"status": "healthy" if healthy else "unhealthy",
|
||||||
|
"timestamp": datetime.utcnow().isoformat(),
|
||||||
|
"components": components or {}
|
||||||
|
}
|
||||||
|
|
||||||
|
def rate_limit_error(self, client_stats: Dict) -> Dict[str, Any]:
|
||||||
|
"""Format rate limit error response"""
|
||||||
|
self.stats['responses_formatted'] += 1
|
||||||
|
self.stats['errors_formatted'] += 1
|
||||||
|
|
||||||
|
return {
|
||||||
|
"status": "error",
|
||||||
|
"message": "Rate limit exceeded",
|
||||||
|
"code": "RATE_LIMIT_EXCEEDED",
|
||||||
|
"timestamp": datetime.utcnow().isoformat(),
|
||||||
|
"details": {
|
||||||
|
"remaining_tokens": client_stats.get('remaining_tokens', 0),
|
||||||
|
"reset_time": client_stats.get('reset_time', 0),
|
||||||
|
"requests_per_minute": client_stats.get('requests_per_minute', 0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def validation_error(self, field: str, message: str) -> Dict[str, Any]:
|
||||||
|
"""Format validation error response"""
|
||||||
|
self.stats['responses_formatted'] += 1
|
||||||
|
self.stats['errors_formatted'] += 1
|
||||||
|
|
||||||
|
return {
|
||||||
|
"status": "error",
|
||||||
|
"message": f"Validation error: {message}",
|
||||||
|
"code": "VALIDATION_ERROR",
|
||||||
|
"timestamp": datetime.utcnow().isoformat(),
|
||||||
|
"details": {
|
||||||
|
"field": field,
|
||||||
|
"validation_message": message
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def status_response(self, data: Dict) -> Dict[str, Any]:
|
||||||
|
"""Format status response"""
|
||||||
|
self.stats['responses_formatted'] += 1
|
||||||
|
self.stats['success_responses'] += 1
|
||||||
|
|
||||||
|
return {
|
||||||
|
"status": "success",
|
||||||
|
"message": "System status",
|
||||||
|
"data": data,
|
||||||
|
"timestamp": datetime.utcnow().isoformat()
|
||||||
|
}
|
||||||
|
|
||||||
|
def heatmap_response(self, heatmap_data: Any, symbol: str, exchange: Optional[str] = None) -> Dict[str, Any]:
|
||||||
|
"""Format heatmap response"""
|
||||||
|
self.stats['responses_formatted'] += 1
|
||||||
|
self.stats['success_responses'] += 1
|
||||||
|
|
||||||
|
if not heatmap_data:
|
||||||
|
return self.error("Heatmap data not found", "HEATMAP_NOT_FOUND")
|
||||||
|
|
||||||
|
return {
|
||||||
|
"status": "success",
|
||||||
|
"message": f"Heatmap data for {symbol}",
|
||||||
|
"data": {
|
||||||
|
"symbol": symbol,
|
||||||
|
"exchange": exchange or "consolidated",
|
||||||
|
"heatmap": heatmap_data
|
||||||
|
},
|
||||||
|
"timestamp": datetime.utcnow().isoformat()
|
||||||
|
}
|
||||||
|
|
||||||
|
def orderbook_response(self, orderbook_data: Any, symbol: str, exchange: str) -> Dict[str, Any]:
|
||||||
|
"""Format order book response"""
|
||||||
|
self.stats['responses_formatted'] += 1
|
||||||
|
self.stats['success_responses'] += 1
|
||||||
|
|
||||||
|
if not orderbook_data:
|
||||||
|
return self.error("Order book data not found", "ORDERBOOK_NOT_FOUND")
|
||||||
|
|
||||||
|
return {
|
||||||
|
"status": "success",
|
||||||
|
"message": f"Order book data for {symbol}@{exchange}",
|
||||||
|
"data": {
|
||||||
|
"symbol": symbol,
|
||||||
|
"exchange": exchange,
|
||||||
|
"orderbook": orderbook_data
|
||||||
|
},
|
||||||
|
"timestamp": datetime.utcnow().isoformat()
|
||||||
|
}
|
||||||
|
|
||||||
|
def metrics_response(self, metrics_data: Any, symbol: str, exchange: str) -> Dict[str, Any]:
|
||||||
|
"""Format metrics response"""
|
||||||
|
self.stats['responses_formatted'] += 1
|
||||||
|
self.stats['success_responses'] += 1
|
||||||
|
|
||||||
|
if not metrics_data:
|
||||||
|
return self.error("Metrics data not found", "METRICS_NOT_FOUND")
|
||||||
|
|
||||||
|
return {
|
||||||
|
"status": "success",
|
||||||
|
"message": f"Metrics data for {symbol}@{exchange}",
|
||||||
|
"data": {
|
||||||
|
"symbol": symbol,
|
||||||
|
"exchange": exchange,
|
||||||
|
"metrics": metrics_data
|
||||||
|
},
|
||||||
|
"timestamp": datetime.utcnow().isoformat()
|
||||||
|
}
|
||||||
|
|
||||||
|
def get_stats(self) -> Dict[str, Any]:
|
||||||
|
"""Get formatter statistics"""
|
||||||
|
return {
|
||||||
|
**self.stats,
|
||||||
|
'uptime_seconds': (datetime.utcnow() - datetime.fromisoformat(self.stats['created_at'])).total_seconds(),
|
||||||
|
'error_rate': self.stats['errors_formatted'] / max(1, self.stats['responses_formatted'])
|
||||||
|
}
|
||||||
511
COBY/api/rest_api.py
Normal file
511
COBY/api/rest_api.py
Normal file
@@ -0,0 +1,511 @@
|
|||||||
|
"""
|
||||||
|
REST API server for COBY system.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from fastapi import FastAPI, HTTPException, Request, Query, Path, WebSocket, WebSocketDisconnect
|
||||||
|
from fastapi.middleware.cors import CORSMiddleware
|
||||||
|
from fastapi.responses import JSONResponse, HTMLResponse
|
||||||
|
from fastapi.staticfiles import StaticFiles
|
||||||
|
from typing import Optional, List
|
||||||
|
import asyncio
|
||||||
|
import os
|
||||||
|
import json
|
||||||
|
import time
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
|
from simple_config import config
|
||||||
|
from caching.redis_manager import redis_manager
|
||||||
|
from utils.logging import get_logger, set_correlation_id
|
||||||
|
from utils.validation import validate_symbol
|
||||||
|
from api.rate_limiter import RateLimiter
|
||||||
|
from api.response_formatter import ResponseFormatter
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class ConnectionManager:
|
||||||
|
"""Manage WebSocket connections for dashboard updates"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.active_connections: List[WebSocket] = []
|
||||||
|
|
||||||
|
async def connect(self, websocket: WebSocket):
|
||||||
|
await websocket.accept()
|
||||||
|
self.active_connections.append(websocket)
|
||||||
|
logger.info(f"WebSocket client connected. Total connections: {len(self.active_connections)}")
|
||||||
|
|
||||||
|
def disconnect(self, websocket: WebSocket):
|
||||||
|
if websocket in self.active_connections:
|
||||||
|
self.active_connections.remove(websocket)
|
||||||
|
logger.info(f"WebSocket client disconnected. Total connections: {len(self.active_connections)}")
|
||||||
|
|
||||||
|
async def send_personal_message(self, message: str, websocket: WebSocket):
|
||||||
|
try:
|
||||||
|
await websocket.send_text(message)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error sending personal message: {e}")
|
||||||
|
self.disconnect(websocket)
|
||||||
|
|
||||||
|
async def broadcast(self, message: str):
|
||||||
|
disconnected = []
|
||||||
|
for connection in self.active_connections:
|
||||||
|
try:
|
||||||
|
await connection.send_text(message)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error broadcasting to connection: {e}")
|
||||||
|
disconnected.append(connection)
|
||||||
|
|
||||||
|
# Remove disconnected clients
|
||||||
|
for connection in disconnected:
|
||||||
|
self.disconnect(connection)
|
||||||
|
|
||||||
|
|
||||||
|
def create_app(config_obj=None) -> FastAPI:
|
||||||
|
"""Create and configure FastAPI application"""
|
||||||
|
|
||||||
|
app = FastAPI(
|
||||||
|
title="COBY Market Data API",
|
||||||
|
description="Real-time cryptocurrency market data aggregation API",
|
||||||
|
version="1.0.0",
|
||||||
|
docs_url="/docs",
|
||||||
|
redoc_url="/redoc"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Add CORS middleware
|
||||||
|
app.add_middleware(
|
||||||
|
CORSMiddleware,
|
||||||
|
allow_origins=config.api.cors_origins,
|
||||||
|
allow_credentials=True,
|
||||||
|
allow_methods=["GET", "POST", "PUT", "DELETE"],
|
||||||
|
allow_headers=["*"],
|
||||||
|
)
|
||||||
|
|
||||||
|
# Initialize components
|
||||||
|
rate_limiter = RateLimiter(
|
||||||
|
requests_per_minute=config.api.rate_limit,
|
||||||
|
burst_size=20
|
||||||
|
)
|
||||||
|
response_formatter = ResponseFormatter()
|
||||||
|
connection_manager = ConnectionManager()
|
||||||
|
|
||||||
|
@app.websocket("/ws/dashboard")
|
||||||
|
async def websocket_endpoint(websocket: WebSocket):
|
||||||
|
"""WebSocket endpoint for real-time dashboard updates"""
|
||||||
|
await connection_manager.connect(websocket)
|
||||||
|
try:
|
||||||
|
while True:
|
||||||
|
# Send periodic status updates
|
||||||
|
status_data = {
|
||||||
|
"type": "status",
|
||||||
|
"timestamp": time.time(),
|
||||||
|
"connections": len(connection_manager.active_connections),
|
||||||
|
"system": "healthy"
|
||||||
|
}
|
||||||
|
await connection_manager.send_personal_message(
|
||||||
|
json.dumps(status_data), websocket
|
||||||
|
)
|
||||||
|
await asyncio.sleep(30) # Send update every 30 seconds
|
||||||
|
except WebSocketDisconnect:
|
||||||
|
connection_manager.disconnect(websocket)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"WebSocket error: {e}")
|
||||||
|
connection_manager.disconnect(websocket)
|
||||||
|
|
||||||
|
@app.get("/api/health")
|
||||||
|
async def api_health_check():
|
||||||
|
"""API Health check endpoint for dashboard"""
|
||||||
|
try:
|
||||||
|
# Check Redis connection
|
||||||
|
redis_healthy = await redis_manager.ping()
|
||||||
|
|
||||||
|
health_data = {
|
||||||
|
'status': 'healthy' if redis_healthy else 'degraded',
|
||||||
|
'redis': 'connected' if redis_healthy else 'disconnected',
|
||||||
|
'version': '1.0.0',
|
||||||
|
'timestamp': time.time()
|
||||||
|
}
|
||||||
|
|
||||||
|
return response_formatter.status_response(health_data)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Health check failed: {e}")
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=503,
|
||||||
|
content=response_formatter.error("Service unavailable", "HEALTH_CHECK_FAILED")
|
||||||
|
)
|
||||||
|
|
||||||
|
@app.get("/health")
|
||||||
|
async def health_check():
|
||||||
|
"""Health check endpoint"""
|
||||||
|
try:
|
||||||
|
# Check Redis connection
|
||||||
|
redis_healthy = await redis_manager.ping()
|
||||||
|
|
||||||
|
health_data = {
|
||||||
|
'status': 'healthy' if redis_healthy else 'degraded',
|
||||||
|
'redis': 'connected' if redis_healthy else 'disconnected',
|
||||||
|
'version': '1.0.0'
|
||||||
|
}
|
||||||
|
|
||||||
|
return response_formatter.status_response(health_data)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Health check failed: {e}")
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=503,
|
||||||
|
content=response_formatter.error("Service unavailable", "HEALTH_CHECK_FAILED")
|
||||||
|
)
|
||||||
|
|
||||||
|
@app.get("/", response_class=HTMLResponse)
|
||||||
|
async def root():
|
||||||
|
"""Root endpoint - serve dashboard HTML"""
|
||||||
|
static_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "web", "static")
|
||||||
|
index_path = os.path.join(static_path, "index.html")
|
||||||
|
|
||||||
|
if os.path.exists(index_path):
|
||||||
|
with open(index_path, 'r', encoding='utf-8') as f:
|
||||||
|
return HTMLResponse(content=f.read())
|
||||||
|
else:
|
||||||
|
# Fallback if index.html doesn't exist
|
||||||
|
return HTMLResponse(content="""
|
||||||
|
<!DOCTYPE html>
|
||||||
|
<html>
|
||||||
|
<head><title>COBY System</title></head>
|
||||||
|
<body>
|
||||||
|
<h1>COBY Multi-Exchange Data Aggregation System</h1>
|
||||||
|
<p>System is running. Dashboard files not found.</p>
|
||||||
|
<p><a href="/api/health">Health Check</a></p>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
|
""")
|
||||||
|
|
||||||
|
@app.middleware("http")
|
||||||
|
async def rate_limit_middleware(request: Request, call_next):
|
||||||
|
"""Rate limiting middleware"""
|
||||||
|
client_ip = request.client.host
|
||||||
|
|
||||||
|
if not rate_limiter.is_allowed(client_ip):
|
||||||
|
client_stats = rate_limiter.get_client_stats(client_ip)
|
||||||
|
error_response = response_formatter.rate_limit_error(client_stats)
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=429,
|
||||||
|
content=error_response,
|
||||||
|
headers={
|
||||||
|
"X-RateLimit-Remaining": str(int(client_stats['remaining_tokens'])),
|
||||||
|
"X-RateLimit-Reset": str(int(client_stats['reset_time']))
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
response = await call_next(request)
|
||||||
|
|
||||||
|
# Add rate limit headers
|
||||||
|
client_stats = rate_limiter.get_client_stats(client_ip)
|
||||||
|
response.headers["X-RateLimit-Remaining"] = str(int(client_stats['remaining_tokens']))
|
||||||
|
response.headers["X-RateLimit-Reset"] = str(int(client_stats['reset_time']))
|
||||||
|
|
||||||
|
return response
|
||||||
|
|
||||||
|
@app.middleware("http")
|
||||||
|
async def correlation_middleware(request: Request, call_next):
|
||||||
|
"""Add correlation ID to requests"""
|
||||||
|
set_correlation_id()
|
||||||
|
response = await call_next(request)
|
||||||
|
return response
|
||||||
|
|
||||||
|
@app.on_event("startup")
|
||||||
|
async def startup_event():
|
||||||
|
"""Initialize services on startup"""
|
||||||
|
try:
|
||||||
|
await redis_manager.initialize()
|
||||||
|
logger.info("API server startup completed")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"API server startup failed: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
@app.on_event("shutdown")
|
||||||
|
async def shutdown_event():
|
||||||
|
"""Cleanup on shutdown"""
|
||||||
|
try:
|
||||||
|
await redis_manager.close()
|
||||||
|
logger.info("API server shutdown completed")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"API server shutdown error: {e}")
|
||||||
|
|
||||||
|
# Heatmap endpoints
|
||||||
|
@app.get("/api/v1/heatmap/{symbol}")
|
||||||
|
async def get_heatmap(
|
||||||
|
symbol: str = Path(..., description="Trading symbol (e.g., BTCUSDT)"),
|
||||||
|
exchange: Optional[str] = Query(None, description="Exchange name (None for consolidated)")
|
||||||
|
):
|
||||||
|
"""Get heatmap data for a symbol"""
|
||||||
|
try:
|
||||||
|
# Validate symbol
|
||||||
|
if not validate_symbol(symbol):
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=400,
|
||||||
|
content=response_formatter.validation_error("symbol", "Invalid symbol format")
|
||||||
|
)
|
||||||
|
|
||||||
|
# Get heatmap from cache
|
||||||
|
heatmap_data = await redis_manager.get_heatmap(symbol.upper(), exchange)
|
||||||
|
|
||||||
|
return response_formatter.heatmap_response(heatmap_data, symbol.upper(), exchange)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting heatmap for {symbol}: {e}")
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=500,
|
||||||
|
content=response_formatter.error("Internal server error", "HEATMAP_ERROR")
|
||||||
|
)
|
||||||
|
|
||||||
|
# Order book endpoints
|
||||||
|
@app.get("/api/v1/orderbook/{symbol}/{exchange}")
|
||||||
|
async def get_orderbook(
|
||||||
|
symbol: str = Path(..., description="Trading symbol"),
|
||||||
|
exchange: str = Path(..., description="Exchange name")
|
||||||
|
):
|
||||||
|
"""Get order book data for a symbol on an exchange"""
|
||||||
|
try:
|
||||||
|
# Validate symbol
|
||||||
|
if not validate_symbol(symbol):
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=400,
|
||||||
|
content=response_formatter.validation_error("symbol", "Invalid symbol format")
|
||||||
|
)
|
||||||
|
|
||||||
|
# Get order book from cache
|
||||||
|
orderbook_data = await redis_manager.get_orderbook(symbol.upper(), exchange.lower())
|
||||||
|
|
||||||
|
return response_formatter.orderbook_response(orderbook_data, symbol.upper(), exchange.lower())
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting order book for {symbol}@{exchange}: {e}")
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=500,
|
||||||
|
content=response_formatter.error("Internal server error", "ORDERBOOK_ERROR")
|
||||||
|
)
|
||||||
|
|
||||||
|
# Metrics endpoints
|
||||||
|
@app.get("/api/v1/metrics/{symbol}/{exchange}")
|
||||||
|
async def get_metrics(
|
||||||
|
symbol: str = Path(..., description="Trading symbol"),
|
||||||
|
exchange: str = Path(..., description="Exchange name")
|
||||||
|
):
|
||||||
|
"""Get metrics data for a symbol on an exchange"""
|
||||||
|
try:
|
||||||
|
# Validate symbol
|
||||||
|
if not validate_symbol(symbol):
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=400,
|
||||||
|
content=response_formatter.validation_error("symbol", "Invalid symbol format")
|
||||||
|
)
|
||||||
|
|
||||||
|
# Get metrics from cache
|
||||||
|
metrics_data = await redis_manager.get_metrics(symbol.upper(), exchange.lower())
|
||||||
|
|
||||||
|
return response_formatter.metrics_response(metrics_data, symbol.upper(), exchange.lower())
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting metrics for {symbol}@{exchange}: {e}")
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=500,
|
||||||
|
content=response_formatter.error("Internal server error", "METRICS_ERROR")
|
||||||
|
)
|
||||||
|
|
||||||
|
# Exchange status endpoints
|
||||||
|
@app.get("/api/v1/status/{exchange}")
|
||||||
|
async def get_exchange_status(
|
||||||
|
exchange: str = Path(..., description="Exchange name")
|
||||||
|
):
|
||||||
|
"""Get status for an exchange"""
|
||||||
|
try:
|
||||||
|
# Get status from cache
|
||||||
|
status_data = await redis_manager.get_exchange_status(exchange.lower())
|
||||||
|
|
||||||
|
if not status_data:
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=404,
|
||||||
|
content=response_formatter.error("Exchange status not found", "STATUS_NOT_FOUND")
|
||||||
|
)
|
||||||
|
|
||||||
|
return response_formatter.success(
|
||||||
|
data=status_data,
|
||||||
|
message=f"Status for {exchange}"
|
||||||
|
)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting status for {exchange}: {e}")
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=500,
|
||||||
|
content=response_formatter.error("Internal server error", "STATUS_ERROR")
|
||||||
|
)
|
||||||
|
|
||||||
|
# List endpoints
|
||||||
|
@app.get("/api/v1/symbols")
|
||||||
|
async def list_symbols():
|
||||||
|
"""List available trading symbols"""
|
||||||
|
try:
|
||||||
|
# Get symbols from cache (this would be populated by exchange connectors)
|
||||||
|
symbols_pattern = "symbols:*"
|
||||||
|
symbol_keys = await redis_manager.keys(symbols_pattern)
|
||||||
|
|
||||||
|
all_symbols = set()
|
||||||
|
for key in symbol_keys:
|
||||||
|
symbols_data = await redis_manager.get(key)
|
||||||
|
if symbols_data and isinstance(symbols_data, list):
|
||||||
|
all_symbols.update(symbols_data)
|
||||||
|
|
||||||
|
return response_formatter.success(
|
||||||
|
data=sorted(list(all_symbols)),
|
||||||
|
message="Available trading symbols",
|
||||||
|
metadata={'total_symbols': len(all_symbols)}
|
||||||
|
)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error listing symbols: {e}")
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=500,
|
||||||
|
content=response_formatter.error("Internal server error", "SYMBOLS_ERROR")
|
||||||
|
)
|
||||||
|
|
||||||
|
@app.get("/api/v1/exchanges")
|
||||||
|
async def list_exchanges():
|
||||||
|
"""List available exchanges"""
|
||||||
|
try:
|
||||||
|
# Get exchange status keys
|
||||||
|
status_pattern = "st:*"
|
||||||
|
status_keys = await redis_manager.keys(status_pattern)
|
||||||
|
|
||||||
|
exchanges = []
|
||||||
|
for key in status_keys:
|
||||||
|
# Extract exchange name from key (st:exchange_name)
|
||||||
|
exchange_name = key.split(':', 1)[1] if ':' in key else key
|
||||||
|
exchanges.append(exchange_name)
|
||||||
|
|
||||||
|
return response_formatter.success(
|
||||||
|
data=sorted(exchanges),
|
||||||
|
message="Available exchanges",
|
||||||
|
metadata={'total_exchanges': len(exchanges)}
|
||||||
|
)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error listing exchanges: {e}")
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=500,
|
||||||
|
content=response_formatter.error("Internal server error", "EXCHANGES_ERROR")
|
||||||
|
)
|
||||||
|
|
||||||
|
# Statistics endpoints
|
||||||
|
@app.get("/api/v1/stats/cache")
|
||||||
|
async def get_cache_stats():
|
||||||
|
"""Get cache statistics"""
|
||||||
|
try:
|
||||||
|
cache_stats = redis_manager.get_stats()
|
||||||
|
redis_health = await redis_manager.health_check()
|
||||||
|
|
||||||
|
stats_data = {
|
||||||
|
'cache_performance': cache_stats,
|
||||||
|
'redis_health': redis_health
|
||||||
|
}
|
||||||
|
|
||||||
|
return response_formatter.success(
|
||||||
|
data=stats_data,
|
||||||
|
message="Cache statistics"
|
||||||
|
)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting cache stats: {e}")
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=500,
|
||||||
|
content=response_formatter.error("Internal server error", "STATS_ERROR")
|
||||||
|
)
|
||||||
|
|
||||||
|
@app.get("/api/v1/stats/api")
|
||||||
|
async def get_api_stats():
|
||||||
|
"""Get API statistics"""
|
||||||
|
try:
|
||||||
|
api_stats = {
|
||||||
|
'rate_limiter': rate_limiter.get_global_stats(),
|
||||||
|
'response_formatter': response_formatter.get_stats()
|
||||||
|
}
|
||||||
|
|
||||||
|
return response_formatter.success(
|
||||||
|
data=api_stats,
|
||||||
|
message="API statistics"
|
||||||
|
)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting API stats: {e}")
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=500,
|
||||||
|
content=response_formatter.error("Internal server error", "API_STATS_ERROR")
|
||||||
|
)
|
||||||
|
|
||||||
|
# Batch endpoints for efficiency
|
||||||
|
@app.get("/api/v1/batch/heatmaps")
|
||||||
|
async def get_batch_heatmaps(
|
||||||
|
symbols: str = Query(..., description="Comma-separated list of symbols"),
|
||||||
|
exchange: Optional[str] = Query(None, description="Exchange name (None for consolidated)")
|
||||||
|
):
|
||||||
|
"""Get heatmaps for multiple symbols"""
|
||||||
|
try:
|
||||||
|
symbol_list = [s.strip().upper() for s in symbols.split(',')]
|
||||||
|
|
||||||
|
# Validate all symbols
|
||||||
|
for symbol in symbol_list:
|
||||||
|
if not validate_symbol(symbol):
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=400,
|
||||||
|
content=response_formatter.validation_error("symbols", f"Invalid symbol: {symbol}")
|
||||||
|
)
|
||||||
|
|
||||||
|
# Get heatmaps in batch
|
||||||
|
heatmaps = {}
|
||||||
|
for symbol in symbol_list:
|
||||||
|
heatmap_data = await redis_manager.get_heatmap(symbol, exchange)
|
||||||
|
if heatmap_data:
|
||||||
|
heatmaps[symbol] = {
|
||||||
|
'symbol': heatmap_data.symbol,
|
||||||
|
'timestamp': heatmap_data.timestamp.isoformat(),
|
||||||
|
'bucket_size': heatmap_data.bucket_size,
|
||||||
|
'points': [
|
||||||
|
{
|
||||||
|
'price': point.price,
|
||||||
|
'volume': point.volume,
|
||||||
|
'intensity': point.intensity,
|
||||||
|
'side': point.side
|
||||||
|
}
|
||||||
|
for point in heatmap_data.data
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
return response_formatter.success(
|
||||||
|
data=heatmaps,
|
||||||
|
message=f"Batch heatmaps for {len(symbol_list)} symbols",
|
||||||
|
metadata={
|
||||||
|
'requested_symbols': len(symbol_list),
|
||||||
|
'found_heatmaps': len(heatmaps),
|
||||||
|
'exchange': exchange or 'consolidated'
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting batch heatmaps: {e}")
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=500,
|
||||||
|
content=response_formatter.error("Internal server error", "BATCH_HEATMAPS_ERROR")
|
||||||
|
)
|
||||||
|
|
||||||
|
# Mount static files for web dashboard AFTER all API routes are defined
|
||||||
|
static_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "web", "static")
|
||||||
|
if os.path.exists(static_path):
|
||||||
|
app.mount("/static", StaticFiles(directory=static_path), name="static")
|
||||||
|
# Don't mount at root to avoid conflicts with WebSocket and API routes
|
||||||
|
|
||||||
|
return app
|
||||||
|
|
||||||
|
|
||||||
|
# Create the FastAPI app instance
|
||||||
|
app = create_app()
|
||||||
53
COBY/api/simple_websocket_server.py
Normal file
53
COBY/api/simple_websocket_server.py
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
"""
|
||||||
|
Simple WebSocket server for COBY system.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
from typing import Set, Dict, Any
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class WebSocketServer:
|
||||||
|
"""Simple WebSocket server implementation"""
|
||||||
|
|
||||||
|
def __init__(self, host: str = "0.0.0.0", port: int = 8081):
|
||||||
|
self.host = host
|
||||||
|
self.port = port
|
||||||
|
self.connections: Set = set()
|
||||||
|
self.running = False
|
||||||
|
|
||||||
|
async def start(self):
|
||||||
|
"""Start the WebSocket server"""
|
||||||
|
try:
|
||||||
|
logger.info(f"Starting WebSocket server on {self.host}:{self.port}")
|
||||||
|
self.running = True
|
||||||
|
|
||||||
|
# Simple implementation - just keep running
|
||||||
|
while self.running:
|
||||||
|
await asyncio.sleep(1)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"WebSocket server error: {e}")
|
||||||
|
|
||||||
|
async def stop(self):
|
||||||
|
"""Stop the WebSocket server"""
|
||||||
|
logger.info("Stopping WebSocket server")
|
||||||
|
self.running = False
|
||||||
|
|
||||||
|
async def broadcast(self, message: Dict[str, Any]):
|
||||||
|
"""Broadcast message to all connections"""
|
||||||
|
if self.connections:
|
||||||
|
logger.debug(f"Broadcasting to {len(self.connections)} connections")
|
||||||
|
|
||||||
|
def add_connection(self, websocket):
|
||||||
|
"""Add a WebSocket connection"""
|
||||||
|
self.connections.add(websocket)
|
||||||
|
logger.info(f"WebSocket connection added. Total: {len(self.connections)}")
|
||||||
|
|
||||||
|
def remove_connection(self, websocket):
|
||||||
|
"""Remove a WebSocket connection"""
|
||||||
|
self.connections.discard(websocket)
|
||||||
|
logger.info(f"WebSocket connection removed. Total: {len(self.connections)}")
|
||||||
404
COBY/api/websocket_server.py
Normal file
404
COBY/api/websocket_server.py
Normal file
@@ -0,0 +1,404 @@
|
|||||||
|
"""
|
||||||
|
WebSocket server for real-time data streaming.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
import json
|
||||||
|
from typing import Dict, Set, Optional, Any
|
||||||
|
from fastapi import WebSocket, WebSocketDisconnect
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
|
from utils.logging import get_logger, set_correlation_id
|
||||||
|
from utils.validation import validate_symbol
|
||||||
|
from caching.redis_manager import redis_manager
|
||||||
|
from api.response_formatter import ResponseFormatter
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class WebSocketManager:
|
||||||
|
"""
|
||||||
|
Manages WebSocket connections and real-time data streaming.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
"""Initialize WebSocket manager"""
|
||||||
|
# Active connections: connection_id -> WebSocket
|
||||||
|
self.connections: Dict[str, WebSocket] = {}
|
||||||
|
|
||||||
|
# Subscriptions: symbol -> set of connection_ids
|
||||||
|
self.subscriptions: Dict[str, Set[str]] = {}
|
||||||
|
|
||||||
|
# Connection metadata: connection_id -> metadata
|
||||||
|
self.connection_metadata: Dict[str, Dict[str, Any]] = {}
|
||||||
|
|
||||||
|
self.response_formatter = ResponseFormatter()
|
||||||
|
self.connection_counter = 0
|
||||||
|
|
||||||
|
logger.info("WebSocket manager initialized")
|
||||||
|
|
||||||
|
async def connect(self, websocket: WebSocket, client_ip: str) -> str:
|
||||||
|
"""
|
||||||
|
Accept new WebSocket connection.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
websocket: WebSocket connection
|
||||||
|
client_ip: Client IP address
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Connection ID
|
||||||
|
"""
|
||||||
|
await websocket.accept()
|
||||||
|
|
||||||
|
# Generate connection ID
|
||||||
|
self.connection_counter += 1
|
||||||
|
connection_id = f"ws_{self.connection_counter}_{client_ip}"
|
||||||
|
|
||||||
|
# Store connection
|
||||||
|
self.connections[connection_id] = websocket
|
||||||
|
self.connection_metadata[connection_id] = {
|
||||||
|
'client_ip': client_ip,
|
||||||
|
'connected_at': asyncio.get_event_loop().time(),
|
||||||
|
'subscriptions': set(),
|
||||||
|
'messages_sent': 0
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(f"WebSocket connected: {connection_id}")
|
||||||
|
|
||||||
|
# Send welcome message
|
||||||
|
welcome_msg = self.response_formatter.success(
|
||||||
|
data={'connection_id': connection_id},
|
||||||
|
message="WebSocket connected successfully"
|
||||||
|
)
|
||||||
|
await self._send_to_connection(connection_id, welcome_msg)
|
||||||
|
|
||||||
|
return connection_id
|
||||||
|
|
||||||
|
async def disconnect(self, connection_id: str) -> None:
|
||||||
|
"""
|
||||||
|
Handle WebSocket disconnection.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
connection_id: Connection ID to disconnect
|
||||||
|
"""
|
||||||
|
if connection_id in self.connections:
|
||||||
|
# Remove from all subscriptions
|
||||||
|
metadata = self.connection_metadata.get(connection_id, {})
|
||||||
|
for symbol in metadata.get('subscriptions', set()):
|
||||||
|
await self._unsubscribe_connection(connection_id, symbol)
|
||||||
|
|
||||||
|
# Remove connection
|
||||||
|
del self.connections[connection_id]
|
||||||
|
del self.connection_metadata[connection_id]
|
||||||
|
|
||||||
|
logger.info(f"WebSocket disconnected: {connection_id}")
|
||||||
|
|
||||||
|
async def subscribe(self, connection_id: str, symbol: str,
|
||||||
|
data_type: str = "heatmap") -> bool:
|
||||||
|
"""
|
||||||
|
Subscribe connection to symbol updates.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
connection_id: Connection ID
|
||||||
|
symbol: Trading symbol
|
||||||
|
data_type: Type of data to subscribe to
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if subscribed successfully
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
# Validate symbol
|
||||||
|
if not validate_symbol(symbol):
|
||||||
|
error_msg = self.response_formatter.validation_error("symbol", "Invalid symbol format")
|
||||||
|
await self._send_to_connection(connection_id, error_msg)
|
||||||
|
return False
|
||||||
|
|
||||||
|
symbol = symbol.upper()
|
||||||
|
subscription_key = f"{symbol}:{data_type}"
|
||||||
|
|
||||||
|
# Add to subscriptions
|
||||||
|
if subscription_key not in self.subscriptions:
|
||||||
|
self.subscriptions[subscription_key] = set()
|
||||||
|
|
||||||
|
self.subscriptions[subscription_key].add(connection_id)
|
||||||
|
|
||||||
|
# Update connection metadata
|
||||||
|
if connection_id in self.connection_metadata:
|
||||||
|
self.connection_metadata[connection_id]['subscriptions'].add(subscription_key)
|
||||||
|
|
||||||
|
logger.info(f"WebSocket {connection_id} subscribed to {subscription_key}")
|
||||||
|
|
||||||
|
# Send confirmation
|
||||||
|
confirm_msg = self.response_formatter.success(
|
||||||
|
data={'symbol': symbol, 'data_type': data_type},
|
||||||
|
message=f"Subscribed to {symbol} {data_type} updates"
|
||||||
|
)
|
||||||
|
await self._send_to_connection(connection_id, confirm_msg)
|
||||||
|
|
||||||
|
# Send initial data if available
|
||||||
|
await self._send_initial_data(connection_id, symbol, data_type)
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error subscribing {connection_id} to {symbol}: {e}")
|
||||||
|
error_msg = self.response_formatter.error("Subscription failed", "SUBSCRIBE_ERROR")
|
||||||
|
await self._send_to_connection(connection_id, error_msg)
|
||||||
|
return False
|
||||||
|
|
||||||
|
async def unsubscribe(self, connection_id: str, symbol: str,
|
||||||
|
data_type: str = "heatmap") -> bool:
|
||||||
|
"""
|
||||||
|
Unsubscribe connection from symbol updates.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
connection_id: Connection ID
|
||||||
|
symbol: Trading symbol
|
||||||
|
data_type: Type of data to unsubscribe from
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if unsubscribed successfully
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
symbol = symbol.upper()
|
||||||
|
subscription_key = f"{symbol}:{data_type}"
|
||||||
|
|
||||||
|
await self._unsubscribe_connection(connection_id, subscription_key)
|
||||||
|
|
||||||
|
# Send confirmation
|
||||||
|
confirm_msg = self.response_formatter.success(
|
||||||
|
data={'symbol': symbol, 'data_type': data_type},
|
||||||
|
message=f"Unsubscribed from {symbol} {data_type} updates"
|
||||||
|
)
|
||||||
|
await self._send_to_connection(connection_id, confirm_msg)
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error unsubscribing {connection_id} from {symbol}: {e}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
async def broadcast_update(self, symbol: str, data_type: str, data: Any) -> int:
|
||||||
|
"""
|
||||||
|
Broadcast data update to all subscribers.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
data_type: Type of data
|
||||||
|
data: Data to broadcast
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
int: Number of connections notified
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
subscription_key = f"{symbol.upper()}:{data_type}"
|
||||||
|
subscribers = self.subscriptions.get(subscription_key, set())
|
||||||
|
|
||||||
|
if not subscribers:
|
||||||
|
return 0
|
||||||
|
|
||||||
|
# Format message based on data type
|
||||||
|
if data_type == "heatmap":
|
||||||
|
message = self.response_formatter.heatmap_response(data, symbol)
|
||||||
|
elif data_type == "orderbook":
|
||||||
|
message = self.response_formatter.orderbook_response(data, symbol, "consolidated")
|
||||||
|
else:
|
||||||
|
message = self.response_formatter.success(data, f"{data_type} update for {symbol}")
|
||||||
|
|
||||||
|
# Add update type to message
|
||||||
|
message['update_type'] = data_type
|
||||||
|
message['symbol'] = symbol
|
||||||
|
|
||||||
|
# Send to all subscribers
|
||||||
|
sent_count = 0
|
||||||
|
for connection_id in subscribers.copy(): # Copy to avoid modification during iteration
|
||||||
|
if await self._send_to_connection(connection_id, message):
|
||||||
|
sent_count += 1
|
||||||
|
|
||||||
|
logger.debug(f"Broadcasted {data_type} update for {symbol} to {sent_count} connections")
|
||||||
|
return sent_count
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error broadcasting update for {symbol}: {e}")
|
||||||
|
return 0
|
||||||
|
|
||||||
|
async def _send_to_connection(self, connection_id: str, message: Dict[str, Any]) -> bool:
|
||||||
|
"""
|
||||||
|
Send message to specific connection.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
connection_id: Connection ID
|
||||||
|
message: Message to send
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if sent successfully
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
if connection_id not in self.connections:
|
||||||
|
return False
|
||||||
|
|
||||||
|
websocket = self.connections[connection_id]
|
||||||
|
message_json = json.dumps(message, default=str)
|
||||||
|
|
||||||
|
await websocket.send_text(message_json)
|
||||||
|
|
||||||
|
# Update statistics
|
||||||
|
if connection_id in self.connection_metadata:
|
||||||
|
self.connection_metadata[connection_id]['messages_sent'] += 1
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Error sending message to {connection_id}: {e}")
|
||||||
|
# Remove broken connection
|
||||||
|
await self.disconnect(connection_id)
|
||||||
|
return False
|
||||||
|
|
||||||
|
async def _unsubscribe_connection(self, connection_id: str, subscription_key: str) -> None:
|
||||||
|
"""Remove connection from subscription"""
|
||||||
|
if subscription_key in self.subscriptions:
|
||||||
|
self.subscriptions[subscription_key].discard(connection_id)
|
||||||
|
|
||||||
|
# Clean up empty subscriptions
|
||||||
|
if not self.subscriptions[subscription_key]:
|
||||||
|
del self.subscriptions[subscription_key]
|
||||||
|
|
||||||
|
# Update connection metadata
|
||||||
|
if connection_id in self.connection_metadata:
|
||||||
|
self.connection_metadata[connection_id]['subscriptions'].discard(subscription_key)
|
||||||
|
|
||||||
|
async def _send_initial_data(self, connection_id: str, symbol: str, data_type: str) -> None:
|
||||||
|
"""Send initial data to newly subscribed connection"""
|
||||||
|
try:
|
||||||
|
if data_type == "heatmap":
|
||||||
|
# Get latest heatmap from cache
|
||||||
|
heatmap_data = await redis_manager.get_heatmap(symbol)
|
||||||
|
if heatmap_data:
|
||||||
|
message = self.response_formatter.heatmap_response(heatmap_data, symbol)
|
||||||
|
message['update_type'] = 'initial_data'
|
||||||
|
await self._send_to_connection(connection_id, message)
|
||||||
|
|
||||||
|
elif data_type == "orderbook":
|
||||||
|
# Could get latest order book from cache
|
||||||
|
# This would require knowing which exchange to get data from
|
||||||
|
pass
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Error sending initial data to {connection_id}: {e}")
|
||||||
|
|
||||||
|
def get_stats(self) -> Dict[str, Any]:
|
||||||
|
"""Get WebSocket manager statistics"""
|
||||||
|
total_subscriptions = sum(len(subs) for subs in self.subscriptions.values())
|
||||||
|
|
||||||
|
return {
|
||||||
|
'active_connections': len(self.connections),
|
||||||
|
'total_subscriptions': total_subscriptions,
|
||||||
|
'unique_symbols': len(set(key.split(':')[0] for key in self.subscriptions.keys())),
|
||||||
|
'connection_counter': self.connection_counter
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# Global WebSocket manager instance
|
||||||
|
websocket_manager = WebSocketManager()
|
||||||
|
|
||||||
|
|
||||||
|
class WebSocketServer:
|
||||||
|
"""
|
||||||
|
WebSocket server for real-time data streaming.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
"""Initialize WebSocket server"""
|
||||||
|
self.manager = websocket_manager
|
||||||
|
logger.info("WebSocket server initialized")
|
||||||
|
|
||||||
|
async def handle_connection(self, websocket: WebSocket, client_ip: str) -> None:
|
||||||
|
"""
|
||||||
|
Handle WebSocket connection lifecycle.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
websocket: WebSocket connection
|
||||||
|
client_ip: Client IP address
|
||||||
|
"""
|
||||||
|
connection_id = None
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Accept connection
|
||||||
|
connection_id = await self.manager.connect(websocket, client_ip)
|
||||||
|
|
||||||
|
# Handle messages
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
# Receive message
|
||||||
|
message = await websocket.receive_text()
|
||||||
|
await self._handle_message(connection_id, message)
|
||||||
|
|
||||||
|
except WebSocketDisconnect:
|
||||||
|
logger.info(f"WebSocket client disconnected: {connection_id}")
|
||||||
|
break
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"WebSocket connection error: {e}")
|
||||||
|
|
||||||
|
finally:
|
||||||
|
# Clean up connection
|
||||||
|
if connection_id:
|
||||||
|
await self.manager.disconnect(connection_id)
|
||||||
|
|
||||||
|
async def _handle_message(self, connection_id: str, message: str) -> None:
|
||||||
|
"""
|
||||||
|
Handle incoming WebSocket message.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
connection_id: Connection ID
|
||||||
|
message: Received message
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
# Parse message
|
||||||
|
data = json.loads(message)
|
||||||
|
action = data.get('action')
|
||||||
|
|
||||||
|
if action == 'subscribe':
|
||||||
|
symbol = data.get('symbol')
|
||||||
|
data_type = data.get('data_type', 'heatmap')
|
||||||
|
await self.manager.subscribe(connection_id, symbol, data_type)
|
||||||
|
|
||||||
|
elif action == 'unsubscribe':
|
||||||
|
symbol = data.get('symbol')
|
||||||
|
data_type = data.get('data_type', 'heatmap')
|
||||||
|
await self.manager.unsubscribe(connection_id, symbol, data_type)
|
||||||
|
|
||||||
|
elif action == 'ping':
|
||||||
|
# Send pong response
|
||||||
|
pong_msg = self.manager.response_formatter.success(
|
||||||
|
data={'action': 'pong'},
|
||||||
|
message="Pong"
|
||||||
|
)
|
||||||
|
await self.manager._send_to_connection(connection_id, pong_msg)
|
||||||
|
|
||||||
|
else:
|
||||||
|
# Unknown action
|
||||||
|
error_msg = self.manager.response_formatter.error(
|
||||||
|
f"Unknown action: {action}",
|
||||||
|
"UNKNOWN_ACTION"
|
||||||
|
)
|
||||||
|
await self.manager._send_to_connection(connection_id, error_msg)
|
||||||
|
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
error_msg = self.manager.response_formatter.error(
|
||||||
|
"Invalid JSON message",
|
||||||
|
"INVALID_JSON"
|
||||||
|
)
|
||||||
|
await self.manager._send_to_connection(connection_id, error_msg)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error handling WebSocket message: {e}")
|
||||||
|
error_msg = self.manager.response_formatter.error(
|
||||||
|
"Message processing failed",
|
||||||
|
"MESSAGE_ERROR"
|
||||||
|
)
|
||||||
|
await self.manager._send_to_connection(connection_id, error_msg)
|
||||||
9
COBY/caching/__init__.py
Normal file
9
COBY/caching/__init__.py
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
"""
|
||||||
|
Caching layer for the COBY system.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from .redis_manager import RedisManager
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'RedisManager'
|
||||||
|
]
|
||||||
281
COBY/caching/cache_keys.py
Normal file
281
COBY/caching/cache_keys.py
Normal file
@@ -0,0 +1,281 @@
|
|||||||
|
"""
|
||||||
|
Cache key management for Redis operations.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from typing import Optional
|
||||||
|
try:
|
||||||
|
from ..utils.logging import get_logger
|
||||||
|
except ImportError:
|
||||||
|
from utils.logging import get_logger
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class CacheKeys:
|
||||||
|
"""
|
||||||
|
Centralized cache key management for consistent Redis operations.
|
||||||
|
|
||||||
|
Provides standardized key patterns for different data types.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Key prefixes
|
||||||
|
ORDERBOOK_PREFIX = "ob"
|
||||||
|
HEATMAP_PREFIX = "hm"
|
||||||
|
TRADE_PREFIX = "tr"
|
||||||
|
METRICS_PREFIX = "mt"
|
||||||
|
STATUS_PREFIX = "st"
|
||||||
|
STATS_PREFIX = "stats"
|
||||||
|
|
||||||
|
# TTL values (seconds)
|
||||||
|
ORDERBOOK_TTL = 60 # 1 minute
|
||||||
|
HEATMAP_TTL = 30 # 30 seconds
|
||||||
|
TRADE_TTL = 300 # 5 minutes
|
||||||
|
METRICS_TTL = 120 # 2 minutes
|
||||||
|
STATUS_TTL = 60 # 1 minute
|
||||||
|
STATS_TTL = 300 # 5 minutes
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def orderbook_key(cls, symbol: str, exchange: str) -> str:
|
||||||
|
"""
|
||||||
|
Generate cache key for order book data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
exchange: Exchange name
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Cache key
|
||||||
|
"""
|
||||||
|
return f"{cls.ORDERBOOK_PREFIX}:{exchange}:{symbol}"
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def heatmap_key(cls, symbol: str, bucket_size: float = 1.0,
|
||||||
|
exchange: Optional[str] = None) -> str:
|
||||||
|
"""
|
||||||
|
Generate cache key for heatmap data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
bucket_size: Price bucket size
|
||||||
|
exchange: Exchange name (None for consolidated)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Cache key
|
||||||
|
"""
|
||||||
|
if exchange:
|
||||||
|
return f"{cls.HEATMAP_PREFIX}:{exchange}:{symbol}:{bucket_size}"
|
||||||
|
else:
|
||||||
|
return f"{cls.HEATMAP_PREFIX}:consolidated:{symbol}:{bucket_size}"
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def trade_key(cls, symbol: str, exchange: str, trade_id: str) -> str:
|
||||||
|
"""
|
||||||
|
Generate cache key for trade data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
exchange: Exchange name
|
||||||
|
trade_id: Trade identifier
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Cache key
|
||||||
|
"""
|
||||||
|
return f"{cls.TRADE_PREFIX}:{exchange}:{symbol}:{trade_id}"
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def metrics_key(cls, symbol: str, exchange: str) -> str:
|
||||||
|
"""
|
||||||
|
Generate cache key for metrics data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
exchange: Exchange name
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Cache key
|
||||||
|
"""
|
||||||
|
return f"{cls.METRICS_PREFIX}:{exchange}:{symbol}"
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def status_key(cls, exchange: str) -> str:
|
||||||
|
"""
|
||||||
|
Generate cache key for exchange status.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
exchange: Exchange name
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Cache key
|
||||||
|
"""
|
||||||
|
return f"{cls.STATUS_PREFIX}:{exchange}"
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def stats_key(cls, component: str) -> str:
|
||||||
|
"""
|
||||||
|
Generate cache key for component statistics.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
component: Component name
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Cache key
|
||||||
|
"""
|
||||||
|
return f"{cls.STATS_PREFIX}:{component}"
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def latest_heatmaps_key(cls, symbol: str) -> str:
|
||||||
|
"""
|
||||||
|
Generate cache key for latest heatmaps list.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Cache key
|
||||||
|
"""
|
||||||
|
return f"{cls.HEATMAP_PREFIX}:latest:{symbol}"
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def symbol_list_key(cls, exchange: str) -> str:
|
||||||
|
"""
|
||||||
|
Generate cache key for symbol list.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
exchange: Exchange name
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Cache key
|
||||||
|
"""
|
||||||
|
return f"symbols:{exchange}"
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def price_bucket_key(cls, symbol: str, exchange: str) -> str:
|
||||||
|
"""
|
||||||
|
Generate cache key for price buckets.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
exchange: Exchange name
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Cache key
|
||||||
|
"""
|
||||||
|
return f"buckets:{exchange}:{symbol}"
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def arbitrage_key(cls, symbol: str) -> str:
|
||||||
|
"""
|
||||||
|
Generate cache key for arbitrage opportunities.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Cache key
|
||||||
|
"""
|
||||||
|
return f"arbitrage:{symbol}"
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_ttl(cls, key: str) -> int:
|
||||||
|
"""
|
||||||
|
Get appropriate TTL for a cache key.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
key: Cache key
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
int: TTL in seconds
|
||||||
|
"""
|
||||||
|
if key.startswith(cls.ORDERBOOK_PREFIX):
|
||||||
|
return cls.ORDERBOOK_TTL
|
||||||
|
elif key.startswith(cls.HEATMAP_PREFIX):
|
||||||
|
return cls.HEATMAP_TTL
|
||||||
|
elif key.startswith(cls.TRADE_PREFIX):
|
||||||
|
return cls.TRADE_TTL
|
||||||
|
elif key.startswith(cls.METRICS_PREFIX):
|
||||||
|
return cls.METRICS_TTL
|
||||||
|
elif key.startswith(cls.STATUS_PREFIX):
|
||||||
|
return cls.STATUS_TTL
|
||||||
|
elif key.startswith(cls.STATS_PREFIX):
|
||||||
|
return cls.STATS_TTL
|
||||||
|
else:
|
||||||
|
return 300 # Default 5 minutes
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def parse_key(cls, key: str) -> dict:
|
||||||
|
"""
|
||||||
|
Parse cache key to extract components.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
key: Cache key to parse
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
dict: Parsed key components
|
||||||
|
"""
|
||||||
|
parts = key.split(':')
|
||||||
|
|
||||||
|
if len(parts) < 2:
|
||||||
|
return {'type': 'unknown', 'key': key}
|
||||||
|
|
||||||
|
key_type = parts[0]
|
||||||
|
|
||||||
|
if key_type == cls.ORDERBOOK_PREFIX and len(parts) >= 3:
|
||||||
|
return {
|
||||||
|
'type': 'orderbook',
|
||||||
|
'exchange': parts[1],
|
||||||
|
'symbol': parts[2]
|
||||||
|
}
|
||||||
|
elif key_type == cls.HEATMAP_PREFIX and len(parts) >= 4:
|
||||||
|
return {
|
||||||
|
'type': 'heatmap',
|
||||||
|
'exchange': parts[1] if parts[1] != 'consolidated' else None,
|
||||||
|
'symbol': parts[2],
|
||||||
|
'bucket_size': float(parts[3]) if len(parts) > 3 else 1.0
|
||||||
|
}
|
||||||
|
elif key_type == cls.TRADE_PREFIX and len(parts) >= 4:
|
||||||
|
return {
|
||||||
|
'type': 'trade',
|
||||||
|
'exchange': parts[1],
|
||||||
|
'symbol': parts[2],
|
||||||
|
'trade_id': parts[3]
|
||||||
|
}
|
||||||
|
elif key_type == cls.METRICS_PREFIX and len(parts) >= 3:
|
||||||
|
return {
|
||||||
|
'type': 'metrics',
|
||||||
|
'exchange': parts[1],
|
||||||
|
'symbol': parts[2]
|
||||||
|
}
|
||||||
|
elif key_type == cls.STATUS_PREFIX and len(parts) >= 2:
|
||||||
|
return {
|
||||||
|
'type': 'status',
|
||||||
|
'exchange': parts[1]
|
||||||
|
}
|
||||||
|
elif key_type == cls.STATS_PREFIX and len(parts) >= 2:
|
||||||
|
return {
|
||||||
|
'type': 'stats',
|
||||||
|
'component': parts[1]
|
||||||
|
}
|
||||||
|
else:
|
||||||
|
return {'type': 'unknown', 'key': key}
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_pattern(cls, key_type: str) -> str:
|
||||||
|
"""
|
||||||
|
Get Redis pattern for key type.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
key_type: Type of key
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Redis pattern
|
||||||
|
"""
|
||||||
|
patterns = {
|
||||||
|
'orderbook': f"{cls.ORDERBOOK_PREFIX}:*",
|
||||||
|
'heatmap': f"{cls.HEATMAP_PREFIX}:*",
|
||||||
|
'trade': f"{cls.TRADE_PREFIX}:*",
|
||||||
|
'metrics': f"{cls.METRICS_PREFIX}:*",
|
||||||
|
'status': f"{cls.STATUS_PREFIX}:*",
|
||||||
|
'stats': f"{cls.STATS_PREFIX}:*"
|
||||||
|
}
|
||||||
|
|
||||||
|
return patterns.get(key_type, "*")
|
||||||
355
COBY/caching/data_serializer.py
Normal file
355
COBY/caching/data_serializer.py
Normal file
@@ -0,0 +1,355 @@
|
|||||||
|
"""
|
||||||
|
Data serialization for Redis caching.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import pickle
|
||||||
|
import gzip
|
||||||
|
from typing import Any, Union, Dict, List
|
||||||
|
from datetime import datetime
|
||||||
|
from ..models.core import (
|
||||||
|
OrderBookSnapshot, TradeEvent, HeatmapData, PriceBuckets,
|
||||||
|
OrderBookMetrics, ImbalanceMetrics, ConsolidatedOrderBook
|
||||||
|
)
|
||||||
|
from ..utils.logging import get_logger
|
||||||
|
from ..utils.exceptions import ProcessingError
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class DataSerializer:
|
||||||
|
"""
|
||||||
|
Handles serialization and deserialization of data for Redis storage.
|
||||||
|
|
||||||
|
Supports multiple serialization formats:
|
||||||
|
- JSON for simple data
|
||||||
|
- Pickle for complex objects
|
||||||
|
- Compressed formats for large data
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, use_compression: bool = True):
|
||||||
|
"""
|
||||||
|
Initialize data serializer.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
use_compression: Whether to use gzip compression
|
||||||
|
"""
|
||||||
|
self.use_compression = use_compression
|
||||||
|
self.serialization_stats = {
|
||||||
|
'serialized': 0,
|
||||||
|
'deserialized': 0,
|
||||||
|
'compression_ratio': 0.0,
|
||||||
|
'errors': 0
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(f"Data serializer initialized (compression: {use_compression})")
|
||||||
|
|
||||||
|
def serialize(self, data: Any, format_type: str = 'auto') -> bytes:
|
||||||
|
"""
|
||||||
|
Serialize data for Redis storage.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Data to serialize
|
||||||
|
format_type: Serialization format ('json', 'pickle', 'auto')
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bytes: Serialized data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
# Determine format
|
||||||
|
if format_type == 'auto':
|
||||||
|
format_type = self._determine_format(data)
|
||||||
|
|
||||||
|
# Serialize based on format
|
||||||
|
if format_type == 'json':
|
||||||
|
serialized = self._serialize_json(data)
|
||||||
|
elif format_type == 'pickle':
|
||||||
|
serialized = self._serialize_pickle(data)
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Unsupported format: {format_type}")
|
||||||
|
|
||||||
|
# Apply compression if enabled
|
||||||
|
if self.use_compression:
|
||||||
|
original_size = len(serialized)
|
||||||
|
serialized = gzip.compress(serialized)
|
||||||
|
compressed_size = len(serialized)
|
||||||
|
|
||||||
|
# Update compression ratio
|
||||||
|
if original_size > 0:
|
||||||
|
ratio = compressed_size / original_size
|
||||||
|
self.serialization_stats['compression_ratio'] = (
|
||||||
|
(self.serialization_stats['compression_ratio'] *
|
||||||
|
self.serialization_stats['serialized'] + ratio) /
|
||||||
|
(self.serialization_stats['serialized'] + 1)
|
||||||
|
)
|
||||||
|
|
||||||
|
self.serialization_stats['serialized'] += 1
|
||||||
|
return serialized
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
self.serialization_stats['errors'] += 1
|
||||||
|
logger.error(f"Serialization error: {e}")
|
||||||
|
raise ProcessingError(f"Serialization failed: {e}", "SERIALIZE_ERROR")
|
||||||
|
|
||||||
|
def deserialize(self, data: bytes, format_type: str = 'auto') -> Any:
|
||||||
|
"""
|
||||||
|
Deserialize data from Redis storage.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Serialized data
|
||||||
|
format_type: Expected format ('json', 'pickle', 'auto')
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Any: Deserialized data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
# Decompress if needed
|
||||||
|
if self.use_compression:
|
||||||
|
try:
|
||||||
|
data = gzip.decompress(data)
|
||||||
|
except gzip.BadGzipFile:
|
||||||
|
# Data might not be compressed
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Determine format if auto
|
||||||
|
if format_type == 'auto':
|
||||||
|
format_type = self._detect_format(data)
|
||||||
|
|
||||||
|
# Deserialize based on format
|
||||||
|
if format_type == 'json':
|
||||||
|
result = self._deserialize_json(data)
|
||||||
|
elif format_type == 'pickle':
|
||||||
|
result = self._deserialize_pickle(data)
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Unsupported format: {format_type}")
|
||||||
|
|
||||||
|
self.serialization_stats['deserialized'] += 1
|
||||||
|
return result
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
self.serialization_stats['errors'] += 1
|
||||||
|
logger.error(f"Deserialization error: {e}")
|
||||||
|
raise ProcessingError(f"Deserialization failed: {e}", "DESERIALIZE_ERROR")
|
||||||
|
|
||||||
|
def _determine_format(self, data: Any) -> str:
|
||||||
|
"""Determine best serialization format for data"""
|
||||||
|
# Use JSON for simple data types
|
||||||
|
if isinstance(data, (dict, list, str, int, float, bool)) or data is None:
|
||||||
|
return 'json'
|
||||||
|
|
||||||
|
# Use pickle for complex objects
|
||||||
|
return 'pickle'
|
||||||
|
|
||||||
|
def _detect_format(self, data: bytes) -> str:
|
||||||
|
"""Detect serialization format from data"""
|
||||||
|
try:
|
||||||
|
# Try JSON first
|
||||||
|
json.loads(data.decode('utf-8'))
|
||||||
|
return 'json'
|
||||||
|
except (json.JSONDecodeError, UnicodeDecodeError):
|
||||||
|
# Assume pickle
|
||||||
|
return 'pickle'
|
||||||
|
|
||||||
|
def _serialize_json(self, data: Any) -> bytes:
|
||||||
|
"""Serialize data as JSON"""
|
||||||
|
# Convert complex objects to dictionaries
|
||||||
|
if hasattr(data, '__dict__'):
|
||||||
|
data = self._object_to_dict(data)
|
||||||
|
elif isinstance(data, list):
|
||||||
|
data = [self._object_to_dict(item) if hasattr(item, '__dict__') else item
|
||||||
|
for item in data]
|
||||||
|
|
||||||
|
json_str = json.dumps(data, default=self._json_serializer, ensure_ascii=False)
|
||||||
|
return json_str.encode('utf-8')
|
||||||
|
|
||||||
|
def _deserialize_json(self, data: bytes) -> Any:
|
||||||
|
"""Deserialize JSON data"""
|
||||||
|
json_str = data.decode('utf-8')
|
||||||
|
return json.loads(json_str, object_hook=self._json_deserializer)
|
||||||
|
|
||||||
|
def _serialize_pickle(self, data: Any) -> bytes:
|
||||||
|
"""Serialize data as pickle"""
|
||||||
|
return pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)
|
||||||
|
|
||||||
|
def _deserialize_pickle(self, data: bytes) -> Any:
|
||||||
|
"""Deserialize pickle data"""
|
||||||
|
return pickle.loads(data)
|
||||||
|
|
||||||
|
def _object_to_dict(self, obj: Any) -> Dict:
|
||||||
|
"""Convert object to dictionary for JSON serialization"""
|
||||||
|
if isinstance(obj, (OrderBookSnapshot, TradeEvent, HeatmapData,
|
||||||
|
PriceBuckets, OrderBookMetrics, ImbalanceMetrics,
|
||||||
|
ConsolidatedOrderBook)):
|
||||||
|
result = {
|
||||||
|
'__type__': obj.__class__.__name__,
|
||||||
|
'__data__': {}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Convert object attributes
|
||||||
|
for key, value in obj.__dict__.items():
|
||||||
|
if isinstance(value, datetime):
|
||||||
|
result['__data__'][key] = {
|
||||||
|
'__datetime__': value.isoformat()
|
||||||
|
}
|
||||||
|
elif isinstance(value, list):
|
||||||
|
result['__data__'][key] = [
|
||||||
|
self._object_to_dict(item) if hasattr(item, '__dict__') else item
|
||||||
|
for item in value
|
||||||
|
]
|
||||||
|
elif hasattr(value, '__dict__'):
|
||||||
|
result['__data__'][key] = self._object_to_dict(value)
|
||||||
|
else:
|
||||||
|
result['__data__'][key] = value
|
||||||
|
|
||||||
|
return result
|
||||||
|
else:
|
||||||
|
return obj.__dict__ if hasattr(obj, '__dict__') else obj
|
||||||
|
|
||||||
|
def _json_serializer(self, obj: Any) -> Any:
|
||||||
|
"""Custom JSON serializer for special types"""
|
||||||
|
if isinstance(obj, datetime):
|
||||||
|
return {'__datetime__': obj.isoformat()}
|
||||||
|
elif hasattr(obj, '__dict__'):
|
||||||
|
return self._object_to_dict(obj)
|
||||||
|
else:
|
||||||
|
return str(obj)
|
||||||
|
|
||||||
|
def _json_deserializer(self, obj: Dict) -> Any:
|
||||||
|
"""Custom JSON deserializer for special types"""
|
||||||
|
if '__datetime__' in obj:
|
||||||
|
return datetime.fromisoformat(obj['__datetime__'])
|
||||||
|
elif '__type__' in obj and '__data__' in obj:
|
||||||
|
return self._reconstruct_object(obj['__type__'], obj['__data__'])
|
||||||
|
else:
|
||||||
|
return obj
|
||||||
|
|
||||||
|
def _reconstruct_object(self, type_name: str, data: Dict) -> Any:
|
||||||
|
"""Reconstruct object from serialized data"""
|
||||||
|
# Import required classes
|
||||||
|
from ..models.core import (
|
||||||
|
OrderBookSnapshot, TradeEvent, HeatmapData, PriceBuckets,
|
||||||
|
OrderBookMetrics, ImbalanceMetrics, ConsolidatedOrderBook,
|
||||||
|
PriceLevel, HeatmapPoint
|
||||||
|
)
|
||||||
|
|
||||||
|
# Map type names to classes
|
||||||
|
type_map = {
|
||||||
|
'OrderBookSnapshot': OrderBookSnapshot,
|
||||||
|
'TradeEvent': TradeEvent,
|
||||||
|
'HeatmapData': HeatmapData,
|
||||||
|
'PriceBuckets': PriceBuckets,
|
||||||
|
'OrderBookMetrics': OrderBookMetrics,
|
||||||
|
'ImbalanceMetrics': ImbalanceMetrics,
|
||||||
|
'ConsolidatedOrderBook': ConsolidatedOrderBook,
|
||||||
|
'PriceLevel': PriceLevel,
|
||||||
|
'HeatmapPoint': HeatmapPoint
|
||||||
|
}
|
||||||
|
|
||||||
|
if type_name in type_map:
|
||||||
|
cls = type_map[type_name]
|
||||||
|
|
||||||
|
# Recursively deserialize nested objects
|
||||||
|
processed_data = {}
|
||||||
|
for key, value in data.items():
|
||||||
|
if isinstance(value, dict) and '__datetime__' in value:
|
||||||
|
processed_data[key] = datetime.fromisoformat(value['__datetime__'])
|
||||||
|
elif isinstance(value, dict) and '__type__' in value:
|
||||||
|
processed_data[key] = self._reconstruct_object(
|
||||||
|
value['__type__'], value['__data__']
|
||||||
|
)
|
||||||
|
elif isinstance(value, list):
|
||||||
|
processed_data[key] = [
|
||||||
|
self._reconstruct_object(item['__type__'], item['__data__'])
|
||||||
|
if isinstance(item, dict) and '__type__' in item
|
||||||
|
else item
|
||||||
|
for item in value
|
||||||
|
]
|
||||||
|
else:
|
||||||
|
processed_data[key] = value
|
||||||
|
|
||||||
|
try:
|
||||||
|
return cls(**processed_data)
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Failed to reconstruct {type_name}: {e}")
|
||||||
|
return processed_data
|
||||||
|
else:
|
||||||
|
logger.warning(f"Unknown type for reconstruction: {type_name}")
|
||||||
|
return data
|
||||||
|
|
||||||
|
def serialize_heatmap(self, heatmap: HeatmapData) -> bytes:
|
||||||
|
"""Specialized serialization for heatmap data"""
|
||||||
|
try:
|
||||||
|
# Create optimized representation
|
||||||
|
heatmap_dict = {
|
||||||
|
'symbol': heatmap.symbol,
|
||||||
|
'timestamp': heatmap.timestamp.isoformat(),
|
||||||
|
'bucket_size': heatmap.bucket_size,
|
||||||
|
'points': [
|
||||||
|
{
|
||||||
|
'p': point.price, # price
|
||||||
|
'v': point.volume, # volume
|
||||||
|
'i': point.intensity, # intensity
|
||||||
|
's': point.side # side
|
||||||
|
}
|
||||||
|
for point in heatmap.data
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
return self.serialize(heatmap_dict, 'json')
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Heatmap serialization error: {e}")
|
||||||
|
# Fallback to standard serialization
|
||||||
|
return self.serialize(heatmap, 'pickle')
|
||||||
|
|
||||||
|
def deserialize_heatmap(self, data: bytes) -> HeatmapData:
|
||||||
|
"""Specialized deserialization for heatmap data"""
|
||||||
|
try:
|
||||||
|
# Try optimized format first
|
||||||
|
heatmap_dict = self.deserialize(data, 'json')
|
||||||
|
|
||||||
|
if isinstance(heatmap_dict, dict) and 'points' in heatmap_dict:
|
||||||
|
from ..models.core import HeatmapData, HeatmapPoint
|
||||||
|
|
||||||
|
# Reconstruct heatmap points
|
||||||
|
points = []
|
||||||
|
for point_data in heatmap_dict['points']:
|
||||||
|
point = HeatmapPoint(
|
||||||
|
price=point_data['p'],
|
||||||
|
volume=point_data['v'],
|
||||||
|
intensity=point_data['i'],
|
||||||
|
side=point_data['s']
|
||||||
|
)
|
||||||
|
points.append(point)
|
||||||
|
|
||||||
|
# Create heatmap
|
||||||
|
heatmap = HeatmapData(
|
||||||
|
symbol=heatmap_dict['symbol'],
|
||||||
|
timestamp=datetime.fromisoformat(heatmap_dict['timestamp']),
|
||||||
|
bucket_size=heatmap_dict['bucket_size']
|
||||||
|
)
|
||||||
|
heatmap.data = points
|
||||||
|
|
||||||
|
return heatmap
|
||||||
|
else:
|
||||||
|
# Fallback to standard deserialization
|
||||||
|
return self.deserialize(data, 'pickle')
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Heatmap deserialization error: {e}")
|
||||||
|
# Final fallback
|
||||||
|
return self.deserialize(data, 'pickle')
|
||||||
|
|
||||||
|
def get_stats(self) -> Dict[str, Any]:
|
||||||
|
"""Get serialization statistics"""
|
||||||
|
return self.serialization_stats.copy()
|
||||||
|
|
||||||
|
def reset_stats(self) -> None:
|
||||||
|
"""Reset serialization statistics"""
|
||||||
|
self.serialization_stats = {
|
||||||
|
'serialized': 0,
|
||||||
|
'deserialized': 0,
|
||||||
|
'compression_ratio': 0.0,
|
||||||
|
'errors': 0
|
||||||
|
}
|
||||||
|
logger.info("Serialization statistics reset")
|
||||||
122
COBY/caching/redis_manager.py
Normal file
122
COBY/caching/redis_manager.py
Normal file
@@ -0,0 +1,122 @@
|
|||||||
|
"""
|
||||||
|
Simple Redis manager stub.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from typing import Any, Optional, List, Dict
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class RedisManager:
|
||||||
|
"""Simple Redis manager stub"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.connected = False
|
||||||
|
self.cache = {} # In-memory cache as fallback
|
||||||
|
self.stats = {
|
||||||
|
'hits': 0,
|
||||||
|
'misses': 0,
|
||||||
|
'sets': 0,
|
||||||
|
'deletes': 0
|
||||||
|
}
|
||||||
|
|
||||||
|
async def connect(self):
|
||||||
|
"""Connect to Redis (stub)"""
|
||||||
|
logger.info("Redis manager initialized (stub mode)")
|
||||||
|
self.connected = True
|
||||||
|
|
||||||
|
async def initialize(self):
|
||||||
|
"""Initialize Redis manager"""
|
||||||
|
await self.connect()
|
||||||
|
|
||||||
|
async def disconnect(self):
|
||||||
|
"""Disconnect from Redis"""
|
||||||
|
self.connected = False
|
||||||
|
|
||||||
|
async def close(self):
|
||||||
|
"""Close Redis connection (alias for disconnect)"""
|
||||||
|
await self.disconnect()
|
||||||
|
|
||||||
|
def is_connected(self) -> bool:
|
||||||
|
"""Check if connected"""
|
||||||
|
return self.connected
|
||||||
|
|
||||||
|
async def ping(self) -> bool:
|
||||||
|
"""Ping Redis to check connection"""
|
||||||
|
return self.connected
|
||||||
|
|
||||||
|
async def set(self, key: str, value: Any, ttl: Optional[int] = None):
|
||||||
|
"""Set value in cache"""
|
||||||
|
self.cache[key] = value
|
||||||
|
self.stats['sets'] += 1
|
||||||
|
logger.debug(f"Cached key: {key}")
|
||||||
|
|
||||||
|
async def get(self, key: str) -> Optional[Any]:
|
||||||
|
"""Get value from cache"""
|
||||||
|
value = self.cache.get(key)
|
||||||
|
if value is not None:
|
||||||
|
self.stats['hits'] += 1
|
||||||
|
else:
|
||||||
|
self.stats['misses'] += 1
|
||||||
|
return value
|
||||||
|
|
||||||
|
async def delete(self, key: str):
|
||||||
|
"""Delete key from cache"""
|
||||||
|
self.cache.pop(key, None)
|
||||||
|
self.stats['deletes'] += 1
|
||||||
|
|
||||||
|
async def keys(self, pattern: str) -> List[str]:
|
||||||
|
"""Get keys matching pattern"""
|
||||||
|
if pattern.endswith('*'):
|
||||||
|
prefix = pattern[:-1]
|
||||||
|
return [key for key in self.cache.keys() if key.startswith(prefix)]
|
||||||
|
return [key for key in self.cache.keys() if key == pattern]
|
||||||
|
|
||||||
|
async def get_heatmap(self, symbol: str, exchange: Optional[str] = None) -> Optional[Any]:
|
||||||
|
"""Get heatmap data for symbol"""
|
||||||
|
key = f"heatmap:{symbol}:{exchange or 'consolidated'}"
|
||||||
|
return await self.get(key)
|
||||||
|
|
||||||
|
async def get_orderbook(self, symbol: str, exchange: str) -> Optional[Any]:
|
||||||
|
"""Get order book data for symbol on exchange"""
|
||||||
|
key = f"orderbook:{symbol}:{exchange}"
|
||||||
|
return await self.get(key)
|
||||||
|
|
||||||
|
async def get_metrics(self, symbol: str, exchange: str) -> Optional[Any]:
|
||||||
|
"""Get metrics data for symbol on exchange"""
|
||||||
|
key = f"metrics:{symbol}:{exchange}"
|
||||||
|
return await self.get(key)
|
||||||
|
|
||||||
|
async def get_exchange_status(self, exchange: str) -> Optional[Any]:
|
||||||
|
"""Get exchange status"""
|
||||||
|
key = f"st:{exchange}"
|
||||||
|
return await self.get(key)
|
||||||
|
|
||||||
|
def get_stats(self) -> Dict[str, Any]:
|
||||||
|
"""Get cache statistics"""
|
||||||
|
total_requests = self.stats['hits'] + self.stats['misses']
|
||||||
|
hit_rate = self.stats['hits'] / max(1, total_requests)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'hits': self.stats['hits'],
|
||||||
|
'misses': self.stats['misses'],
|
||||||
|
'sets': self.stats['sets'],
|
||||||
|
'deletes': self.stats['deletes'],
|
||||||
|
'hit_rate': hit_rate,
|
||||||
|
'total_keys': len(self.cache),
|
||||||
|
'connected': self.connected
|
||||||
|
}
|
||||||
|
|
||||||
|
async def health_check(self) -> Dict[str, Any]:
|
||||||
|
"""Get Redis health status"""
|
||||||
|
return {
|
||||||
|
'connected': self.connected,
|
||||||
|
'total_keys': len(self.cache),
|
||||||
|
'memory_usage': 'N/A (stub mode)',
|
||||||
|
'uptime': 'N/A (stub mode)'
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# Global instance
|
||||||
|
redis_manager = RedisManager()
|
||||||
165
COBY/config.py
Normal file
165
COBY/config.py
Normal file
@@ -0,0 +1,165 @@
|
|||||||
|
"""
|
||||||
|
Configuration management for the multi-exchange data aggregation system.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from typing import List, Dict, Any
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class DatabaseConfig:
|
||||||
|
"""Database configuration settings"""
|
||||||
|
host: str = os.getenv('DB_HOST', '192.168.0.10')
|
||||||
|
port: int = int(os.getenv('DB_PORT', '5432'))
|
||||||
|
name: str = os.getenv('DB_NAME', 'market_data')
|
||||||
|
user: str = os.getenv('DB_USER', 'market_user')
|
||||||
|
password: str = os.getenv('DB_PASSWORD', 'market_data_secure_pass_2024')
|
||||||
|
schema: str = os.getenv('DB_SCHEMA', 'market_data')
|
||||||
|
pool_size: int = int(os.getenv('DB_POOL_SIZE', '10'))
|
||||||
|
max_overflow: int = int(os.getenv('DB_MAX_OVERFLOW', '20'))
|
||||||
|
pool_timeout: int = int(os.getenv('DB_POOL_TIMEOUT', '30'))
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class RedisConfig:
|
||||||
|
"""Redis configuration settings"""
|
||||||
|
host: str = os.getenv('REDIS_HOST', '192.168.0.10')
|
||||||
|
port: int = int(os.getenv('REDIS_PORT', '6379'))
|
||||||
|
password: str = os.getenv('REDIS_PASSWORD', 'market_data_redis_2024')
|
||||||
|
db: int = int(os.getenv('REDIS_DB', '0'))
|
||||||
|
max_connections: int = int(os.getenv('REDIS_MAX_CONNECTIONS', '50'))
|
||||||
|
socket_timeout: int = int(os.getenv('REDIS_SOCKET_TIMEOUT', '5'))
|
||||||
|
socket_connect_timeout: int = int(os.getenv('REDIS_CONNECT_TIMEOUT', '5'))
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ExchangeConfig:
|
||||||
|
"""Exchange configuration settings"""
|
||||||
|
exchanges: List[str] = field(default_factory=lambda: [
|
||||||
|
'binance', 'coinbase', 'kraken', 'bybit', 'okx',
|
||||||
|
'huobi', 'kucoin', 'gateio', 'bitfinex', 'mexc'
|
||||||
|
])
|
||||||
|
symbols: List[str] = field(default_factory=lambda: ['BTCUSDT', 'ETHUSDT'])
|
||||||
|
max_connections_per_exchange: int = int(os.getenv('MAX_CONNECTIONS_PER_EXCHANGE', '5'))
|
||||||
|
reconnect_delay: int = int(os.getenv('RECONNECT_DELAY', '5'))
|
||||||
|
max_reconnect_attempts: int = int(os.getenv('MAX_RECONNECT_ATTEMPTS', '10'))
|
||||||
|
heartbeat_interval: int = int(os.getenv('HEARTBEAT_INTERVAL', '30'))
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class AggregationConfig:
|
||||||
|
"""Data aggregation configuration"""
|
||||||
|
bucket_size: float = float(os.getenv('BUCKET_SIZE', '1.0')) # $1 USD buckets for all symbols
|
||||||
|
heatmap_depth: int = int(os.getenv('HEATMAP_DEPTH', '50')) # Number of price levels
|
||||||
|
update_frequency: float = float(os.getenv('UPDATE_FREQUENCY', '0.5')) # Seconds
|
||||||
|
volume_threshold: float = float(os.getenv('VOLUME_THRESHOLD', '0.01')) # Minimum volume
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class PerformanceConfig:
|
||||||
|
"""Performance and optimization settings"""
|
||||||
|
data_buffer_size: int = int(os.getenv('DATA_BUFFER_SIZE', '10000'))
|
||||||
|
batch_write_size: int = int(os.getenv('BATCH_WRITE_SIZE', '1000'))
|
||||||
|
max_memory_usage: int = int(os.getenv('MAX_MEMORY_USAGE', '2048')) # MB
|
||||||
|
gc_threshold: float = float(os.getenv('GC_THRESHOLD', '0.8')) # 80% of max memory
|
||||||
|
processing_timeout: int = int(os.getenv('PROCESSING_TIMEOUT', '10')) # Seconds
|
||||||
|
max_queue_size: int = int(os.getenv('MAX_QUEUE_SIZE', '50000'))
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class APIConfig:
|
||||||
|
"""API server configuration"""
|
||||||
|
host: str = os.getenv('API_HOST', '0.0.0.0')
|
||||||
|
port: int = int(os.getenv('API_PORT', '8080'))
|
||||||
|
websocket_port: int = int(os.getenv('WS_PORT', '8081'))
|
||||||
|
cors_origins: List[str] = field(default_factory=lambda: ['*'])
|
||||||
|
rate_limit: int = int(os.getenv('RATE_LIMIT', '100')) # Requests per minute
|
||||||
|
max_connections: int = int(os.getenv('MAX_WS_CONNECTIONS', '1000'))
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class LoggingConfig:
|
||||||
|
"""Logging configuration"""
|
||||||
|
level: str = os.getenv('LOG_LEVEL', 'INFO')
|
||||||
|
format: str = os.getenv('LOG_FORMAT', '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
||||||
|
file_path: str = os.getenv('LOG_FILE', 'logs/coby.log')
|
||||||
|
max_file_size: int = int(os.getenv('LOG_MAX_SIZE', '100')) # MB
|
||||||
|
backup_count: int = int(os.getenv('LOG_BACKUP_COUNT', '5'))
|
||||||
|
enable_correlation_id: bool = os.getenv('ENABLE_CORRELATION_ID', 'true').lower() == 'true'
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class Config:
|
||||||
|
"""Main configuration class"""
|
||||||
|
database: DatabaseConfig = field(default_factory=DatabaseConfig)
|
||||||
|
redis: RedisConfig = field(default_factory=RedisConfig)
|
||||||
|
exchanges: ExchangeConfig = field(default_factory=ExchangeConfig)
|
||||||
|
aggregation: AggregationConfig = field(default_factory=AggregationConfig)
|
||||||
|
performance: PerformanceConfig = field(default_factory=PerformanceConfig)
|
||||||
|
api: APIConfig = field(default_factory=APIConfig)
|
||||||
|
logging: LoggingConfig = field(default_factory=LoggingConfig)
|
||||||
|
|
||||||
|
# Environment
|
||||||
|
environment: str = os.getenv('ENVIRONMENT', 'development')
|
||||||
|
debug: bool = os.getenv('DEBUG', 'false').lower() == 'true'
|
||||||
|
|
||||||
|
def __post_init__(self):
|
||||||
|
"""Post-initialization validation and setup"""
|
||||||
|
# Create logs directory if it doesn't exist
|
||||||
|
log_dir = Path(self.logging.file_path).parent
|
||||||
|
log_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
# Validate bucket size
|
||||||
|
if self.aggregation.bucket_size <= 0:
|
||||||
|
raise ValueError("Bucket size must be positive")
|
||||||
|
|
||||||
|
def get_bucket_size(self, symbol: str = None) -> float:
|
||||||
|
"""Get bucket size (now universal $1 for all symbols)"""
|
||||||
|
return self.aggregation.bucket_size
|
||||||
|
|
||||||
|
def get_database_url(self) -> str:
|
||||||
|
"""Get database connection URL"""
|
||||||
|
return (f"postgresql://{self.database.user}:{self.database.password}"
|
||||||
|
f"@{self.database.host}:{self.database.port}/{self.database.name}")
|
||||||
|
|
||||||
|
def get_redis_url(self) -> str:
|
||||||
|
"""Get Redis connection URL"""
|
||||||
|
auth = f":{self.redis.password}@" if self.redis.password else ""
|
||||||
|
return f"redis://{auth}{self.redis.host}:{self.redis.port}/{self.redis.db}"
|
||||||
|
|
||||||
|
def to_dict(self) -> Dict[str, Any]:
|
||||||
|
"""Convert configuration to dictionary"""
|
||||||
|
return {
|
||||||
|
'database': {
|
||||||
|
'host': self.database.host,
|
||||||
|
'port': self.database.port,
|
||||||
|
'name': self.database.name,
|
||||||
|
'schema': self.database.schema,
|
||||||
|
},
|
||||||
|
'redis': {
|
||||||
|
'host': self.redis.host,
|
||||||
|
'port': self.redis.port,
|
||||||
|
'db': self.redis.db,
|
||||||
|
},
|
||||||
|
'exchanges': {
|
||||||
|
'count': len(self.exchanges.exchanges),
|
||||||
|
'symbols': self.exchanges.symbols,
|
||||||
|
},
|
||||||
|
'aggregation': {
|
||||||
|
'bucket_size': self.aggregation.bucket_size,
|
||||||
|
'heatmap_depth': self.aggregation.heatmap_depth,
|
||||||
|
},
|
||||||
|
'api': {
|
||||||
|
'host': self.api.host,
|
||||||
|
'port': self.api.port,
|
||||||
|
'websocket_port': self.api.websocket_port,
|
||||||
|
},
|
||||||
|
'environment': self.environment,
|
||||||
|
'debug': self.debug,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# Global configuration instance
|
||||||
|
config = Config()
|
||||||
33
COBY/connectors/__init__.py
Normal file
33
COBY/connectors/__init__.py
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
"""
|
||||||
|
Exchange connector implementations for the COBY system.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from .base_connector import BaseExchangeConnector
|
||||||
|
from .binance_connector import BinanceConnector
|
||||||
|
from .coinbase_connector import CoinbaseConnector
|
||||||
|
from .kraken_connector import KrakenConnector
|
||||||
|
from .bybit_connector import BybitConnector
|
||||||
|
from .okx_connector import OKXConnector
|
||||||
|
from .huobi_connector import HuobiConnector
|
||||||
|
from .kucoin_connector import KuCoinConnector
|
||||||
|
from .gateio_connector import GateIOConnector
|
||||||
|
from .bitfinex_connector import BitfinexConnector
|
||||||
|
from .mexc_connector import MEXCConnector
|
||||||
|
from .connection_manager import ConnectionManager
|
||||||
|
from .circuit_breaker import CircuitBreaker
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'BaseExchangeConnector',
|
||||||
|
'BinanceConnector',
|
||||||
|
'CoinbaseConnector',
|
||||||
|
'KrakenConnector',
|
||||||
|
'BybitConnector',
|
||||||
|
'OKXConnector',
|
||||||
|
'HuobiConnector',
|
||||||
|
'KuCoinConnector',
|
||||||
|
'GateIOConnector',
|
||||||
|
'BitfinexConnector',
|
||||||
|
'MEXCConnector',
|
||||||
|
'ConnectionManager',
|
||||||
|
'CircuitBreaker'
|
||||||
|
]
|
||||||
421
COBY/connectors/base_connector.py
Normal file
421
COBY/connectors/base_connector.py
Normal file
@@ -0,0 +1,421 @@
|
|||||||
|
"""
|
||||||
|
Base exchange connector with WebSocket connection management, circuit breaker pattern,
|
||||||
|
and comprehensive error handling.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
import logging
|
||||||
|
import json
|
||||||
|
import websockets
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import Dict, List, Optional, Callable, Any
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
from ..models.core import ConnectionStatus, OrderBookSnapshot, TradeEvent
|
||||||
|
from ..interfaces.exchange_connector import ExchangeConnector
|
||||||
|
from ..utils.logging import get_logger, set_correlation_id
|
||||||
|
from ..utils.exceptions import ConnectionError, ValidationError
|
||||||
|
from ..utils.timing import get_current_timestamp
|
||||||
|
from .connection_manager import ConnectionManager
|
||||||
|
from .circuit_breaker import CircuitBreaker, CircuitBreakerOpenError
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class BaseExchangeConnector(ExchangeConnector):
|
||||||
|
"""
|
||||||
|
Base implementation of exchange connector with common functionality.
|
||||||
|
|
||||||
|
Provides:
|
||||||
|
- WebSocket connection management
|
||||||
|
- Exponential backoff retry logic
|
||||||
|
- Circuit breaker pattern
|
||||||
|
- Health monitoring
|
||||||
|
- Message handling framework
|
||||||
|
- Subscription management
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, exchange_name: str, websocket_url: str):
|
||||||
|
"""
|
||||||
|
Initialize base exchange connector.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
exchange_name: Name of the exchange
|
||||||
|
websocket_url: WebSocket URL for the exchange
|
||||||
|
"""
|
||||||
|
super().__init__(exchange_name)
|
||||||
|
|
||||||
|
self.websocket_url = websocket_url
|
||||||
|
self.websocket: Optional[websockets.WebSocketServerProtocol] = None
|
||||||
|
self.subscriptions: Dict[str, List[str]] = {} # symbol -> [subscription_types]
|
||||||
|
self.message_handlers: Dict[str, Callable] = {}
|
||||||
|
|
||||||
|
# Connection management
|
||||||
|
self.connection_manager = ConnectionManager(
|
||||||
|
name=f"{exchange_name}_connector",
|
||||||
|
max_retries=10,
|
||||||
|
initial_delay=1.0,
|
||||||
|
max_delay=300.0,
|
||||||
|
health_check_interval=30
|
||||||
|
)
|
||||||
|
|
||||||
|
# Circuit breaker
|
||||||
|
self.circuit_breaker = CircuitBreaker(
|
||||||
|
failure_threshold=5,
|
||||||
|
recovery_timeout=60,
|
||||||
|
expected_exception=Exception,
|
||||||
|
name=f"{exchange_name}_circuit"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Statistics
|
||||||
|
self.message_count = 0
|
||||||
|
self.error_count = 0
|
||||||
|
self.last_message_time: Optional[datetime] = None
|
||||||
|
|
||||||
|
# Callbacks
|
||||||
|
self.data_callbacks = []
|
||||||
|
self.status_callbacks = []
|
||||||
|
|
||||||
|
# Setup callbacks
|
||||||
|
self.connection_manager.on_connect = self._on_connect
|
||||||
|
self.connection_manager.on_disconnect = self._on_disconnect
|
||||||
|
self.connection_manager.on_error = self._on_error
|
||||||
|
self.connection_manager.on_health_check = self._health_check
|
||||||
|
|
||||||
|
# Message processing
|
||||||
|
self._message_queue = asyncio.Queue(maxsize=10000)
|
||||||
|
self._message_processor_task: Optional[asyncio.Task] = None
|
||||||
|
|
||||||
|
logger.info(f"Base connector initialized for {exchange_name}")
|
||||||
|
|
||||||
|
async def connect(self) -> bool:
|
||||||
|
"""Establish connection to the exchange WebSocket"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
logger.info(f"Connecting to {self.exchange_name} at {self.websocket_url}")
|
||||||
|
|
||||||
|
return await self.connection_manager.connect(self._establish_websocket_connection)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to connect to {self.exchange_name}: {e}")
|
||||||
|
self._notify_status_callbacks(ConnectionStatus.ERROR)
|
||||||
|
return False
|
||||||
|
|
||||||
|
async def disconnect(self) -> None:
|
||||||
|
"""Disconnect from the exchange WebSocket"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
logger.info(f"Disconnecting from {self.exchange_name}")
|
||||||
|
|
||||||
|
await self.connection_manager.disconnect(self._close_websocket_connection)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error during disconnect from {self.exchange_name}: {e}")
|
||||||
|
|
||||||
|
async def _establish_websocket_connection(self) -> None:
|
||||||
|
"""Establish WebSocket connection"""
|
||||||
|
try:
|
||||||
|
# Use circuit breaker for connection
|
||||||
|
self.websocket = await self.circuit_breaker.call_async(
|
||||||
|
websockets.connect,
|
||||||
|
self.websocket_url,
|
||||||
|
ping_interval=20,
|
||||||
|
ping_timeout=10,
|
||||||
|
close_timeout=10
|
||||||
|
)
|
||||||
|
|
||||||
|
logger.info(f"WebSocket connected to {self.exchange_name}")
|
||||||
|
|
||||||
|
# Start message processing
|
||||||
|
await self._start_message_processing()
|
||||||
|
|
||||||
|
except CircuitBreakerOpenError as e:
|
||||||
|
logger.error(f"Circuit breaker open for {self.exchange_name}: {e}")
|
||||||
|
raise ConnectionError(f"Circuit breaker open: {e}", "CIRCUIT_BREAKER_OPEN")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"WebSocket connection failed for {self.exchange_name}: {e}")
|
||||||
|
raise ConnectionError(f"WebSocket connection failed: {e}", "WEBSOCKET_CONNECT_FAILED")
|
||||||
|
|
||||||
|
async def _close_websocket_connection(self) -> None:
|
||||||
|
"""Close WebSocket connection"""
|
||||||
|
try:
|
||||||
|
# Stop message processing
|
||||||
|
await self._stop_message_processing()
|
||||||
|
|
||||||
|
# Close WebSocket
|
||||||
|
if self.websocket:
|
||||||
|
await self.websocket.close()
|
||||||
|
self.websocket = None
|
||||||
|
|
||||||
|
logger.info(f"WebSocket disconnected from {self.exchange_name}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Error closing WebSocket for {self.exchange_name}: {e}")
|
||||||
|
|
||||||
|
async def _start_message_processing(self) -> None:
|
||||||
|
"""Start message processing tasks"""
|
||||||
|
if self._message_processor_task:
|
||||||
|
return
|
||||||
|
|
||||||
|
# Start message processor
|
||||||
|
self._message_processor_task = asyncio.create_task(self._message_processor())
|
||||||
|
|
||||||
|
# Start message receiver
|
||||||
|
asyncio.create_task(self._message_receiver())
|
||||||
|
|
||||||
|
logger.debug(f"Message processing started for {self.exchange_name}")
|
||||||
|
|
||||||
|
async def _stop_message_processing(self) -> None:
|
||||||
|
"""Stop message processing tasks"""
|
||||||
|
if self._message_processor_task:
|
||||||
|
self._message_processor_task.cancel()
|
||||||
|
try:
|
||||||
|
await self._message_processor_task
|
||||||
|
except asyncio.CancelledError:
|
||||||
|
pass
|
||||||
|
self._message_processor_task = None
|
||||||
|
|
||||||
|
logger.debug(f"Message processing stopped for {self.exchange_name}")
|
||||||
|
|
||||||
|
async def _message_receiver(self) -> None:
|
||||||
|
"""Receive messages from WebSocket"""
|
||||||
|
try:
|
||||||
|
while self.websocket and not self.websocket.closed:
|
||||||
|
try:
|
||||||
|
message = await asyncio.wait_for(self.websocket.recv(), timeout=30.0)
|
||||||
|
|
||||||
|
# Queue message for processing
|
||||||
|
try:
|
||||||
|
self._message_queue.put_nowait(message)
|
||||||
|
except asyncio.QueueFull:
|
||||||
|
logger.warning(f"Message queue full for {self.exchange_name}, dropping message")
|
||||||
|
|
||||||
|
except asyncio.TimeoutError:
|
||||||
|
# Send ping to keep connection alive
|
||||||
|
if self.websocket:
|
||||||
|
await self.websocket.ping()
|
||||||
|
except websockets.exceptions.ConnectionClosed:
|
||||||
|
logger.warning(f"WebSocket connection closed for {self.exchange_name}")
|
||||||
|
break
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error receiving message from {self.exchange_name}: {e}")
|
||||||
|
self.error_count += 1
|
||||||
|
break
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Message receiver error for {self.exchange_name}: {e}")
|
||||||
|
finally:
|
||||||
|
# Mark as disconnected
|
||||||
|
self.connection_manager.is_connected = False
|
||||||
|
|
||||||
|
async def _message_processor(self) -> None:
|
||||||
|
"""Process messages from the queue"""
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
# Get message from queue
|
||||||
|
message = await self._message_queue.get()
|
||||||
|
|
||||||
|
# Process message
|
||||||
|
await self._process_message(message)
|
||||||
|
|
||||||
|
# Update statistics
|
||||||
|
self.message_count += 1
|
||||||
|
self.last_message_time = get_current_timestamp()
|
||||||
|
|
||||||
|
# Mark task as done
|
||||||
|
self._message_queue.task_done()
|
||||||
|
|
||||||
|
except asyncio.CancelledError:
|
||||||
|
break
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error processing message for {self.exchange_name}: {e}")
|
||||||
|
self.error_count += 1
|
||||||
|
|
||||||
|
async def _process_message(self, message: str) -> None:
|
||||||
|
"""
|
||||||
|
Process incoming WebSocket message.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
message: Raw message string
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
# Parse JSON message
|
||||||
|
data = json.loads(message)
|
||||||
|
|
||||||
|
# Determine message type and route to appropriate handler
|
||||||
|
message_type = self._get_message_type(data)
|
||||||
|
|
||||||
|
if message_type in self.message_handlers:
|
||||||
|
await self.message_handlers[message_type](data)
|
||||||
|
else:
|
||||||
|
logger.debug(f"Unhandled message type '{message_type}' from {self.exchange_name}")
|
||||||
|
|
||||||
|
except json.JSONDecodeError as e:
|
||||||
|
logger.warning(f"Invalid JSON message from {self.exchange_name}: {e}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error processing message from {self.exchange_name}: {e}")
|
||||||
|
|
||||||
|
def _get_message_type(self, data: Dict) -> str:
|
||||||
|
"""
|
||||||
|
Determine message type from message data.
|
||||||
|
Override in subclasses for exchange-specific logic.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Parsed message data
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Message type identifier
|
||||||
|
"""
|
||||||
|
# Default implementation - override in subclasses
|
||||||
|
return data.get('type', 'unknown')
|
||||||
|
|
||||||
|
async def _send_message(self, message: Dict) -> bool:
|
||||||
|
"""
|
||||||
|
Send message to WebSocket.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
message: Message to send
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if sent successfully, False otherwise
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
if not self.websocket or self.websocket.closed:
|
||||||
|
logger.warning(f"Cannot send message to {self.exchange_name}: not connected")
|
||||||
|
return False
|
||||||
|
|
||||||
|
message_str = json.dumps(message)
|
||||||
|
await self.websocket.send(message_str)
|
||||||
|
|
||||||
|
logger.debug(f"Sent message to {self.exchange_name}: {message_str[:100]}...")
|
||||||
|
return True
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error sending message to {self.exchange_name}: {e}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Callback handlers
|
||||||
|
async def _on_connect(self) -> None:
|
||||||
|
"""Handle successful connection"""
|
||||||
|
self._notify_status_callbacks(ConnectionStatus.CONNECTED)
|
||||||
|
|
||||||
|
# Resubscribe to all previous subscriptions
|
||||||
|
await self._resubscribe_all()
|
||||||
|
|
||||||
|
async def _on_disconnect(self) -> None:
|
||||||
|
"""Handle disconnection"""
|
||||||
|
self._notify_status_callbacks(ConnectionStatus.DISCONNECTED)
|
||||||
|
|
||||||
|
async def _on_error(self, error: Exception) -> None:
|
||||||
|
"""Handle connection error"""
|
||||||
|
logger.error(f"Connection error for {self.exchange_name}: {error}")
|
||||||
|
self._notify_status_callbacks(ConnectionStatus.ERROR)
|
||||||
|
|
||||||
|
async def _health_check(self) -> bool:
|
||||||
|
"""Perform health check"""
|
||||||
|
try:
|
||||||
|
if not self.websocket or self.websocket.closed:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Check if we've received messages recently
|
||||||
|
if self.last_message_time:
|
||||||
|
time_since_last_message = (get_current_timestamp() - self.last_message_time).total_seconds()
|
||||||
|
if time_since_last_message > 60: # No messages for 60 seconds
|
||||||
|
logger.warning(f"No messages received from {self.exchange_name} for {time_since_last_message}s")
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Send ping
|
||||||
|
await self.websocket.ping()
|
||||||
|
return True
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Health check failed for {self.exchange_name}: {e}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
async def _resubscribe_all(self) -> None:
|
||||||
|
"""Resubscribe to all previous subscriptions after reconnection"""
|
||||||
|
for symbol, subscription_types in self.subscriptions.items():
|
||||||
|
for sub_type in subscription_types:
|
||||||
|
try:
|
||||||
|
if sub_type == 'orderbook':
|
||||||
|
await self.subscribe_orderbook(symbol)
|
||||||
|
elif sub_type == 'trades':
|
||||||
|
await self.subscribe_trades(symbol)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to resubscribe to {sub_type} for {symbol}: {e}")
|
||||||
|
|
||||||
|
# Abstract methods that must be implemented by subclasses
|
||||||
|
async def subscribe_orderbook(self, symbol: str) -> None:
|
||||||
|
"""Subscribe to order book updates - must be implemented by subclasses"""
|
||||||
|
raise NotImplementedError("Subclasses must implement subscribe_orderbook")
|
||||||
|
|
||||||
|
async def subscribe_trades(self, symbol: str) -> None:
|
||||||
|
"""Subscribe to trade updates - must be implemented by subclasses"""
|
||||||
|
raise NotImplementedError("Subclasses must implement subscribe_trades")
|
||||||
|
|
||||||
|
async def unsubscribe_orderbook(self, symbol: str) -> None:
|
||||||
|
"""Unsubscribe from order book updates - must be implemented by subclasses"""
|
||||||
|
raise NotImplementedError("Subclasses must implement unsubscribe_orderbook")
|
||||||
|
|
||||||
|
async def unsubscribe_trades(self, symbol: str) -> None:
|
||||||
|
"""Unsubscribe from trade updates - must be implemented by subclasses"""
|
||||||
|
raise NotImplementedError("Subclasses must implement unsubscribe_trades")
|
||||||
|
|
||||||
|
async def get_symbols(self) -> List[str]:
|
||||||
|
"""Get available symbols - must be implemented by subclasses"""
|
||||||
|
raise NotImplementedError("Subclasses must implement get_symbols")
|
||||||
|
|
||||||
|
def normalize_symbol(self, symbol: str) -> str:
|
||||||
|
"""Normalize symbol format - must be implemented by subclasses"""
|
||||||
|
raise NotImplementedError("Subclasses must implement normalize_symbol")
|
||||||
|
|
||||||
|
async def get_orderbook_snapshot(self, symbol: str, depth: int = 20) -> Optional[OrderBookSnapshot]:
|
||||||
|
"""Get order book snapshot - must be implemented by subclasses"""
|
||||||
|
raise NotImplementedError("Subclasses must implement get_orderbook_snapshot")
|
||||||
|
|
||||||
|
# Callback methods
|
||||||
|
def add_data_callback(self, callback):
|
||||||
|
"""Add callback for data updates"""
|
||||||
|
self.data_callbacks.append(callback)
|
||||||
|
|
||||||
|
def add_status_callback(self, callback):
|
||||||
|
"""Add callback for status updates"""
|
||||||
|
self.status_callbacks.append(callback)
|
||||||
|
|
||||||
|
async def _notify_data_callbacks(self, data):
|
||||||
|
"""Notify all data callbacks"""
|
||||||
|
for callback in self.data_callbacks:
|
||||||
|
try:
|
||||||
|
if hasattr(data, 'symbol'):
|
||||||
|
# Determine data type
|
||||||
|
if isinstance(data, OrderBookSnapshot):
|
||||||
|
await callback('orderbook', data)
|
||||||
|
elif isinstance(data, TradeEvent):
|
||||||
|
await callback('trade', data)
|
||||||
|
else:
|
||||||
|
await callback('data', data)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error in data callback: {e}")
|
||||||
|
|
||||||
|
def _notify_status_callbacks(self, status):
|
||||||
|
"""Notify all status callbacks"""
|
||||||
|
for callback in self.status_callbacks:
|
||||||
|
try:
|
||||||
|
callback(self.exchange_name, status)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error in status callback: {e}")
|
||||||
|
|
||||||
|
# Utility methods
|
||||||
|
def get_stats(self) -> Dict[str, Any]:
|
||||||
|
"""Get connector statistics"""
|
||||||
|
return {
|
||||||
|
'exchange': self.exchange_name,
|
||||||
|
'connection_status': self.get_connection_status().value,
|
||||||
|
'is_connected': self.is_connected,
|
||||||
|
'message_count': self.message_count,
|
||||||
|
'error_count': self.error_count,
|
||||||
|
'last_message_time': self.last_message_time.isoformat() if self.last_message_time else None,
|
||||||
|
'subscriptions': dict(self.subscriptions),
|
||||||
|
'connection_manager': self.connection_manager.get_stats(),
|
||||||
|
'circuit_breaker': self.circuit_breaker.get_stats(),
|
||||||
|
'queue_size': self._message_queue.qsize()
|
||||||
|
}
|
||||||
493
COBY/connectors/binance_connector.py
Normal file
493
COBY/connectors/binance_connector.py
Normal file
@@ -0,0 +1,493 @@
|
|||||||
|
"""
|
||||||
|
Binance exchange connector implementation.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
from typing import Dict, List, Optional, Any
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
|
from models.core import OrderBookSnapshot, TradeEvent, PriceLevel
|
||||||
|
from utils.logging import get_logger, set_correlation_id
|
||||||
|
from utils.exceptions import ValidationError
|
||||||
|
from utils.validation import validate_symbol, validate_price, validate_volume
|
||||||
|
from connectors.base_connector import BaseExchangeConnector
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class BinanceConnector(BaseExchangeConnector):
|
||||||
|
"""
|
||||||
|
Binance WebSocket connector implementation.
|
||||||
|
|
||||||
|
Supports:
|
||||||
|
- Order book depth streams
|
||||||
|
- Trade streams
|
||||||
|
- Symbol normalization
|
||||||
|
- Real-time data processing
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Binance WebSocket URLs
|
||||||
|
WEBSOCKET_URL = "wss://stream.binance.com:9443/ws"
|
||||||
|
API_URL = "https://api.binance.com/api/v3"
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
"""Initialize Binance connector"""
|
||||||
|
super().__init__("binance", self.WEBSOCKET_URL)
|
||||||
|
|
||||||
|
# Binance-specific message handlers
|
||||||
|
self.message_handlers.update({
|
||||||
|
'depthUpdate': self._handle_orderbook_update,
|
||||||
|
'trade': self._handle_trade_update,
|
||||||
|
'error': self._handle_error_message
|
||||||
|
})
|
||||||
|
|
||||||
|
# Stream management
|
||||||
|
self.active_streams: List[str] = []
|
||||||
|
self.stream_id = 1
|
||||||
|
|
||||||
|
logger.info("Binance connector initialized")
|
||||||
|
|
||||||
|
def _get_message_type(self, data: Dict) -> str:
|
||||||
|
"""
|
||||||
|
Determine message type from Binance message data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Parsed message data
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Message type identifier
|
||||||
|
"""
|
||||||
|
# Binance uses 'e' field for event type
|
||||||
|
if 'e' in data:
|
||||||
|
return data['e']
|
||||||
|
|
||||||
|
# Handle error messages
|
||||||
|
if 'error' in data:
|
||||||
|
return 'error'
|
||||||
|
|
||||||
|
# Handle subscription confirmations
|
||||||
|
if 'result' in data and 'id' in data:
|
||||||
|
return 'subscription_response'
|
||||||
|
|
||||||
|
return 'unknown'
|
||||||
|
|
||||||
|
def normalize_symbol(self, symbol: str) -> str:
|
||||||
|
"""
|
||||||
|
Normalize symbol to Binance format.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Standard symbol format (e.g., 'BTCUSDT')
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Binance symbol format (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
# Binance uses uppercase symbols without separators
|
||||||
|
normalized = symbol.upper().replace('-', '').replace('/', '')
|
||||||
|
|
||||||
|
        # Validate symbol format
        if not validate_symbol(normalized):
            raise ValidationError(f"Invalid symbol format: {symbol}", "INVALID_SYMBOL")

        return normalized

    async def subscribe_orderbook(self, symbol: str) -> None:
        """
        Subscribe to order book depth updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            set_correlation_id()
            normalized_symbol = self.normalize_symbol(symbol)
            stream_name = f"{normalized_symbol.lower()}@depth@100ms"

            # Create subscription message
            subscription_msg = {
                "method": "SUBSCRIBE",
                "params": [stream_name],
                "id": self.stream_id
            }

            # Send subscription
            success = await self._send_message(subscription_msg)
            if success:
                # Track subscription
                if symbol not in self.subscriptions:
                    self.subscriptions[symbol] = []
                if 'orderbook' not in self.subscriptions[symbol]:
                    self.subscriptions[symbol].append('orderbook')

                self.active_streams.append(stream_name)
                self.stream_id += 1

                logger.info(f"Subscribed to order book for {symbol} on Binance")
            else:
                logger.error(f"Failed to subscribe to order book for {symbol} on Binance")

        except Exception as e:
            logger.error(f"Error subscribing to order book for {symbol}: {e}")
            raise

    async def subscribe_trades(self, symbol: str) -> None:
        """
        Subscribe to trade updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            set_correlation_id()
            normalized_symbol = self.normalize_symbol(symbol)
            stream_name = f"{normalized_symbol.lower()}@trade"

            # Create subscription message
            subscription_msg = {
                "method": "SUBSCRIBE",
                "params": [stream_name],
                "id": self.stream_id
            }

            # Send subscription
            success = await self._send_message(subscription_msg)
            if success:
                # Track subscription
                if symbol not in self.subscriptions:
                    self.subscriptions[symbol] = []
                if 'trades' not in self.subscriptions[symbol]:
                    self.subscriptions[symbol].append('trades')

                self.active_streams.append(stream_name)
                self.stream_id += 1

                logger.info(f"Subscribed to trades for {symbol} on Binance")
            else:
                logger.error(f"Failed to subscribe to trades for {symbol} on Binance")

        except Exception as e:
            logger.error(f"Error subscribing to trades for {symbol}: {e}")
            raise

    async def unsubscribe_orderbook(self, symbol: str) -> None:
        """
        Unsubscribe from order book updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            normalized_symbol = self.normalize_symbol(symbol)
            stream_name = f"{normalized_symbol.lower()}@depth@100ms"

            # Create unsubscription message
            unsubscription_msg = {
                "method": "UNSUBSCRIBE",
                "params": [stream_name],
                "id": self.stream_id
            }

            # Send unsubscription
            success = await self._send_message(unsubscription_msg)
            if success:
                # Remove from tracking
                if symbol in self.subscriptions and 'orderbook' in self.subscriptions[symbol]:
                    self.subscriptions[symbol].remove('orderbook')
                    if not self.subscriptions[symbol]:
                        del self.subscriptions[symbol]

                if stream_name in self.active_streams:
                    self.active_streams.remove(stream_name)

                self.stream_id += 1
                logger.info(f"Unsubscribed from order book for {symbol} on Binance")
            else:
                logger.error(f"Failed to unsubscribe from order book for {symbol} on Binance")

        except Exception as e:
            logger.error(f"Error unsubscribing from order book for {symbol}: {e}")
            raise

    async def unsubscribe_trades(self, symbol: str) -> None:
        """
        Unsubscribe from trade updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            normalized_symbol = self.normalize_symbol(symbol)
            stream_name = f"{normalized_symbol.lower()}@trade"

            # Create unsubscription message
            unsubscription_msg = {
                "method": "UNSUBSCRIBE",
                "params": [stream_name],
                "id": self.stream_id
            }

            # Send unsubscription
            success = await self._send_message(unsubscription_msg)
            if success:
                # Remove from tracking
                if symbol in self.subscriptions and 'trades' in self.subscriptions[symbol]:
                    self.subscriptions[symbol].remove('trades')
                    if not self.subscriptions[symbol]:
                        del self.subscriptions[symbol]

                if stream_name in self.active_streams:
                    self.active_streams.remove(stream_name)

                self.stream_id += 1
                logger.info(f"Unsubscribed from trades for {symbol} on Binance")
            else:
                logger.error(f"Failed to unsubscribe from trades for {symbol} on Binance")

        except Exception as e:
            logger.error(f"Error unsubscribing from trades for {symbol}: {e}")
            raise

    async def get_symbols(self) -> List[str]:
        """
        Get list of available trading symbols from Binance.

        Returns:
            List[str]: List of available symbols
        """
        try:
            import aiohttp

            async with aiohttp.ClientSession() as session:
                async with session.get(f"{self.API_URL}/exchangeInfo") as response:
                    if response.status == 200:
                        data = await response.json()
                        symbols = [
                            symbol_info['symbol']
                            for symbol_info in data.get('symbols', [])
                            if symbol_info.get('status') == 'TRADING'
                        ]
                        logger.info(f"Retrieved {len(symbols)} symbols from Binance")
                        return symbols
                    else:
                        logger.error(f"Failed to get symbols from Binance: HTTP {response.status}")
                        return []

        except Exception as e:
            logger.error(f"Error getting symbols from Binance: {e}")
            return []
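As an illustrative aside (not part of the connector file), the symbol list returned above can be filtered client-side before subscribing; a minimal sketch, assuming BinanceConnector is the class defined in this file:

# Hypothetical helper: keep only USDT-quoted symbols from get_symbols().
import asyncio

async def list_usdt_markets(connector) -> list:
    symbols = await connector.get_symbols()             # e.g. ['BTCUSDT', 'ETHBTC', ...]
    return [s for s in symbols if s.endswith('USDT')]   # simple client-side filter

# asyncio.run(list_usdt_markets(BinanceConnector()))    # example invocation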
    async def get_orderbook_snapshot(self, symbol: str, depth: int = 20) -> Optional[OrderBookSnapshot]:
        """
        Get current order book snapshot from Binance REST API.

        Args:
            symbol: Trading symbol
            depth: Number of price levels to retrieve

        Returns:
            OrderBookSnapshot: Current order book or None if unavailable
        """
        try:
            import aiohttp

            normalized_symbol = self.normalize_symbol(symbol)

            # Binance supports depths: 5, 10, 20, 50, 100, 500, 1000, 5000
            valid_depths = [5, 10, 20, 50, 100, 500, 1000, 5000]
            api_depth = min(valid_depths, key=lambda x: abs(x - depth))

            url = f"{self.API_URL}/depth"
            params = {
                'symbol': normalized_symbol,
                'limit': api_depth
            }

            async with aiohttp.ClientSession() as session:
                async with session.get(url, params=params) as response:
                    if response.status == 200:
                        data = await response.json()
                        return self._parse_orderbook_snapshot(data, symbol)
                    else:
                        logger.error(f"Failed to get order book for {symbol}: HTTP {response.status}")
                        return None

        except Exception as e:
            logger.error(f"Error getting order book snapshot for {symbol}: {e}")
            return None

    def _parse_orderbook_snapshot(self, data: Dict, symbol: str) -> OrderBookSnapshot:
        """
        Parse Binance order book data into OrderBookSnapshot.

        Args:
            data: Raw Binance order book data
            symbol: Trading symbol

        Returns:
            OrderBookSnapshot: Parsed order book
        """
        try:
            # Parse bids and asks
            bids = []
            for bid_data in data.get('bids', []):
                price = float(bid_data[0])
                size = float(bid_data[1])

                if validate_price(price) and validate_volume(size):
                    bids.append(PriceLevel(price=price, size=size))

            asks = []
            for ask_data in data.get('asks', []):
                price = float(ask_data[0])
                size = float(ask_data[1])

                if validate_price(price) and validate_volume(size):
                    asks.append(PriceLevel(price=price, size=size))

            # Create order book snapshot
            orderbook = OrderBookSnapshot(
                symbol=symbol,
                exchange=self.exchange_name,
                timestamp=datetime.now(timezone.utc),
                bids=bids,
                asks=asks,
                sequence_id=data.get('lastUpdateId')
            )

            return orderbook

        except Exception as e:
            logger.error(f"Error parsing order book snapshot: {e}")
            raise ValidationError(f"Invalid order book data: {e}", "PARSE_ERROR")

    async def _handle_orderbook_update(self, data: Dict) -> None:
        """
        Handle order book depth update from Binance.

        Args:
            data: Order book update data
        """
        try:
            set_correlation_id()

            # Extract symbol from stream name
            stream = data.get('s', '').upper()
            if not stream:
                logger.warning("Order book update missing symbol")
                return

            # Parse bids and asks
            bids = []
            for bid_data in data.get('b', []):
                price = float(bid_data[0])
                size = float(bid_data[1])

                if validate_price(price) and validate_volume(size):
                    bids.append(PriceLevel(price=price, size=size))

            asks = []
            for ask_data in data.get('a', []):
                price = float(ask_data[0])
                size = float(ask_data[1])

                if validate_price(price) and validate_volume(size):
                    asks.append(PriceLevel(price=price, size=size))

            # Create order book snapshot
            orderbook = OrderBookSnapshot(
                symbol=stream,
                exchange=self.exchange_name,
                timestamp=datetime.fromtimestamp(data.get('E', 0) / 1000, tz=timezone.utc),
                bids=bids,
                asks=asks,
                sequence_id=data.get('u')  # Final update ID
            )

            # Notify callbacks
            self._notify_data_callbacks(orderbook)

            logger.debug(f"Processed order book update for {stream}")

        except Exception as e:
            logger.error(f"Error handling order book update: {e}")
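An illustrative aside, not part of this file: Binance depth diffs also carry a first update ID ('U', per Binance's published stream format, an assumption here since the handler above only reads 'u'), which a consumer can use to spot sequence gaps before trusting a diff as a fresh view of the book. A minimal sketch:

# Hypothetical gap check for Binance depth diffs; 'U' (first update ID) is
# taken from Binance's documented stream format and is not used by the
# connector above.
def has_sequence_gap(prev_final_id, update: dict) -> bool:
    first_id = update.get('U')
    if prev_final_id is None or first_id is None:
        return False                      # nothing to compare against yet
    return first_id > prev_final_id + 1   # a gap means a REST snapshot is needed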
    async def _handle_trade_update(self, data: Dict) -> None:
        """
        Handle trade update from Binance.

        Args:
            data: Trade update data
        """
        try:
            set_correlation_id()

            # Extract trade data
            symbol = data.get('s', '').upper()
            if not symbol:
                logger.warning("Trade update missing symbol")
                return

            price = float(data.get('p', 0))
            size = float(data.get('q', 0))

            # Validate data
            if not validate_price(price) or not validate_volume(size):
                logger.warning(f"Invalid trade data: price={price}, size={size}")
                return

            # Determine side (Binance uses 'm' field - true if buyer is market maker)
            is_buyer_maker = data.get('m', False)
            side = 'sell' if is_buyer_maker else 'buy'

            # Create trade event
            trade = TradeEvent(
                symbol=symbol,
                exchange=self.exchange_name,
                timestamp=datetime.fromtimestamp(data.get('T', 0) / 1000, tz=timezone.utc),
                price=price,
                size=size,
                side=side,
                trade_id=str(data.get('t', ''))
            )

            # Notify callbacks
            self._notify_data_callbacks(trade)

            logger.debug(f"Processed trade for {symbol}: {side} {size} @ {price}")

        except Exception as e:
            logger.error(f"Error handling trade update: {e}")

    async def _handle_error_message(self, data: Dict) -> None:
        """
        Handle error message from Binance.

        Args:
            data: Error message data
        """
        error_code = data.get('code', 'unknown')
        error_msg = data.get('msg', 'Unknown error')

        logger.error(f"Binance error {error_code}: {error_msg}")

        # Handle specific error codes
        if error_code == -1121:  # Invalid symbol
            logger.error("Invalid symbol error - check symbol format")
        elif error_code == -1130:  # Invalid listen key
            logger.error("Invalid listen key - may need to reconnect")

    def get_binance_stats(self) -> Dict[str, Any]:
        """Get Binance-specific statistics"""
        base_stats = self.get_stats()

        binance_stats = {
            'active_streams': len(self.active_streams),
            'stream_list': self.active_streams.copy(),
            'next_stream_id': self.stream_id
        }

        base_stats.update(binance_stats)
        return base_stats
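A usage sketch for the Binance connector as a whole, assuming the base class exposes a connect() coroutine and a data-callback registration hook (both names are assumptions; only _notify_data_callbacks is visible in this diff):

# Minimal wiring sketch; connect() and add_data_callback() are assumed
# base-class methods, not confirmed by this file.
import asyncio

async def run_binance_feed():
    connector = BinanceConnector()
    connector.add_data_callback(print)            # assumed registration hook
    await connector.connect()                     # assumed base-class method
    await connector.subscribe_orderbook('BTCUSDT')
    await connector.subscribe_trades('BTCUSDT')
    await asyncio.sleep(30)                       # stream for a short while

# asyncio.run(run_binance_feed())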
COBY/connectors/bitfinex_connector.py (new file, 454 lines)
@@ -0,0 +1,454 @@
"""
Bitfinex exchange connector implementation.
Supports WebSocket connections to Bitfinex with proper channel subscription management.
"""

import json
from typing import Dict, List, Optional, Any
from datetime import datetime, timezone

from ..models.core import OrderBookSnapshot, TradeEvent, PriceLevel
from ..utils.logging import get_logger, set_correlation_id
from ..utils.exceptions import ValidationError, ConnectionError
from ..utils.validation import validate_symbol, validate_price, validate_volume
from .base_connector import BaseExchangeConnector

logger = get_logger(__name__)


class BitfinexConnector(BaseExchangeConnector):
    """
    Bitfinex WebSocket connector implementation.

    Supports:
    - Channel subscription management
    - Order book streams
    - Trade streams
    - Symbol normalization
    """

    # Bitfinex WebSocket URLs
    WEBSOCKET_URL = "wss://api-pub.bitfinex.com/ws/2"
    API_URL = "https://api-pub.bitfinex.com"

    def __init__(self, api_key: str = None, api_secret: str = None):
        """Initialize Bitfinex connector."""
        super().__init__("bitfinex", self.WEBSOCKET_URL)

        self.api_key = api_key
        self.api_secret = api_secret

        # Bitfinex-specific message handlers
        self.message_handlers.update({
            'subscribed': self._handle_subscription_response,
            'unsubscribed': self._handle_unsubscription_response,
            'error': self._handle_error_message,
            'info': self._handle_info_message,
            'data': self._handle_data_message
        })

        # Channel management
        self.channels = {}  # channel_id -> channel_info
        self.subscribed_symbols = set()

        logger.info("Bitfinex connector initialized")

    def _get_message_type(self, data) -> str:
        """Determine message type from Bitfinex message data."""
        if isinstance(data, dict):
            if 'event' in data:
                return data['event']
            elif 'error' in data:
                return 'error'
        elif isinstance(data, list) and len(data) >= 2:
            # Data message format: [CHANNEL_ID, data]
            return 'data'

        return 'unknown'

    def normalize_symbol(self, symbol: str) -> str:
        """Normalize symbol to Bitfinex format."""
        # Bitfinex uses 't' prefix for trading pairs
        if symbol.upper() == 'BTCUSDT':
            return 'tBTCUSD'
        elif symbol.upper() == 'ETHUSDT':
            return 'tETHUSD'
        elif symbol.upper().endswith('USDT'):
            base = symbol[:-4].upper()
            return f"t{base}USD"
        else:
            # Generic conversion
            normalized = symbol.upper().replace('-', '').replace('/', '')
            return f"t{normalized}" if not normalized.startswith('t') else normalized

    def _denormalize_symbol(self, bitfinex_symbol: str) -> str:
        """Convert Bitfinex symbol back to standard format."""
        if bitfinex_symbol.startswith('t'):
            symbol = bitfinex_symbol[1:]  # Remove 't' prefix
            if symbol.endswith('USD'):
                return symbol[:-3] + 'USDT'
            return symbol
        return bitfinex_symbol
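A quick round-trip illustration of the two symbol helpers above (an aside, not part of the class body; it assumes the base constructor only sets up state and has no connection side effects):

# USDT pairs map to Bitfinex 't...USD' symbols and back.
c = BitfinexConnector()
assert c.normalize_symbol('BTCUSDT') == 'tBTCUSD'
assert c._denormalize_symbol('tBTCUSD') == 'BTCUSDT'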
    async def subscribe_orderbook(self, symbol: str) -> None:
        """Subscribe to order book updates for a symbol."""
        try:
            set_correlation_id()
            bitfinex_symbol = self.normalize_symbol(symbol)

            subscription_msg = {
                "event": "subscribe",
                "channel": "book",
                "symbol": bitfinex_symbol,
                "prec": "P0",
                "freq": "F0",
                "len": "25"
            }

            success = await self._send_message(subscription_msg)
            if success:
                if symbol not in self.subscriptions:
                    self.subscriptions[symbol] = []
                if 'orderbook' not in self.subscriptions[symbol]:
                    self.subscriptions[symbol].append('orderbook')

                self.subscribed_symbols.add(bitfinex_symbol)
                logger.info(f"Subscribed to order book for {symbol} ({bitfinex_symbol}) on Bitfinex")
            else:
                logger.error(f"Failed to subscribe to order book for {symbol} on Bitfinex")

        except Exception as e:
            logger.error(f"Error subscribing to order book for {symbol}: {e}")
            raise

    async def subscribe_trades(self, symbol: str) -> None:
        """Subscribe to trade updates for a symbol."""
        try:
            set_correlation_id()
            bitfinex_symbol = self.normalize_symbol(symbol)

            subscription_msg = {
                "event": "subscribe",
                "channel": "trades",
                "symbol": bitfinex_symbol
            }

            success = await self._send_message(subscription_msg)
            if success:
                if symbol not in self.subscriptions:
                    self.subscriptions[symbol] = []
                if 'trades' not in self.subscriptions[symbol]:
                    self.subscriptions[symbol].append('trades')

                self.subscribed_symbols.add(bitfinex_symbol)
                logger.info(f"Subscribed to trades for {symbol} ({bitfinex_symbol}) on Bitfinex")
            else:
                logger.error(f"Failed to subscribe to trades for {symbol} on Bitfinex")

        except Exception as e:
            logger.error(f"Error subscribing to trades for {symbol}: {e}")
            raise

    async def unsubscribe_orderbook(self, symbol: str) -> None:
        """Unsubscribe from order book updates."""
        try:
            bitfinex_symbol = self.normalize_symbol(symbol)

            # Find channel ID for this symbol's order book
            channel_id = None
            for cid, info in self.channels.items():
                if info.get('channel') == 'book' and info.get('symbol') == bitfinex_symbol:
                    channel_id = cid
                    break

            if channel_id:
                unsubscription_msg = {
                    "event": "unsubscribe",
                    "chanId": channel_id
                }

                success = await self._send_message(unsubscription_msg)
                if success:
                    if symbol in self.subscriptions and 'orderbook' in self.subscriptions[symbol]:
                        self.subscriptions[symbol].remove('orderbook')
                        if not self.subscriptions[symbol]:
                            del self.subscriptions[symbol]

                    self.subscribed_symbols.discard(bitfinex_symbol)
                    logger.info(f"Unsubscribed from order book for {symbol} on Bitfinex")
                else:
                    logger.error(f"Failed to unsubscribe from order book for {symbol} on Bitfinex")
            else:
                logger.warning(f"No active order book subscription found for {symbol}")

        except Exception as e:
            logger.error(f"Error unsubscribing from order book for {symbol}: {e}")
            raise

    async def unsubscribe_trades(self, symbol: str) -> None:
        """Unsubscribe from trade updates."""
        try:
            bitfinex_symbol = self.normalize_symbol(symbol)

            # Find channel ID for this symbol's trades
            channel_id = None
            for cid, info in self.channels.items():
                if info.get('channel') == 'trades' and info.get('symbol') == bitfinex_symbol:
                    channel_id = cid
                    break

            if channel_id:
                unsubscription_msg = {
                    "event": "unsubscribe",
                    "chanId": channel_id
                }

                success = await self._send_message(unsubscription_msg)
                if success:
                    if symbol in self.subscriptions and 'trades' in self.subscriptions[symbol]:
                        self.subscriptions[symbol].remove('trades')
                        if not self.subscriptions[symbol]:
                            del self.subscriptions[symbol]

                    self.subscribed_symbols.discard(bitfinex_symbol)
                    logger.info(f"Unsubscribed from trades for {symbol} on Bitfinex")
                else:
                    logger.error(f"Failed to unsubscribe from trades for {symbol} on Bitfinex")
            else:
                logger.warning(f"No active trades subscription found for {symbol}")

        except Exception as e:
            logger.error(f"Error unsubscribing from trades for {symbol}: {e}")
            raise

    async def get_symbols(self) -> List[str]:
        """Get available symbols from Bitfinex."""
        try:
            import aiohttp

            async with aiohttp.ClientSession() as session:
                async with session.get(f"{self.API_URL}/v1/symbols") as response:
                    if response.status == 200:
                        data = await response.json()
                        symbols = [self._denormalize_symbol(f"t{s.upper()}") for s in data]
                        logger.info(f"Retrieved {len(symbols)} symbols from Bitfinex")
                        return symbols
                    else:
                        logger.error(f"Failed to get symbols from Bitfinex: HTTP {response.status}")
                        return []
        except Exception as e:
            logger.error(f"Error getting symbols from Bitfinex: {e}")
            return []

    async def get_orderbook_snapshot(self, symbol: str, depth: int = 20) -> Optional[OrderBookSnapshot]:
        """Get order book snapshot from Bitfinex REST API."""
        try:
            import aiohttp

            bitfinex_symbol = self.normalize_symbol(symbol)
            url = f"{self.API_URL}/v2/book/{bitfinex_symbol}/P0"
            params = {'len': min(depth, 100)}

            async with aiohttp.ClientSession() as session:
                async with session.get(url, params=params) as response:
                    if response.status == 200:
                        data = await response.json()
                        return self._parse_orderbook_snapshot(data, symbol)
                    else:
                        logger.error(f"Failed to get order book for {symbol}: HTTP {response.status}")
                        return None
        except Exception as e:
            logger.error(f"Error getting order book snapshot for {symbol}: {e}")
            return None

    def _parse_orderbook_snapshot(self, data: List, symbol: str) -> OrderBookSnapshot:
        """Parse Bitfinex order book data."""
        try:
            bids = []
            asks = []

            for level in data:
                price = float(level[0])
                count = int(level[1])
                amount = float(level[2])

                if validate_price(price) and validate_volume(abs(amount)):
                    if amount > 0:
                        bids.append(PriceLevel(price=price, size=amount))
                    else:
                        asks.append(PriceLevel(price=price, size=abs(amount)))

            return OrderBookSnapshot(
                symbol=symbol,
                exchange=self.exchange_name,
                timestamp=datetime.now(timezone.utc),
                bids=bids,
                asks=asks
            )
        except Exception as e:
            logger.error(f"Error parsing order book snapshot: {e}")
            raise ValidationError(f"Invalid order book data: {e}", "PARSE_ERROR")
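An aside on the raw Bitfinex book format handled above: each entry is [price, count, amount], where positive amounts are bids and negative amounts are asks, and (per Bitfinex's published convention, an assumption here since the parser builds snapshots only) a count of 0 signals that the level should be removed in incremental updates. A minimal classification sketch:

# Hypothetical classifier for a raw Bitfinex book entry [price, count, amount].
def classify_book_entry(level):
    price, count, amount = float(level[0]), int(level[1]), float(level[2])
    if count == 0:
        return ('remove', price)            # level deletion in an incremental update
    side = 'bid' if amount > 0 else 'ask'
    return (side, price, abs(amount))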
    async def _handle_subscription_response(self, data: Dict) -> None:
        """Handle subscription response."""
        channel_id = data.get('chanId')
        channel = data.get('channel')
        symbol = data.get('symbol', '')

        if channel_id:
            self.channels[channel_id] = {
                'channel': channel,
                'symbol': symbol
            }
            logger.info(f"Bitfinex subscription confirmed: {channel} for {symbol} (ID: {channel_id})")

    async def _handle_unsubscription_response(self, data: Dict) -> None:
        """Handle unsubscription response."""
        channel_id = data.get('chanId')
        if channel_id in self.channels:
            del self.channels[channel_id]
            logger.info(f"Bitfinex unsubscription confirmed for channel {channel_id}")

    async def _handle_error_message(self, data: Dict) -> None:
        """Handle error message."""
        error_msg = data.get('msg', 'Unknown error')
        error_code = data.get('code', 'unknown')
        logger.error(f"Bitfinex error {error_code}: {error_msg}")

    async def _handle_info_message(self, data: Dict) -> None:
        """Handle info message."""
        logger.info(f"Bitfinex info: {data}")

    async def _handle_data_message(self, data: List) -> None:
        """Handle data message from Bitfinex."""
        try:
            if len(data) < 2:
                return

            channel_id = data[0]
            message_data = data[1]

            if channel_id not in self.channels:
                logger.warning(f"Received data for unknown channel: {channel_id}")
                return

            channel_info = self.channels[channel_id]
            channel_type = channel_info.get('channel')
            symbol = channel_info.get('symbol', '')

            if channel_type == 'book':
                await self._handle_orderbook_data(message_data, symbol)
            elif channel_type == 'trades':
                await self._handle_trades_data(message_data, symbol)

        except Exception as e:
            logger.error(f"Error handling data message: {e}")

    async def _handle_orderbook_data(self, data, symbol: str) -> None:
        """Handle order book data from Bitfinex."""
        try:
            set_correlation_id()

            if not isinstance(data, list):
                return

            standard_symbol = self._denormalize_symbol(symbol)

            # Handle snapshot vs update
            if len(data) > 0 and isinstance(data[0], list):
                # Snapshot - array of [price, count, amount]
                bids = []
                asks = []

                for level in data:
                    if len(level) >= 3:
                        price = float(level[0])
                        count = int(level[1])
                        amount = float(level[2])

                        if validate_price(price) and validate_volume(abs(amount)):
                            if amount > 0:
                                bids.append(PriceLevel(price=price, size=amount))
                            else:
                                asks.append(PriceLevel(price=price, size=abs(amount)))

                orderbook = OrderBookSnapshot(
                    symbol=standard_symbol,
                    exchange=self.exchange_name,
                    timestamp=datetime.now(timezone.utc),
                    bids=bids,
                    asks=asks
                )

                self._notify_data_callbacks(orderbook)
                logger.debug(f"Processed order book snapshot for {standard_symbol}")

        except Exception as e:
            logger.error(f"Error handling order book data: {e}")

    async def _handle_trades_data(self, data, symbol: str) -> None:
        """Handle trades data from Bitfinex."""
        try:
            set_correlation_id()

            if not isinstance(data, list):
                return

            standard_symbol = self._denormalize_symbol(symbol)

            # Handle snapshot vs update
            if len(data) > 0 and isinstance(data[0], list):
                # Snapshot - array of trades
                for trade_data in data:
                    await self._process_single_trade(trade_data, standard_symbol)
            elif len(data) >= 4:
                # Single trade update
                await self._process_single_trade(data, standard_symbol)

        except Exception as e:
            logger.error(f"Error handling trades data: {e}")

    async def _process_single_trade(self, trade_data: List, symbol: str) -> None:
        """Process a single trade from Bitfinex."""
        try:
            if len(trade_data) < 4:
                return

            trade_id = str(trade_data[0])
            timestamp = int(trade_data[1]) / 1000  # Convert to seconds
            amount = float(trade_data[2])
            price = float(trade_data[3])

            if not validate_price(price) or not validate_volume(abs(amount)):
                return

            side = 'buy' if amount > 0 else 'sell'

            trade = TradeEvent(
                symbol=symbol,
                exchange=self.exchange_name,
                timestamp=datetime.fromtimestamp(timestamp, tz=timezone.utc),
                price=price,
                size=abs(amount),
                side=side,
                trade_id=trade_id
            )

            self._notify_data_callbacks(trade)
            logger.debug(f"Processed trade for {symbol}: {side} {abs(amount)} @ {price}")

        except Exception as e:
            logger.error(f"Error processing single trade: {e}")

    def get_bitfinex_stats(self) -> Dict[str, Any]:
        """Get Bitfinex-specific statistics."""
        base_stats = self.get_stats()

        bitfinex_stats = {
            'active_channels': len(self.channels),
            'subscribed_symbols': list(self.subscribed_symbols),
            'authenticated': bool(self.api_key and self.api_secret)
        }

        base_stats.update(bitfinex_stats)
        return base_stats
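A quick test sketch for the channel-type routing in _get_message_type, grounded in the code above (pytest style; it assumes instantiating the connector has no connection side effects):

# Each case mirrors a message shape the handler above distinguishes.
def test_bitfinex_message_type_routing():
    c = BitfinexConnector()
    assert c._get_message_type({'event': 'subscribed'}) == 'subscribed'
    assert c._get_message_type({'error': 'boom'}) == 'error'
    assert c._get_message_type([17470, [[1, 2, 3]]]) == 'data'
    assert c._get_message_type('hb') == 'unknown'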
COBY/connectors/bybit_connector.py (new file, 605 lines)
@@ -0,0 +1,605 @@
"""
Bybit exchange connector implementation.
Supports WebSocket connections to Bybit with unified trading account support.
"""

import json
import hmac
import hashlib
import time
from typing import Dict, List, Optional, Any
from datetime import datetime, timezone

from ..models.core import OrderBookSnapshot, TradeEvent, PriceLevel
from ..utils.logging import get_logger, set_correlation_id
from ..utils.exceptions import ValidationError, ConnectionError
from ..utils.validation import validate_symbol, validate_price, validate_volume
from .base_connector import BaseExchangeConnector

logger = get_logger(__name__)


class BybitConnector(BaseExchangeConnector):
    """
    Bybit WebSocket connector implementation.

    Supports:
    - Unified Trading Account (UTA) WebSocket streams
    - Order book streams
    - Trade streams
    - Symbol normalization
    - Authentication for private channels
    """

    # Bybit WebSocket URLs
    WEBSOCKET_URL = "wss://stream.bybit.com/v5/public/spot"
    WEBSOCKET_PRIVATE_URL = "wss://stream.bybit.com/v5/private"
    TESTNET_URL = "wss://stream-testnet.bybit.com/v5/public/spot"
    API_URL = "https://api.bybit.com"

    def __init__(self, use_testnet: bool = False, api_key: str = None, api_secret: str = None):
        """
        Initialize Bybit connector.

        Args:
            use_testnet: Whether to use testnet environment
            api_key: API key for authentication (optional)
            api_secret: API secret for authentication (optional)
        """
        websocket_url = self.TESTNET_URL if use_testnet else self.WEBSOCKET_URL
        super().__init__("bybit", websocket_url)

        # Authentication credentials (optional)
        self.api_key = api_key
        self.api_secret = api_secret
        self.use_testnet = use_testnet

        # Bybit-specific message handlers
        self.message_handlers.update({
            'orderbook': self._handle_orderbook_update,
            'publicTrade': self._handle_trade_update,
            'pong': self._handle_pong,
            'subscribe': self._handle_subscription_response
        })

        # Subscription tracking
        self.subscribed_topics = set()
        self.req_id = 1

        logger.info(f"Bybit connector initialized ({'testnet' if use_testnet else 'mainnet'})")

    def _get_message_type(self, data: Dict) -> str:
        """
        Determine message type from Bybit message data.

        Args:
            data: Parsed message data

        Returns:
            str: Message type identifier
        """
        # Bybit V5 API message format
        if 'topic' in data:
            topic = data['topic']
            if 'orderbook' in topic:
                return 'orderbook'
            elif 'publicTrade' in topic:
                return 'publicTrade'
            else:
                return topic
        elif 'op' in data:
            return data['op']  # 'subscribe', 'unsubscribe', 'ping', 'pong'
        elif 'success' in data:
            return 'response'

        return 'unknown'

    def normalize_symbol(self, symbol: str) -> str:
        """
        Normalize symbol to Bybit format.

        Args:
            symbol: Standard symbol format (e.g., 'BTCUSDT')

        Returns:
            str: Bybit symbol format (e.g., 'BTCUSDT')
        """
        # Bybit uses uppercase symbols without separators (same as Binance)
        normalized = symbol.upper().replace('-', '').replace('/', '')

        # Validate symbol format
        if not validate_symbol(normalized):
            raise ValidationError(f"Invalid symbol format: {symbol}", "INVALID_SYMBOL")

        return normalized

    async def subscribe_orderbook(self, symbol: str) -> None:
        """
        Subscribe to order book updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            set_correlation_id()
            normalized_symbol = self.normalize_symbol(symbol)
            topic = f"orderbook.50.{normalized_symbol}"

            # Create subscription message
            subscription_msg = {
                "op": "subscribe",
                "args": [topic],
                "req_id": str(self.req_id)
            }
            self.req_id += 1

            # Send subscription
            success = await self._send_message(subscription_msg)
            if success:
                # Track subscription
                if symbol not in self.subscriptions:
                    self.subscriptions[symbol] = []
                if 'orderbook' not in self.subscriptions[symbol]:
                    self.subscriptions[symbol].append('orderbook')

                self.subscribed_topics.add(topic)

                logger.info(f"Subscribed to order book for {symbol} on Bybit")
            else:
                logger.error(f"Failed to subscribe to order book for {symbol} on Bybit")

        except Exception as e:
            logger.error(f"Error subscribing to order book for {symbol}: {e}")
            raise

    async def subscribe_trades(self, symbol: str) -> None:
        """
        Subscribe to trade updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            set_correlation_id()
            normalized_symbol = self.normalize_symbol(symbol)
            topic = f"publicTrade.{normalized_symbol}"

            # Create subscription message
            subscription_msg = {
                "op": "subscribe",
                "args": [topic],
                "req_id": str(self.req_id)
            }
            self.req_id += 1

            # Send subscription
            success = await self._send_message(subscription_msg)
            if success:
                # Track subscription
                if symbol not in self.subscriptions:
                    self.subscriptions[symbol] = []
                if 'trades' not in self.subscriptions[symbol]:
                    self.subscriptions[symbol].append('trades')

                self.subscribed_topics.add(topic)

                logger.info(f"Subscribed to trades for {symbol} on Bybit")
            else:
                logger.error(f"Failed to subscribe to trades for {symbol} on Bybit")

        except Exception as e:
            logger.error(f"Error subscribing to trades for {symbol}: {e}")
            raise

    async def unsubscribe_orderbook(self, symbol: str) -> None:
        """
        Unsubscribe from order book updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            normalized_symbol = self.normalize_symbol(symbol)
            topic = f"orderbook.50.{normalized_symbol}"

            # Create unsubscription message
            unsubscription_msg = {
                "op": "unsubscribe",
                "args": [topic],
                "req_id": str(self.req_id)
            }
            self.req_id += 1

            # Send unsubscription
            success = await self._send_message(unsubscription_msg)
            if success:
                # Remove from tracking
                if symbol in self.subscriptions and 'orderbook' in self.subscriptions[symbol]:
                    self.subscriptions[symbol].remove('orderbook')
                    if not self.subscriptions[symbol]:
                        del self.subscriptions[symbol]

                self.subscribed_topics.discard(topic)

                logger.info(f"Unsubscribed from order book for {symbol} on Bybit")
            else:
                logger.error(f"Failed to unsubscribe from order book for {symbol} on Bybit")

        except Exception as e:
            logger.error(f"Error unsubscribing from order book for {symbol}: {e}")
            raise

    async def unsubscribe_trades(self, symbol: str) -> None:
        """
        Unsubscribe from trade updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            normalized_symbol = self.normalize_symbol(symbol)
            topic = f"publicTrade.{normalized_symbol}"

            # Create unsubscription message
            unsubscription_msg = {
                "op": "unsubscribe",
                "args": [topic],
                "req_id": str(self.req_id)
            }
            self.req_id += 1

            # Send unsubscription
            success = await self._send_message(unsubscription_msg)
            if success:
                # Remove from tracking
                if symbol in self.subscriptions and 'trades' in self.subscriptions[symbol]:
                    self.subscriptions[symbol].remove('trades')
                    if not self.subscriptions[symbol]:
                        del self.subscriptions[symbol]

                self.subscribed_topics.discard(topic)

                logger.info(f"Unsubscribed from trades for {symbol} on Bybit")
            else:
                logger.error(f"Failed to unsubscribe from trades for {symbol} on Bybit")

        except Exception as e:
            logger.error(f"Error unsubscribing from trades for {symbol}: {e}")
            raise

    async def get_symbols(self) -> List[str]:
        """
        Get list of available trading symbols from Bybit.

        Returns:
            List[str]: List of available symbols
        """
        try:
            import aiohttp

            api_url = "https://api-testnet.bybit.com" if self.use_testnet else self.API_URL

            async with aiohttp.ClientSession() as session:
                async with session.get(f"{api_url}/v5/market/instruments-info",
                                       params={"category": "spot"}) as response:
                    if response.status == 200:
                        data = await response.json()

                        if data.get('retCode') != 0:
                            logger.error(f"Bybit API error: {data.get('retMsg')}")
                            return []

                        symbols = []
                        instruments = data.get('result', {}).get('list', [])

                        for instrument in instruments:
                            if instrument.get('status') == 'Trading':
                                symbol = instrument.get('symbol', '')
                                symbols.append(symbol)

                        logger.info(f"Retrieved {len(symbols)} symbols from Bybit")
                        return symbols
                    else:
                        logger.error(f"Failed to get symbols from Bybit: HTTP {response.status}")
                        return []

        except Exception as e:
            logger.error(f"Error getting symbols from Bybit: {e}")
            return []

    async def get_orderbook_snapshot(self, symbol: str, depth: int = 20) -> Optional[OrderBookSnapshot]:
        """
        Get current order book snapshot from Bybit REST API.

        Args:
            symbol: Trading symbol
            depth: Number of price levels to retrieve

        Returns:
            OrderBookSnapshot: Current order book or None if unavailable
        """
        try:
            import aiohttp

            normalized_symbol = self.normalize_symbol(symbol)
            api_url = "https://api-testnet.bybit.com" if self.use_testnet else self.API_URL

            # Bybit supports depths: 1, 25, 50, 100, 200
            valid_depths = [1, 25, 50, 100, 200]
            api_depth = min(valid_depths, key=lambda x: abs(x - depth))

            url = f"{api_url}/v5/market/orderbook"
            params = {
                'category': 'spot',
                'symbol': normalized_symbol,
                'limit': api_depth
            }

            async with aiohttp.ClientSession() as session:
                async with session.get(url, params=params) as response:
                    if response.status == 200:
                        data = await response.json()

                        if data.get('retCode') != 0:
                            logger.error(f"Bybit API error: {data.get('retMsg')}")
                            return None

                        result = data.get('result', {})
                        return self._parse_orderbook_snapshot(result, symbol)
                    else:
                        logger.error(f"Failed to get order book for {symbol}: HTTP {response.status}")
                        return None

        except Exception as e:
            logger.error(f"Error getting order book snapshot for {symbol}: {e}")
            return None

    def _parse_orderbook_snapshot(self, data: Dict, symbol: str) -> OrderBookSnapshot:
        """
        Parse Bybit order book data into OrderBookSnapshot.

        Args:
            data: Raw Bybit order book data
            symbol: Trading symbol

        Returns:
            OrderBookSnapshot: Parsed order book
        """
        try:
            # Parse bids and asks
            bids = []
            for bid_data in data.get('b', []):
                price = float(bid_data[0])
                size = float(bid_data[1])

                if validate_price(price) and validate_volume(size):
                    bids.append(PriceLevel(price=price, size=size))

            asks = []
            for ask_data in data.get('a', []):
                price = float(ask_data[0])
                size = float(ask_data[1])

                if validate_price(price) and validate_volume(size):
                    asks.append(PriceLevel(price=price, size=size))

            # Create order book snapshot
            orderbook = OrderBookSnapshot(
                symbol=symbol,
                exchange=self.exchange_name,
                timestamp=datetime.fromtimestamp(int(data.get('ts', 0)) / 1000, tz=timezone.utc),
                bids=bids,
                asks=asks,
                sequence_id=int(data.get('u', 0))
            )

            return orderbook

        except Exception as e:
            logger.error(f"Error parsing order book snapshot: {e}")
            raise ValidationError(f"Invalid order book data: {e}", "PARSE_ERROR")

    async def _handle_orderbook_update(self, data: Dict) -> None:
        """
        Handle order book update from Bybit.

        Args:
            data: Order book update data
        """
        try:
            set_correlation_id()

            # Extract symbol from topic
            topic = data.get('topic', '')
            if not topic.startswith('orderbook'):
                logger.warning("Invalid orderbook topic")
                return

            # Extract symbol from topic: orderbook.50.BTCUSDT
            parts = topic.split('.')
            if len(parts) < 3:
                logger.warning("Invalid orderbook topic format")
                return

            symbol = parts[2]
            orderbook_data = data.get('data', {})

            # Parse bids and asks
            bids = []
            for bid_data in orderbook_data.get('b', []):
                price = float(bid_data[0])
                size = float(bid_data[1])

                if validate_price(price) and validate_volume(size):
                    bids.append(PriceLevel(price=price, size=size))

            asks = []
            for ask_data in orderbook_data.get('a', []):
                price = float(ask_data[0])
                size = float(ask_data[1])

                if validate_price(price) and validate_volume(size):
                    asks.append(PriceLevel(price=price, size=size))

            # Create order book snapshot
            orderbook = OrderBookSnapshot(
                symbol=symbol,
                exchange=self.exchange_name,
                timestamp=datetime.fromtimestamp(int(data.get('ts', 0)) / 1000, tz=timezone.utc),
                bids=bids,
                asks=asks,
                sequence_id=int(orderbook_data.get('u', 0))
            )

            # Notify callbacks
            self._notify_data_callbacks(orderbook)

            logger.debug(f"Processed order book update for {symbol}")

        except Exception as e:
            logger.error(f"Error handling order book update: {e}")

    async def _handle_trade_update(self, data: Dict) -> None:
        """
        Handle trade update from Bybit.

        Args:
            data: Trade update data
        """
        try:
            set_correlation_id()

            # Extract symbol from topic
            topic = data.get('topic', '')
            if not topic.startswith('publicTrade'):
                logger.warning("Invalid trade topic")
                return

            # Extract symbol from topic: publicTrade.BTCUSDT
            parts = topic.split('.')
            if len(parts) < 2:
                logger.warning("Invalid trade topic format")
                return

            symbol = parts[1]
            trades_data = data.get('data', [])

            # Process each trade
            for trade_data in trades_data:
                price = float(trade_data.get('p', 0))
                size = float(trade_data.get('v', 0))

                # Validate data
                if not validate_price(price) or not validate_volume(size):
                    logger.warning(f"Invalid trade data: price={price}, size={size}")
                    continue

                # Determine side (Bybit uses 'S' field)
                side_flag = trade_data.get('S', '')
                side = 'buy' if side_flag == 'Buy' else 'sell'

                # Create trade event
                trade = TradeEvent(
                    symbol=symbol,
                    exchange=self.exchange_name,
                    timestamp=datetime.fromtimestamp(int(trade_data.get('T', 0)) / 1000, tz=timezone.utc),
                    price=price,
                    size=size,
                    side=side,
                    trade_id=str(trade_data.get('i', ''))
                )

                # Notify callbacks
                self._notify_data_callbacks(trade)

                logger.debug(f"Processed trade for {symbol}: {side} {size} @ {price}")

        except Exception as e:
            logger.error(f"Error handling trade update: {e}")

    async def _handle_subscription_response(self, data: Dict) -> None:
        """
        Handle subscription response from Bybit.

        Args:
            data: Subscription response data
        """
        try:
            success = data.get('success', False)
            req_id = data.get('req_id', '')
            op = data.get('op', '')

            if success:
                logger.info(f"Bybit {op} successful (req_id: {req_id})")
            else:
                ret_msg = data.get('ret_msg', 'Unknown error')
                logger.error(f"Bybit {op} failed: {ret_msg} (req_id: {req_id})")

        except Exception as e:
            logger.error(f"Error handling subscription response: {e}")

    async def _handle_pong(self, data: Dict) -> None:
        """
        Handle pong response from Bybit.

        Args:
            data: Pong response data
        """
        logger.debug("Received Bybit pong")

    def _get_auth_signature(self, timestamp: str, recv_window: str = "5000") -> str:
        """
        Generate authentication signature for Bybit.

        Args:
            timestamp: Current timestamp
            recv_window: Receive window

        Returns:
            str: Authentication signature
        """
        if not self.api_key or not self.api_secret:
            return ""

        try:
            param_str = f"GET/realtime{timestamp}{self.api_key}{recv_window}"
            signature = hmac.new(
                self.api_secret.encode('utf-8'),
                param_str.encode('utf-8'),
                hashlib.sha256
            ).hexdigest()

            return signature

        except Exception as e:
            logger.error(f"Error generating auth signature: {e}")
            return ""
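An illustrative aside, not part of this file: the connector computes a signature but never sends an auth frame itself. Bybit's v5 private stream expects an op="auth" message of roughly the shape below (the exact frame layout is an assumption taken from Bybit's public documentation, and the signing string used by _get_auth_signature may differ from it):

# Hypothetical helper that packages the signature above into an auth frame.
def build_auth_message(connector, lifetime_ms: int = 5000) -> dict:
    expires = str(int(time.time() * 1000) + lifetime_ms)   # assumed expiry convention
    return {
        "op": "auth",
        "args": [connector.api_key, expires, connector._get_auth_signature(expires)],
    }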
    async def _send_ping(self) -> None:
        """Send ping to keep connection alive."""
        try:
            ping_msg = {
                "op": "ping",
                "req_id": str(self.req_id)
            }
            self.req_id += 1

            await self._send_message(ping_msg)
            logger.debug("Sent ping to Bybit")

        except Exception as e:
            logger.error(f"Error sending ping: {e}")

    def get_bybit_stats(self) -> Dict[str, Any]:
        """Get Bybit-specific statistics."""
        base_stats = self.get_stats()

        bybit_stats = {
            'subscribed_topics': list(self.subscribed_topics),
            'use_testnet': self.use_testnet,
            'authenticated': bool(self.api_key and self.api_secret),
            'next_req_id': self.req_id
        }

        base_stats.update(bybit_stats)
        return base_stats
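A keepalive sketch built on _send_ping above (the 20-second interval is an assumption; Bybit expects periodic pings but the connector does not schedule them itself):

# Background ping loop to run alongside the stream consumer.
import asyncio

async def keepalive(connector, interval: float = 20.0) -> None:
    while True:
        await connector._send_ping()
        await asyncio.sleep(interval)

# asyncio.create_task(keepalive(BybitConnector()))  # example scheduling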
COBY/connectors/circuit_breaker.py (new file, 206 lines)
@@ -0,0 +1,206 @@
|
|||||||
|
"""
|
||||||
|
Circuit breaker pattern implementation for exchange connections.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import time
|
||||||
|
from enum import Enum
|
||||||
|
from typing import Optional, Callable, Any
|
||||||
|
from ..utils.logging import get_logger
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class CircuitState(Enum):
|
||||||
|
"""Circuit breaker states"""
|
||||||
|
CLOSED = "closed" # Normal operation
|
||||||
|
OPEN = "open" # Circuit is open, calls fail fast
|
||||||
|
HALF_OPEN = "half_open" # Testing if service is back
|
||||||
|
|
||||||
|
|
||||||
|
class CircuitBreaker:
|
||||||
|
"""
|
||||||
|
Circuit breaker to prevent cascading failures in exchange connections.
|
||||||
|
|
||||||
|
States:
|
||||||
|
- CLOSED: Normal operation, requests pass through
|
||||||
|
- OPEN: Circuit is open, requests fail immediately
|
||||||
|
- HALF_OPEN: Testing if service is back, limited requests allowed
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
failure_threshold: int = 5,
|
||||||
|
recovery_timeout: int = 60,
|
||||||
|
expected_exception: type = Exception,
|
||||||
|
name: str = "CircuitBreaker"
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Initialize circuit breaker.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
failure_threshold: Number of failures before opening circuit
|
||||||
|
recovery_timeout: Time in seconds before attempting recovery
|
||||||
|
expected_exception: Exception type that triggers circuit breaker
|
||||||
|
name: Name for logging purposes
|
||||||
|
"""
|
||||||
|
self.failure_threshold = failure_threshold
|
||||||
|
self.recovery_timeout = recovery_timeout
|
||||||
|
self.expected_exception = expected_exception
|
||||||
|
self.name = name
|
||||||
|
|
||||||
|
# State tracking
|
||||||
|
self._state = CircuitState.CLOSED
|
||||||
|
self._failure_count = 0
|
||||||
|
self._last_failure_time: Optional[float] = None
|
||||||
|
self._next_attempt_time: Optional[float] = None
|
||||||
|
|
||||||
|
logger.info(f"Circuit breaker '{name}' initialized with threshold={failure_threshold}")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def state(self) -> CircuitState:
|
||||||
|
"""Get current circuit state"""
|
||||||
|
return self._state
|
||||||
|
|
||||||
|
@property
|
||||||
|
def failure_count(self) -> int:
|
||||||
|
"""Get current failure count"""
|
||||||
|
return self._failure_count
|
||||||
|
|
||||||
|
def _should_attempt_reset(self) -> bool:
|
||||||
|
"""Check if we should attempt to reset the circuit"""
|
||||||
|
if self._state != CircuitState.OPEN:
|
||||||
|
return False
|
||||||
|
|
||||||
|
if self._next_attempt_time is None:
|
||||||
|
return False
        return time.time() >= self._next_attempt_time

    def _on_success(self) -> None:
        """Handle successful operation"""
        if self._state == CircuitState.HALF_OPEN:
            logger.info(f"Circuit breaker '{self.name}' reset to CLOSED after successful test")
            self._state = CircuitState.CLOSED

        self._failure_count = 0
        self._last_failure_time = None
        self._next_attempt_time = None

    def _on_failure(self) -> None:
        """Handle failed operation"""
        self._failure_count += 1
        self._last_failure_time = time.time()

        if self._state == CircuitState.HALF_OPEN:
            # Failed during test, go back to OPEN
            logger.warning(f"Circuit breaker '{self.name}' failed during test, returning to OPEN")
            self._state = CircuitState.OPEN
            self._next_attempt_time = time.time() + self.recovery_timeout
        elif self._failure_count >= self.failure_threshold:
            # Too many failures, open the circuit
            logger.error(
                f"Circuit breaker '{self.name}' OPENED after {self._failure_count} failures"
            )
            self._state = CircuitState.OPEN
            self._next_attempt_time = time.time() + self.recovery_timeout

    def call(self, func: Callable, *args, **kwargs) -> Any:
        """
        Execute function with circuit breaker protection.

        Args:
            func: Function to execute
            *args: Function arguments
            **kwargs: Function keyword arguments

        Returns:
            Function result

        Raises:
            CircuitBreakerOpenError: When circuit is open
            Original exception: When function fails
        """
        # Check if we should attempt reset
        if self._should_attempt_reset():
            logger.info(f"Circuit breaker '{self.name}' attempting reset to HALF_OPEN")
            self._state = CircuitState.HALF_OPEN

        # Fail fast if circuit is open
        if self._state == CircuitState.OPEN:
            raise CircuitBreakerOpenError(
                f"Circuit breaker '{self.name}' is OPEN. "
                f"Next attempt in {self._next_attempt_time - time.time():.1f}s"
            )

        try:
            # Execute the function
            result = func(*args, **kwargs)
            self._on_success()
            return result

        except self.expected_exception as e:
            self._on_failure()
            raise e

    async def call_async(self, func: Callable, *args, **kwargs) -> Any:
        """
        Execute async function with circuit breaker protection.

        Args:
            func: Async function to execute
            *args: Function arguments
            **kwargs: Function keyword arguments

        Returns:
            Function result

        Raises:
            CircuitBreakerOpenError: When circuit is open
            Original exception: When function fails
        """
        # Check if we should attempt reset
        if self._should_attempt_reset():
            logger.info(f"Circuit breaker '{self.name}' attempting reset to HALF_OPEN")
            self._state = CircuitState.HALF_OPEN

        # Fail fast if circuit is open
        if self._state == CircuitState.OPEN:
            raise CircuitBreakerOpenError(
                f"Circuit breaker '{self.name}' is OPEN. "
                f"Next attempt in {self._next_attempt_time - time.time():.1f}s"
            )

        try:
            # Execute the async function
            result = await func(*args, **kwargs)
            self._on_success()
            return result

        except self.expected_exception as e:
            self._on_failure()
            raise e

    def reset(self) -> None:
        """Manually reset the circuit breaker"""
        logger.info(f"Circuit breaker '{self.name}' manually reset")
        self._state = CircuitState.CLOSED
        self._failure_count = 0
        self._last_failure_time = None
        self._next_attempt_time = None

    def get_stats(self) -> dict:
        """Get circuit breaker statistics"""
        return {
            'name': self.name,
            'state': self._state.value,
            'failure_count': self._failure_count,
            'failure_threshold': self.failure_threshold,
            'last_failure_time': self._last_failure_time,
            'next_attempt_time': self._next_attempt_time,
            'recovery_timeout': self.recovery_timeout
        }


class CircuitBreakerOpenError(Exception):
    """Exception raised when circuit breaker is open"""
    pass
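# Hedged usage sketch (not part of the diff): the constructor arguments below are assumed
# from the attributes referenced in _on_failure() and get_stats(); the real __init__
# appears earlier in this file.
#
#     def flaky_fetch(symbol: str) -> dict:
#         # stand-in for a real exchange call that may raise ConnectionError
#         return {"symbol": symbol, "bids": [], "asks": []}
#
#     breaker = CircuitBreaker(
#         name="example-rest",
#         failure_threshold=5,
#         recovery_timeout=60.0,
#         expected_exception=ConnectionError,
#     )
#
#     try:
#         snapshot = breaker.call(flaky_fetch, "BTCUSDT")
#     except CircuitBreakerOpenError:
#         snapshot = None  # fail fast while the circuit is OPEN; retried after recovery_timeout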
650
COBY/connectors/coinbase_connector.py
Normal file
@@ -0,0 +1,650 @@
"""
Coinbase Pro exchange connector implementation.
Supports WebSocket connections to Coinbase Pro (now Coinbase Advanced Trade).
"""

import json
import hmac
import hashlib
import base64
import time
from typing import Dict, List, Optional, Any
from datetime import datetime, timezone

from ..models.core import OrderBookSnapshot, TradeEvent, PriceLevel
from ..utils.logging import get_logger, set_correlation_id
from ..utils.exceptions import ValidationError, ConnectionError
from ..utils.validation import validate_symbol, validate_price, validate_volume
from .base_connector import BaseExchangeConnector

logger = get_logger(__name__)


class CoinbaseConnector(BaseExchangeConnector):
    """
    Coinbase Pro WebSocket connector implementation.

    Supports:
    - Order book level2 streams
    - Trade streams (matches)
    - Symbol normalization
    - Authentication for private channels (if needed)
    """

    # Coinbase Pro WebSocket URLs
    WEBSOCKET_URL = "wss://ws-feed.exchange.coinbase.com"
    SANDBOX_URL = "wss://ws-feed-public.sandbox.exchange.coinbase.com"
    API_URL = "https://api.exchange.coinbase.com"
|
||||||
|
|
||||||
|
def __init__(self, use_sandbox: bool = False, api_key: str = None,
|
||||||
|
api_secret: str = None, passphrase: str = None):
|
||||||
|
"""
|
||||||
|
Initialize Coinbase connector.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
use_sandbox: Whether to use sandbox environment
|
||||||
|
api_key: API key for authentication (optional)
|
||||||
|
api_secret: API secret for authentication (optional)
|
||||||
|
passphrase: API passphrase for authentication (optional)
|
||||||
|
"""
|
||||||
|
websocket_url = self.SANDBOX_URL if use_sandbox else self.WEBSOCKET_URL
|
||||||
|
super().__init__("coinbase", websocket_url)
|
||||||
|
|
||||||
|
# Authentication credentials (optional)
|
||||||
|
self.api_key = api_key
|
||||||
|
self.api_secret = api_secret
|
||||||
|
self.passphrase = passphrase
|
||||||
|
self.use_sandbox = use_sandbox
|
||||||
|
|
||||||
|
# Coinbase-specific message handlers
|
||||||
|
self.message_handlers.update({
|
||||||
|
'l2update': self._handle_orderbook_update,
|
||||||
|
'match': self._handle_trade_update,
|
||||||
|
'snapshot': self._handle_orderbook_snapshot,
|
||||||
|
'error': self._handle_error_message,
|
||||||
|
'subscriptions': self._handle_subscription_response
|
||||||
|
})
|
||||||
|
|
||||||
|
# Channel management
|
||||||
|
self.subscribed_channels = set()
|
||||||
|
self.product_ids = set()
|
||||||
|
|
||||||
|
logger.info(f"Coinbase connector initialized ({'sandbox' if use_sandbox else 'production'})")
|
||||||
|
|
||||||
|
def _get_message_type(self, data: Dict) -> str:
|
||||||
|
"""
|
||||||
|
Determine message type from Coinbase message data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Parsed message data
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Message type identifier
|
||||||
|
"""
|
||||||
|
# Coinbase uses 'type' field for message type
|
||||||
|
return data.get('type', 'unknown')
|
||||||
|
|
||||||
|
def normalize_symbol(self, symbol: str) -> str:
|
||||||
|
"""
|
||||||
|
Normalize symbol to Coinbase format.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Standard symbol format (e.g., 'BTCUSDT')
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Coinbase product ID format (e.g., 'BTC-USD')
|
||||||
|
"""
|
||||||
|
# Convert standard format to Coinbase product ID
|
||||||
|
if symbol.upper() == 'BTCUSDT':
|
||||||
|
return 'BTC-USD'
|
||||||
|
elif symbol.upper() == 'ETHUSDT':
|
||||||
|
return 'ETH-USD'
|
||||||
|
elif symbol.upper() == 'ADAUSDT':
|
||||||
|
return 'ADA-USD'
|
||||||
|
elif symbol.upper() == 'DOTUSDT':
|
||||||
|
return 'DOT-USD'
|
||||||
|
elif symbol.upper() == 'LINKUSDT':
|
||||||
|
return 'LINK-USD'
|
||||||
|
else:
|
||||||
|
# Generic conversion: BTCUSDT -> BTC-USD
|
||||||
|
if symbol.endswith('USDT'):
|
||||||
|
base = symbol[:-4]
|
||||||
|
return f"{base}-USD"
|
||||||
|
elif symbol.endswith('USD'):
|
||||||
|
base = symbol[:-3]
|
||||||
|
return f"{base}-USD"
|
||||||
|
else:
|
||||||
|
# Assume it's already in correct format or try to parse
|
||||||
|
if '-' in symbol:
|
||||||
|
return symbol.upper()
|
||||||
|
else:
|
||||||
|
# Default fallback
|
||||||
|
return symbol.upper()
|
||||||
|
|
||||||
|
def _denormalize_symbol(self, product_id: str) -> str:
|
||||||
|
"""
|
||||||
|
Convert Coinbase product ID back to standard format.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
product_id: Coinbase product ID (e.g., 'BTC-USD')
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Standard symbol format (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
if '-' in product_id:
|
||||||
|
base, quote = product_id.split('-', 1)
|
||||||
|
if quote == 'USD':
|
||||||
|
return f"{base}USDT"
|
||||||
|
else:
|
||||||
|
return f"{base}{quote}"
|
||||||
|
return product_id
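    # Round-trip sketch of the two helpers above (illustrative values only):
    #   normalize_symbol('BTCUSDT')    -> 'BTC-USD'
    #   normalize_symbol('LINKUSDT')   -> 'LINK-USD'
    #   _denormalize_symbol('BTC-USD') -> 'BTCUSDT'
    #   _denormalize_symbol('ETH-BTC') -> 'ETHBTC'
    # Note the USD/USDT collapse is lossy: 'BTC-USD' always maps back to 'BTCUSDT'.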
|
||||||
|
|
||||||
|
async def subscribe_orderbook(self, symbol: str) -> None:
|
||||||
|
"""
|
||||||
|
Subscribe to order book level2 updates for a symbol.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
product_id = self.normalize_symbol(symbol)
|
||||||
|
|
||||||
|
# Create subscription message
|
||||||
|
subscription_msg = {
|
||||||
|
"type": "subscribe",
|
||||||
|
"product_ids": [product_id],
|
||||||
|
"channels": ["level2"]
|
||||||
|
}
|
||||||
|
|
||||||
|
# Add authentication if credentials provided
|
||||||
|
if self.api_key and self.api_secret and self.passphrase:
|
||||||
|
subscription_msg.update(self._get_auth_headers(subscription_msg))
|
||||||
|
|
||||||
|
# Send subscription
|
||||||
|
success = await self._send_message(subscription_msg)
|
||||||
|
if success:
|
||||||
|
# Track subscription
|
||||||
|
if symbol not in self.subscriptions:
|
||||||
|
self.subscriptions[symbol] = []
|
||||||
|
if 'orderbook' not in self.subscriptions[symbol]:
|
||||||
|
self.subscriptions[symbol].append('orderbook')
|
||||||
|
|
||||||
|
self.subscribed_channels.add('level2')
|
||||||
|
self.product_ids.add(product_id)
|
||||||
|
|
||||||
|
logger.info(f"Subscribed to order book for {symbol} ({product_id}) on Coinbase")
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to subscribe to order book for {symbol} on Coinbase")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error subscribing to order book for {symbol}: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
async def subscribe_trades(self, symbol: str) -> None:
|
||||||
|
"""
|
||||||
|
Subscribe to trade updates (matches) for a symbol.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
product_id = self.normalize_symbol(symbol)
|
||||||
|
|
||||||
|
# Create subscription message
|
||||||
|
subscription_msg = {
|
||||||
|
"type": "subscribe",
|
||||||
|
"product_ids": [product_id],
|
||||||
|
"channels": ["matches"]
|
||||||
|
}
|
||||||
|
|
||||||
|
# Add authentication if credentials provided
|
||||||
|
if self.api_key and self.api_secret and self.passphrase:
|
||||||
|
subscription_msg.update(self._get_auth_headers(subscription_msg))
|
||||||
|
|
||||||
|
# Send subscription
|
||||||
|
success = await self._send_message(subscription_msg)
|
||||||
|
if success:
|
||||||
|
# Track subscription
|
||||||
|
if symbol not in self.subscriptions:
|
||||||
|
self.subscriptions[symbol] = []
|
||||||
|
if 'trades' not in self.subscriptions[symbol]:
|
||||||
|
self.subscriptions[symbol].append('trades')
|
||||||
|
|
||||||
|
self.subscribed_channels.add('matches')
|
||||||
|
self.product_ids.add(product_id)
|
||||||
|
|
||||||
|
logger.info(f"Subscribed to trades for {symbol} ({product_id}) on Coinbase")
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to subscribe to trades for {symbol} on Coinbase")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error subscribing to trades for {symbol}: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
async def unsubscribe_orderbook(self, symbol: str) -> None:
|
||||||
|
"""
|
||||||
|
Unsubscribe from order book updates for a symbol.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
product_id = self.normalize_symbol(symbol)
|
||||||
|
|
||||||
|
# Create unsubscription message
|
||||||
|
unsubscription_msg = {
|
||||||
|
"type": "unsubscribe",
|
||||||
|
"product_ids": [product_id],
|
||||||
|
"channels": ["level2"]
|
||||||
|
}
|
||||||
|
|
||||||
|
# Send unsubscription
|
||||||
|
success = await self._send_message(unsubscription_msg)
|
||||||
|
if success:
|
||||||
|
# Remove from tracking
|
||||||
|
if symbol in self.subscriptions and 'orderbook' in self.subscriptions[symbol]:
|
||||||
|
self.subscriptions[symbol].remove('orderbook')
|
||||||
|
if not self.subscriptions[symbol]:
|
||||||
|
del self.subscriptions[symbol]
|
||||||
|
|
||||||
|
self.product_ids.discard(product_id)
|
||||||
|
|
||||||
|
logger.info(f"Unsubscribed from order book for {symbol} ({product_id}) on Coinbase")
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to unsubscribe from order book for {symbol} on Coinbase")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error unsubscribing from order book for {symbol}: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
async def unsubscribe_trades(self, symbol: str) -> None:
|
||||||
|
"""
|
||||||
|
Unsubscribe from trade updates for a symbol.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
product_id = self.normalize_symbol(symbol)
|
||||||
|
|
||||||
|
# Create unsubscription message
|
||||||
|
unsubscription_msg = {
|
||||||
|
"type": "unsubscribe",
|
||||||
|
"product_ids": [product_id],
|
||||||
|
"channels": ["matches"]
|
||||||
|
}
|
||||||
|
|
||||||
|
# Send unsubscription
|
||||||
|
success = await self._send_message(unsubscription_msg)
|
||||||
|
if success:
|
||||||
|
# Remove from tracking
|
||||||
|
if symbol in self.subscriptions and 'trades' in self.subscriptions[symbol]:
|
||||||
|
self.subscriptions[symbol].remove('trades')
|
||||||
|
if not self.subscriptions[symbol]:
|
||||||
|
del self.subscriptions[symbol]
|
||||||
|
|
||||||
|
self.product_ids.discard(product_id)
|
||||||
|
|
||||||
|
logger.info(f"Unsubscribed from trades for {symbol} ({product_id}) on Coinbase")
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to unsubscribe from trades for {symbol} on Coinbase")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error unsubscribing from trades for {symbol}: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
async def get_symbols(self) -> List[str]:
|
||||||
|
"""
|
||||||
|
Get list of available trading symbols from Coinbase.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[str]: List of available symbols in standard format
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
import aiohttp
|
||||||
|
|
||||||
|
api_url = "https://api-public.sandbox.exchange.coinbase.com" if self.use_sandbox else self.API_URL
|
||||||
|
|
||||||
|
async with aiohttp.ClientSession() as session:
|
||||||
|
async with session.get(f"{api_url}/products") as response:
|
||||||
|
if response.status == 200:
|
||||||
|
data = await response.json()
|
||||||
|
symbols = []
|
||||||
|
|
||||||
|
for product in data:
|
||||||
|
if product.get('status') == 'online' and product.get('trading_disabled') is False:
|
||||||
|
product_id = product.get('id', '')
|
||||||
|
# Convert to standard format
|
||||||
|
standard_symbol = self._denormalize_symbol(product_id)
|
||||||
|
symbols.append(standard_symbol)
|
||||||
|
|
||||||
|
logger.info(f"Retrieved {len(symbols)} symbols from Coinbase")
|
||||||
|
return symbols
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to get symbols from Coinbase: HTTP {response.status}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting symbols from Coinbase: {e}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
async def get_orderbook_snapshot(self, symbol: str, depth: int = 20) -> Optional[OrderBookSnapshot]:
|
||||||
|
"""
|
||||||
|
Get current order book snapshot from Coinbase REST API.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
depth: Number of price levels to retrieve (Coinbase supports up to 50)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
OrderBookSnapshot: Current order book or None if unavailable
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
import aiohttp
|
||||||
|
|
||||||
|
product_id = self.normalize_symbol(symbol)
|
||||||
|
api_url = "https://api-public.sandbox.exchange.coinbase.com" if self.use_sandbox else self.API_URL
|
||||||
|
|
||||||
|
# Coinbase supports level 1, 2, or 3
|
||||||
|
level = 2 # Level 2 gives us aggregated order book
|
||||||
|
|
||||||
|
url = f"{api_url}/products/{product_id}/book"
|
||||||
|
params = {'level': level}
|
||||||
|
|
||||||
|
async with aiohttp.ClientSession() as session:
|
||||||
|
async with session.get(url, params=params) as response:
|
||||||
|
if response.status == 200:
|
||||||
|
data = await response.json()
|
||||||
|
return self._parse_orderbook_snapshot(data, symbol)
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to get order book for {symbol}: HTTP {response.status}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting order book snapshot for {symbol}: {e}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _parse_orderbook_snapshot(self, data: Dict, symbol: str) -> OrderBookSnapshot:
|
||||||
|
"""
|
||||||
|
Parse Coinbase order book data into OrderBookSnapshot.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Raw Coinbase order book data
|
||||||
|
symbol: Trading symbol
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
OrderBookSnapshot: Parsed order book
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
# Parse bids and asks
|
||||||
|
bids = []
|
||||||
|
for bid_data in data.get('bids', []):
|
||||||
|
price = float(bid_data[0])
|
||||||
|
size = float(bid_data[1])
|
||||||
|
|
||||||
|
if validate_price(price) and validate_volume(size):
|
||||||
|
bids.append(PriceLevel(price=price, size=size))
|
||||||
|
|
||||||
|
asks = []
|
||||||
|
for ask_data in data.get('asks', []):
|
||||||
|
price = float(ask_data[0])
|
||||||
|
size = float(ask_data[1])
|
||||||
|
|
||||||
|
if validate_price(price) and validate_volume(size):
|
||||||
|
asks.append(PriceLevel(price=price, size=size))
|
||||||
|
|
||||||
|
# Create order book snapshot
|
||||||
|
orderbook = OrderBookSnapshot(
|
||||||
|
symbol=symbol,
|
||||||
|
exchange=self.exchange_name,
|
||||||
|
timestamp=datetime.now(timezone.utc),
|
||||||
|
bids=bids,
|
||||||
|
asks=asks,
|
||||||
|
sequence_id=data.get('sequence')
|
||||||
|
)
|
||||||
|
|
||||||
|
return orderbook
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error parsing order book snapshot: {e}")
|
||||||
|
raise ValidationError(f"Invalid order book data: {e}", "PARSE_ERROR")
|
||||||
|
|
||||||
|
def _get_auth_headers(self, message: Dict) -> Dict[str, str]:
|
||||||
|
"""
|
||||||
|
Generate authentication headers for Coinbase Pro API.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
message: Message to authenticate
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict: Authentication headers
|
||||||
|
"""
|
||||||
|
if not all([self.api_key, self.api_secret, self.passphrase]):
|
||||||
|
return {}
|
||||||
|
|
||||||
|
try:
|
||||||
|
timestamp = str(time.time())
|
||||||
|
message_str = json.dumps(message, separators=(',', ':'))
|
||||||
|
|
||||||
|
# Create signature
|
||||||
|
message_to_sign = timestamp + 'GET' + '/users/self/verify' + message_str
|
||||||
|
signature = base64.b64encode(
|
||||||
|
hmac.new(
|
||||||
|
base64.b64decode(self.api_secret),
|
||||||
|
message_to_sign.encode('utf-8'),
|
||||||
|
hashlib.sha256
|
||||||
|
).digest()
|
||||||
|
).decode('utf-8')
|
||||||
|
|
||||||
|
return {
|
||||||
|
'CB-ACCESS-KEY': self.api_key,
|
||||||
|
'CB-ACCESS-SIGN': signature,
|
||||||
|
'CB-ACCESS-TIMESTAMP': timestamp,
|
||||||
|
'CB-ACCESS-PASSPHRASE': self.passphrase
|
||||||
|
}
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error generating auth headers: {e}")
|
||||||
|
return {}
|
||||||
|
|
||||||
|
async def _handle_orderbook_snapshot(self, data: Dict) -> None:
|
||||||
|
"""
|
||||||
|
Handle order book snapshot from Coinbase.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Order book snapshot data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
product_id = data.get('product_id', '')
|
||||||
|
if not product_id:
|
||||||
|
logger.warning("Order book snapshot missing product_id")
|
||||||
|
return
|
||||||
|
|
||||||
|
symbol = self._denormalize_symbol(product_id)
|
||||||
|
|
||||||
|
# Parse bids and asks
|
||||||
|
bids = []
|
||||||
|
for bid_data in data.get('bids', []):
|
||||||
|
price = float(bid_data[0])
|
||||||
|
size = float(bid_data[1])
|
||||||
|
|
||||||
|
if validate_price(price) and validate_volume(size):
|
||||||
|
bids.append(PriceLevel(price=price, size=size))
|
||||||
|
|
||||||
|
asks = []
|
||||||
|
for ask_data in data.get('asks', []):
|
||||||
|
price = float(ask_data[0])
|
||||||
|
size = float(ask_data[1])
|
||||||
|
|
||||||
|
if validate_price(price) and validate_volume(size):
|
||||||
|
asks.append(PriceLevel(price=price, size=size))
|
||||||
|
|
||||||
|
# Create order book snapshot
|
||||||
|
orderbook = OrderBookSnapshot(
|
||||||
|
symbol=symbol,
|
||||||
|
exchange=self.exchange_name,
|
||||||
|
timestamp=datetime.now(timezone.utc),
|
||||||
|
bids=bids,
|
||||||
|
asks=asks,
|
||||||
|
sequence_id=data.get('sequence')
|
||||||
|
)
|
||||||
|
|
||||||
|
# Notify callbacks
|
||||||
|
self._notify_data_callbacks(orderbook)
|
||||||
|
|
||||||
|
logger.debug(f"Processed order book snapshot for {symbol}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error handling order book snapshot: {e}")
|
||||||
|
|
||||||
|
async def _handle_orderbook_update(self, data: Dict) -> None:
|
||||||
|
"""
|
||||||
|
Handle order book level2 update from Coinbase.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Order book update data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
product_id = data.get('product_id', '')
|
||||||
|
if not product_id:
|
||||||
|
logger.warning("Order book update missing product_id")
|
||||||
|
return
|
||||||
|
|
||||||
|
symbol = self._denormalize_symbol(product_id)
|
||||||
|
|
||||||
|
# Coinbase l2update format: changes array with [side, price, size]
|
||||||
|
changes = data.get('changes', [])
|
||||||
|
|
||||||
|
bids = []
|
||||||
|
asks = []
|
||||||
|
|
||||||
|
for change in changes:
|
||||||
|
if len(change) >= 3:
|
||||||
|
side = change[0] # 'buy' or 'sell'
|
||||||
|
price = float(change[1])
|
||||||
|
size = float(change[2])
|
||||||
|
|
||||||
|
if validate_price(price) and validate_volume(size):
|
||||||
|
if side == 'buy':
|
||||||
|
bids.append(PriceLevel(price=price, size=size))
|
||||||
|
elif side == 'sell':
|
||||||
|
asks.append(PriceLevel(price=price, size=size))
|
||||||
|
|
||||||
|
# Create order book update (partial snapshot)
|
||||||
|
orderbook = OrderBookSnapshot(
|
||||||
|
symbol=symbol,
|
||||||
|
exchange=self.exchange_name,
|
||||||
|
timestamp=datetime.fromisoformat(data.get('time', '').replace('Z', '+00:00')),
|
||||||
|
bids=bids,
|
||||||
|
asks=asks,
|
||||||
|
sequence_id=data.get('sequence')
|
||||||
|
)
|
||||||
|
|
||||||
|
# Notify callbacks
|
||||||
|
self._notify_data_callbacks(orderbook)
|
||||||
|
|
||||||
|
logger.debug(f"Processed order book update for {symbol}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error handling order book update: {e}")
|
||||||
|
|
||||||
|
async def _handle_trade_update(self, data: Dict) -> None:
|
||||||
|
"""
|
||||||
|
Handle trade (match) update from Coinbase.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Trade update data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
product_id = data.get('product_id', '')
|
||||||
|
if not product_id:
|
||||||
|
logger.warning("Trade update missing product_id")
|
||||||
|
return
|
||||||
|
|
||||||
|
symbol = self._denormalize_symbol(product_id)
|
||||||
|
|
||||||
|
price = float(data.get('price', 0))
|
||||||
|
size = float(data.get('size', 0))
|
||||||
|
|
||||||
|
# Validate data
|
||||||
|
if not validate_price(price) or not validate_volume(size):
|
||||||
|
logger.warning(f"Invalid trade data: price={price}, size={size}")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Determine side (Coinbase uses 'side' field for taker side)
|
||||||
|
side = data.get('side', 'unknown') # 'buy' or 'sell'
|
||||||
|
|
||||||
|
# Create trade event
|
||||||
|
trade = TradeEvent(
|
||||||
|
symbol=symbol,
|
||||||
|
exchange=self.exchange_name,
|
||||||
|
timestamp=datetime.fromisoformat(data.get('time', '').replace('Z', '+00:00')),
|
||||||
|
price=price,
|
||||||
|
size=size,
|
||||||
|
side=side,
|
||||||
|
trade_id=str(data.get('trade_id', ''))
|
||||||
|
)
|
||||||
|
|
||||||
|
# Notify callbacks
|
||||||
|
self._notify_data_callbacks(trade)
|
||||||
|
|
||||||
|
logger.debug(f"Processed trade for {symbol}: {side} {size} @ {price}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error handling trade update: {e}")
|
||||||
|
|
||||||
|
async def _handle_subscription_response(self, data: Dict) -> None:
|
||||||
|
"""
|
||||||
|
Handle subscription confirmation from Coinbase.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Subscription response data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
channels = data.get('channels', [])
|
||||||
|
logger.info(f"Coinbase subscription confirmed for channels: {channels}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error handling subscription response: {e}")
|
||||||
|
|
||||||
|
async def _handle_error_message(self, data: Dict) -> None:
|
||||||
|
"""
|
||||||
|
Handle error message from Coinbase.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Error message data
|
||||||
|
"""
|
||||||
|
message = data.get('message', 'Unknown error')
|
||||||
|
reason = data.get('reason', '')
|
||||||
|
|
||||||
|
logger.error(f"Coinbase error: {message}")
|
||||||
|
if reason:
|
||||||
|
logger.error(f"Coinbase error reason: {reason}")
|
||||||
|
|
||||||
|
# Handle specific error types
|
||||||
|
if 'Invalid signature' in message:
|
||||||
|
logger.error("Authentication failed - check API credentials")
|
||||||
|
elif 'Product not found' in message:
|
||||||
|
logger.error("Invalid product ID - check symbol mapping")
|
||||||
|
|
||||||
|
def get_coinbase_stats(self) -> Dict[str, Any]:
|
||||||
|
"""Get Coinbase-specific statistics."""
|
||||||
|
base_stats = self.get_stats()
|
||||||
|
|
||||||
|
coinbase_stats = {
|
||||||
|
'subscribed_channels': list(self.subscribed_channels),
|
||||||
|
'product_ids': list(self.product_ids),
|
||||||
|
'use_sandbox': self.use_sandbox,
|
||||||
|
'authenticated': bool(self.api_key and self.api_secret and self.passphrase)
|
||||||
|
}
|
||||||
|
|
||||||
|
base_stats.update(coinbase_stats)
|
||||||
|
return base_stats
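    # Minimal wiring sketch (hedged): connect() and add_data_callback() are assumed to
    # come from BaseExchangeConnector, which is not shown in this section.
    #
    #     connector = CoinbaseConnector(use_sandbox=True)
    #     connector.add_data_callback(lambda event: print(type(event).__name__, event.symbol))
    #     await connector.connect()
    #     await connector.subscribe_orderbook("BTCUSDT")
    #     await connector.subscribe_trades("BTCUSDT")
    #     print(connector.get_coinbase_stats())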
|
||||||
271
COBY/connectors/connection_manager.py
Normal file
@@ -0,0 +1,271 @@
"""
Connection management with exponential backoff and retry logic.
"""

import asyncio
import random
from typing import Optional, Callable, Any
from ..utils.logging import get_logger
from ..utils.exceptions import ConnectionError

logger = get_logger(__name__)


class ExponentialBackoff:
    """Exponential backoff strategy for connection retries"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
initial_delay: float = 1.0,
|
||||||
|
max_delay: float = 300.0,
|
||||||
|
multiplier: float = 2.0,
|
||||||
|
jitter: bool = True
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Initialize exponential backoff.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
initial_delay: Initial delay in seconds
|
||||||
|
max_delay: Maximum delay in seconds
|
||||||
|
multiplier: Backoff multiplier
|
||||||
|
jitter: Whether to add random jitter
|
||||||
|
"""
|
||||||
|
self.initial_delay = initial_delay
|
||||||
|
self.max_delay = max_delay
|
||||||
|
self.multiplier = multiplier
|
||||||
|
self.jitter = jitter
|
||||||
|
self.current_delay = initial_delay
|
||||||
|
self.attempt_count = 0
|
||||||
|
|
||||||
|
def get_delay(self) -> float:
|
||||||
|
"""Get next delay value"""
|
||||||
|
delay = min(self.current_delay, self.max_delay)
|
||||||
|
|
||||||
|
# Add jitter to prevent thundering herd
|
||||||
|
if self.jitter:
|
||||||
|
delay = delay * (0.5 + random.random() * 0.5)
|
||||||
|
|
||||||
|
# Update for next attempt
|
||||||
|
self.current_delay *= self.multiplier
|
||||||
|
self.attempt_count += 1
|
||||||
|
|
||||||
|
return delay
|
||||||
|
|
||||||
|
def reset(self) -> None:
|
||||||
|
"""Reset backoff to initial state"""
|
||||||
|
self.current_delay = self.initial_delay
|
||||||
|
self.attempt_count = 0
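    # Worked example of the schedule above with jitter disabled (deterministic):
    #
    #     backoff = ExponentialBackoff(initial_delay=1.0, max_delay=8.0,
    #                                  multiplier=2.0, jitter=False)
    #     [backoff.get_delay() for _ in range(5)]   # -> [1.0, 2.0, 4.0, 8.0, 8.0]
    #
    # With jitter=True each value is scaled by a random factor in [0.5, 1.0) to
    # spread out simultaneous reconnect attempts.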
|
||||||
|
|
||||||
|
|
||||||
|
class ConnectionManager:
|
||||||
|
"""
|
||||||
|
Manages connection lifecycle with retry logic and health monitoring.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
name: str,
|
||||||
|
max_retries: int = 10,
|
||||||
|
initial_delay: float = 1.0,
|
||||||
|
max_delay: float = 300.0,
|
||||||
|
health_check_interval: int = 30
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Initialize connection manager.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
name: Connection name for logging
|
||||||
|
max_retries: Maximum number of retry attempts
|
||||||
|
initial_delay: Initial retry delay in seconds
|
||||||
|
max_delay: Maximum retry delay in seconds
|
||||||
|
health_check_interval: Health check interval in seconds
|
||||||
|
"""
|
||||||
|
self.name = name
|
||||||
|
self.max_retries = max_retries
|
||||||
|
self.health_check_interval = health_check_interval
|
||||||
|
|
||||||
|
self.backoff = ExponentialBackoff(initial_delay, max_delay)
|
||||||
|
self.is_connected = False
|
||||||
|
self.connection_attempts = 0
|
||||||
|
self.last_error: Optional[Exception] = None
|
||||||
|
self.health_check_task: Optional[asyncio.Task] = None
|
||||||
|
|
||||||
|
# Callbacks
|
||||||
|
self.on_connect: Optional[Callable] = None
|
||||||
|
self.on_disconnect: Optional[Callable] = None
|
||||||
|
self.on_error: Optional[Callable] = None
|
||||||
|
self.on_health_check: Optional[Callable] = None
|
||||||
|
|
||||||
|
logger.info(f"Connection manager '{name}' initialized")
|
||||||
|
|
||||||
|
async def connect(self, connect_func: Callable) -> bool:
|
||||||
|
"""
|
||||||
|
Attempt to establish connection with retry logic.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
connect_func: Async function that establishes the connection
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if connection successful, False otherwise
|
||||||
|
"""
|
||||||
|
self.connection_attempts = 0
|
||||||
|
self.backoff.reset()
|
||||||
|
|
||||||
|
while self.connection_attempts < self.max_retries:
|
||||||
|
try:
|
||||||
|
logger.info(f"Attempting to connect '{self.name}' (attempt {self.connection_attempts + 1})")
|
||||||
|
|
||||||
|
# Attempt connection
|
||||||
|
await connect_func()
|
||||||
|
|
||||||
|
# Connection successful
|
||||||
|
self.is_connected = True
|
||||||
|
self.connection_attempts = 0
|
||||||
|
self.last_error = None
|
||||||
|
self.backoff.reset()
|
||||||
|
|
||||||
|
logger.info(f"Connection '{self.name}' established successfully")
|
||||||
|
|
||||||
|
# Start health check
|
||||||
|
await self._start_health_check()
|
||||||
|
|
||||||
|
# Notify success
|
||||||
|
if self.on_connect:
|
||||||
|
try:
|
||||||
|
await self.on_connect()
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Error in connect callback: {e}")
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
self.connection_attempts += 1
|
||||||
|
self.last_error = e
|
||||||
|
|
||||||
|
logger.warning(
|
||||||
|
f"Connection '{self.name}' failed (attempt {self.connection_attempts}): {e}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Notify error
|
||||||
|
if self.on_error:
|
||||||
|
try:
|
||||||
|
await self.on_error(e)
|
||||||
|
except Exception as callback_error:
|
||||||
|
logger.warning(f"Error in error callback: {callback_error}")
|
||||||
|
|
||||||
|
# Check if we should retry
|
||||||
|
if self.connection_attempts >= self.max_retries:
|
||||||
|
logger.error(f"Connection '{self.name}' failed after {self.max_retries} attempts")
|
||||||
|
break
|
||||||
|
|
||||||
|
# Wait before retry
|
||||||
|
delay = self.backoff.get_delay()
|
||||||
|
logger.info(f"Retrying connection '{self.name}' in {delay:.1f} seconds")
|
||||||
|
await asyncio.sleep(delay)
|
||||||
|
|
||||||
|
self.is_connected = False
|
||||||
|
return False
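    # Hedged usage sketch: driving the retry loop above with an arbitrary async
    # connect coroutine (every name except ConnectionManager is invented):
    #
    #     manager = ConnectionManager("gateio-ws", max_retries=5, initial_delay=1.0)
    #     manager.on_health_check = check_socket_alive    # async, returns True/False
    #     ok = await manager.connect(open_websocket)      # retries with exponential backoff
    #     if not ok:
    #         logger.error(f"Giving up on gateio-ws: {manager.last_error}")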
|
||||||
|
|
||||||
|
async def disconnect(self, disconnect_func: Optional[Callable] = None) -> None:
|
||||||
|
"""
|
||||||
|
Disconnect and cleanup.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
disconnect_func: Optional async function to handle disconnection
|
||||||
|
"""
|
||||||
|
logger.info(f"Disconnecting '{self.name}'")
|
||||||
|
|
||||||
|
# Stop health check
|
||||||
|
await self._stop_health_check()
|
||||||
|
|
||||||
|
# Execute disconnect function
|
||||||
|
if disconnect_func:
|
||||||
|
try:
|
||||||
|
await disconnect_func()
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Error during disconnect: {e}")
|
||||||
|
|
||||||
|
self.is_connected = False
|
||||||
|
|
||||||
|
# Notify disconnect
|
||||||
|
if self.on_disconnect:
|
||||||
|
try:
|
||||||
|
await self.on_disconnect()
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Error in disconnect callback: {e}")
|
||||||
|
|
||||||
|
logger.info(f"Connection '{self.name}' disconnected")
|
||||||
|
|
||||||
|
async def reconnect(self, connect_func: Callable, disconnect_func: Optional[Callable] = None) -> bool:
|
||||||
|
"""
|
||||||
|
Reconnect by disconnecting first then connecting.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
connect_func: Async function that establishes the connection
|
||||||
|
disconnect_func: Optional async function to handle disconnection
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if reconnection successful, False otherwise
|
||||||
|
"""
|
||||||
|
logger.info(f"Reconnecting '{self.name}'")
|
||||||
|
|
||||||
|
# Disconnect first
|
||||||
|
await self.disconnect(disconnect_func)
|
||||||
|
|
||||||
|
# Wait a bit before reconnecting
|
||||||
|
await asyncio.sleep(1.0)
|
||||||
|
|
||||||
|
# Attempt to connect
|
||||||
|
return await self.connect(connect_func)
|
||||||
|
|
||||||
|
async def _start_health_check(self) -> None:
|
||||||
|
"""Start periodic health check"""
|
||||||
|
if self.health_check_task:
|
||||||
|
return
|
||||||
|
|
||||||
|
self.health_check_task = asyncio.create_task(self._health_check_loop())
|
||||||
|
logger.debug(f"Health check started for '{self.name}'")
|
||||||
|
|
||||||
|
async def _stop_health_check(self) -> None:
|
||||||
|
"""Stop health check"""
|
||||||
|
if self.health_check_task:
|
||||||
|
self.health_check_task.cancel()
|
||||||
|
try:
|
||||||
|
await self.health_check_task
|
||||||
|
except asyncio.CancelledError:
|
||||||
|
pass
|
||||||
|
self.health_check_task = None
|
||||||
|
logger.debug(f"Health check stopped for '{self.name}'")
|
||||||
|
|
||||||
|
async def _health_check_loop(self) -> None:
|
||||||
|
"""Health check loop"""
|
||||||
|
while self.is_connected:
|
||||||
|
try:
|
||||||
|
await asyncio.sleep(self.health_check_interval)
|
||||||
|
|
||||||
|
if self.on_health_check:
|
||||||
|
is_healthy = await self.on_health_check()
|
||||||
|
if not is_healthy:
|
||||||
|
logger.warning(f"Health check failed for '{self.name}'")
|
||||||
|
self.is_connected = False
|
||||||
|
break
|
||||||
|
|
||||||
|
except asyncio.CancelledError:
|
||||||
|
break
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Health check error for '{self.name}': {e}")
|
||||||
|
self.is_connected = False
|
||||||
|
break
|
||||||
|
|
||||||
|
def get_stats(self) -> dict:
|
||||||
|
"""Get connection statistics"""
|
||||||
|
return {
|
||||||
|
'name': self.name,
|
||||||
|
'is_connected': self.is_connected,
|
||||||
|
'connection_attempts': self.connection_attempts,
|
||||||
|
'max_retries': self.max_retries,
|
||||||
|
'current_delay': self.backoff.current_delay,
|
||||||
|
'backoff_attempts': self.backoff.attempt_count,
|
||||||
|
'last_error': str(self.last_error) if self.last_error else None,
|
||||||
|
'health_check_active': self.health_check_task is not None
|
||||||
|
}
|
||||||
601
COBY/connectors/gateio_connector.py
Normal file
@@ -0,0 +1,601 @@
"""
Gate.io exchange connector implementation.
Supports WebSocket connections to Gate.io with their WebSocket v4 API.
"""

import json
import hmac
import hashlib
import time
from typing import Dict, List, Optional, Any
from datetime import datetime, timezone

from ..models.core import OrderBookSnapshot, TradeEvent, PriceLevel
from ..utils.logging import get_logger, set_correlation_id
from ..utils.exceptions import ValidationError, ConnectionError
from ..utils.validation import validate_symbol, validate_price, validate_volume
from .base_connector import BaseExchangeConnector

logger = get_logger(__name__)


class GateIOConnector(BaseExchangeConnector):
    """
    Gate.io WebSocket connector implementation.

    Supports:
    - WebSocket v4 API
    - Order book streams
    - Trade streams
    - Symbol normalization
    - Authentication for private channels
    """

    # Gate.io WebSocket URLs
    WEBSOCKET_URL = "wss://api.gateio.ws/ws/v4/"
    TESTNET_URL = "wss://fx-api-testnet.gateio.ws/ws/v4/"
    API_URL = "https://api.gateio.ws"
|
||||||
|
|
||||||
|
def __init__(self, use_testnet: bool = False, api_key: str = None, api_secret: str = None):
|
||||||
|
"""
|
||||||
|
Initialize Gate.io connector.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
use_testnet: Whether to use testnet environment
|
||||||
|
api_key: API key for authentication (optional)
|
||||||
|
api_secret: API secret for authentication (optional)
|
||||||
|
"""
|
||||||
|
websocket_url = self.TESTNET_URL if use_testnet else self.WEBSOCKET_URL
|
||||||
|
super().__init__("gateio", websocket_url)
|
||||||
|
|
||||||
|
# Authentication credentials (optional)
|
||||||
|
self.api_key = api_key
|
||||||
|
self.api_secret = api_secret
|
||||||
|
self.use_testnet = use_testnet
|
||||||
|
|
||||||
|
# Gate.io-specific message handlers
|
||||||
|
self.message_handlers.update({
|
||||||
|
'spot.order_book_update': self._handle_orderbook_update,
|
||||||
|
'spot.trades': self._handle_trade_update,
|
||||||
|
'spot.pong': self._handle_pong,
|
||||||
|
'error': self._handle_error_message
|
||||||
|
})
|
||||||
|
|
||||||
|
# Subscription tracking
|
||||||
|
self.subscribed_channels = set()
|
||||||
|
self.request_id = 1
|
||||||
|
|
||||||
|
logger.info(f"Gate.io connector initialized ({'testnet' if use_testnet else 'mainnet'})")
|
||||||
|
|
||||||
|
def _get_message_type(self, data: Dict) -> str:
|
||||||
|
"""
|
||||||
|
Determine message type from Gate.io message data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Parsed message data
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Message type identifier
|
||||||
|
"""
|
||||||
|
# Gate.io v4 API message format
|
||||||
|
if 'method' in data:
|
||||||
|
return data['method'] # 'spot.order_book_update', 'spot.trades', etc.
|
||||||
|
elif 'error' in data:
|
||||||
|
return 'error'
|
||||||
|
elif 'result' in data:
|
||||||
|
return 'result'
|
||||||
|
|
||||||
|
return 'unknown'
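    # Illustrative (hedged) frames as seen by this dispatcher -- shapes mirror the
    # handlers registered above, values invented:
    #   {"method": "spot.trades", "params": ["BTC_USDT", [ ... ]]}            -> trade handler
    #   {"method": "spot.order_book_update", "params": ["BTC_USDT", { ... }]} -> order book handler
    #   {"error": {"code": 1, "message": "invalid argument"}, "id": 7}        -> 'error'
    #   {"result": {"status": "success"}, "id": 7}                            -> 'result'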
|
||||||
|
|
||||||
|
def normalize_symbol(self, symbol: str) -> str:
|
||||||
|
"""
|
||||||
|
Normalize symbol to Gate.io format.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Standard symbol format (e.g., 'BTCUSDT')
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Gate.io symbol format (e.g., 'BTC_USDT')
|
||||||
|
"""
|
||||||
|
# Gate.io uses underscore-separated format
|
||||||
|
if symbol.upper() == 'BTCUSDT':
|
||||||
|
return 'BTC_USDT'
|
||||||
|
elif symbol.upper() == 'ETHUSDT':
|
||||||
|
return 'ETH_USDT'
|
||||||
|
elif symbol.upper().endswith('USDT'):
|
||||||
|
base = symbol[:-4].upper()
|
||||||
|
return f"{base}_USDT"
|
||||||
|
elif symbol.upper().endswith('USD'):
|
||||||
|
base = symbol[:-3].upper()
|
||||||
|
return f"{base}_USD"
|
||||||
|
else:
|
||||||
|
# Assume it's already in correct format or add underscore
|
||||||
|
if '_' not in symbol:
|
||||||
|
# Try to split common patterns
|
||||||
|
if len(symbol) >= 6:
|
||||||
|
# Assume last 4 chars are quote currency
|
||||||
|
base = symbol[:-4].upper()
|
||||||
|
quote = symbol[-4:].upper()
|
||||||
|
return f"{base}_{quote}"
|
||||||
|
else:
|
||||||
|
return symbol.upper()
|
||||||
|
else:
|
||||||
|
return symbol.upper()
|
||||||
|
|
||||||
|
def _denormalize_symbol(self, gateio_symbol: str) -> str:
|
||||||
|
"""
|
||||||
|
Convert Gate.io symbol back to standard format.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
gateio_symbol: Gate.io symbol format (e.g., 'BTC_USDT')
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Standard symbol format (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
if '_' in gateio_symbol:
|
||||||
|
return gateio_symbol.replace('_', '')
|
||||||
|
return gateio_symbol
|
||||||
|
|
||||||
|
async def subscribe_orderbook(self, symbol: str) -> None:
|
||||||
|
"""
|
||||||
|
Subscribe to order book updates for a symbol.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
gateio_symbol = self.normalize_symbol(symbol)
|
||||||
|
|
||||||
|
# Create subscription message
|
||||||
|
subscription_msg = {
|
||||||
|
"method": "spot.order_book",
|
||||||
|
"params": [gateio_symbol, 20, "0"], # symbol, limit, interval
|
||||||
|
"id": self.request_id
|
||||||
|
}
|
||||||
|
self.request_id += 1
|
||||||
|
|
||||||
|
# Send subscription
|
||||||
|
success = await self._send_message(subscription_msg)
|
||||||
|
if success:
|
||||||
|
# Track subscription
|
||||||
|
if symbol not in self.subscriptions:
|
||||||
|
self.subscriptions[symbol] = []
|
||||||
|
if 'orderbook' not in self.subscriptions[symbol]:
|
||||||
|
self.subscriptions[symbol].append('orderbook')
|
||||||
|
|
||||||
|
self.subscribed_channels.add(f"spot.order_book:{gateio_symbol}")
|
||||||
|
|
||||||
|
logger.info(f"Subscribed to order book for {symbol} ({gateio_symbol}) on Gate.io")
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to subscribe to order book for {symbol} on Gate.io")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error subscribing to order book for {symbol}: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
async def subscribe_trades(self, symbol: str) -> None:
|
||||||
|
"""
|
||||||
|
Subscribe to trade updates for a symbol.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
gateio_symbol = self.normalize_symbol(symbol)
|
||||||
|
|
||||||
|
# Create subscription message
|
||||||
|
subscription_msg = {
|
||||||
|
"method": "spot.trades",
|
||||||
|
"params": [gateio_symbol],
|
||||||
|
"id": self.request_id
|
||||||
|
}
|
||||||
|
self.request_id += 1
|
||||||
|
|
||||||
|
# Send subscription
|
||||||
|
success = await self._send_message(subscription_msg)
|
||||||
|
if success:
|
||||||
|
# Track subscription
|
||||||
|
if symbol not in self.subscriptions:
|
||||||
|
self.subscriptions[symbol] = []
|
||||||
|
if 'trades' not in self.subscriptions[symbol]:
|
||||||
|
self.subscriptions[symbol].append('trades')
|
||||||
|
|
||||||
|
self.subscribed_channels.add(f"spot.trades:{gateio_symbol}")
|
||||||
|
|
||||||
|
logger.info(f"Subscribed to trades for {symbol} ({gateio_symbol}) on Gate.io")
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to subscribe to trades for {symbol} on Gate.io")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error subscribing to trades for {symbol}: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
async def unsubscribe_orderbook(self, symbol: str) -> None:
|
||||||
|
"""
|
||||||
|
Unsubscribe from order book updates for a symbol.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
gateio_symbol = self.normalize_symbol(symbol)
|
||||||
|
|
||||||
|
# Create unsubscription message
|
||||||
|
unsubscription_msg = {
|
||||||
|
"method": "spot.unsubscribe",
|
||||||
|
"params": [f"spot.order_book", gateio_symbol],
|
||||||
|
"id": self.request_id
|
||||||
|
}
|
||||||
|
self.request_id += 1
|
||||||
|
|
||||||
|
# Send unsubscription
|
||||||
|
success = await self._send_message(unsubscription_msg)
|
||||||
|
if success:
|
||||||
|
# Remove from tracking
|
||||||
|
if symbol in self.subscriptions and 'orderbook' in self.subscriptions[symbol]:
|
||||||
|
self.subscriptions[symbol].remove('orderbook')
|
||||||
|
if not self.subscriptions[symbol]:
|
||||||
|
del self.subscriptions[symbol]
|
||||||
|
|
||||||
|
self.subscribed_channels.discard(f"spot.order_book:{gateio_symbol}")
|
||||||
|
|
||||||
|
logger.info(f"Unsubscribed from order book for {symbol} ({gateio_symbol}) on Gate.io")
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to unsubscribe from order book for {symbol} on Gate.io")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error unsubscribing from order book for {symbol}: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
async def unsubscribe_trades(self, symbol: str) -> None:
|
||||||
|
"""
|
||||||
|
Unsubscribe from trade updates for a symbol.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
gateio_symbol = self.normalize_symbol(symbol)
|
||||||
|
|
||||||
|
# Create unsubscription message
|
||||||
|
unsubscription_msg = {
|
||||||
|
"method": "spot.unsubscribe",
|
||||||
|
"params": ["spot.trades", gateio_symbol],
|
||||||
|
"id": self.request_id
|
||||||
|
}
|
||||||
|
self.request_id += 1
|
||||||
|
|
||||||
|
# Send unsubscription
|
||||||
|
success = await self._send_message(unsubscription_msg)
|
||||||
|
if success:
|
||||||
|
# Remove from tracking
|
||||||
|
if symbol in self.subscriptions and 'trades' in self.subscriptions[symbol]:
|
||||||
|
self.subscriptions[symbol].remove('trades')
|
||||||
|
if not self.subscriptions[symbol]:
|
||||||
|
del self.subscriptions[symbol]
|
||||||
|
|
||||||
|
self.subscribed_channels.discard(f"spot.trades:{gateio_symbol}")
|
||||||
|
|
||||||
|
logger.info(f"Unsubscribed from trades for {symbol} ({gateio_symbol}) on Gate.io")
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to unsubscribe from trades for {symbol} on Gate.io")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error unsubscribing from trades for {symbol}: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
async def get_symbols(self) -> List[str]:
|
||||||
|
"""
|
||||||
|
Get list of available trading symbols from Gate.io.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[str]: List of available symbols in standard format
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
import aiohttp
|
||||||
|
|
||||||
|
api_url = "https://fx-api-testnet.gateio.ws" if self.use_testnet else self.API_URL
|
||||||
|
|
||||||
|
async with aiohttp.ClientSession() as session:
|
||||||
|
async with session.get(f"{api_url}/api/v4/spot/currency_pairs") as response:
|
||||||
|
if response.status == 200:
|
||||||
|
data = await response.json()
|
||||||
|
|
||||||
|
symbols = []
|
||||||
|
|
||||||
|
for pair_info in data:
|
||||||
|
if pair_info.get('trade_status') == 'tradable':
|
||||||
|
pair_id = pair_info.get('id', '')
|
||||||
|
# Convert to standard format
|
||||||
|
standard_symbol = self._denormalize_symbol(pair_id)
|
||||||
|
symbols.append(standard_symbol)
|
||||||
|
|
||||||
|
logger.info(f"Retrieved {len(symbols)} symbols from Gate.io")
|
||||||
|
return symbols
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to get symbols from Gate.io: HTTP {response.status}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting symbols from Gate.io: {e}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
async def get_orderbook_snapshot(self, symbol: str, depth: int = 20) -> Optional[OrderBookSnapshot]:
|
||||||
|
"""
|
||||||
|
Get current order book snapshot from Gate.io REST API.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
depth: Number of price levels to retrieve
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
OrderBookSnapshot: Current order book or None if unavailable
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
import aiohttp
|
||||||
|
|
||||||
|
gateio_symbol = self.normalize_symbol(symbol)
|
||||||
|
api_url = "https://fx-api-testnet.gateio.ws" if self.use_testnet else self.API_URL
|
||||||
|
|
||||||
|
# Gate.io supports various depths
|
||||||
|
api_depth = min(depth, 100)
|
||||||
|
|
||||||
|
url = f"{api_url}/api/v4/spot/order_book"
|
||||||
|
params = {
|
||||||
|
'currency_pair': gateio_symbol,
|
||||||
|
'limit': api_depth
|
||||||
|
}
|
||||||
|
|
||||||
|
async with aiohttp.ClientSession() as session:
|
||||||
|
async with session.get(url, params=params) as response:
|
||||||
|
if response.status == 200:
|
||||||
|
data = await response.json()
|
||||||
|
return self._parse_orderbook_snapshot(data, symbol)
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to get order book for {symbol}: HTTP {response.status}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting order book snapshot for {symbol}: {e}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _parse_orderbook_snapshot(self, data: Dict, symbol: str) -> OrderBookSnapshot:
|
||||||
|
"""
|
||||||
|
Parse Gate.io order book data into OrderBookSnapshot.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Raw Gate.io order book data
|
||||||
|
symbol: Trading symbol
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
OrderBookSnapshot: Parsed order book
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
# Parse bids and asks
|
||||||
|
bids = []
|
||||||
|
for bid_data in data.get('bids', []):
|
||||||
|
price = float(bid_data[0])
|
||||||
|
size = float(bid_data[1])
|
||||||
|
|
||||||
|
if validate_price(price) and validate_volume(size):
|
||||||
|
bids.append(PriceLevel(price=price, size=size))
|
||||||
|
|
||||||
|
asks = []
|
||||||
|
for ask_data in data.get('asks', []):
|
||||||
|
price = float(ask_data[0])
|
||||||
|
size = float(ask_data[1])
|
||||||
|
|
||||||
|
if validate_price(price) and validate_volume(size):
|
||||||
|
asks.append(PriceLevel(price=price, size=size))
|
||||||
|
|
||||||
|
# Create order book snapshot
|
||||||
|
orderbook = OrderBookSnapshot(
|
||||||
|
symbol=symbol,
|
||||||
|
exchange=self.exchange_name,
|
||||||
|
timestamp=datetime.now(timezone.utc), # Gate.io doesn't provide timestamp in snapshot
|
||||||
|
bids=bids,
|
||||||
|
asks=asks,
|
||||||
|
sequence_id=data.get('id')
|
||||||
|
)
|
||||||
|
|
||||||
|
return orderbook
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error parsing order book snapshot: {e}")
|
||||||
|
raise ValidationError(f"Invalid order book data: {e}", "PARSE_ERROR")
|
||||||
|
|
||||||
|
async def _handle_orderbook_update(self, data: Dict) -> None:
|
||||||
|
"""
|
||||||
|
Handle order book update from Gate.io.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Order book update data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
params = data.get('params', [])
|
||||||
|
if len(params) < 2:
|
||||||
|
logger.warning("Invalid order book update format")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Gate.io format: [symbol, order_book_data]
|
||||||
|
gateio_symbol = params[0]
|
||||||
|
symbol = self._denormalize_symbol(gateio_symbol)
|
||||||
|
book_data = params[1]
|
||||||
|
|
||||||
|
# Parse bids and asks
|
||||||
|
bids = []
|
||||||
|
for bid_data in book_data.get('bids', []):
|
||||||
|
price = float(bid_data[0])
|
||||||
|
size = float(bid_data[1])
|
||||||
|
|
||||||
|
if validate_price(price) and validate_volume(size):
|
||||||
|
bids.append(PriceLevel(price=price, size=size))
|
||||||
|
|
||||||
|
asks = []
|
||||||
|
for ask_data in book_data.get('asks', []):
|
||||||
|
price = float(ask_data[0])
|
||||||
|
size = float(ask_data[1])
|
||||||
|
|
||||||
|
if validate_price(price) and validate_volume(size):
|
||||||
|
asks.append(PriceLevel(price=price, size=size))
|
||||||
|
|
||||||
|
# Create order book snapshot
|
||||||
|
orderbook = OrderBookSnapshot(
|
||||||
|
symbol=symbol,
|
||||||
|
exchange=self.exchange_name,
|
||||||
|
timestamp=datetime.fromtimestamp(int(book_data.get('t', 0)) / 1000, tz=timezone.utc),
|
||||||
|
bids=bids,
|
||||||
|
asks=asks,
|
||||||
|
sequence_id=book_data.get('id')
|
||||||
|
)
|
||||||
|
|
||||||
|
# Notify callbacks
|
||||||
|
self._notify_data_callbacks(orderbook)
|
||||||
|
|
||||||
|
logger.debug(f"Processed order book update for {symbol}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error handling order book update: {e}")
|
||||||
|
|
||||||
|
async def _handle_trade_update(self, data: Dict) -> None:
|
||||||
|
"""
|
||||||
|
Handle trade update from Gate.io.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Trade update data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
params = data.get('params', [])
|
||||||
|
if len(params) < 2:
|
||||||
|
logger.warning("Invalid trade update format")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Gate.io format: [symbol, [trade_data]]
|
||||||
|
gateio_symbol = params[0]
|
||||||
|
symbol = self._denormalize_symbol(gateio_symbol)
|
||||||
|
trades_data = params[1]
|
||||||
|
|
||||||
|
# Process each trade
|
||||||
|
for trade_data in trades_data:
|
||||||
|
price = float(trade_data.get('price', 0))
|
||||||
|
amount = float(trade_data.get('amount', 0))
|
||||||
|
|
||||||
|
# Validate data
|
||||||
|
if not validate_price(price) or not validate_volume(amount):
|
||||||
|
logger.warning(f"Invalid trade data: price={price}, amount={amount}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Determine side (Gate.io uses 'side' field)
|
||||||
|
side = trade_data.get('side', 'unknown').lower()
|
||||||
|
|
||||||
|
# Create trade event
|
||||||
|
trade = TradeEvent(
|
||||||
|
symbol=symbol,
|
||||||
|
exchange=self.exchange_name,
|
||||||
|
timestamp=datetime.fromtimestamp(int(trade_data.get('time', 0)), tz=timezone.utc),
|
||||||
|
price=price,
|
||||||
|
size=amount,
|
||||||
|
side=side,
|
||||||
|
trade_id=str(trade_data.get('id', ''))
|
||||||
|
)
|
||||||
|
|
||||||
|
# Notify callbacks
|
||||||
|
self._notify_data_callbacks(trade)
|
||||||
|
|
||||||
|
logger.debug(f"Processed trade for {symbol}: {side} {amount} @ {price}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error handling trade update: {e}")
|
||||||
|
|
||||||
|
async def _handle_pong(self, data: Dict) -> None:
|
||||||
|
"""
|
||||||
|
Handle pong response from Gate.io.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Pong response data
|
||||||
|
"""
|
||||||
|
logger.debug("Received Gate.io pong")
|
||||||
|
|
||||||
|
async def _handle_error_message(self, data: Dict) -> None:
|
||||||
|
"""
|
||||||
|
Handle error message from Gate.io.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Error message data
|
||||||
|
"""
|
||||||
|
error_info = data.get('error', {})
|
||||||
|
code = error_info.get('code', 'unknown')
|
||||||
|
message = error_info.get('message', 'Unknown error')
|
||||||
|
|
||||||
|
logger.error(f"Gate.io error {code}: {message}")
|
||||||
|
|
||||||
|
def _get_auth_signature(self, method: str, url: str, query_string: str,
|
||||||
|
payload: str, timestamp: str) -> str:
|
||||||
|
"""
|
||||||
|
Generate authentication signature for Gate.io.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
method: HTTP method
|
||||||
|
url: Request URL
|
||||||
|
query_string: Query string
|
||||||
|
payload: Request payload
|
||||||
|
timestamp: Request timestamp
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Authentication signature
|
||||||
|
"""
|
||||||
|
if not self.api_key or not self.api_secret:
|
||||||
|
return ""
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Create signature string
|
||||||
|
message = f"{method}\n{url}\n{query_string}\n{hashlib.sha512(payload.encode()).hexdigest()}\n{timestamp}"
|
||||||
|
|
||||||
|
# Generate signature
|
||||||
|
signature = hmac.new(
|
||||||
|
self.api_secret.encode('utf-8'),
|
||||||
|
message.encode('utf-8'),
|
||||||
|
hashlib.sha512
|
||||||
|
).hexdigest()
|
||||||
|
|
||||||
|
return signature
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error generating auth signature: {e}")
|
||||||
|
return ""
|
||||||
|
|
||||||
|
async def _send_ping(self) -> None:
|
||||||
|
"""Send ping to keep connection alive."""
|
||||||
|
try:
|
||||||
|
ping_msg = {
|
||||||
|
"method": "spot.ping",
|
||||||
|
"params": [],
|
||||||
|
"id": self.request_id
|
||||||
|
}
|
||||||
|
self.request_id += 1
|
||||||
|
|
||||||
|
await self._send_message(ping_msg)
|
||||||
|
logger.debug("Sent ping to Gate.io")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error sending ping: {e}")
|
||||||
|
|
||||||
|
def get_gateio_stats(self) -> Dict[str, Any]:
|
||||||
|
"""Get Gate.io-specific statistics."""
|
||||||
|
base_stats = self.get_stats()
|
||||||
|
|
||||||
|
gateio_stats = {
|
||||||
|
'subscribed_channels': list(self.subscribed_channels),
|
||||||
|
'use_testnet': self.use_testnet,
|
||||||
|
'authenticated': bool(self.api_key and self.api_secret),
|
||||||
|
'next_request_id': self.request_id
|
||||||
|
}
|
||||||
|
|
||||||
|
base_stats.update(gateio_stats)
|
||||||
|
return base_stats
|
||||||
660
COBY/connectors/huobi_connector.py
Normal file
@@ -0,0 +1,660 @@
"""
Huobi Global exchange connector implementation.
Supports WebSocket connections to Huobi with proper symbol mapping.
"""

import json
import gzip
import hmac
import hashlib
import base64
import time
from typing import Dict, List, Optional, Any
from datetime import datetime, timezone

from ..models.core import OrderBookSnapshot, TradeEvent, PriceLevel
from ..utils.logging import get_logger, set_correlation_id
from ..utils.exceptions import ValidationError, ConnectionError
from ..utils.validation import validate_symbol, validate_price, validate_volume
from .base_connector import BaseExchangeConnector

logger = get_logger(__name__)


class HuobiConnector(BaseExchangeConnector):
    """
    Huobi Global WebSocket connector implementation.

    Supports:
    - Order book streams
    - Trade streams
    - Symbol normalization
    - GZIP message decompression
    - Authentication for private channels
    """

    # Huobi WebSocket URLs
    WEBSOCKET_URL = "wss://api.huobi.pro/ws"
    WEBSOCKET_PRIVATE_URL = "wss://api.huobi.pro/ws/v2"
    API_URL = "https://api.huobi.pro"
|
||||||
|
|
||||||
|
def __init__(self, api_key: str = None, api_secret: str = None):
|
||||||
|
"""
|
||||||
|
Initialize Huobi connector.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
api_key: API key for authentication (optional)
|
||||||
|
api_secret: API secret for authentication (optional)
|
||||||
|
"""
|
||||||
|
super().__init__("huobi", self.WEBSOCKET_URL)
|
||||||
|
|
||||||
|
# Authentication credentials (optional)
|
||||||
|
self.api_key = api_key
|
||||||
|
self.api_secret = api_secret
|
||||||
|
|
||||||
|
# Huobi-specific message handlers
|
||||||
|
self.message_handlers.update({
|
||||||
|
'market.*.depth.step0': self._handle_orderbook_update,
|
||||||
|
'market.*.trade.detail': self._handle_trade_update,
|
||||||
|
'ping': self._handle_ping,
|
||||||
|
'pong': self._handle_pong
|
||||||
|
})
|
||||||
|
|
||||||
|
# Subscription tracking
|
||||||
|
self.subscribed_topics = set()
|
||||||
|
|
||||||
|
logger.info("Huobi connector initialized")
|
||||||
|
|
||||||
|
    def _get_message_type(self, data: Dict) -> str:
        """
        Determine message type from Huobi message data.

        Args:
            data: Parsed message data

        Returns:
            str: Message type identifier
        """
        # Huobi message format
        if 'ping' in data:
            return 'ping'
        elif 'pong' in data:
            return 'pong'
        elif 'ch' in data:
            # Data channel message
            channel = data['ch']
            if 'depth' in channel:
                return 'market.*.depth.step0'
            elif 'trade' in channel:
                return 'market.*.trade.detail'
            else:
                return channel
        elif 'subbed' in data:
            return 'subscription_response'
        elif 'unsubbed' in data:
            return 'unsubscription_response'
        elif 'status' in data and data.get('status') == 'error':
            return 'error'

        return 'unknown'

    def normalize_symbol(self, symbol: str) -> str:
        """
        Normalize symbol to Huobi format.

        Args:
            symbol: Standard symbol format (e.g., 'BTCUSDT')

        Returns:
            str: Huobi symbol format (e.g., 'btcusdt')
        """
        # Huobi uses lowercase symbols
        normalized = symbol.lower().replace('-', '').replace('/', '')

        # Validate symbol format
        if not validate_symbol(normalized.upper()):
            raise ValidationError(f"Invalid symbol format: {symbol}", "INVALID_SYMBOL")

        return normalized

    def _denormalize_symbol(self, huobi_symbol: str) -> str:
        """
        Convert Huobi symbol back to standard format.

        Args:
            huobi_symbol: Huobi symbol format (e.g., 'btcusdt')

        Returns:
            str: Standard symbol format (e.g., 'BTCUSDT')
        """
        return huobi_symbol.upper()
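Not part of the diff: an illustrative round trip through the string transformation that normalize_symbol/_denormalize_symbol above apply (the validate_symbol check is skipped here, so this runs without the package).

# Illustrative only: the pure string mapping used by the Huobi connector above.
symbol = "BTC-USDT"
normalized = symbol.lower().replace('-', '').replace('/', '')   # -> 'btcusdt' (Huobi form)
denormalized = normalized.upper()                               # -> 'BTCUSDT' (standard form)
print(normalized, denormalized)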
    async def _decompress_message(self, message: bytes) -> str:
        """
        Decompress GZIP message from Huobi.

        Args:
            message: Compressed message bytes

        Returns:
            str: Decompressed message string
        """
        try:
            return gzip.decompress(message).decode('utf-8')
        except Exception as e:
            logger.error(f"Error decompressing message: {e}")
            return ""

    async def _process_message(self, message: str) -> None:
        """
        Override message processing to handle GZIP compression.

        Args:
            message: Raw message (could be compressed)
        """
        try:
            # Check if message is compressed (binary)
            if isinstance(message, bytes):
                message = await self._decompress_message(message)

            if not message:
                return

            # Parse JSON message
            data = json.loads(message)

            # Handle ping/pong first
            if 'ping' in data:
                await self._handle_ping(data)
                return

            # Determine message type and route to appropriate handler
            message_type = self._get_message_type(data)

            if message_type in self.message_handlers:
                await self.message_handlers[message_type](data)
            else:
                logger.debug(f"Unhandled message type '{message_type}' from {self.exchange_name}")

        except json.JSONDecodeError as e:
            logger.warning(f"Invalid JSON message from {self.exchange_name}: {e}")
        except Exception as e:
            logger.error(f"Error processing message from {self.exchange_name}: {e}")
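Not part of the diff: a small sketch of the GZIP handling that _decompress_message/_process_message above perform on Huobi's binary frames; the ping payload is made up.

# Illustrative only: simulate one compressed Huobi frame and unpack it.
import gzip
import json

raw_frame = gzip.compress(json.dumps({"ping": 1700000000000}).encode("utf-8"))
text = gzip.decompress(raw_frame).decode("utf-8")
data = json.loads(text)
assert "ping" in data   # the connector above would answer with {"pong": 1700000000000}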
    async def subscribe_orderbook(self, symbol: str) -> None:
        """
        Subscribe to order book updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            set_correlation_id()
            huobi_symbol = self.normalize_symbol(symbol)
            topic = f"market.{huobi_symbol}.depth.step0"

            # Create subscription message
            subscription_msg = {
                "sub": topic,
                "id": str(int(time.time()))
            }

            # Send subscription
            success = await self._send_message(subscription_msg)
            if success:
                # Track subscription
                if symbol not in self.subscriptions:
                    self.subscriptions[symbol] = []
                if 'orderbook' not in self.subscriptions[symbol]:
                    self.subscriptions[symbol].append('orderbook')

                self.subscribed_topics.add(topic)

                logger.info(f"Subscribed to order book for {symbol} ({huobi_symbol}) on Huobi")
            else:
                logger.error(f"Failed to subscribe to order book for {symbol} on Huobi")

        except Exception as e:
            logger.error(f"Error subscribing to order book for {symbol}: {e}")
            raise

    async def subscribe_trades(self, symbol: str) -> None:
        """
        Subscribe to trade updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            set_correlation_id()
            huobi_symbol = self.normalize_symbol(symbol)
            topic = f"market.{huobi_symbol}.trade.detail"

            # Create subscription message
            subscription_msg = {
                "sub": topic,
                "id": str(int(time.time()))
            }

            # Send subscription
            success = await self._send_message(subscription_msg)
            if success:
                # Track subscription
                if symbol not in self.subscriptions:
                    self.subscriptions[symbol] = []
                if 'trades' not in self.subscriptions[symbol]:
                    self.subscriptions[symbol].append('trades')

                self.subscribed_topics.add(topic)

                logger.info(f"Subscribed to trades for {symbol} ({huobi_symbol}) on Huobi")
            else:
                logger.error(f"Failed to subscribe to trades for {symbol} on Huobi")

        except Exception as e:
            logger.error(f"Error subscribing to trades for {symbol}: {e}")
            raise

    async def unsubscribe_orderbook(self, symbol: str) -> None:
        """
        Unsubscribe from order book updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            huobi_symbol = self.normalize_symbol(symbol)
            topic = f"market.{huobi_symbol}.depth.step0"

            # Create unsubscription message
            unsubscription_msg = {
                "unsub": topic,
                "id": str(int(time.time()))
            }

            # Send unsubscription
            success = await self._send_message(unsubscription_msg)
            if success:
                # Remove from tracking
                if symbol in self.subscriptions and 'orderbook' in self.subscriptions[symbol]:
                    self.subscriptions[symbol].remove('orderbook')
                    if not self.subscriptions[symbol]:
                        del self.subscriptions[symbol]

                self.subscribed_topics.discard(topic)

                logger.info(f"Unsubscribed from order book for {symbol} ({huobi_symbol}) on Huobi")
            else:
                logger.error(f"Failed to unsubscribe from order book for {symbol} on Huobi")

        except Exception as e:
            logger.error(f"Error unsubscribing from order book for {symbol}: {e}")
            raise

    async def unsubscribe_trades(self, symbol: str) -> None:
        """
        Unsubscribe from trade updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            huobi_symbol = self.normalize_symbol(symbol)
            topic = f"market.{huobi_symbol}.trade.detail"

            # Create unsubscription message
            unsubscription_msg = {
                "unsub": topic,
                "id": str(int(time.time()))
            }

            # Send unsubscription
            success = await self._send_message(unsubscription_msg)
            if success:
                # Remove from tracking
                if symbol in self.subscriptions and 'trades' in self.subscriptions[symbol]:
                    self.subscriptions[symbol].remove('trades')
                    if not self.subscriptions[symbol]:
                        del self.subscriptions[symbol]

                self.subscribed_topics.discard(topic)

                logger.info(f"Unsubscribed from trades for {symbol} ({huobi_symbol}) on Huobi")
            else:
                logger.error(f"Failed to unsubscribe from trades for {symbol} on Huobi")

        except Exception as e:
            logger.error(f"Error unsubscribing from trades for {symbol}: {e}")
            raise
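Not part of the diff: the wire payloads that subscribe_orderbook/unsubscribe_trades above would emit for BTCUSDT; the id values are just placeholder timestamp strings.

# Illustrative only: Huobi sub/unsub payload shapes produced by the methods above.
sub_msg = {"sub": "market.btcusdt.depth.step0", "id": "1700000000"}
unsub_msg = {"unsub": "market.btcusdt.trade.detail", "id": "1700000001"}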
    async def get_symbols(self) -> List[str]:
        """
        Get list of available trading symbols from Huobi.

        Returns:
            List[str]: List of available symbols in standard format
        """
        try:
            import aiohttp

            async with aiohttp.ClientSession() as session:
                async with session.get(f"{self.API_URL}/v1/common/symbols") as response:
                    if response.status == 200:
                        data = await response.json()

                        if data.get('status') != 'ok':
                            logger.error(f"Huobi API error: {data}")
                            return []

                        symbols = []
                        symbol_data = data.get('data', [])

                        for symbol_info in symbol_data:
                            if symbol_info.get('state') == 'online':
                                symbol = symbol_info.get('symbol', '')
                                # Convert to standard format
                                standard_symbol = self._denormalize_symbol(symbol)
                                symbols.append(standard_symbol)

                        logger.info(f"Retrieved {len(symbols)} symbols from Huobi")
                        return symbols
                    else:
                        logger.error(f"Failed to get symbols from Huobi: HTTP {response.status}")
                        return []

        except Exception as e:
            logger.error(f"Error getting symbols from Huobi: {e}")
            return []

    async def get_orderbook_snapshot(self, symbol: str, depth: int = 20) -> Optional[OrderBookSnapshot]:
        """
        Get current order book snapshot from Huobi REST API.

        Args:
            symbol: Trading symbol
            depth: Number of price levels to retrieve

        Returns:
            OrderBookSnapshot: Current order book or None if unavailable
        """
        try:
            import aiohttp

            huobi_symbol = self.normalize_symbol(symbol)

            # Huobi supports depths: 5, 10, 20
            valid_depths = [5, 10, 20]
            api_depth = min(valid_depths, key=lambda x: abs(x - depth))

            url = f"{self.API_URL}/market/depth"
            params = {
                'symbol': huobi_symbol,
                'depth': api_depth,
                'type': 'step0'
            }

            async with aiohttp.ClientSession() as session:
                async with session.get(url, params=params) as response:
                    if response.status == 200:
                        data = await response.json()

                        if data.get('status') != 'ok':
                            logger.error(f"Huobi API error: {data}")
                            return None

                        tick_data = data.get('tick', {})
                        return self._parse_orderbook_snapshot(tick_data, symbol)
                    else:
                        logger.error(f"Failed to get order book for {symbol}: HTTP {response.status}")
                        return None

        except Exception as e:
            logger.error(f"Error getting order book snapshot for {symbol}: {e}")
            return None

    def _parse_orderbook_snapshot(self, data: Dict, symbol: str) -> OrderBookSnapshot:
        """
        Parse Huobi order book data into OrderBookSnapshot.

        Args:
            data: Raw Huobi order book data
            symbol: Trading symbol

        Returns:
            OrderBookSnapshot: Parsed order book
        """
        try:
            # Parse bids and asks
            bids = []
            for bid_data in data.get('bids', []):
                price = float(bid_data[0])
                size = float(bid_data[1])

                if validate_price(price) and validate_volume(size):
                    bids.append(PriceLevel(price=price, size=size))

            asks = []
            for ask_data in data.get('asks', []):
                price = float(ask_data[0])
                size = float(ask_data[1])

                if validate_price(price) and validate_volume(size):
                    asks.append(PriceLevel(price=price, size=size))

            # Create order book snapshot
            orderbook = OrderBookSnapshot(
                symbol=symbol,
                exchange=self.exchange_name,
                timestamp=datetime.fromtimestamp(int(data.get('ts', 0)) / 1000, tz=timezone.utc),
                bids=bids,
                asks=asks,
                sequence_id=data.get('version')
            )

            return orderbook

        except Exception as e:
            logger.error(f"Error parsing order book snapshot: {e}")
            raise ValidationError(f"Invalid order book data: {e}", "PARSE_ERROR")
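Not part of the diff: a worked example of the depth clamping in get_orderbook_snapshot above, showing how arbitrary requested depths map onto the step levels Huobi's REST endpoint supports.

# Illustrative only: nearest-supported-depth selection used above.
valid_depths = [5, 10, 20]
for requested in (3, 12, 50):
    api_depth = min(valid_depths, key=lambda x: abs(x - requested))
    print(requested, "->", api_depth)   # 3 -> 5, 12 -> 10, 50 -> 20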
    async def _handle_orderbook_update(self, data: Dict) -> None:
        """
        Handle order book update from Huobi.

        Args:
            data: Order book update data
        """
        try:
            set_correlation_id()

            # Extract symbol from channel
            channel = data.get('ch', '')
            if not channel:
                logger.warning("Order book update missing channel")
                return

            # Parse channel: market.btcusdt.depth.step0
            parts = channel.split('.')
            if len(parts) < 2:
                logger.warning("Invalid order book channel format")
                return

            huobi_symbol = parts[1]
            symbol = self._denormalize_symbol(huobi_symbol)

            tick_data = data.get('tick', {})

            # Parse bids and asks
            bids = []
            for bid_data in tick_data.get('bids', []):
                price = float(bid_data[0])
                size = float(bid_data[1])

                if validate_price(price) and validate_volume(size):
                    bids.append(PriceLevel(price=price, size=size))

            asks = []
            for ask_data in tick_data.get('asks', []):
                price = float(ask_data[0])
                size = float(ask_data[1])

                if validate_price(price) and validate_volume(size):
                    asks.append(PriceLevel(price=price, size=size))

            # Create order book snapshot
            orderbook = OrderBookSnapshot(
                symbol=symbol,
                exchange=self.exchange_name,
                timestamp=datetime.fromtimestamp(int(tick_data.get('ts', 0)) / 1000, tz=timezone.utc),
                bids=bids,
                asks=asks,
                sequence_id=tick_data.get('version')
            )

            # Notify callbacks
            self._notify_data_callbacks(orderbook)

            logger.debug(f"Processed order book update for {symbol}")

        except Exception as e:
            logger.error(f"Error handling order book update: {e}")

    async def _handle_trade_update(self, data: Dict) -> None:
        """
        Handle trade update from Huobi.

        Args:
            data: Trade update data
        """
        try:
            set_correlation_id()

            # Extract symbol from channel
            channel = data.get('ch', '')
            if not channel:
                logger.warning("Trade update missing channel")
                return

            # Parse channel: market.btcusdt.trade.detail
            parts = channel.split('.')
            if len(parts) < 2:
                logger.warning("Invalid trade channel format")
                return

            huobi_symbol = parts[1]
            symbol = self._denormalize_symbol(huobi_symbol)

            tick_data = data.get('tick', {})
            trades_data = tick_data.get('data', [])

            # Process each trade
            for trade_data in trades_data:
                price = float(trade_data.get('price', 0))
                amount = float(trade_data.get('amount', 0))

                # Validate data
                if not validate_price(price) or not validate_volume(amount):
                    logger.warning(f"Invalid trade data: price={price}, amount={amount}")
                    continue

                # Determine side (Huobi uses 'direction' field)
                direction = trade_data.get('direction', 'unknown')
                side = 'buy' if direction == 'buy' else 'sell'

                # Create trade event
                trade = TradeEvent(
                    symbol=symbol,
                    exchange=self.exchange_name,
                    timestamp=datetime.fromtimestamp(int(trade_data.get('ts', 0)) / 1000, tz=timezone.utc),
                    price=price,
                    size=amount,
                    side=side,
                    trade_id=str(trade_data.get('tradeId', trade_data.get('id', '')))
                )

                # Notify callbacks
                self._notify_data_callbacks(trade)

                logger.debug(f"Processed trade for {symbol}: {side} {amount} @ {price}")

        except Exception as e:
            logger.error(f"Error handling trade update: {e}")

    async def _handle_ping(self, data: Dict) -> None:
        """
        Handle ping message from Huobi and respond with pong.

        Args:
            data: Ping message data
        """
        try:
            ping_value = data.get('ping')
            if ping_value:
                # Respond with pong
                pong_msg = {"pong": ping_value}
                await self._send_message(pong_msg)
                logger.debug(f"Responded to Huobi ping with pong: {ping_value}")

        except Exception as e:
            logger.error(f"Error handling ping: {e}")

    async def _handle_pong(self, data: Dict) -> None:
        """
        Handle pong response from Huobi.

        Args:
            data: Pong response data
        """
        logger.debug("Received Huobi pong")

    def _get_auth_signature(self, method: str, host: str, path: str,
                            params: Dict[str, str]) -> str:
        """
        Generate authentication signature for Huobi.

        Args:
            method: HTTP method
            host: API host
            path: Request path
            params: Request parameters

        Returns:
            str: Authentication signature
        """
        if not self.api_key or not self.api_secret:
            return ""

        try:
            # Sort parameters
            sorted_params = sorted(params.items())
            query_string = '&'.join([f"{k}={v}" for k, v in sorted_params])

            # Create signature string
            signature_string = f"{method}\n{host}\n{path}\n{query_string}"

            # Generate signature
            signature = base64.b64encode(
                hmac.new(
                    self.api_secret.encode('utf-8'),
                    signature_string.encode('utf-8'),
                    hashlib.sha256
                ).digest()
            ).decode('utf-8')

            return signature

        except Exception as e:
            logger.error(f"Error generating auth signature: {e}")
            return ""
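Not part of the diff: the sorted-params, HMAC-SHA256, base64 construction from _get_auth_signature above as a standalone helper; the key and parameter values are placeholders.

# Illustrative only: Huobi-style signature string built the same way as above.
import base64
import hashlib
import hmac


def sign_huobi(api_secret: str, method: str, host: str, path: str, params: dict) -> str:
    query_string = '&'.join(f"{k}={v}" for k, v in sorted(params.items()))
    signature_string = f"{method}\n{host}\n{path}\n{query_string}"
    digest = hmac.new(api_secret.encode('utf-8'),
                      signature_string.encode('utf-8'), hashlib.sha256).digest()
    return base64.b64encode(digest).decode('utf-8')


print(sign_huobi("demo-secret", "GET", "api.huobi.pro", "/v1/account/accounts",
                 {"AccessKeyId": "demo-key", "SignatureMethod": "HmacSHA256"}))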
    def get_huobi_stats(self) -> Dict[str, Any]:
        """Get Huobi-specific statistics."""
        base_stats = self.get_stats()

        huobi_stats = {
            'subscribed_topics': list(self.subscribed_topics),
            'authenticated': bool(self.api_key and self.api_secret)
        }

        base_stats.update(huobi_stats)
        return base_stats
708
COBY/connectors/kraken_connector.py
Normal file
@@ -0,0 +1,708 @@
"""
Kraken exchange connector implementation.
Supports WebSocket connections to Kraken exchange with their specific message format.
"""

import json
import hashlib
import hmac
import base64
import time
from typing import Dict, List, Optional, Any
from datetime import datetime, timezone

from ..models.core import OrderBookSnapshot, TradeEvent, PriceLevel
from ..utils.logging import get_logger, set_correlation_id
from ..utils.exceptions import ValidationError, ConnectionError
from ..utils.validation import validate_symbol, validate_price, validate_volume
from .base_connector import BaseExchangeConnector

logger = get_logger(__name__)


class KrakenConnector(BaseExchangeConnector):
    """
    Kraken WebSocket connector implementation.

    Supports:
    - Order book streams
    - Trade streams
    - Symbol normalization for Kraken format
    - Authentication for private channels (if needed)
    """

    # Kraken WebSocket URLs
    WEBSOCKET_URL = "wss://ws.kraken.com"
    WEBSOCKET_AUTH_URL = "wss://ws-auth.kraken.com"
    API_URL = "https://api.kraken.com"

    def __init__(self, api_key: str = None, api_secret: str = None):
        """
        Initialize Kraken connector.

        Args:
            api_key: API key for authentication (optional)
            api_secret: API secret for authentication (optional)
        """
        super().__init__("kraken", self.WEBSOCKET_URL)

        # Authentication credentials (optional)
        self.api_key = api_key
        self.api_secret = api_secret

        # Kraken-specific message handlers
        self.message_handlers.update({
            'book-10': self._handle_orderbook_update,
            'book-25': self._handle_orderbook_update,
            'book-100': self._handle_orderbook_update,
            'book-500': self._handle_orderbook_update,
            'book-1000': self._handle_orderbook_update,
            'trade': self._handle_trade_update,
            'systemStatus': self._handle_system_status,
            'subscriptionStatus': self._handle_subscription_status,
            'heartbeat': self._handle_heartbeat
        })

        # Kraken-specific tracking
        self.channel_map = {}  # channel_id -> (channel_name, symbol)
        self.subscription_ids = {}  # symbol -> subscription_id
        self.system_status = 'unknown'

        logger.info("Kraken connector initialized")

    def _get_message_type(self, data: Dict) -> str:
        """
        Determine message type from Kraken message data.

        Args:
            data: Parsed message data

        Returns:
            str: Message type identifier
        """
        # Kraken messages can be arrays or objects
        if isinstance(data, list) and len(data) >= 2:
            # Data message format: [channelID, data, channelName, pair]
            if len(data) >= 4:
                channel_name = data[2]
                return channel_name
            else:
                return 'unknown'
        elif isinstance(data, dict):
            # Status/control messages
            if 'event' in data:
                return data['event']
            elif 'errorMessage' in data:
                return 'error'

        return 'unknown'

    def normalize_symbol(self, symbol: str) -> str:
        """
        Normalize symbol to Kraken format.

        Args:
            symbol: Standard symbol format (e.g., 'BTCUSDT')

        Returns:
            str: Kraken pair format (e.g., 'XBT/USD')
        """
        # Kraken uses different symbol names
        symbol_map = {
            'BTCUSDT': 'XBT/USD',
            'ETHUSDT': 'ETH/USD',
            'ADAUSDT': 'ADA/USD',
            'DOTUSDT': 'DOT/USD',
            'LINKUSDT': 'LINK/USD',
            'LTCUSDT': 'LTC/USD',
            'XRPUSDT': 'XRP/USD',
            'BCHUSDT': 'BCH/USD',
            'EOSUSDT': 'EOS/USD',
            'XLMUSDT': 'XLM/USD'
        }

        if symbol.upper() in symbol_map:
            return symbol_map[symbol.upper()]
        else:
            # Generic conversion: BTCUSDT -> BTC/USD
            if symbol.endswith('USDT'):
                base = symbol[:-4]
                return f"{base}/USD"
            elif symbol.endswith('USD'):
                base = symbol[:-3]
                return f"{base}/USD"
            else:
                # Assume it's already in correct format
                return symbol.upper()

    def _denormalize_symbol(self, kraken_pair: str) -> str:
        """
        Convert Kraken pair back to standard format.

        Args:
            kraken_pair: Kraken pair format (e.g., 'XBT/USD')

        Returns:
            str: Standard symbol format (e.g., 'BTCUSDT')
        """
        # Reverse mapping
        reverse_map = {
            'XBT/USD': 'BTCUSDT',
            'ETH/USD': 'ETHUSDT',
            'ADA/USD': 'ADAUSDT',
            'DOT/USD': 'DOTUSDT',
            'LINK/USD': 'LINKUSDT',
            'LTC/USD': 'LTCUSDT',
            'XRP/USD': 'XRPUSDT',
            'BCH/USD': 'BCHUSDT',
            'EOS/USD': 'EOSUSDT',
            'XLM/USD': 'XLMUSDT'
        }

        if kraken_pair in reverse_map:
            return reverse_map[kraken_pair]
        else:
            # Generic conversion: BTC/USD -> BTCUSDT
            if '/' in kraken_pair:
                base, quote = kraken_pair.split('/', 1)
                if quote == 'USD':
                    return f"{base}USDT"
                else:
                    return f"{base}{quote}"
            return kraken_pair
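Not part of the diff: a short illustration of why the explicit symbol_map above runs before the generic suffix rule, using Kraken's XBT naming quirk.

# Illustrative only: the generic rule would yield 'BTC/USD', but Kraken calls the pair 'XBT/USD'.
symbol_map = {'BTCUSDT': 'XBT/USD'}
symbol = 'BTCUSDT'
kraken_pair = symbol_map.get(symbol, f"{symbol[:-4]}/USD" if symbol.endswith('USDT') else symbol)
print(kraken_pair)   # XBT/USD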
    async def subscribe_orderbook(self, symbol: str) -> None:
        """
        Subscribe to order book updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            set_correlation_id()
            kraken_pair = self.normalize_symbol(symbol)

            # Create subscription message
            subscription_msg = {
                "event": "subscribe",
                "pair": [kraken_pair],
                "subscription": {
                    "name": "book",
                    "depth": 25  # 25 levels
                }
            }

            # Add authentication if credentials provided
            if self.api_key and self.api_secret:
                subscription_msg["subscription"]["token"] = self._get_auth_token()

            # Send subscription
            success = await self._send_message(subscription_msg)
            if success:
                # Track subscription
                if symbol not in self.subscriptions:
                    self.subscriptions[symbol] = []
                if 'orderbook' not in self.subscriptions[symbol]:
                    self.subscriptions[symbol].append('orderbook')

                logger.info(f"Subscribed to order book for {symbol} ({kraken_pair}) on Kraken")
            else:
                logger.error(f"Failed to subscribe to order book for {symbol} on Kraken")

        except Exception as e:
            logger.error(f"Error subscribing to order book for {symbol}: {e}")
            raise

    async def subscribe_trades(self, symbol: str) -> None:
        """
        Subscribe to trade updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            set_correlation_id()
            kraken_pair = self.normalize_symbol(symbol)

            # Create subscription message
            subscription_msg = {
                "event": "subscribe",
                "pair": [kraken_pair],
                "subscription": {
                    "name": "trade"
                }
            }

            # Add authentication if credentials provided
            if self.api_key and self.api_secret:
                subscription_msg["subscription"]["token"] = self._get_auth_token()

            # Send subscription
            success = await self._send_message(subscription_msg)
            if success:
                # Track subscription
                if symbol not in self.subscriptions:
                    self.subscriptions[symbol] = []
                if 'trades' not in self.subscriptions[symbol]:
                    self.subscriptions[symbol].append('trades')

                logger.info(f"Subscribed to trades for {symbol} ({kraken_pair}) on Kraken")
            else:
                logger.error(f"Failed to subscribe to trades for {symbol} on Kraken")

        except Exception as e:
            logger.error(f"Error subscribing to trades for {symbol}: {e}")
            raise

    async def unsubscribe_orderbook(self, symbol: str) -> None:
        """
        Unsubscribe from order book updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            kraken_pair = self.normalize_symbol(symbol)

            # Create unsubscription message
            unsubscription_msg = {
                "event": "unsubscribe",
                "pair": [kraken_pair],
                "subscription": {
                    "name": "book"
                }
            }

            # Send unsubscription
            success = await self._send_message(unsubscription_msg)
            if success:
                # Remove from tracking
                if symbol in self.subscriptions and 'orderbook' in self.subscriptions[symbol]:
                    self.subscriptions[symbol].remove('orderbook')
                    if not self.subscriptions[symbol]:
                        del self.subscriptions[symbol]

                logger.info(f"Unsubscribed from order book for {symbol} ({kraken_pair}) on Kraken")
            else:
                logger.error(f"Failed to unsubscribe from order book for {symbol} on Kraken")

        except Exception as e:
            logger.error(f"Error unsubscribing from order book for {symbol}: {e}")
            raise

    async def unsubscribe_trades(self, symbol: str) -> None:
        """
        Unsubscribe from trade updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            kraken_pair = self.normalize_symbol(symbol)

            # Create unsubscription message
            unsubscription_msg = {
                "event": "unsubscribe",
                "pair": [kraken_pair],
                "subscription": {
                    "name": "trade"
                }
            }

            # Send unsubscription
            success = await self._send_message(unsubscription_msg)
            if success:
                # Remove from tracking
                if symbol in self.subscriptions and 'trades' in self.subscriptions[symbol]:
                    self.subscriptions[symbol].remove('trades')
                    if not self.subscriptions[symbol]:
                        del self.subscriptions[symbol]

                logger.info(f"Unsubscribed from trades for {symbol} ({kraken_pair}) on Kraken")
            else:
                logger.error(f"Failed to unsubscribe from trades for {symbol} on Kraken")

        except Exception as e:
            logger.error(f"Error unsubscribing from trades for {symbol}: {e}")
            raise

    async def get_symbols(self) -> List[str]:
        """
        Get list of available trading symbols from Kraken.

        Returns:
            List[str]: List of available symbols in standard format
        """
        try:
            import aiohttp

            async with aiohttp.ClientSession() as session:
                async with session.get(f"{self.API_URL}/0/public/AssetPairs") as response:
                    if response.status == 200:
                        data = await response.json()

                        if data.get('error'):
                            logger.error(f"Kraken API error: {data['error']}")
                            return []

                        symbols = []
                        pairs = data.get('result', {})

                        for pair_name, pair_info in pairs.items():
                            # Skip dark pool pairs
                            if '.d' in pair_name:
                                continue

                            # Get the WebSocket pair name
                            ws_name = pair_info.get('wsname')
                            if ws_name:
                                # Convert to standard format
                                standard_symbol = self._denormalize_symbol(ws_name)
                                symbols.append(standard_symbol)

                        logger.info(f"Retrieved {len(symbols)} symbols from Kraken")
                        return symbols
                    else:
                        logger.error(f"Failed to get symbols from Kraken: HTTP {response.status}")
                        return []

        except Exception as e:
            logger.error(f"Error getting symbols from Kraken: {e}")
            return []

    async def get_orderbook_snapshot(self, symbol: str, depth: int = 20) -> Optional[OrderBookSnapshot]:
        """
        Get current order book snapshot from Kraken REST API.

        Args:
            symbol: Trading symbol
            depth: Number of price levels to retrieve

        Returns:
            OrderBookSnapshot: Current order book or None if unavailable
        """
        try:
            import aiohttp

            kraken_pair = self.normalize_symbol(symbol)

            url = f"{self.API_URL}/0/public/Depth"
            params = {
                'pair': kraken_pair,
                'count': min(depth, 500)  # Kraken max is 500
            }

            async with aiohttp.ClientSession() as session:
                async with session.get(url, params=params) as response:
                    if response.status == 200:
                        data = await response.json()

                        if data.get('error'):
                            logger.error(f"Kraken API error: {data['error']}")
                            return None

                        result = data.get('result', {})
                        # Kraken returns data with the actual pair name as key
                        pair_data = None
                        for key, value in result.items():
                            if isinstance(value, dict) and 'bids' in value and 'asks' in value:
                                pair_data = value
                                break

                        if pair_data:
                            return self._parse_orderbook_snapshot(pair_data, symbol)
                        else:
                            logger.error(f"No order book data found for {symbol}")
                            return None
                    else:
                        logger.error(f"Failed to get order book for {symbol}: HTTP {response.status}")
                        return None

        except Exception as e:
            logger.error(f"Error getting order book snapshot for {symbol}: {e}")
            return None

    def _parse_orderbook_snapshot(self, data: Dict, symbol: str) -> OrderBookSnapshot:
        """
        Parse Kraken order book data into OrderBookSnapshot.

        Args:
            data: Raw Kraken order book data
            symbol: Trading symbol

        Returns:
            OrderBookSnapshot: Parsed order book
        """
        try:
            # Parse bids and asks
            bids = []
            for bid_data in data.get('bids', []):
                price = float(bid_data[0])
                size = float(bid_data[1])

                if validate_price(price) and validate_volume(size):
                    bids.append(PriceLevel(price=price, size=size))

            asks = []
            for ask_data in data.get('asks', []):
                price = float(ask_data[0])
                size = float(ask_data[1])

                if validate_price(price) and validate_volume(size):
                    asks.append(PriceLevel(price=price, size=size))

            # Create order book snapshot
            orderbook = OrderBookSnapshot(
                symbol=symbol,
                exchange=self.exchange_name,
                timestamp=datetime.now(timezone.utc),
                bids=bids,
                asks=asks
            )

            return orderbook

        except Exception as e:
            logger.error(f"Error parsing order book snapshot: {e}")
            raise ValidationError(f"Invalid order book data: {e}", "PARSE_ERROR")

    async def _handle_orderbook_update(self, data: List) -> None:
        """
        Handle order book update from Kraken.

        Args:
            data: Order book update data (Kraken array format)
        """
        try:
            set_correlation_id()

            # Kraken format: [channelID, data, channelName, pair]
            if len(data) < 4:
                logger.warning("Invalid Kraken order book update format")
                return

            channel_id = data[0]
            book_data = data[1]
            channel_name = data[2]
            kraken_pair = data[3]

            symbol = self._denormalize_symbol(kraken_pair)

            # Track channel mapping
            self.channel_map[channel_id] = (channel_name, symbol)

            # Parse order book data
            bids = []
            asks = []

            # Kraken book data can have 'b' (bids), 'a' (asks), 'bs' (bid snapshot), 'as' (ask snapshot)
            if 'b' in book_data:
                for bid_data in book_data['b']:
                    price = float(bid_data[0])
                    size = float(bid_data[1])

                    if validate_price(price) and validate_volume(size):
                        bids.append(PriceLevel(price=price, size=size))

            if 'bs' in book_data:  # Bid snapshot
                for bid_data in book_data['bs']:
                    price = float(bid_data[0])
                    size = float(bid_data[1])

                    if validate_price(price) and validate_volume(size):
                        bids.append(PriceLevel(price=price, size=size))

            if 'a' in book_data:
                for ask_data in book_data['a']:
                    price = float(ask_data[0])
                    size = float(ask_data[1])

                    if validate_price(price) and validate_volume(size):
                        asks.append(PriceLevel(price=price, size=size))

            if 'as' in book_data:  # Ask snapshot
                for ask_data in book_data['as']:
                    price = float(ask_data[0])
                    size = float(ask_data[1])

                    if validate_price(price) and validate_volume(size):
                        asks.append(PriceLevel(price=price, size=size))

            # Create order book snapshot
            orderbook = OrderBookSnapshot(
                symbol=symbol,
                exchange=self.exchange_name,
                timestamp=datetime.now(timezone.utc),
                bids=bids,
                asks=asks
            )

            # Notify callbacks
            self._notify_data_callbacks(orderbook)

            logger.debug(f"Processed order book update for {symbol}")

        except Exception as e:
            logger.error(f"Error handling order book update: {e}")

    async def _handle_trade_update(self, data: List) -> None:
        """
        Handle trade update from Kraken.

        Args:
            data: Trade update data (Kraken array format)
        """
        try:
            set_correlation_id()

            # Kraken format: [channelID, data, channelName, pair]
            if len(data) < 4:
                logger.warning("Invalid Kraken trade update format")
                return

            channel_id = data[0]
            trade_data = data[1]
            channel_name = data[2]
            kraken_pair = data[3]

            symbol = self._denormalize_symbol(kraken_pair)

            # Track channel mapping
            self.channel_map[channel_id] = (channel_name, symbol)

            # Process trade data (array of trades)
            for trade_info in trade_data:
                if len(trade_info) >= 6:
                    price = float(trade_info[0])
                    size = float(trade_info[1])
                    timestamp = float(trade_info[2])
                    side = trade_info[3]  # 'b' for buy, 's' for sell
                    order_type = trade_info[4]  # 'm' for market, 'l' for limit
                    misc = trade_info[5] if len(trade_info) > 5 else ''

                    # Validate data
                    if not validate_price(price) or not validate_volume(size):
                        logger.warning(f"Invalid trade data: price={price}, size={size}")
                        continue

                    # Convert side
                    trade_side = 'buy' if side == 'b' else 'sell'

                    # Create trade event
                    trade = TradeEvent(
                        symbol=symbol,
                        exchange=self.exchange_name,
                        timestamp=datetime.fromtimestamp(timestamp, tz=timezone.utc),
                        price=price,
                        size=size,
                        side=trade_side,
                        trade_id=f"{timestamp}_{price}_{size}"  # Generate ID
                    )

                    # Notify callbacks
                    self._notify_data_callbacks(trade)

                    logger.debug(f"Processed trade for {symbol}: {trade_side} {size} @ {price}")

        except Exception as e:
            logger.error(f"Error handling trade update: {e}")
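Not part of the diff: a made-up Kraken trade frame in the array layout that _handle_trade_update above parses, with the per-trade field order spelled out.

# Illustrative only: [channelID, trades, channelName, pair]; each trade is
# [price, volume, time, side ('b'/'s'), order type ('m'/'l'), misc].
frame = [
    337,
    [["50000.1", "0.005", "1700000000.1234", "b", "l", ""]],
    "trade",
    "XBT/USD",
]
channel_id, trades, channel_name, pair = frame
price, size, ts, side = float(trades[0][0]), float(trades[0][1]), float(trades[0][2]), trades[0][3]
print(pair, "buy" if side == "b" else "sell", size, "@", price)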
    async def _handle_system_status(self, data: Dict) -> None:
        """
        Handle system status message from Kraken.

        Args:
            data: System status data
        """
        try:
            status = data.get('status', 'unknown')
            version = data.get('version', 'unknown')

            self.system_status = status
            logger.info(f"Kraken system status: {status} (version: {version})")

            if status != 'online':
                logger.warning(f"Kraken system not online: {status}")

        except Exception as e:
            logger.error(f"Error handling system status: {e}")

    async def _handle_subscription_status(self, data: Dict) -> None:
        """
        Handle subscription status message from Kraken.

        Args:
            data: Subscription status data
        """
        try:
            status = data.get('status', 'unknown')
            channel_name = data.get('channelName', 'unknown')
            pair = data.get('pair', 'unknown')
            subscription = data.get('subscription', {})

            if status == 'subscribed':
                logger.info(f"Kraken subscription confirmed: {channel_name} for {pair}")

                # Store subscription ID if provided
                if 'channelID' in data:
                    channel_id = data['channelID']
                    symbol = self._denormalize_symbol(pair)
                    self.channel_map[channel_id] = (channel_name, symbol)

            elif status == 'unsubscribed':
                logger.info(f"Kraken unsubscription confirmed: {channel_name} for {pair}")
            elif status == 'error':
                error_message = data.get('errorMessage', 'Unknown error')
                logger.error(f"Kraken subscription error: {error_message}")

        except Exception as e:
            logger.error(f"Error handling subscription status: {e}")

    async def _handle_heartbeat(self, data: Dict) -> None:
        """
        Handle heartbeat message from Kraken.

        Args:
            data: Heartbeat data
        """
        logger.debug("Received Kraken heartbeat")

    def _get_auth_token(self) -> str:
        """
        Generate authentication token for Kraken WebSocket.

        Returns:
            str: Authentication token
        """
        if not self.api_key or not self.api_secret:
            return ""

        try:
            # This is a simplified version - actual Kraken auth is more complex
            # and requires getting a token from the REST API first
            nonce = str(int(time.time() * 1000))
            message = nonce + self.api_key
            signature = hmac.new(
                base64.b64decode(self.api_secret),
                message.encode('utf-8'),
                hashlib.sha512
            ).hexdigest()

            return f"{self.api_key}:{signature}:{nonce}"

        except Exception as e:
            logger.error(f"Error generating auth token: {e}")
            return ""

    def get_kraken_stats(self) -> Dict[str, Any]:
        """Get Kraken-specific statistics."""
        base_stats = self.get_stats()

        kraken_stats = {
            'system_status': self.system_status,
            'channel_mappings': len(self.channel_map),
            'authenticated': bool(self.api_key and self.api_secret)
        }

        base_stats.update(kraken_stats)
        return base_stats
776
COBY/connectors/kucoin_connector.py
Normal file
@@ -0,0 +1,776 @@
"""
KuCoin exchange connector implementation.
Supports WebSocket connections to KuCoin with proper token-based authentication.
"""

import json
import hmac
import hashlib
import base64
import time
from typing import Dict, List, Optional, Any
from datetime import datetime, timezone

from ..models.core import OrderBookSnapshot, TradeEvent, PriceLevel
from ..utils.logging import get_logger, set_correlation_id
from ..utils.exceptions import ValidationError, ConnectionError
from ..utils.validation import validate_symbol, validate_price, validate_volume
from .base_connector import BaseExchangeConnector

logger = get_logger(__name__)


class KuCoinConnector(BaseExchangeConnector):
    """
    KuCoin WebSocket connector implementation.

    Supports:
    - Token-based authentication
    - Order book streams
    - Trade streams
    - Symbol normalization
    - Bullet connection protocol
    """

    # KuCoin API URLs
    API_URL = "https://api.kucoin.com"
    SANDBOX_API_URL = "https://openapi-sandbox.kucoin.com"

    def __init__(self, use_sandbox: bool = False, api_key: str = None,
                 api_secret: str = None, passphrase: str = None):
        """
        Initialize KuCoin connector.

        Args:
            use_sandbox: Whether to use sandbox environment
            api_key: API key for authentication (optional)
            api_secret: API secret for authentication (optional)
            passphrase: API passphrase for authentication (optional)
        """
        # KuCoin requires getting WebSocket URL from REST API
        super().__init__("kucoin", "")  # URL will be set after token retrieval

        # Authentication credentials (optional)
        self.api_key = api_key
        self.api_secret = api_secret
        self.passphrase = passphrase
        self.use_sandbox = use_sandbox

        # KuCoin-specific attributes
        self.token = None
        self.connect_id = None
        self.ping_interval = 18000  # 18 seconds (KuCoin requirement)
        self.ping_timeout = 10000  # 10 seconds

        # KuCoin-specific message handlers
        self.message_handlers.update({
            'message': self._handle_data_message,
            'welcome': self._handle_welcome_message,
            'ack': self._handle_ack_message,
            'error': self._handle_error_message,
            'pong': self._handle_pong_message
        })

        # Subscription tracking
        self.subscribed_topics = set()
        self.subscription_id = 1

        logger.info(f"KuCoin connector initialized ({'sandbox' if use_sandbox else 'live'})")

    def _get_message_type(self, data: Dict) -> str:
        """
        Determine message type from KuCoin message data.

        Args:
            data: Parsed message data

        Returns:
            str: Message type identifier
        """
        # KuCoin message format
        if 'type' in data:
            return data['type']  # 'message', 'welcome', 'ack', 'error', 'pong'
        elif 'subject' in data:
            # Data message with subject
            return 'message'

        return 'unknown'

    def normalize_symbol(self, symbol: str) -> str:
        """
        Normalize symbol to KuCoin format.

        Args:
            symbol: Standard symbol format (e.g., 'BTCUSDT')

        Returns:
            str: KuCoin symbol format (e.g., 'BTC-USDT')
        """
        # KuCoin uses dash-separated format
        if symbol.upper() == 'BTCUSDT':
            return 'BTC-USDT'
        elif symbol.upper() == 'ETHUSDT':
            return 'ETH-USDT'
        elif symbol.upper().endswith('USDT'):
            base = symbol[:-4].upper()
            return f"{base}-USDT"
        elif symbol.upper().endswith('USD'):
            base = symbol[:-3].upper()
            return f"{base}-USD"
        else:
            # Assume it's already in correct format or add dash
            if '-' not in symbol:
                # Try to split common patterns
                if len(symbol) >= 6:
                    # Assume last 4 chars are quote currency
                    base = symbol[:-4].upper()
                    quote = symbol[-4:].upper()
                    return f"{base}-{quote}"
                else:
                    return symbol.upper()
            else:
                return symbol.upper()

    def _denormalize_symbol(self, kucoin_symbol: str) -> str:
        """
        Convert KuCoin symbol back to standard format.

        Args:
            kucoin_symbol: KuCoin symbol format (e.g., 'BTC-USDT')

        Returns:
            str: Standard symbol format (e.g., 'BTCUSDT')
        """
        if '-' in kucoin_symbol:
            return kucoin_symbol.replace('-', '')
        return kucoin_symbol

    async def _get_websocket_token(self) -> Optional[Dict[str, Any]]:
        """
        Get WebSocket connection token from KuCoin REST API.

        Returns:
            Dict: Token information including WebSocket URL
        """
        try:
            import aiohttp

            api_url = self.SANDBOX_API_URL if self.use_sandbox else self.API_URL
            endpoint = "/api/v1/bullet-public"

            # Use private endpoint if authenticated
            if self.api_key and self.api_secret and self.passphrase:
                endpoint = "/api/v1/bullet-private"
                headers = self._get_auth_headers("POST", endpoint, "")
            else:
                headers = {}

            async with aiohttp.ClientSession() as session:
                async with session.post(f"{api_url}{endpoint}", headers=headers) as response:
                    if response.status == 200:
                        data = await response.json()

                        if data.get('code') != '200000':
                            logger.error(f"KuCoin token error: {data.get('msg')}")
                            return None

                        return data.get('data')
                    else:
                        logger.error(f"Failed to get KuCoin token: HTTP {response.status}")
                        return None

        except Exception as e:
            logger.error(f"Error getting KuCoin WebSocket token: {e}")
            return None

    async def connect(self) -> bool:
        """Override connect to get token first."""
        try:
            # Get WebSocket token and URL
            token_data = await self._get_websocket_token()
            if not token_data:
                logger.error("Failed to get KuCoin WebSocket token")
                return False

            self.token = token_data.get('token')
            servers = token_data.get('instanceServers', [])

            if not servers:
                logger.error("No KuCoin WebSocket servers available")
                return False

            # Use first available server
            server = servers[0]
            self.websocket_url = f"{server['endpoint']}?token={self.token}&connectId={int(time.time() * 1000)}"
            self.ping_interval = server.get('pingInterval', 18000)
            self.ping_timeout = server.get('pingTimeout', 10000)

            logger.info(f"KuCoin WebSocket URL: {server['endpoint']}")

            # Now connect using the base connector method
            return await super().connect()

        except Exception as e:
            logger.error(f"Error connecting to KuCoin: {e}")
            return False
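Not part of the diff: the response fields that _get_websocket_token/connect above rely on when building the WebSocket URL; all values here are invented placeholders (the real endpoint and token come from the bullet API response).

# Illustrative only: minimal shape of the bullet token data consumed above.
token_data = {
    "token": "example-token",
    "instanceServers": [{
        "endpoint": "wss://ws-api.kucoin.com/endpoint",
        "pingInterval": 18000,
        "pingTimeout": 10000,
    }],
}
server = token_data["instanceServers"][0]
ws_url = f"{server['endpoint']}?token={token_data['token']}&connectId=1700000000000"
print(ws_url)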
    async def subscribe_orderbook(self, symbol: str) -> None:
        """
        Subscribe to order book updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            set_correlation_id()
            kucoin_symbol = self.normalize_symbol(symbol)
            topic = f"/market/level2:{kucoin_symbol}"

            # Create subscription message
            subscription_msg = {
                "id": str(self.subscription_id),
                "type": "subscribe",
                "topic": topic,
                "privateChannel": False,
                "response": True
            }
            self.subscription_id += 1

            # Send subscription
            success = await self._send_message(subscription_msg)
            if success:
                # Track subscription
                if symbol not in self.subscriptions:
                    self.subscriptions[symbol] = []
                if 'orderbook' not in self.subscriptions[symbol]:
                    self.subscriptions[symbol].append('orderbook')

                self.subscribed_topics.add(topic)

                logger.info(f"Subscribed to order book for {symbol} ({kucoin_symbol}) on KuCoin")
            else:
                logger.error(f"Failed to subscribe to order book for {symbol} on KuCoin")

        except Exception as e:
            logger.error(f"Error subscribing to order book for {symbol}: {e}")
            raise

    async def subscribe_trades(self, symbol: str) -> None:
        """
        Subscribe to trade updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            set_correlation_id()
            kucoin_symbol = self.normalize_symbol(symbol)
            topic = f"/market/match:{kucoin_symbol}"

            # Create subscription message
            subscription_msg = {
                "id": str(self.subscription_id),
                "type": "subscribe",
                "topic": topic,
                "privateChannel": False,
                "response": True
            }
            self.subscription_id += 1

            # Send subscription
            success = await self._send_message(subscription_msg)
            if success:
                # Track subscription
                if symbol not in self.subscriptions:
                    self.subscriptions[symbol] = []
                if 'trades' not in self.subscriptions[symbol]:
                    self.subscriptions[symbol].append('trades')

                self.subscribed_topics.add(topic)

                logger.info(f"Subscribed to trades for {symbol} ({kucoin_symbol}) on KuCoin")
            else:
                logger.error(f"Failed to subscribe to trades for {symbol} on KuCoin")

        except Exception as e:
            logger.error(f"Error subscribing to trades for {symbol}: {e}")
            raise

    async def unsubscribe_orderbook(self, symbol: str) -> None:
        """
        Unsubscribe from order book updates for a symbol.

        Args:
            symbol: Trading symbol (e.g., 'BTCUSDT')
        """
        try:
            kucoin_symbol = self.normalize_symbol(symbol)
            topic = f"/market/level2:{kucoin_symbol}"

            # Create unsubscription message
            unsubscription_msg = {
                "id": str(self.subscription_id),
                "type": "unsubscribe",
                "topic": topic,
                "privateChannel": False,
                "response": True
            }
            self.subscription_id += 1

            # Send unsubscription
            success = await self._send_message(unsubscription_msg)
            if success:
                # Remove from tracking
                if symbol in self.subscriptions and 'orderbook' in self.subscriptions[symbol]:
                    self.subscriptions[symbol].remove('orderbook')
                    if not self.subscriptions[symbol]:
                        del self.subscriptions[symbol]

                self.subscribed_topics.discard(topic)

                logger.info(f"Unsubscribed from order book for {symbol} ({kucoin_symbol}) on KuCoin")
            else:
                logger.error(f"Failed to unsubscribe from order book for {symbol} on KuCoin")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error unsubscribing from order book for {symbol}: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
async def unsubscribe_trades(self, symbol: str) -> None:
|
||||||
|
"""
|
||||||
|
Unsubscribe from trade updates for a symbol.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
kucoin_symbol = self.normalize_symbol(symbol)
|
||||||
|
topic = f"/market/match:{kucoin_symbol}"
|
||||||
|
|
||||||
|
# Create unsubscription message
|
||||||
|
unsubscription_msg = {
|
||||||
|
"id": str(self.subscription_id),
|
||||||
|
"type": "unsubscribe",
|
||||||
|
"topic": topic,
|
||||||
|
"privateChannel": False,
|
||||||
|
"response": True
|
||||||
|
}
|
||||||
|
self.subscription_id += 1
|
||||||
|
|
||||||
|
# Send unsubscription
|
||||||
|
success = await self._send_message(unsubscription_msg)
|
||||||
|
if success:
|
||||||
|
# Remove from tracking
|
||||||
|
if symbol in self.subscriptions and 'trades' in self.subscriptions[symbol]:
|
||||||
|
self.subscriptions[symbol].remove('trades')
|
||||||
|
if not self.subscriptions[symbol]:
|
||||||
|
del self.subscriptions[symbol]
|
||||||
|
|
||||||
|
self.subscribed_topics.discard(topic)
|
||||||
|
|
||||||
|
logger.info(f"Unsubscribed from trades for {symbol} ({kucoin_symbol}) on KuCoin")
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to unsubscribe from trades for {symbol} on KuCoin")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error unsubscribing from trades for {symbol}: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
async def get_symbols(self) -> List[str]:
|
||||||
|
"""
|
||||||
|
Get list of available trading symbols from KuCoin.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[str]: List of available symbols in standard format
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
import aiohttp
|
||||||
|
|
||||||
|
api_url = self.SANDBOX_API_URL if self.use_sandbox else self.API_URL
|
||||||
|
|
||||||
|
async with aiohttp.ClientSession() as session:
|
||||||
|
async with session.get(f"{api_url}/api/v1/symbols") as response:
|
||||||
|
if response.status == 200:
|
||||||
|
data = await response.json()
|
||||||
|
|
||||||
|
if data.get('code') != '200000':
|
||||||
|
logger.error(f"KuCoin API error: {data.get('msg')}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
symbols = []
|
||||||
|
symbol_data = data.get('data', [])
|
||||||
|
|
||||||
|
for symbol_info in symbol_data:
|
||||||
|
if symbol_info.get('enableTrading'):
|
||||||
|
symbol = symbol_info.get('symbol', '')
|
||||||
|
# Convert to standard format
|
||||||
|
standard_symbol = self._denormalize_symbol(symbol)
|
||||||
|
symbols.append(standard_symbol)
|
||||||
|
|
||||||
|
logger.info(f"Retrieved {len(symbols)} symbols from KuCoin")
|
||||||
|
return symbols
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to get symbols from KuCoin: HTTP {response.status}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting symbols from KuCoin: {e}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
async def get_orderbook_snapshot(self, symbol: str, depth: int = 20) -> Optional[OrderBookSnapshot]:
|
||||||
|
"""
|
||||||
|
Get current order book snapshot from KuCoin REST API.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
depth: Number of price levels to retrieve
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
OrderBookSnapshot: Current order book or None if unavailable
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
import aiohttp
|
||||||
|
|
||||||
|
kucoin_symbol = self.normalize_symbol(symbol)
|
||||||
|
api_url = self.SANDBOX_API_URL if self.use_sandbox else self.API_URL
|
||||||
|
|
||||||
|
url = f"{api_url}/api/v1/market/orderbook/level2_20"
|
||||||
|
params = {'symbol': kucoin_symbol}
|
||||||
|
|
||||||
|
async with aiohttp.ClientSession() as session:
|
||||||
|
async with session.get(url, params=params) as response:
|
||||||
|
if response.status == 200:
|
||||||
|
data = await response.json()
|
||||||
|
|
||||||
|
if data.get('code') != '200000':
|
||||||
|
logger.error(f"KuCoin API error: {data.get('msg')}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
result = data.get('data', {})
|
||||||
|
return self._parse_orderbook_snapshot(result, symbol)
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to get order book for {symbol}: HTTP {response.status}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting order book snapshot for {symbol}: {e}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _parse_orderbook_snapshot(self, data: Dict, symbol: str) -> OrderBookSnapshot:
|
||||||
|
"""
|
||||||
|
Parse KuCoin order book data into OrderBookSnapshot.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Raw KuCoin order book data
|
||||||
|
symbol: Trading symbol
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
OrderBookSnapshot: Parsed order book
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
# Parse bids and asks
|
||||||
|
bids = []
|
||||||
|
for bid_data in data.get('bids', []):
|
||||||
|
price = float(bid_data[0])
|
||||||
|
size = float(bid_data[1])
|
||||||
|
|
||||||
|
if validate_price(price) and validate_volume(size):
|
||||||
|
bids.append(PriceLevel(price=price, size=size))
|
||||||
|
|
||||||
|
asks = []
|
||||||
|
for ask_data in data.get('asks', []):
|
||||||
|
price = float(ask_data[0])
|
||||||
|
size = float(ask_data[1])
|
||||||
|
|
||||||
|
if validate_price(price) and validate_volume(size):
|
||||||
|
asks.append(PriceLevel(price=price, size=size))
|
||||||
|
|
||||||
|
# Create order book snapshot
|
||||||
|
orderbook = OrderBookSnapshot(
|
||||||
|
symbol=symbol,
|
||||||
|
exchange=self.exchange_name,
|
||||||
|
timestamp=datetime.fromtimestamp(int(data.get('time', 0)) / 1000, tz=timezone.utc),
|
||||||
|
bids=bids,
|
||||||
|
asks=asks,
|
||||||
|
sequence_id=int(data.get('sequence', 0))
|
||||||
|
)
|
||||||
|
|
||||||
|
return orderbook
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error parsing order book snapshot: {e}")
|
||||||
|
raise ValidationError(f"Invalid order book data: {e}", "PARSE_ERROR")
|
||||||
|
|
||||||
|
async def _handle_data_message(self, data: Dict) -> None:
|
||||||
|
"""
|
||||||
|
Handle data message from KuCoin.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Data message
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
subject = data.get('subject', '')
|
||||||
|
topic = data.get('topic', '')
|
||||||
|
message_data = data.get('data', {})
|
||||||
|
|
||||||
|
if 'level2' in subject:
|
||||||
|
await self._handle_orderbook_update(data)
|
||||||
|
elif 'match' in subject:
|
||||||
|
await self._handle_trade_update(data)
|
||||||
|
else:
|
||||||
|
logger.debug(f"Unhandled KuCoin subject: {subject}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error handling data message: {e}")
|
||||||
|
|
||||||
|
async def _handle_orderbook_update(self, data: Dict) -> None:
|
||||||
|
"""
|
||||||
|
Handle order book update from KuCoin.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Order book update data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
topic = data.get('topic', '')
|
||||||
|
if not topic:
|
||||||
|
logger.warning("Order book update missing topic")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Extract symbol from topic: /market/level2:BTC-USDT
|
||||||
|
parts = topic.split(':')
|
||||||
|
if len(parts) < 2:
|
||||||
|
logger.warning("Invalid order book topic format")
|
||||||
|
return
|
||||||
|
|
||||||
|
kucoin_symbol = parts[1]
|
||||||
|
symbol = self._denormalize_symbol(kucoin_symbol)
|
||||||
|
|
||||||
|
message_data = data.get('data', {})
|
||||||
|
changes = message_data.get('changes', {})
|
||||||
|
|
||||||
|
# Parse bids and asks changes
|
||||||
|
bids = []
|
||||||
|
for bid_data in changes.get('bids', []):
|
||||||
|
price = float(bid_data[0])
|
||||||
|
size = float(bid_data[1])
|
||||||
|
|
||||||
|
if validate_price(price) and validate_volume(size):
|
||||||
|
bids.append(PriceLevel(price=price, size=size))
|
||||||
|
|
||||||
|
asks = []
|
||||||
|
for ask_data in changes.get('asks', []):
|
||||||
|
price = float(ask_data[0])
|
||||||
|
size = float(ask_data[1])
|
||||||
|
|
||||||
|
if validate_price(price) and validate_volume(size):
|
||||||
|
asks.append(PriceLevel(price=price, size=size))
|
||||||
|
|
||||||
|
# Create order book snapshot
|
||||||
|
orderbook = OrderBookSnapshot(
|
||||||
|
symbol=symbol,
|
||||||
|
exchange=self.exchange_name,
|
||||||
|
timestamp=datetime.fromtimestamp(int(message_data.get('time', 0)) / 1000, tz=timezone.utc),
|
||||||
|
bids=bids,
|
||||||
|
asks=asks,
|
||||||
|
sequence_id=int(message_data.get('sequenceEnd', 0))
|
||||||
|
)
|
||||||
|
|
||||||
|
# Notify callbacks
|
||||||
|
self._notify_data_callbacks(orderbook)
|
||||||
|
|
||||||
|
logger.debug(f"Processed order book update for {symbol}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error handling order book update: {e}")
|
||||||
|
|
||||||
|
async def _handle_trade_update(self, data: Dict) -> None:
|
||||||
|
"""
|
||||||
|
Handle trade update from KuCoin.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Trade update data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
topic = data.get('topic', '')
|
||||||
|
if not topic:
|
||||||
|
logger.warning("Trade update missing topic")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Extract symbol from topic: /market/match:BTC-USDT
|
||||||
|
parts = topic.split(':')
|
||||||
|
if len(parts) < 2:
|
||||||
|
logger.warning("Invalid trade topic format")
|
||||||
|
return
|
||||||
|
|
||||||
|
kucoin_symbol = parts[1]
|
||||||
|
symbol = self._denormalize_symbol(kucoin_symbol)
|
||||||
|
|
||||||
|
message_data = data.get('data', {})
|
||||||
|
|
||||||
|
price = float(message_data.get('price', 0))
|
||||||
|
size = float(message_data.get('size', 0))
|
||||||
|
|
||||||
|
# Validate data
|
||||||
|
if not validate_price(price) or not validate_volume(size):
|
||||||
|
logger.warning(f"Invalid trade data: price={price}, size={size}")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Determine side (KuCoin uses 'side' field)
|
||||||
|
side = message_data.get('side', 'unknown').lower()
|
||||||
|
|
||||||
|
# Create trade event
|
||||||
|
trade = TradeEvent(
|
||||||
|
symbol=symbol,
|
||||||
|
exchange=self.exchange_name,
|
||||||
|
timestamp=datetime.fromtimestamp(int(message_data.get('time', 0)) / 1000, tz=timezone.utc),
|
||||||
|
price=price,
|
||||||
|
size=size,
|
||||||
|
side=side,
|
||||||
|
trade_id=str(message_data.get('tradeId', ''))
|
||||||
|
)
|
||||||
|
|
||||||
|
# Notify callbacks
|
||||||
|
self._notify_data_callbacks(trade)
|
||||||
|
|
||||||
|
logger.debug(f"Processed trade for {symbol}: {side} {size} @ {price}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error handling trade update: {e}")
|
||||||
|
|
||||||
|
async def _handle_welcome_message(self, data: Dict) -> None:
|
||||||
|
"""
|
||||||
|
Handle welcome message from KuCoin.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Welcome message data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
connect_id = data.get('id')
|
||||||
|
if connect_id:
|
||||||
|
self.connect_id = connect_id
|
||||||
|
logger.info(f"KuCoin connection established with ID: {connect_id}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error handling welcome message: {e}")
|
||||||
|
|
||||||
|
async def _handle_ack_message(self, data: Dict) -> None:
|
||||||
|
"""
|
||||||
|
Handle acknowledgment message from KuCoin.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Ack message data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
msg_id = data.get('id', '')
|
||||||
|
logger.debug(f"KuCoin ACK received for message ID: {msg_id}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error handling ack message: {e}")
|
||||||
|
|
||||||
|
async def _handle_error_message(self, data: Dict) -> None:
|
||||||
|
"""
|
||||||
|
Handle error message from KuCoin.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Error message data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
code = data.get('code', 'unknown')
|
||||||
|
message = data.get('data', 'Unknown error')
|
||||||
|
|
||||||
|
logger.error(f"KuCoin error {code}: {message}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error handling error message: {e}")
|
||||||
|
|
||||||
|
async def _handle_pong_message(self, data: Dict) -> None:
|
||||||
|
"""
|
||||||
|
Handle pong message from KuCoin.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Pong message data
|
||||||
|
"""
|
||||||
|
logger.debug("Received KuCoin pong")
|
||||||
|
|
||||||
|
def _get_auth_headers(self, method: str, endpoint: str, body: str) -> Dict[str, str]:
|
||||||
|
"""
|
||||||
|
Generate authentication headers for KuCoin API.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
method: HTTP method
|
||||||
|
endpoint: API endpoint
|
||||||
|
body: Request body
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict: Authentication headers
|
||||||
|
"""
|
||||||
|
if not all([self.api_key, self.api_secret, self.passphrase]):
|
||||||
|
return {}
|
||||||
|
|
||||||
|
try:
|
||||||
|
timestamp = str(int(time.time() * 1000))
|
||||||
|
|
||||||
|
# Create signature string
|
||||||
|
str_to_sign = timestamp + method + endpoint + body
|
||||||
|
signature = base64.b64encode(
|
||||||
|
hmac.new(
|
||||||
|
self.api_secret.encode('utf-8'),
|
||||||
|
str_to_sign.encode('utf-8'),
|
||||||
|
hashlib.sha256
|
||||||
|
).digest()
|
||||||
|
).decode('utf-8')
|
||||||
|
|
||||||
|
# Create passphrase signature
|
||||||
|
passphrase_signature = base64.b64encode(
|
||||||
|
hmac.new(
|
||||||
|
self.api_secret.encode('utf-8'),
|
||||||
|
self.passphrase.encode('utf-8'),
|
||||||
|
hashlib.sha256
|
||||||
|
).digest()
|
||||||
|
).decode('utf-8')
|
||||||
|
|
||||||
|
return {
|
||||||
|
'KC-API-SIGN': signature,
|
||||||
|
'KC-API-TIMESTAMP': timestamp,
|
||||||
|
'KC-API-KEY': self.api_key,
|
||||||
|
'KC-API-PASSPHRASE': passphrase_signature,
|
||||||
|
'KC-API-KEY-VERSION': '2',
|
||||||
|
'Content-Type': 'application/json'
|
||||||
|
}
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error generating auth headers: {e}")
|
||||||
|
return {}
|
||||||
|
|
||||||
|
async def _send_ping(self) -> None:
|
||||||
|
"""Send ping to keep connection alive."""
|
||||||
|
try:
|
||||||
|
ping_msg = {
|
||||||
|
"id": str(self.subscription_id),
|
||||||
|
"type": "ping"
|
||||||
|
}
|
||||||
|
self.subscription_id += 1
|
||||||
|
|
||||||
|
await self._send_message(ping_msg)
|
||||||
|
logger.debug("Sent ping to KuCoin")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error sending ping: {e}")
|
||||||
|
|
||||||
|
def get_kucoin_stats(self) -> Dict[str, Any]:
|
||||||
|
"""Get KuCoin-specific statistics."""
|
||||||
|
base_stats = self.get_stats()
|
||||||
|
|
||||||
|
kucoin_stats = {
|
||||||
|
'subscribed_topics': list(self.subscribed_topics),
|
||||||
|
'use_sandbox': self.use_sandbox,
|
||||||
|
'authenticated': bool(self.api_key and self.api_secret and self.passphrase),
|
||||||
|
'connect_id': self.connect_id,
|
||||||
|
'token_available': bool(self.token),
|
||||||
|
'next_subscription_id': self.subscription_id
|
||||||
|
}
|
||||||
|
|
||||||
|
base_stats.update(kucoin_stats)
|
||||||
|
return base_stats
|
||||||
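
Note on the KuCoin signing scheme used by _get_auth_headers above: the request signature is HMAC-SHA256 over timestamp + method + endpoint + body, and with KC-API-KEY-VERSION '2' the passphrase itself is HMAC-signed with the secret. A minimal standalone sketch of the same construction follows; the key, secret, and passphrase values are placeholders for illustration only, not real credentials.

# Standalone sketch of the KC-API v2 header construction mirrored from _get_auth_headers.
import base64
import hashlib
import hmac
import time

API_KEY = "demo-key"            # placeholder
API_SECRET = "demo-secret"      # placeholder
PASSPHRASE = "demo-passphrase"  # placeholder

timestamp = str(int(time.time() * 1000))
method, endpoint, body = "POST", "/api/v1/bullet-private", ""

# Request signature: HMAC-SHA256 over timestamp + method + endpoint + body, base64-encoded
sign = base64.b64encode(
    hmac.new(API_SECRET.encode('utf-8'),
             (timestamp + method + endpoint + body).encode('utf-8'),
             hashlib.sha256).digest()
).decode('utf-8')

# v2 passphrase: the passphrase is HMAC-signed with the API secret before sending
passphrase_sign = base64.b64encode(
    hmac.new(API_SECRET.encode('utf-8'), PASSPHRASE.encode('utf-8'), hashlib.sha256).digest()
).decode('utf-8')

headers = {
    'KC-API-KEY': API_KEY,
    'KC-API-SIGN': sign,
    'KC-API-TIMESTAMP': timestamp,
    'KC-API-PASSPHRASE': passphrase_sign,
    'KC-API-KEY-VERSION': '2',
    'Content-Type': 'application/json',
}
print(headers)
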
COBY/connectors/mexc_connector.py (Normal file, 420 lines)
@@ -0,0 +1,420 @@
"""
MEXC exchange connector implementation.
Supports WebSocket connections to MEXC with their WebSocket streams.
"""

import json
from typing import Dict, List, Optional, Any
from datetime import datetime, timezone

from ..models.core import OrderBookSnapshot, TradeEvent, PriceLevel
from ..utils.logging import get_logger, set_correlation_id
from ..utils.exceptions import ValidationError, ConnectionError
from ..utils.validation import validate_symbol, validate_price, validate_volume
from .base_connector import BaseExchangeConnector

logger = get_logger(__name__)


class MEXCConnector(BaseExchangeConnector):
    """
    MEXC WebSocket connector implementation.

    Supports:
    - Order book streams
    - Trade streams
    - Symbol normalization
    """

    # MEXC WebSocket URLs
    WEBSOCKET_URL = "wss://wbs.mexc.com/ws"
    API_URL = "https://api.mexc.com"

    def __init__(self, api_key: str = None, api_secret: str = None):
        """Initialize MEXC connector."""
        super().__init__("mexc", self.WEBSOCKET_URL)

        self.api_key = api_key
        self.api_secret = api_secret

        # MEXC-specific message handlers
        self.message_handlers.update({
            'spot@public.deals.v3.api': self._handle_trade_update,
            'spot@public.increase.depth.v3.api': self._handle_orderbook_update,
            'spot@public.limit.depth.v3.api': self._handle_orderbook_snapshot,
            'pong': self._handle_pong
        })

        # Subscription tracking
        self.subscribed_streams = set()
        self.request_id = 1

        logger.info("MEXC connector initialized")

    def _get_message_type(self, data: Dict) -> str:
        """Determine message type from MEXC message data."""
        if 'c' in data:  # Channel
            return data['c']
        elif 'msg' in data:
            return 'message'
        elif 'pong' in data:
            return 'pong'

        return 'unknown'

    def normalize_symbol(self, symbol: str) -> str:
        """Normalize symbol to MEXC format."""
        # MEXC uses uppercase without separators (same as Binance)
        normalized = symbol.upper().replace('-', '').replace('/', '')

        if not validate_symbol(normalized):
            raise ValidationError(f"Invalid symbol format: {symbol}", "INVALID_SYMBOL")

        return normalized

    async def subscribe_orderbook(self, symbol: str) -> None:
        """Subscribe to order book updates for a symbol."""
        try:
            set_correlation_id()
            mexc_symbol = self.normalize_symbol(symbol)

            subscription_msg = {
                "method": "SUBSCRIPTION",
                "params": [f"spot@public.limit.depth.v3.api@{mexc_symbol}@20"]
            }

            success = await self._send_message(subscription_msg)
            if success:
                if symbol not in self.subscriptions:
                    self.subscriptions[symbol] = []
                if 'orderbook' not in self.subscriptions[symbol]:
                    self.subscriptions[symbol].append('orderbook')

                self.subscribed_streams.add(f"spot@public.limit.depth.v3.api@{mexc_symbol}@20")
                logger.info(f"Subscribed to order book for {symbol} ({mexc_symbol}) on MEXC")
            else:
                logger.error(f"Failed to subscribe to order book for {symbol} on MEXC")

        except Exception as e:
            logger.error(f"Error subscribing to order book for {symbol}: {e}")
            raise

    async def subscribe_trades(self, symbol: str) -> None:
        """Subscribe to trade updates for a symbol."""
        try:
            set_correlation_id()
            mexc_symbol = self.normalize_symbol(symbol)

            subscription_msg = {
                "method": "SUBSCRIPTION",
                "params": [f"spot@public.deals.v3.api@{mexc_symbol}"]
            }

            success = await self._send_message(subscription_msg)
            if success:
                if symbol not in self.subscriptions:
                    self.subscriptions[symbol] = []
                if 'trades' not in self.subscriptions[symbol]:
                    self.subscriptions[symbol].append('trades')

                self.subscribed_streams.add(f"spot@public.deals.v3.api@{mexc_symbol}")
                logger.info(f"Subscribed to trades for {symbol} ({mexc_symbol}) on MEXC")
            else:
                logger.error(f"Failed to subscribe to trades for {symbol} on MEXC")

        except Exception as e:
            logger.error(f"Error subscribing to trades for {symbol}: {e}")
            raise

    async def unsubscribe_orderbook(self, symbol: str) -> None:
        """Unsubscribe from order book updates."""
        try:
            mexc_symbol = self.normalize_symbol(symbol)

            unsubscription_msg = {
                "method": "UNSUBSCRIPTION",
                "params": [f"spot@public.limit.depth.v3.api@{mexc_symbol}@20"]
            }

            success = await self._send_message(unsubscription_msg)
            if success:
                if symbol in self.subscriptions and 'orderbook' in self.subscriptions[symbol]:
                    self.subscriptions[symbol].remove('orderbook')
                    if not self.subscriptions[symbol]:
                        del self.subscriptions[symbol]

                self.subscribed_streams.discard(f"spot@public.limit.depth.v3.api@{mexc_symbol}@20")
                logger.info(f"Unsubscribed from order book for {symbol} on MEXC")

        except Exception as e:
            logger.error(f"Error unsubscribing from order book for {symbol}: {e}")
            raise

    async def unsubscribe_trades(self, symbol: str) -> None:
        """Unsubscribe from trade updates."""
        try:
            mexc_symbol = self.normalize_symbol(symbol)

            unsubscription_msg = {
                "method": "UNSUBSCRIPTION",
                "params": [f"spot@public.deals.v3.api@{mexc_symbol}"]
            }

            success = await self._send_message(unsubscription_msg)
            if success:
                if symbol in self.subscriptions and 'trades' in self.subscriptions[symbol]:
                    self.subscriptions[symbol].remove('trades')
                    if not self.subscriptions[symbol]:
                        del self.subscriptions[symbol]

                self.subscribed_streams.discard(f"spot@public.deals.v3.api@{mexc_symbol}")
                logger.info(f"Unsubscribed from trades for {symbol} on MEXC")

        except Exception as e:
            logger.error(f"Error unsubscribing from trades for {symbol}: {e}")
            raise

    async def get_symbols(self) -> List[str]:
        """Get available symbols from MEXC."""
        try:
            import aiohttp

            async with aiohttp.ClientSession() as session:
                async with session.get(f"{self.API_URL}/api/v3/exchangeInfo") as response:
                    if response.status == 200:
                        data = await response.json()
                        symbols = [
                            symbol_info['symbol']
                            for symbol_info in data.get('symbols', [])
                            if symbol_info.get('status') == 'TRADING'
                        ]
                        logger.info(f"Retrieved {len(symbols)} symbols from MEXC")
                        return symbols
                    else:
                        logger.error(f"Failed to get symbols from MEXC: HTTP {response.status}")
                        return []
        except Exception as e:
            logger.error(f"Error getting symbols from MEXC: {e}")
            return []

    async def get_orderbook_snapshot(self, symbol: str, depth: int = 20) -> Optional[OrderBookSnapshot]:
        """Get order book snapshot from MEXC REST API."""
        try:
            import aiohttp

            mexc_symbol = self.normalize_symbol(symbol)
            url = f"{self.API_URL}/api/v3/depth"
            params = {'symbol': mexc_symbol, 'limit': min(depth, 5000)}

            async with aiohttp.ClientSession() as session:
                async with session.get(url, params=params) as response:
                    if response.status == 200:
                        data = await response.json()
                        return self._parse_orderbook_snapshot(data, symbol)
                    else:
                        logger.error(f"Failed to get order book for {symbol}: HTTP {response.status}")
                        return None
        except Exception as e:
            logger.error(f"Error getting order book snapshot for {symbol}: {e}")
            return None

    def _parse_orderbook_snapshot(self, data: Dict, symbol: str) -> OrderBookSnapshot:
        """Parse MEXC order book data."""
        try:
            bids = []
            for bid_data in data.get('bids', []):
                price = float(bid_data[0])
                size = float(bid_data[1])

                if validate_price(price) and validate_volume(size):
                    bids.append(PriceLevel(price=price, size=size))

            asks = []
            for ask_data in data.get('asks', []):
                price = float(ask_data[0])
                size = float(ask_data[1])

                if validate_price(price) and validate_volume(size):
                    asks.append(PriceLevel(price=price, size=size))

            return OrderBookSnapshot(
                symbol=symbol,
                exchange=self.exchange_name,
                timestamp=datetime.now(timezone.utc),
                bids=bids,
                asks=asks,
                sequence_id=data.get('lastUpdateId')
            )
        except Exception as e:
            logger.error(f"Error parsing order book snapshot: {e}")
            raise ValidationError(f"Invalid order book data: {e}", "PARSE_ERROR")

    async def _handle_orderbook_update(self, data: Dict) -> None:
        """Handle order book update from MEXC."""
        try:
            set_correlation_id()

            symbol_data = data.get('s', '')  # Symbol
            if not symbol_data:
                logger.warning("Order book update missing symbol")
                return

            symbol = symbol_data  # Already in standard format
            order_data = data.get('d', {})

            # Parse bids and asks
            bids = []
            for bid_data in order_data.get('bids', []):
                price = float(bid_data[0])
                size = float(bid_data[1])

                if validate_price(price) and validate_volume(size):
                    bids.append(PriceLevel(price=price, size=size))

            asks = []
            for ask_data in order_data.get('asks', []):
                price = float(ask_data[0])
                size = float(ask_data[1])

                if validate_price(price) and validate_volume(size):
                    asks.append(PriceLevel(price=price, size=size))

            # Create order book snapshot
            orderbook = OrderBookSnapshot(
                symbol=symbol,
                exchange=self.exchange_name,
                timestamp=datetime.fromtimestamp(int(data.get('t', 0)) / 1000, tz=timezone.utc),
                bids=bids,
                asks=asks,
                sequence_id=order_data.get('lastUpdateId')
            )

            # Notify callbacks
            self._notify_data_callbacks(orderbook)

            logger.debug(f"Processed order book update for {symbol}")

        except Exception as e:
            logger.error(f"Error handling order book update: {e}")

    async def _handle_orderbook_snapshot(self, data: Dict) -> None:
        """Handle order book snapshot from MEXC."""
        try:
            set_correlation_id()

            symbol_data = data.get('s', '')  # Symbol
            if not symbol_data:
                logger.warning("Order book snapshot missing symbol")
                return

            symbol = symbol_data  # Already in standard format
            order_data = data.get('d', {})

            # Parse bids and asks
            bids = []
            for bid_data in order_data.get('bids', []):
                price = float(bid_data[0])
                size = float(bid_data[1])

                if validate_price(price) and validate_volume(size):
                    bids.append(PriceLevel(price=price, size=size))

            asks = []
            for ask_data in order_data.get('asks', []):
                price = float(ask_data[0])
                size = float(ask_data[1])

                if validate_price(price) and validate_volume(size):
                    asks.append(PriceLevel(price=price, size=size))

            # Create order book snapshot
            orderbook = OrderBookSnapshot(
                symbol=symbol,
                exchange=self.exchange_name,
                timestamp=datetime.fromtimestamp(int(data.get('t', 0)) / 1000, tz=timezone.utc),
                bids=bids,
                asks=asks,
                sequence_id=order_data.get('lastUpdateId')
            )

            # Notify callbacks
            self._notify_data_callbacks(orderbook)

            logger.debug(f"Processed order book snapshot for {symbol}")

        except Exception as e:
            logger.error(f"Error handling order book snapshot: {e}")

    async def _handle_trade_update(self, data: Dict) -> None:
        """Handle trade update from MEXC."""
        try:
            set_correlation_id()

            symbol_data = data.get('s', '')  # Symbol
            if not symbol_data:
                logger.warning("Trade update missing symbol")
                return

            symbol = symbol_data  # Already in standard format
            trade_data = data.get('d', {})

            # MEXC trade data format
            trades = trade_data.get('deals', [])

            for trade_info in trades:
                price = float(trade_info.get('p', 0))
                quantity = float(trade_info.get('v', 0))

                # Validate data
                if not validate_price(price) or not validate_volume(quantity):
                    logger.warning(f"Invalid trade data: price={price}, quantity={quantity}")
                    continue

                # Determine side (MEXC uses 'S' field: 1=buy, 2=sell)
                side_code = trade_info.get('S', 0)
                side = 'buy' if side_code == 1 else 'sell'

                # Create trade event
                trade = TradeEvent(
                    symbol=symbol,
                    exchange=self.exchange_name,
                    timestamp=datetime.fromtimestamp(int(trade_info.get('t', 0)) / 1000, tz=timezone.utc),
                    price=price,
                    size=quantity,
                    side=side,
                    trade_id=str(trade_info.get('i', ''))
                )

                # Notify callbacks
                self._notify_data_callbacks(trade)

                logger.debug(f"Processed trade for {symbol}: {side} {quantity} @ {price}")

        except Exception as e:
            logger.error(f"Error handling trade update: {e}")

    async def _handle_pong(self, data: Dict) -> None:
        """Handle pong response from MEXC."""
        logger.debug("Received MEXC pong")

    async def _send_ping(self) -> None:
        """Send ping to keep connection alive."""
        try:
            ping_msg = {"method": "PING"}
            await self._send_message(ping_msg)
            logger.debug("Sent ping to MEXC")
        except Exception as e:
            logger.error(f"Error sending ping: {e}")

    def get_mexc_stats(self) -> Dict[str, Any]:
        """Get MEXC-specific statistics."""
        base_stats = self.get_stats()

        mexc_stats = {
            'subscribed_streams': list(self.subscribed_streams),
            'authenticated': bool(self.api_key and self.api_secret),
            'next_request_id': self.request_id
        }

        base_stats.update(mexc_stats)
        return base_stats
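
The REST snapshot path above returns an OrderBookSnapshot whose bids and asks are lists of PriceLevel(price, size). A hedged usage sketch follows; it assumes the script is run from the repository root so the COBY package imports resolve, that BaseExchangeConnector performs no network I/O at construction time, and that the exchange returns bids best-first and asks best-first (assumptions, not guaranteed by the code above).

# Hedged usage sketch for MEXCConnector.get_orderbook_snapshot (assumptions noted above).
import asyncio

from COBY.connectors.mexc_connector import MEXCConnector  # assumes the package layout shown in this diff

async def main() -> None:
    connector = MEXCConnector()
    book = await connector.get_orderbook_snapshot("BTCUSDT", depth=20)
    if book is None:
        print("no snapshot available")
        return
    # PriceLevel exposes price and size, as used throughout the parsers above
    best_bid = book.bids[0].price if book.bids else None
    best_ask = book.asks[0].price if book.asks else None
    if best_bid and best_ask:
        print(f"mid={(best_bid + best_ask) / 2:.2f} spread={best_ask - best_bid:.2f}")

asyncio.run(main())
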
COBY/connectors/okx_connector.py (Normal file, 660 lines)
@@ -0,0 +1,660 @@
|
"""
|
||||||
|
OKX exchange connector implementation.
|
||||||
|
Supports WebSocket connections to OKX with their V5 API WebSocket streams.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import hmac
|
||||||
|
import hashlib
|
||||||
|
import base64
|
||||||
|
import time
|
||||||
|
from typing import Dict, List, Optional, Any
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
|
||||||
|
from ..models.core import OrderBookSnapshot, TradeEvent, PriceLevel
|
||||||
|
from ..utils.logging import get_logger, set_correlation_id
|
||||||
|
from ..utils.exceptions import ValidationError, ConnectionError
|
||||||
|
from ..utils.validation import validate_symbol, validate_price, validate_volume
|
||||||
|
from .base_connector import BaseExchangeConnector
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class OKXConnector(BaseExchangeConnector):
|
||||||
|
"""
|
||||||
|
OKX WebSocket connector implementation.
|
||||||
|
|
||||||
|
Supports:
|
||||||
|
- V5 API WebSocket streams
|
||||||
|
- Order book streams
|
||||||
|
- Trade streams
|
||||||
|
- Symbol normalization
|
||||||
|
- Authentication for private channels
|
||||||
|
"""
|
||||||
|
|
||||||
|
# OKX WebSocket URLs
|
||||||
|
WEBSOCKET_URL = "wss://ws.okx.com:8443/ws/v5/public"
|
||||||
|
WEBSOCKET_PRIVATE_URL = "wss://ws.okx.com:8443/ws/v5/private"
|
||||||
|
DEMO_WEBSOCKET_URL = "wss://wspap.okx.com:8443/ws/v5/public?brokerId=9999"
|
||||||
|
API_URL = "https://www.okx.com"
|
||||||
|
|
||||||
|
def __init__(self, use_demo: bool = False, api_key: str = None,
|
||||||
|
api_secret: str = None, passphrase: str = None):
|
||||||
|
"""
|
||||||
|
Initialize OKX connector.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
use_demo: Whether to use demo environment
|
||||||
|
api_key: API key for authentication (optional)
|
||||||
|
api_secret: API secret for authentication (optional)
|
||||||
|
passphrase: API passphrase for authentication (optional)
|
||||||
|
"""
|
||||||
|
websocket_url = self.DEMO_WEBSOCKET_URL if use_demo else self.WEBSOCKET_URL
|
||||||
|
super().__init__("okx", websocket_url)
|
||||||
|
|
||||||
|
# Authentication credentials (optional)
|
||||||
|
self.api_key = api_key
|
||||||
|
self.api_secret = api_secret
|
||||||
|
self.passphrase = passphrase
|
||||||
|
self.use_demo = use_demo
|
||||||
|
|
||||||
|
# OKX-specific message handlers
|
||||||
|
self.message_handlers.update({
|
||||||
|
'books': self._handle_orderbook_update,
|
||||||
|
'trades': self._handle_trade_update,
|
||||||
|
'error': self._handle_error_message,
|
||||||
|
'subscribe': self._handle_subscription_response,
|
||||||
|
'unsubscribe': self._handle_subscription_response
|
||||||
|
})
|
||||||
|
|
||||||
|
# Subscription tracking
|
||||||
|
self.subscribed_channels = set()
|
||||||
|
|
||||||
|
logger.info(f"OKX connector initialized ({'demo' if use_demo else 'live'})")
|
||||||
|
|
||||||
|
def _get_message_type(self, data: Dict) -> str:
|
||||||
|
"""
|
||||||
|
Determine message type from OKX message data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Parsed message data
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Message type identifier
|
||||||
|
"""
|
||||||
|
# OKX V5 API message format
|
||||||
|
if 'event' in data:
|
||||||
|
return data['event'] # 'subscribe', 'unsubscribe', 'error'
|
||||||
|
elif 'arg' in data and 'data' in data:
|
||||||
|
# Data message
|
||||||
|
channel = data['arg'].get('channel', '')
|
||||||
|
return channel
|
||||||
|
elif 'op' in data:
|
||||||
|
return data['op'] # 'ping', 'pong'
|
||||||
|
|
||||||
|
return 'unknown'
|
||||||
|
|
||||||
|
def normalize_symbol(self, symbol: str) -> str:
|
||||||
|
"""
|
||||||
|
Normalize symbol to OKX format.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Standard symbol format (e.g., 'BTCUSDT')
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: OKX symbol format (e.g., 'BTC-USDT')
|
||||||
|
"""
|
||||||
|
# OKX uses dash-separated format
|
||||||
|
if symbol.upper() == 'BTCUSDT':
|
||||||
|
return 'BTC-USDT'
|
||||||
|
elif symbol.upper() == 'ETHUSDT':
|
||||||
|
return 'ETH-USDT'
|
||||||
|
elif symbol.upper().endswith('USDT'):
|
||||||
|
base = symbol[:-4].upper()
|
||||||
|
return f"{base}-USDT"
|
||||||
|
elif symbol.upper().endswith('USD'):
|
||||||
|
base = symbol[:-3].upper()
|
||||||
|
return f"{base}-USD"
|
||||||
|
else:
|
||||||
|
# Assume it's already in correct format or add dash
|
||||||
|
if '-' not in symbol:
|
||||||
|
# Try to split common patterns
|
||||||
|
if len(symbol) >= 6:
|
||||||
|
# Assume last 4 chars are quote currency
|
||||||
|
base = symbol[:-4].upper()
|
||||||
|
quote = symbol[-4:].upper()
|
||||||
|
return f"{base}-{quote}"
|
||||||
|
else:
|
||||||
|
return symbol.upper()
|
||||||
|
else:
|
||||||
|
return symbol.upper()
|
||||||
|
|
||||||
|
def _denormalize_symbol(self, okx_symbol: str) -> str:
|
||||||
|
"""
|
||||||
|
Convert OKX symbol back to standard format.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
okx_symbol: OKX symbol format (e.g., 'BTC-USDT')
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Standard symbol format (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
if '-' in okx_symbol:
|
||||||
|
return okx_symbol.replace('-', '')
|
||||||
|
return okx_symbol
|
||||||
|
|
||||||
|
async def subscribe_orderbook(self, symbol: str) -> None:
|
||||||
|
"""
|
||||||
|
Subscribe to order book updates for a symbol.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
okx_symbol = self.normalize_symbol(symbol)
|
||||||
|
|
||||||
|
# Create subscription message
|
||||||
|
subscription_msg = {
|
||||||
|
"op": "subscribe",
|
||||||
|
"args": [
|
||||||
|
{
|
||||||
|
"channel": "books",
|
||||||
|
"instId": okx_symbol
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
# Send subscription
|
||||||
|
success = await self._send_message(subscription_msg)
|
||||||
|
if success:
|
||||||
|
# Track subscription
|
||||||
|
if symbol not in self.subscriptions:
|
||||||
|
self.subscriptions[symbol] = []
|
||||||
|
if 'orderbook' not in self.subscriptions[symbol]:
|
||||||
|
self.subscriptions[symbol].append('orderbook')
|
||||||
|
|
||||||
|
self.subscribed_channels.add(f"books:{okx_symbol}")
|
||||||
|
|
||||||
|
logger.info(f"Subscribed to order book for {symbol} ({okx_symbol}) on OKX")
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to subscribe to order book for {symbol} on OKX")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error subscribing to order book for {symbol}: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
async def subscribe_trades(self, symbol: str) -> None:
|
||||||
|
"""
|
||||||
|
Subscribe to trade updates for a symbol.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
okx_symbol = self.normalize_symbol(symbol)
|
||||||
|
|
||||||
|
# Create subscription message
|
||||||
|
subscription_msg = {
|
||||||
|
"op": "subscribe",
|
||||||
|
"args": [
|
||||||
|
{
|
||||||
|
"channel": "trades",
|
||||||
|
"instId": okx_symbol
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
# Send subscription
|
||||||
|
success = await self._send_message(subscription_msg)
|
||||||
|
if success:
|
||||||
|
# Track subscription
|
||||||
|
if symbol not in self.subscriptions:
|
||||||
|
self.subscriptions[symbol] = []
|
||||||
|
if 'trades' not in self.subscriptions[symbol]:
|
||||||
|
self.subscriptions[symbol].append('trades')
|
||||||
|
|
||||||
|
self.subscribed_channels.add(f"trades:{okx_symbol}")
|
||||||
|
|
||||||
|
logger.info(f"Subscribed to trades for {symbol} ({okx_symbol}) on OKX")
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to subscribe to trades for {symbol} on OKX")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error subscribing to trades for {symbol}: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
async def unsubscribe_orderbook(self, symbol: str) -> None:
|
||||||
|
"""
|
||||||
|
Unsubscribe from order book updates for a symbol.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
okx_symbol = self.normalize_symbol(symbol)
|
||||||
|
|
||||||
|
# Create unsubscription message
|
||||||
|
unsubscription_msg = {
|
||||||
|
"op": "unsubscribe",
|
||||||
|
"args": [
|
||||||
|
{
|
||||||
|
"channel": "books",
|
||||||
|
"instId": okx_symbol
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
# Send unsubscription
|
||||||
|
success = await self._send_message(unsubscription_msg)
|
||||||
|
if success:
|
||||||
|
# Remove from tracking
|
||||||
|
if symbol in self.subscriptions and 'orderbook' in self.subscriptions[symbol]:
|
||||||
|
self.subscriptions[symbol].remove('orderbook')
|
||||||
|
if not self.subscriptions[symbol]:
|
||||||
|
del self.subscriptions[symbol]
|
||||||
|
|
||||||
|
self.subscribed_channels.discard(f"books:{okx_symbol}")
|
||||||
|
|
||||||
|
logger.info(f"Unsubscribed from order book for {symbol} ({okx_symbol}) on OKX")
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to unsubscribe from order book for {symbol} on OKX")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error unsubscribing from order book for {symbol}: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
async def unsubscribe_trades(self, symbol: str) -> None:
|
||||||
|
"""
|
||||||
|
Unsubscribe from trade updates for a symbol.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
okx_symbol = self.normalize_symbol(symbol)
|
||||||
|
|
||||||
|
# Create unsubscription message
|
||||||
|
unsubscription_msg = {
|
||||||
|
"op": "unsubscribe",
|
||||||
|
"args": [
|
||||||
|
{
|
||||||
|
"channel": "trades",
|
||||||
|
"instId": okx_symbol
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
# Send unsubscription
|
||||||
|
success = await self._send_message(unsubscription_msg)
|
||||||
|
if success:
|
||||||
|
# Remove from tracking
|
||||||
|
if symbol in self.subscriptions and 'trades' in self.subscriptions[symbol]:
|
||||||
|
self.subscriptions[symbol].remove('trades')
|
||||||
|
if not self.subscriptions[symbol]:
|
||||||
|
del self.subscriptions[symbol]
|
||||||
|
|
||||||
|
self.subscribed_channels.discard(f"trades:{okx_symbol}")
|
||||||
|
|
||||||
|
logger.info(f"Unsubscribed from trades for {symbol} ({okx_symbol}) on OKX")
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to unsubscribe from trades for {symbol} on OKX")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error unsubscribing from trades for {symbol}: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
async def get_symbols(self) -> List[str]:
|
||||||
|
"""
|
||||||
|
Get list of available trading symbols from OKX.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[str]: List of available symbols in standard format
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
import aiohttp
|
||||||
|
|
||||||
|
api_url = "https://www.okx.com"
|
||||||
|
|
||||||
|
async with aiohttp.ClientSession() as session:
|
||||||
|
async with session.get(f"{api_url}/api/v5/public/instruments",
|
||||||
|
params={"instType": "SPOT"}) as response:
|
||||||
|
if response.status == 200:
|
||||||
|
data = await response.json()
|
||||||
|
|
||||||
|
if data.get('code') != '0':
|
||||||
|
logger.error(f"OKX API error: {data.get('msg')}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
symbols = []
|
||||||
|
instruments = data.get('data', [])
|
||||||
|
|
||||||
|
for instrument in instruments:
|
||||||
|
if instrument.get('state') == 'live':
|
||||||
|
inst_id = instrument.get('instId', '')
|
||||||
|
# Convert to standard format
|
||||||
|
standard_symbol = self._denormalize_symbol(inst_id)
|
||||||
|
symbols.append(standard_symbol)
|
||||||
|
|
||||||
|
logger.info(f"Retrieved {len(symbols)} symbols from OKX")
|
||||||
|
return symbols
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to get symbols from OKX: HTTP {response.status}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting symbols from OKX: {e}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
async def get_orderbook_snapshot(self, symbol: str, depth: int = 20) -> Optional[OrderBookSnapshot]:
|
||||||
|
"""
|
||||||
|
Get current order book snapshot from OKX REST API.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
depth: Number of price levels to retrieve
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
OrderBookSnapshot: Current order book or None if unavailable
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
import aiohttp
|
||||||
|
|
||||||
|
okx_symbol = self.normalize_symbol(symbol)
|
||||||
|
api_url = "https://www.okx.com"
|
||||||
|
|
||||||
|
# OKX supports depths up to 400
|
||||||
|
api_depth = min(depth, 400)
|
||||||
|
|
||||||
|
url = f"{api_url}/api/v5/market/books"
|
||||||
|
params = {
|
||||||
|
'instId': okx_symbol,
|
||||||
|
'sz': api_depth
|
||||||
|
}
|
||||||
|
|
||||||
|
async with aiohttp.ClientSession() as session:
|
||||||
|
async with session.get(url, params=params) as response:
|
||||||
|
if response.status == 200:
|
||||||
|
data = await response.json()
|
||||||
|
|
||||||
|
if data.get('code') != '0':
|
||||||
|
logger.error(f"OKX API error: {data.get('msg')}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
result_data = data.get('data', [])
|
||||||
|
if result_data:
|
||||||
|
return self._parse_orderbook_snapshot(result_data[0], symbol)
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to get order book for {symbol}: HTTP {response.status}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting order book snapshot for {symbol}: {e}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _parse_orderbook_snapshot(self, data: Dict, symbol: str) -> OrderBookSnapshot:
|
||||||
|
"""
|
||||||
|
Parse OKX order book data into OrderBookSnapshot.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Raw OKX order book data
|
||||||
|
symbol: Trading symbol
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
OrderBookSnapshot: Parsed order book
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
# Parse bids and asks
|
||||||
|
bids = []
|
||||||
|
for bid_data in data.get('bids', []):
|
||||||
|
price = float(bid_data[0])
|
||||||
|
size = float(bid_data[1])
|
||||||
|
|
||||||
|
if validate_price(price) and validate_volume(size):
|
||||||
|
bids.append(PriceLevel(price=price, size=size))
|
||||||
|
|
||||||
|
asks = []
|
||||||
|
for ask_data in data.get('asks', []):
|
||||||
|
price = float(ask_data[0])
|
||||||
|
size = float(ask_data[1])
|
||||||
|
|
||||||
|
if validate_price(price) and validate_volume(size):
|
||||||
|
asks.append(PriceLevel(price=price, size=size))
|
||||||
|
|
||||||
|
# Create order book snapshot
|
||||||
|
orderbook = OrderBookSnapshot(
|
||||||
|
symbol=symbol,
|
||||||
|
exchange=self.exchange_name,
|
||||||
|
timestamp=datetime.fromtimestamp(int(data.get('ts', 0)) / 1000, tz=timezone.utc),
|
||||||
|
bids=bids,
|
||||||
|
asks=asks,
|
||||||
|
sequence_id=int(data.get('seqId', 0))
|
||||||
|
)
|
||||||
|
|
||||||
|
return orderbook
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error parsing order book snapshot: {e}")
|
||||||
|
raise ValidationError(f"Invalid order book data: {e}", "PARSE_ERROR")
|
||||||
|
|
||||||
|
async def _handle_orderbook_update(self, data: Dict) -> None:
|
||||||
|
"""
|
||||||
|
Handle order book update from OKX.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Order book update data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
# Extract symbol from arg
|
||||||
|
arg = data.get('arg', {})
|
||||||
|
okx_symbol = arg.get('instId', '')
|
||||||
|
if not okx_symbol:
|
||||||
|
logger.warning("Order book update missing instId")
|
||||||
|
return
|
||||||
|
|
||||||
|
symbol = self._denormalize_symbol(okx_symbol)
|
||||||
|
|
||||||
|
# Process each data item
|
||||||
|
for book_data in data.get('data', []):
|
||||||
|
# Parse bids and asks
|
||||||
|
bids = []
|
||||||
|
for bid_data in book_data.get('bids', []):
|
||||||
|
price = float(bid_data[0])
|
||||||
|
size = float(bid_data[1])
|
||||||
|
|
||||||
|
if validate_price(price) and validate_volume(size):
|
||||||
|
bids.append(PriceLevel(price=price, size=size))
|
||||||
|
|
||||||
|
asks = []
|
||||||
|
for ask_data in book_data.get('asks', []):
|
||||||
|
price = float(ask_data[0])
|
||||||
|
size = float(ask_data[1])
|
||||||
|
|
||||||
|
if validate_price(price) and validate_volume(size):
|
||||||
|
asks.append(PriceLevel(price=price, size=size))
|
||||||
|
|
||||||
|
# Create order book snapshot
|
||||||
|
orderbook = OrderBookSnapshot(
|
||||||
|
symbol=symbol,
|
||||||
|
exchange=self.exchange_name,
|
||||||
|
timestamp=datetime.fromtimestamp(int(book_data.get('ts', 0)) / 1000, tz=timezone.utc),
|
||||||
|
bids=bids,
|
||||||
|
asks=asks,
|
||||||
|
sequence_id=int(book_data.get('seqId', 0))
|
||||||
|
)
|
||||||
|
|
||||||
|
# Notify callbacks
|
||||||
|
self._notify_data_callbacks(orderbook)
|
||||||
|
|
||||||
|
logger.debug(f"Processed order book update for {symbol}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error handling order book update: {e}")
|
||||||
|
|
||||||
|
async def _handle_trade_update(self, data: Dict) -> None:
|
||||||
|
"""
|
||||||
|
Handle trade update from OKX.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Trade update data
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
set_correlation_id()
|
||||||
|
|
||||||
|
# Extract symbol from arg
|
||||||
|
arg = data.get('arg', {})
|
||||||
|
okx_symbol = arg.get('instId', '')
|
||||||
|
if not okx_symbol:
|
||||||
|
logger.warning("Trade update missing instId")
|
||||||
|
return
|
||||||
|
|
||||||
|
symbol = self._denormalize_symbol(okx_symbol)
|
||||||
|
|
||||||
|
# Process each trade
|
||||||
|
for trade_data in data.get('data', []):
|
||||||
|
price = float(trade_data.get('px', 0))
|
||||||
|
size = float(trade_data.get('sz', 0))
|
||||||
|
|
||||||
|
# Validate data
|
||||||
|
if not validate_price(price) or not validate_volume(size):
|
||||||
|
logger.warning(f"Invalid trade data: price={price}, size={size}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Determine side (OKX uses 'side' field)
|
||||||
|
side = trade_data.get('side', 'unknown').lower()
|
||||||
|
|
||||||
|
# Create trade event
|
||||||
|
trade = TradeEvent(
|
||||||
|
symbol=symbol,
|
||||||
|
exchange=self.exchange_name,
|
||||||
|
timestamp=datetime.fromtimestamp(int(trade_data.get('ts', 0)) / 1000, tz=timezone.utc),
|
||||||
|
price=price,
|
||||||
|
size=size,
|
||||||
|
side=side,
|
||||||
|
trade_id=str(trade_data.get('tradeId', ''))
|
||||||
|
)
|
||||||
|
|
||||||
|
# Notify callbacks
|
||||||
|
self._notify_data_callbacks(trade)
|
||||||
|
|
||||||
|
logger.debug(f"Processed trade for {symbol}: {side} {size} @ {price}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error handling trade update: {e}")
|
||||||
|

    async def _handle_subscription_response(self, data: Dict) -> None:
        """
        Handle subscription response from OKX.

        Args:
            data: Subscription response data
        """
        try:
            event = data.get('event', '')
            arg = data.get('arg', {})
            channel = arg.get('channel', '')
            inst_id = arg.get('instId', '')

            if event == 'subscribe':
                logger.info(f"OKX subscription confirmed: {channel} for {inst_id}")
            elif event == 'unsubscribe':
                logger.info(f"OKX unsubscription confirmed: {channel} for {inst_id}")
            elif event == 'error':
                error_msg = data.get('msg', 'Unknown error')
                logger.error(f"OKX subscription error: {error_msg}")

        except Exception as e:
            logger.error(f"Error handling subscription response: {e}")

    async def _handle_error_message(self, data: Dict) -> None:
        """
        Handle error message from OKX.

        Args:
            data: Error message data
        """
        error_code = data.get('code', 'unknown')
        error_msg = data.get('msg', 'Unknown error')

        logger.error(f"OKX error {error_code}: {error_msg}")

        # Handle specific error codes
        if error_code == '60012':
            logger.error("Invalid request - check parameters")
        elif error_code == '60013':
            logger.error("Invalid channel - check channel name")

    def _get_auth_headers(self, timestamp: str, method: str = "GET",
                          request_path: str = "/users/self/verify") -> Dict[str, str]:
        """
        Generate authentication headers for OKX API.

        Args:
            timestamp: Current timestamp
            method: HTTP method
            request_path: Request path

        Returns:
            Dict: Authentication headers
        """
        if not all([self.api_key, self.api_secret, self.passphrase]):
            return {}

        try:
            # Create signature
            message = timestamp + method + request_path
            signature = base64.b64encode(
                hmac.new(
                    self.api_secret.encode('utf-8'),
                    message.encode('utf-8'),
                    hashlib.sha256
                ).digest()
            ).decode('utf-8')

            # Create passphrase signature
            passphrase_signature = base64.b64encode(
                hmac.new(
                    self.api_secret.encode('utf-8'),
                    self.passphrase.encode('utf-8'),
                    hashlib.sha256
                ).digest()
            ).decode('utf-8')

            return {
                'OK-ACCESS-KEY': self.api_key,
                'OK-ACCESS-SIGN': signature,
                'OK-ACCESS-TIMESTAMP': timestamp,
                'OK-ACCESS-PASSPHRASE': passphrase_signature
            }

        except Exception as e:
            logger.error(f"Error generating auth headers: {e}")
            return {}

    async def _send_ping(self) -> None:
        """Send ping to keep connection alive."""
        try:
            ping_msg = {"op": "ping"}
            await self._send_message(ping_msg)
            logger.debug("Sent ping to OKX")

        except Exception as e:
            logger.error(f"Error sending ping: {e}")

    def get_okx_stats(self) -> Dict[str, Any]:
        """Get OKX-specific statistics."""
        base_stats = self.get_stats()

        okx_stats = {
            'subscribed_channels': list(self.subscribed_channels),
            'use_demo': self.use_demo,
            'authenticated': bool(self.api_key and self.api_secret and self.passphrase)
        }

        base_stats.update(okx_stats)
        return base_stats
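The `OK-ACCESS-SIGN` value produced by `_get_auth_headers` is plain HMAC-SHA256 over `timestamp + method + request_path`, base64-encoded. A standalone sketch of the same computation for quick verification (the secret and timestamp are placeholders, and the timestamp format is an assumption, not something taken from the code):

```python
import base64
import hashlib
import hmac

# Placeholder inputs; only the signing scheme mirrors _get_auth_headers above.
api_secret = "example-secret"
timestamp = "2024-01-01T00:00:00.000Z"  # format assumed for illustration
message = timestamp + "GET" + "/users/self/verify"

sign = base64.b64encode(
    hmac.new(api_secret.encode("utf-8"), message.encode("utf-8"), hashlib.sha256).digest()
).decode("utf-8")
print(sign)
```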

COBY/docker-compose.dev.yml (new file, 87 lines)
@@ -0,0 +1,87 @@
# Docker Compose configuration for development environment
version: '3.8'

services:
  # Override main app for development
  coby-app:
    build:
      context: .
      dockerfile: Dockerfile
      target: development
    environment:
      # Development overrides
      ENVIRONMENT: development
      DEBUG: true
      LOG_LEVEL: DEBUG

      # Database configuration
      DB_HOST: timescaledb
      DB_PORT: 5432
      DB_NAME: ${DB_NAME:-market_data_dev}
      DB_USER: ${DB_USER:-market_user}
      DB_PASSWORD: ${DB_PASSWORD:-dev_password}

      # Redis configuration
      REDIS_HOST: redis
      REDIS_PORT: 6379
      REDIS_PASSWORD: ${REDIS_PASSWORD:-dev_redis}

      # Development settings
      PYTHONPATH: /app
      FLASK_ENV: development
      FLASK_DEBUG: 1
    volumes:
      # Mount source code for live reloading
      - .:/app
      - ./logs:/app/logs
      - ./data:/app/data
    ports:
      - "8080:8080"
      - "8081:8081"
      - "5678:5678"  # Debug port
    command: ["python", "-m", "COBY.main", "--debug", "--reload"]

  # Development database with different settings
  timescaledb:
    environment:
      POSTGRES_DB: ${DB_NAME:-market_data_dev}
      POSTGRES_USER: ${DB_USER:-market_user}
      POSTGRES_PASSWORD: ${DB_PASSWORD:-dev_password}
      POSTGRES_HOST_AUTH_METHOD: trust  # Less secure for dev
    ports:
      - "5433:5432"  # Different port to avoid conflicts

  # Development Redis
  redis:
    ports:
      - "6380:6379"  # Different port to avoid conflicts
    command: redis-server --requirepass ${REDIS_PASSWORD:-dev_redis}

  # Hot-reload web dashboard
  coby-dashboard-dev:
    image: nginx:alpine
    container_name: coby-dashboard-dev
    ports:
      - "3000:80"
    volumes:
      - ./web/static:/usr/share/nginx/html
      - ./docker/nginx-dev.conf:/etc/nginx/nginx.conf:ro
    networks:
      - coby-network
    depends_on:
      - coby-app

  # Development tools container
  dev-tools:
    build:
      context: .
      dockerfile: Dockerfile
      target: development
    container_name: coby-dev-tools
    volumes:
      - .:/app
    networks:
      - coby-network
    command: ["tail", "-f", "/dev/null"]  # Keep container running
    profiles:
      - tools

COBY/docker-compose.portainer.yml (new file, 392 lines)
@@ -0,0 +1,392 @@
# Docker Compose for COBY Multi-Exchange Data Aggregation System
# Optimized for Portainer deployment with Git repository integration
version: '3.8'

services:
  # TimescaleDB Database
  timescaledb:
    image: timescale/timescaledb:latest-pg15
    container_name: coby-timescaledb
    environment:
      POSTGRES_DB: market_data
      POSTGRES_USER: market_user
      POSTGRES_PASSWORD: market_data_secure_pass_2024
      TIMESCALEDB_TELEMETRY: 'off'
    ports:
      - "5432:5432"
    volumes:
      - timescale_data:/var/lib/postgresql/data
      - ./COBY/docker/init-scripts:/docker-entrypoint-initdb.d:ro
    networks:
      - coby-network
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U market_user -d market_data"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 60s
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: '1.0'
        reservations:
          memory: 512M
          cpus: '0.5'
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  # Redis Cache
  redis:
    image: redis:7-alpine
    container_name: coby-redis
    command: >
      redis-server
      --requirepass market_data_redis_2024
      --maxmemory 256mb
      --maxmemory-policy allkeys-lru
      --save 900 1
      --save 300 10
      --save 60 10000
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data
    networks:
      - coby-network
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "redis-cli", "--no-auth-warning", "-a", "market_data_redis_2024", "ping"]
      interval: 30s
      timeout: 5s
      retries: 3
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: '0.5'
        reservations:
          memory: 128M
          cpus: '0.1'
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  # COBY Main Application
  coby-app:
    build:
      context: ./COBY
      dockerfile: Dockerfile
      target: production
    container_name: coby-app
    environment:
      # Database configuration
      DB_HOST: timescaledb
      DB_PORT: 5432
      DB_NAME: market_data
      DB_USER: market_user
      DB_PASSWORD: market_data_secure_pass_2024
      DB_SCHEMA: market_data
      DB_POOL_SIZE: 10
      DB_MAX_OVERFLOW: 20
      DB_POOL_TIMEOUT: 30

      # Redis configuration
      REDIS_HOST: redis
      REDIS_PORT: 6379
      REDIS_PASSWORD: market_data_redis_2024
      REDIS_DB: 0
      REDIS_MAX_CONNECTIONS: 50
      REDIS_SOCKET_TIMEOUT: 5
      REDIS_CONNECT_TIMEOUT: 5

      # Application configuration
      ENVIRONMENT: production
      DEBUG: false
      LOG_LEVEL: INFO
      LOG_FILE: logs/coby.log
      LOG_MAX_SIZE: 100
      LOG_BACKUP_COUNT: 5
      ENABLE_CORRELATION_ID: true

      # API configuration
      API_HOST: 0.0.0.0
      API_PORT: 8080
      WS_PORT: 8081
      CORS_ORIGINS: "*"
      RATE_LIMIT: 100
      MAX_WS_CONNECTIONS: 1000

      # Exchange configuration
      MAX_CONNECTIONS_PER_EXCHANGE: 5
      RECONNECT_DELAY: 5
      MAX_RECONNECT_ATTEMPTS: 10
      HEARTBEAT_INTERVAL: 30
      EXCHANGES: "binance,coinbase,kraken,bybit,okx,huobi,kucoin,gateio,bitfinex,mexc"
      SYMBOLS: "BTCUSDT,ETHUSDT"

      # Aggregation configuration
      BUCKET_SIZE: 1.0
      HEATMAP_DEPTH: 50
      UPDATE_FREQUENCY: 0.5
      VOLUME_THRESHOLD: 0.01

      # Performance configuration
      DATA_BUFFER_SIZE: 10000
      BATCH_WRITE_SIZE: 1000
      MAX_MEMORY_USAGE: 2048
      GC_THRESHOLD: 0.8
      PROCESSING_TIMEOUT: 10
      MAX_QUEUE_SIZE: 50000

      # Monitoring configuration
      METRICS_COLLECTION_INTERVAL: 1.0
      MAX_CPU_USAGE: 80.0
      MAX_MEMORY_USAGE_PERCENT: 85.0
      MIN_MEMORY_AVAILABLE_GB: 1.0
      MAX_LATENCY_MS: 100.0
      MAX_ERROR_RATE_PERCENT: 5.0
      MIN_THROUGHPUT_OPS: 100.0
    ports:
      - "8080:8080"
      - "8081:8081"
    volumes:
      - coby_logs:/app/logs
      - coby_data:/app/data
    networks:
      - coby-network
    depends_on:
      timescaledb:
        condition: service_healthy
      redis:
        condition: service_healthy
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "python", "-c", "import requests; requests.get('http://localhost:8080/health', timeout=5)"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 120s
    deploy:
      resources:
        limits:
          memory: 2G
          cpus: '2.0'
        reservations:
          memory: 1G
          cpus: '1.0'
    logging:
      driver: "json-file"
      options:
        max-size: "50m"
        max-file: "5"

  # Prometheus (Optional - for monitoring)
  prometheus:
    image: prom/prometheus:latest
    container_name: coby-prometheus
    ports:
      - "9090:9090"
    volumes:
      - prometheus_data:/prometheus
    configs:
      - source: prometheus_config
        target: /etc/prometheus/prometheus.yml
      - source: alert_rules
        target: /etc/prometheus/alert_rules.yml
    networks:
      - coby-network
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--storage.tsdb.retention.time=200h'
      - '--web.enable-lifecycle'
      - '--web.enable-admin-api'
    restart: unless-stopped
    profiles:
      - monitoring
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: '0.5'
        reservations:
          memory: 256M
          cpus: '0.2'
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  # Grafana (Optional - for visualization)
  grafana:
    image: grafana/grafana:latest
    container_name: coby-grafana
    ports:
      - "3001:3000"
    environment:
      GF_SECURITY_ADMIN_PASSWORD: admin123
      GF_USERS_ALLOW_SIGN_UP: false
      GF_INSTALL_PLUGINS: grafana-clock-panel,grafana-simple-json-datasource
      GF_SECURITY_ALLOW_EMBEDDING: true
    volumes:
      - grafana_data:/var/lib/grafana
    networks:
      - coby-network
    depends_on:
      - prometheus
    restart: unless-stopped
    profiles:
      - monitoring
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: '0.3'
        reservations:
          memory: 128M
          cpus: '0.1'
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  # Node Exporter for system metrics
  node-exporter:
    image: prom/node-exporter:latest
    container_name: coby-node-exporter
    ports:
      - "9100:9100"
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.rootfs=/rootfs'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
    networks:
      - coby-network
    restart: unless-stopped
    profiles:
      - monitoring
    deploy:
      resources:
        limits:
          memory: 128M
          cpus: '0.1'
    logging:
      driver: "json-file"
      options:
        max-size: "5m"
        max-file: "2"

# Network configuration
networks:
  coby-network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.20.0.0/16
    labels:
      - "com.coby.network=main"

# Volume configuration
volumes:
  timescale_data:
    driver: local
    labels:
      - "com.coby.volume=database"
  redis_data:
    driver: local
    labels:
      - "com.coby.volume=cache"
  prometheus_data:
    driver: local
    labels:
      - "com.coby.volume=monitoring"
  grafana_data:
    driver: local
    labels:
      - "com.coby.volume=monitoring"
  coby_logs:
    driver: local
    labels:
      - "com.coby.volume=logs"
  coby_data:
    driver: local
    labels:
      - "com.coby.volume=data"

# Configuration files
configs:
  prometheus_config:
    content: |
      global:
        scrape_interval: 15s
        evaluation_interval: 15s
      rule_files:
        - "/etc/prometheus/alert_rules.yml"
      scrape_configs:
        - job_name: 'coby-app'
          static_configs:
            - targets: ['coby-app:8080']
          metrics_path: '/metrics'
          scrape_interval: 10s
          scrape_timeout: 5s
        - job_name: 'prometheus'
          static_configs:
            - targets: ['localhost:9090']
        - job_name: 'node'
          static_configs:
            - targets: ['node-exporter:9100']
          scrape_interval: 30s

  alert_rules:
    content: |
      groups:
        - name: coby_alerts
          rules:
            - alert: HighCPUUsage
              expr: system_cpu_usage > 80
              for: 2m
              labels:
                severity: warning
              annotations:
                summary: "High CPU usage detected"
                description: "CPU usage is above 80% for more than 2 minutes"
            - alert: HighMemoryUsage
              expr: system_memory_usage > 85
              for: 2m
              labels:
                severity: warning
              annotations:
                summary: "High memory usage detected"
                description: "Memory usage is above 85% for more than 2 minutes"
            - alert: ServiceDown
              expr: up == 0
              for: 1m
              labels:
                severity: critical
              annotations:
                summary: "Service is down"
                description: "{{ $$labels.job }} service is down"

# Labels for the entire stack
x-labels: &default-labels
  com.coby.project: "multi-exchange-data-aggregation"
  com.coby.version: "1.0.0"
  com.coby.environment: "production"

COBY/docker-compose.yml (new file, 215 lines)
@@ -0,0 +1,215 @@
# Docker Compose configuration for COBY Multi-Exchange Data Aggregation System
version: '3.8'

services:
  # TimescaleDB Database
  timescaledb:
    image: timescale/timescaledb:latest-pg15
    container_name: coby-timescaledb
    environment:
      POSTGRES_DB: ${DB_NAME:-market_data}
      POSTGRES_USER: ${DB_USER:-market_user}
      POSTGRES_PASSWORD: ${DB_PASSWORD:-market_data_secure_pass_2024}
      TIMESCALEDB_TELEMETRY: 'off'
    ports:
      - "${DB_PORT:-5432}:5432"
    volumes:
      - timescale_data:/var/lib/postgresql/data
      - ./docker/init-scripts:/docker-entrypoint-initdb.d
    networks:
      - coby-network
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${DB_USER:-market_user} -d ${DB_NAME:-market_data}"]
      interval: 10s
      timeout: 5s
      retries: 5
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  # Redis Cache
  redis:
    image: redis:7-alpine
    container_name: coby-redis
    command: redis-server /usr/local/etc/redis/redis.conf
    ports:
      - "${REDIS_PORT:-6379}:6379"
    volumes:
      - redis_data:/data
      - ./docker/redis.conf:/usr/local/etc/redis/redis.conf:ro
    networks:
      - coby-network
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 3s
      retries: 3
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  # COBY Main Application
  coby-app:
    build:
      context: .
      dockerfile: Dockerfile
      target: production
    container_name: coby-app
    environment:
      # Database configuration
      DB_HOST: timescaledb
      DB_PORT: 5432
      DB_NAME: ${DB_NAME:-market_data}
      DB_USER: ${DB_USER:-market_user}
      DB_PASSWORD: ${DB_PASSWORD:-market_data_secure_pass_2024}

      # Redis configuration
      REDIS_HOST: redis
      REDIS_PORT: 6379
      REDIS_PASSWORD: ${REDIS_PASSWORD:-market_data_redis_2024}

      # Application configuration
      ENVIRONMENT: ${ENVIRONMENT:-production}
      DEBUG: ${DEBUG:-false}
      LOG_LEVEL: ${LOG_LEVEL:-INFO}

      # API configuration
      API_HOST: 0.0.0.0
      API_PORT: 8080
      WS_PORT: 8081

      # Performance configuration
      MAX_CONNECTIONS_PER_EXCHANGE: ${MAX_CONNECTIONS_PER_EXCHANGE:-5}
      DATA_BUFFER_SIZE: ${DATA_BUFFER_SIZE:-10000}
      BATCH_WRITE_SIZE: ${BATCH_WRITE_SIZE:-1000}

      # Aggregation configuration
      BUCKET_SIZE: ${BUCKET_SIZE:-1.0}
      HEATMAP_DEPTH: ${HEATMAP_DEPTH:-50}
      UPDATE_FREQUENCY: ${UPDATE_FREQUENCY:-0.5}
    ports:
      - "${API_PORT:-8080}:8080"
      - "${WS_PORT:-8081}:8081"
    volumes:
      - ./logs:/app/logs
      - ./data:/app/data
    networks:
      - coby-network
    depends_on:
      timescaledb:
        condition: service_healthy
      redis:
        condition: service_healthy
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "python", "-c", "import requests; requests.get('http://localhost:8080/health', timeout=5)"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
    logging:
      driver: "json-file"
      options:
        max-size: "50m"
        max-file: "5"

  # Web Dashboard (Nginx serving static files)
  coby-dashboard:
    image: nginx:alpine
    container_name: coby-dashboard
    ports:
      - "${DASHBOARD_PORT:-3000}:80"
    volumes:
      - ./web/static:/usr/share/nginx/html:ro
      - ./docker/nginx.conf:/etc/nginx/nginx.conf:ro
    networks:
      - coby-network
    depends_on:
      - coby-app
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/"]
      interval: 30s
      timeout: 10s
      retries: 3
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  # Prometheus (Optional - for monitoring)
  prometheus:
    image: prom/prometheus:latest
    container_name: coby-prometheus
    ports:
      - "${PROMETHEUS_PORT:-9090}:9090"
    volumes:
      - ./docker/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - prometheus_data:/prometheus
    networks:
      - coby-network
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--storage.tsdb.retention.time=200h'
      - '--web.enable-lifecycle'
    restart: unless-stopped
    profiles:
      - monitoring
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  # Grafana (Optional - for visualization)
  grafana:
    image: grafana/grafana:latest
    container_name: coby-grafana
    ports:
      - "${GRAFANA_PORT:-3001}:3000"
    environment:
      GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_PASSWORD:-admin}
      GF_USERS_ALLOW_SIGN_UP: false
    volumes:
      - grafana_data:/var/lib/grafana
      - ./docker/grafana/dashboards:/etc/grafana/provisioning/dashboards:ro
      - ./docker/grafana/datasources:/etc/grafana/provisioning/datasources:ro
    networks:
      - coby-network
    depends_on:
      - prometheus
    restart: unless-stopped
    profiles:
      - monitoring
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

networks:
  coby-network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.20.0.0/16

volumes:
  timescale_data:
    driver: local
  redis_data:
    driver: local
  prometheus_data:
    driver: local
  grafana_data:
    driver: local
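The `coby-app` healthcheck above runs `requests.get('http://localhost:8080/health')` inside the container, so the application is expected to expose a `/health` route on the API port. The actual COBY handler is not part of this diff; a minimal sketch of what such an endpoint could look like, with the framework choice (aiohttp) and route shape as assumptions:

```python
# Hypothetical /health endpoint sketch; COBY's real API handler is not shown
# in this diff, so the framework and response shape here are assumptions.
from aiohttp import web

async def health(request: web.Request) -> web.Response:
    # Return 200 so the docker-compose healthcheck's requests.get() succeeds.
    return web.json_response({"status": "ok"})

app = web.Application()
app.router.add_get("/health", health)

if __name__ == "__main__":
    web.run_app(app, host="0.0.0.0", port=8080)
```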

COBY/docker/.env.example (new file, 170 lines)
@@ -0,0 +1,170 @@
# COBY Multi-Exchange Data Aggregation System Environment Configuration
# Copy this file to .env and customize the values

# =============================================================================
# DATABASE CONFIGURATION
# =============================================================================
DB_HOST=timescaledb
DB_PORT=5432
DB_NAME=market_data
DB_USER=market_user
DB_PASSWORD=market_data_secure_pass_2024
DB_SCHEMA=market_data
DB_POOL_SIZE=10
DB_MAX_OVERFLOW=20
DB_POOL_TIMEOUT=30

# =============================================================================
# REDIS CONFIGURATION
# =============================================================================
REDIS_HOST=redis
REDIS_PORT=6379
REDIS_PASSWORD=market_data_redis_2024
REDIS_DB=0
REDIS_MAX_CONNECTIONS=50
REDIS_SOCKET_TIMEOUT=5
REDIS_CONNECT_TIMEOUT=5

# =============================================================================
# APPLICATION CONFIGURATION
# =============================================================================
ENVIRONMENT=production
DEBUG=false
LOG_LEVEL=INFO
LOG_FILE=logs/coby.log
LOG_MAX_SIZE=100
LOG_BACKUP_COUNT=5
ENABLE_CORRELATION_ID=true

# =============================================================================
# API CONFIGURATION
# =============================================================================
API_HOST=0.0.0.0
API_PORT=8080
WS_PORT=8081
DASHBOARD_PORT=3000
CORS_ORIGINS=*
RATE_LIMIT=100
MAX_WS_CONNECTIONS=1000

# =============================================================================
# EXCHANGE CONFIGURATION
# =============================================================================
MAX_CONNECTIONS_PER_EXCHANGE=5
RECONNECT_DELAY=5
MAX_RECONNECT_ATTEMPTS=10
HEARTBEAT_INTERVAL=30

# Supported exchanges (comma-separated)
EXCHANGES=binance,coinbase,kraken,bybit,okx,huobi,kucoin,gateio,bitfinex,mexc

# Trading symbols (comma-separated)
SYMBOLS=BTCUSDT,ETHUSDT

# =============================================================================
# AGGREGATION CONFIGURATION
# =============================================================================
BUCKET_SIZE=1.0
HEATMAP_DEPTH=50
UPDATE_FREQUENCY=0.5
VOLUME_THRESHOLD=0.01

# =============================================================================
# PERFORMANCE CONFIGURATION
# =============================================================================
DATA_BUFFER_SIZE=10000
BATCH_WRITE_SIZE=1000
MAX_MEMORY_USAGE=2048
GC_THRESHOLD=0.8
PROCESSING_TIMEOUT=10
MAX_QUEUE_SIZE=50000

# =============================================================================
# MONITORING CONFIGURATION
# =============================================================================
PROMETHEUS_PORT=9090
GRAFANA_PORT=3001
GRAFANA_PASSWORD=admin

# Metrics collection interval (seconds)
METRICS_COLLECTION_INTERVAL=1.0

# Performance thresholds
MAX_CPU_USAGE=80.0
MAX_MEMORY_USAGE_PERCENT=85.0
MIN_MEMORY_AVAILABLE_GB=1.0
MAX_LATENCY_MS=100.0
MAX_ERROR_RATE_PERCENT=5.0
MIN_THROUGHPUT_OPS=100.0

# =============================================================================
# DOCKER CONFIGURATION
# =============================================================================
# Container resource limits
COBY_APP_MEMORY=2g
COBY_APP_CPUS=2.0
TIMESCALEDB_MEMORY=1g
TIMESCALEDB_CPUS=1.0
REDIS_MEMORY=512m
REDIS_CPUS=0.5

# Network configuration
DOCKER_NETWORK_SUBNET=172.20.0.0/16

# =============================================================================
# DEVELOPMENT CONFIGURATION
# =============================================================================
# Override for development environment
DEV_DB_NAME=market_data_dev
DEV_DB_PASSWORD=dev_password
DEV_REDIS_PASSWORD=dev_redis
DEV_API_PORT=8080
DEV_WS_PORT=8081
DEV_DASHBOARD_PORT=3000

# =============================================================================
# SECURITY CONFIGURATION
# =============================================================================
# API Keys for exchanges (optional, for private data)
BINANCE_API_KEY=
BINANCE_API_SECRET=
COINBASE_API_KEY=
COINBASE_API_SECRET=
COINBASE_PASSPHRASE=
KRAKEN_API_KEY=
KRAKEN_API_SECRET=
BYBIT_API_KEY=
BYBIT_API_SECRET=
OKX_API_KEY=
OKX_API_SECRET=
OKX_PASSPHRASE=
HUOBI_API_KEY=
HUOBI_API_SECRET=
KUCOIN_API_KEY=
KUCOIN_API_SECRET=
KUCOIN_PASSPHRASE=
GATEIO_API_KEY=
GATEIO_API_SECRET=
BITFINEX_API_KEY=
BITFINEX_API_SECRET=
MEXC_API_KEY=
MEXC_API_SECRET=

# =============================================================================
# NOTIFICATION CONFIGURATION
# =============================================================================
# Email notifications
SMTP_SERVER=
SMTP_PORT=587
SMTP_USERNAME=
SMTP_PASSWORD=
SMTP_FROM_EMAIL=
SMTP_TO_EMAILS=

# Slack notifications
SLACK_WEBHOOK_URL=
SLACK_CHANNEL=

# Webhook notifications
WEBHOOK_URL=
WEBHOOK_HEADERS=
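These are plain `KEY=value` pairs consumed through the container environment; how COBY's own config loader maps them is not part of this diff. A minimal sketch of reading a few of them with the standard library, purely as an illustration (the helper and defaults below are assumptions, not the real loader):

```python
import os

# Illustrative only: variable names come from .env.example above; this helper
# is not COBY's actual configuration loader.
def env(name: str, default: str = "") -> str:
    return os.getenv(name, default)

db_host = env("DB_HOST", "timescaledb")
db_port = int(env("DB_PORT", "5432"))
redis_password = env("REDIS_PASSWORD")
exchanges = [e.strip() for e in env("EXCHANGES", "binance,okx").split(",") if e.strip()]
symbols = env("SYMBOLS", "BTCUSDT,ETHUSDT").split(",")
print(f"db={db_host}:{db_port} exchanges={exchanges} symbols={symbols}")
```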

COBY/docker/README.md (new file, 273 lines)
@@ -0,0 +1,273 @@
# Market Data Infrastructure Docker Setup

This directory contains Docker Compose configurations and scripts for deploying TimescaleDB and Redis infrastructure for the multi-exchange data aggregation system.

## 🏗️ Architecture

- **TimescaleDB**: Time-series database optimized for high-frequency market data
- **Redis**: High-performance caching layer for real-time data
- **Network**: Isolated Docker network for secure communication

## 📋 Prerequisites

- Docker Engine 20.10+
- Docker Compose 2.0+
- At least 4GB RAM available for containers
- 50GB+ disk space for data storage

## 🚀 Quick Start

1. **Copy environment file**:
   ```bash
   cp .env.example .env
   ```

2. **Edit configuration** (update passwords and settings):
   ```bash
   nano .env
   ```

3. **Deploy infrastructure**:
   ```bash
   chmod +x deploy.sh
   ./deploy.sh
   ```

4. **Verify deployment**:
   ```bash
   docker-compose -f timescaledb-compose.yml ps
   ```

## 📁 File Structure

```
docker/
├── timescaledb-compose.yml    # Main Docker Compose configuration
├── init-scripts/              # Database initialization scripts
│   └── 01-init-timescaledb.sql
├── redis.conf                 # Redis configuration
├── .env                       # Environment variables
├── deploy.sh                  # Deployment script
├── backup.sh                  # Backup script
├── restore.sh                 # Restore script
└── README.md                  # This file
```

## ⚙️ Configuration

### Environment Variables

Key variables in `.env`:

```bash
# Database credentials
POSTGRES_PASSWORD=your_secure_password
POSTGRES_USER=market_user
POSTGRES_DB=market_data

# Redis settings
REDIS_PASSWORD=your_redis_password

# Performance tuning
POSTGRES_SHARED_BUFFERS=256MB
POSTGRES_EFFECTIVE_CACHE_SIZE=1GB
REDIS_MAXMEMORY=2gb
```

### TimescaleDB Configuration

The database is pre-configured with:
- Optimized PostgreSQL settings for time-series data
- TimescaleDB extension enabled
- Hypertables for automatic partitioning
- Retention policies (90 days for raw data)
- Continuous aggregates for common queries
- Proper indexes for query performance

### Redis Configuration

Redis is configured for:
- High-frequency data caching
- Memory optimization (2GB limit)
- Persistence with AOF and RDB
- Optimized for order book data structures

## 🔌 Connection Details

After deployment, connect using:

### TimescaleDB
```
Host: 192.168.0.10
Port: 5432
Database: market_data
Username: market_user
Password: (from .env file)
```

### Redis
```
Host: 192.168.0.10
Port: 6379
Password: (from .env file)
```
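For programmatic access the same connection details apply. A minimal sketch using `psycopg2` and `redis-py`, where the library choice and hard-coded host simply mirror the values listed above rather than anything mandated by the repo:

```python
import os

import psycopg2
import redis

# Illustrative clients only; host, port, database, user, and table names mirror
# the details in this README, passwords come from the .env file.
pg = psycopg2.connect(
    host="192.168.0.10",
    port=5432,
    dbname="market_data",
    user="market_user",
    password=os.environ["POSTGRES_PASSWORD"],
)
cache = redis.Redis(host="192.168.0.10", port=6379,
                    password=os.environ["REDIS_PASSWORD"])

with pg, pg.cursor() as cur:
    cur.execute("SELECT count(*) FROM market_data.trade_events;")
    print(cur.fetchone()[0], "trades stored")
print("redis ping:", cache.ping())
```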
## 🗄️ Database Schema

The system creates the following tables:

- `order_book_snapshots`: Real-time order book data
- `trade_events`: Individual trade events
- `heatmap_data`: Aggregated price bucket data
- `ohlcv_data`: OHLCV candlestick data
- `exchange_status`: Exchange connection monitoring
- `system_metrics`: System performance metrics

## 💾 Backup & Restore

### Create Backup
```bash
chmod +x backup.sh
./backup.sh
```

Backups are stored in `./backups/` with a timestamp.

### Restore from Backup
```bash
chmod +x restore.sh
./restore.sh market_data_backup_YYYYMMDD_HHMMSS.tar.gz
```

### Automated Backups

Set up a cron job for regular backups:
```bash
# Daily backup at 2 AM
0 2 * * * /path/to/docker/backup.sh
```

## 📊 Monitoring

### Health Checks

Check service health:
```bash
# TimescaleDB
docker exec market_data_timescaledb pg_isready -U market_user -d market_data

# Redis
docker exec market_data_redis redis-cli -a your_password ping
```

### View Logs
```bash
# All services
docker-compose -f timescaledb-compose.yml logs -f

# Specific service
docker-compose -f timescaledb-compose.yml logs -f timescaledb
```

### Database Queries

Connect to TimescaleDB:
```bash
docker exec -it market_data_timescaledb psql -U market_user -d market_data
```

Example queries:
```sql
-- Check table sizes
SELECT
    schemaname,
    tablename,
    pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) as size
FROM pg_tables
WHERE schemaname = 'market_data';

-- Recent order book data
SELECT * FROM market_data.order_book_snapshots
ORDER BY timestamp DESC LIMIT 10;

-- Exchange status
SELECT * FROM market_data.exchange_status
ORDER BY timestamp DESC LIMIT 10;
```

## 🔧 Maintenance

### Update Images
```bash
docker-compose -f timescaledb-compose.yml pull
docker-compose -f timescaledb-compose.yml up -d
```

### Clean Up Old Data
```bash
# TimescaleDB has automatic retention policies
# Manual cleanup if needed:
docker exec market_data_timescaledb psql -U market_user -d market_data -c "
SELECT drop_chunks('market_data.order_book_snapshots', INTERVAL '30 days');
"
```

### Scale Resources

Edit `timescaledb-compose.yml` to adjust:
- Memory limits
- CPU limits
- Shared buffers
- Connection limits

## 🚨 Troubleshooting

### Common Issues

1. **Port conflicts**: Change the ports in the compose file if 5432/6379 are already in use
2. **Memory issues**: Reduce shared_buffers and Redis maxmemory
3. **Disk space**: Monitor `/var/lib/docker/volumes/` usage
4. **Connection refused**: Check firewall settings and container status

### Performance Tuning

1. **TimescaleDB**:
   - Adjust `shared_buffers` based on available RAM
   - Tune `effective_cache_size` to 75% of system RAM
   - Monitor query performance with `pg_stat_statements`

2. **Redis**:
   - Adjust `maxmemory` based on data volume
   - Monitor memory usage with `INFO memory`
   - Use an appropriate eviction policy

### Recovery Procedures

1. **Container failure**: `docker-compose restart <service>`
2. **Data corruption**: Restore from the latest backup
3. **Network issues**: Check Docker network configuration
4. **Performance degradation**: Review logs and system metrics

## 🔐 Security

- Change default passwords in `.env`
- Use strong passwords (20+ characters)
- Restrict network access to trusted IPs
- Apply security updates regularly
- Monitor access logs
- Enable SSL/TLS for production

## 📞 Support

For issues related to:
- TimescaleDB: see the [TimescaleDB docs](https://docs.timescale.com/)
- Redis: see the [Redis docs](https://redis.io/documentation)
- Docker: see the [Docker docs](https://docs.docker.com/)

## 🔄 Updates

This infrastructure supports:
- Rolling updates with zero downtime
- Blue-green deployments
- Automated failover
- Data migration scripts
COBY/docker/alert_rules.yml
Normal file
103
COBY/docker/alert_rules.yml
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
# Prometheus alert rules for COBY system
|
||||||
|
groups:
|
||||||
|
- name: coby_alerts
|
||||||
|
rules:
|
||||||
|
# High CPU usage
|
||||||
|
- alert: HighCPUUsage
|
||||||
|
expr: system_cpu_usage > 80
|
||||||
|
for: 2m
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: "High CPU usage detected"
|
||||||
|
description: "CPU usage is above 80% for more than 2 minutes"
|
||||||
|
|
||||||
|
# High memory usage
|
||||||
|
- alert: HighMemoryUsage
|
||||||
|
expr: system_memory_usage > 85
|
||||||
|
for: 2m
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: "High memory usage detected"
|
||||||
|
description: "Memory usage is above 85% for more than 2 minutes"
|
||||||
|
|
||||||
|
# Low available memory
|
||||||
|
- alert: LowAvailableMemory
|
||||||
|
expr: system_memory_available_gb < 1
|
||||||
|
for: 1m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "Low available memory"
|
||||||
|
description: "Available memory is below 1GB"
|
||||||
|
|
||||||
|
# High latency
|
||||||
|
- alert: HighLatency
|
||||||
|
expr: processing_latency_ms > 100
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: "High processing latency"
|
||||||
|
description: "Processing latency is above 100ms for more than 5 minutes"
|
||||||
|
|
||||||
|
# Exchange connection failures
|
||||||
|
- alert: ExchangeConnectionFailure
|
||||||
|
expr: increase(exchange_connection_errors_total[5m]) > 5
|
||||||
|
for: 1m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "Exchange connection failures"
|
||||||
|
description: "More than 5 exchange connection errors in the last 5 minutes"
|
||||||
|
|
||||||
|
# Database connection issues
|
||||||
|
- alert: DatabaseConnectionFailure
|
||||||
|
expr: database_connection_errors_total > 0
|
||||||
|
for: 1m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "Database connection failure"
|
||||||
|
description: "Database connection errors detected"
|
||||||
|
|
||||||
|
# High error rate
|
||||||
|
- alert: HighErrorRate
|
||||||
|
expr: kpi_error_rate_percent > 5
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: "High error rate"
|
||||||
|
description: "Error rate is above 5% for more than 5 minutes"
|
||||||
|
|
||||||
|
# Low throughput
|
||||||
|
- alert: LowThroughput
|
||||||
|
expr: kpi_throughput_ops_per_sec < 10
|
||||||
|
for: 10m
|
||||||
|
labels:
|
||||||
|
severity: warning
|
||||||
|
annotations:
|
||||||
|
summary: "Low system throughput"
|
||||||
|
description: "System throughput is below 10 ops/sec for more than 10 minutes"
|
||||||
|
|
||||||
|
# Service down
|
||||||
|
- alert: ServiceDown
|
||||||
|
expr: up == 0
|
||||||
|
for: 1m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "Service is down"
|
||||||
|
description: "{{ $labels.job }} service is down"
|
||||||
|
|
||||||
|
# Disk space low
|
||||||
|
- alert: DiskSpaceLow
|
||||||
|
expr: system_disk_usage > 90
|
||||||
|
for: 5m
|
||||||
|
labels:
|
||||||
|
severity: critical
|
||||||
|
annotations:
|
||||||
|
summary: "Disk space low"
|
||||||
|
description: "Disk usage is above 90%"
|
||||||
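These rules fire on gauges such as `system_cpu_usage` and `system_memory_usage`, which Prometheus scrapes from the application's `/metrics` endpoint (the `coby-app` scrape job in the Portainer compose file earlier in this diff). How COBY actually registers those metrics is not shown here; a minimal sketch of publishing comparable gauges with `prometheus_client`, where the use of `prometheus_client` and `psutil` is an assumption:

```python
# Hypothetical metrics publisher; the metric names match the alert rules above,
# but the prometheus_client/psutil approach is an assumption, not COBY's code.
import time

import psutil
from prometheus_client import Gauge, start_http_server

system_cpu_usage = Gauge("system_cpu_usage", "CPU usage percent")
system_memory_usage = Gauge("system_memory_usage", "Memory usage percent")

if __name__ == "__main__":
    start_http_server(8080)  # exposes /metrics for the Prometheus scrape job
    while True:
        system_cpu_usage.set(psutil.cpu_percent(interval=None))
        system_memory_usage.set(psutil.virtual_memory().percent)
        time.sleep(1.0)  # mirrors METRICS_COLLECTION_INTERVAL=1.0
```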

COBY/docker/backup.sh (new file, 108 lines)
@@ -0,0 +1,108 @@
#!/bin/bash

# Backup script for market data infrastructure
# Run this script regularly to backup your data

set -e

# Configuration
BACKUP_DIR="./backups"
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
RETENTION_DAYS=30

# Load environment variables
if [ -f .env ]; then
    source .env
fi

echo "🗄️ Starting backup process..."

# Create backup directory if it doesn't exist
mkdir -p "$BACKUP_DIR"

# Backup TimescaleDB
echo "📊 Backing up TimescaleDB..."
docker exec market_data_timescaledb pg_dump \
    -U market_user \
    -d market_data \
    --verbose \
    --no-password \
    --format=custom \
    --compress=9 \
    > "$BACKUP_DIR/timescaledb_backup_$TIMESTAMP.dump"

if [ $? -eq 0 ]; then
    echo "✅ TimescaleDB backup completed: timescaledb_backup_$TIMESTAMP.dump"
else
    echo "❌ TimescaleDB backup failed"
    exit 1
fi

# Backup Redis
echo "📦 Backing up Redis..."
docker exec market_data_redis redis-cli \
    -a "$REDIS_PASSWORD" \
    --rdb /data/redis_backup_$TIMESTAMP.rdb \
    BGSAVE

# Wait for Redis backup to complete
sleep 5

# Copy Redis backup from container
docker cp market_data_redis:/data/redis_backup_$TIMESTAMP.rdb "$BACKUP_DIR/"

if [ $? -eq 0 ]; then
    echo "✅ Redis backup completed: redis_backup_$TIMESTAMP.rdb"
else
    echo "❌ Redis backup failed"
    exit 1
fi

# Create backup metadata
cat > "$BACKUP_DIR/backup_$TIMESTAMP.info" << EOF
Backup Information
==================
Timestamp: $TIMESTAMP
Date: $(date)
TimescaleDB Backup: timescaledb_backup_$TIMESTAMP.dump
Redis Backup: redis_backup_$TIMESTAMP.rdb

Container Versions:
TimescaleDB: $(docker exec market_data_timescaledb psql -U market_user -d market_data -t -c "SELECT version();")
Redis: $(docker exec market_data_redis redis-cli -a "$REDIS_PASSWORD" INFO server | grep redis_version)

Database Size:
$(docker exec market_data_timescaledb psql -U market_user -d market_data -c "\l+")
EOF

# Compress backups
echo "🗜️ Compressing backups..."
tar -czf "$BACKUP_DIR/market_data_backup_$TIMESTAMP.tar.gz" \
    -C "$BACKUP_DIR" \
    "timescaledb_backup_$TIMESTAMP.dump" \
    "redis_backup_$TIMESTAMP.rdb" \
    "backup_$TIMESTAMP.info"

# Remove individual files after compression
rm "$BACKUP_DIR/timescaledb_backup_$TIMESTAMP.dump"
rm "$BACKUP_DIR/redis_backup_$TIMESTAMP.rdb"
rm "$BACKUP_DIR/backup_$TIMESTAMP.info"

echo "✅ Compressed backup created: market_data_backup_$TIMESTAMP.tar.gz"

# Clean up old backups
echo "🧹 Cleaning up old backups (older than $RETENTION_DAYS days)..."
find "$BACKUP_DIR" -name "market_data_backup_*.tar.gz" -mtime +$RETENTION_DAYS -delete

# Display backup information
BACKUP_SIZE=$(du -h "$BACKUP_DIR/market_data_backup_$TIMESTAMP.tar.gz" | cut -f1)
echo ""
echo "📋 Backup Summary:"
echo "  File: market_data_backup_$TIMESTAMP.tar.gz"
echo "  Size: $BACKUP_SIZE"
echo "  Location: $BACKUP_DIR"
echo ""
echo "🔄 To restore from this backup:"
echo "  ./restore.sh market_data_backup_$TIMESTAMP.tar.gz"
echo ""
echo "✅ Backup process completed successfully!"

COBY/docker/deploy.sh (new file, 416 lines)
@@ -0,0 +1,416 @@
#!/bin/bash
|
||||||
|
|
||||||
|
# COBY Multi-Exchange Data Aggregation System Deployment Script
|
||||||
|
# This script handles deployment of the COBY system using Docker Compose
|
||||||
|
|
||||||
|
set -e # Exit on any error
|
||||||
|
|
||||||
|
# Colors for output
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Configuration
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||||
|
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
|
||||||
|
COMPOSE_FILE="$PROJECT_ROOT/docker-compose.yml"
|
||||||
|
DEV_COMPOSE_FILE="$PROJECT_ROOT/docker-compose.dev.yml"
|
||||||
|
ENV_FILE="$PROJECT_ROOT/docker/.env"
|
||||||
|
ENV_EXAMPLE="$PROJECT_ROOT/docker/.env.example"
|
||||||
|
|
||||||
|
# Default values
|
||||||
|
ENVIRONMENT="production"
|
||||||
|
PROFILE=""
|
||||||
|
SERVICES=""
|
||||||
|
ACTION="up"
|
||||||
|
DETACHED=true
|
||||||
|
BUILD=false
|
||||||
|
PULL=false
|
||||||
|
FORCE_RECREATE=false
|
||||||
|
REMOVE_ORPHANS=true
|
||||||
|
|
||||||
|
# Function to print colored output
|
||||||
|
print_status() {
|
||||||
|
echo -e "${BLUE}[INFO]${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_success() {
|
||||||
|
echo -e "${GREEN}[SUCCESS]${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_warning() {
|
||||||
|
echo -e "${YELLOW}[WARNING]${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_error() {
|
||||||
|
echo -e "${RED}[ERROR]${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to show usage
|
||||||
|
show_usage() {
|
||||||
|
cat << EOF
|
||||||
|
COBY Deployment Script
|
||||||
|
|
||||||
|
Usage: $0 [OPTIONS] [ACTION] [SERVICES...]
|
||||||
|
|
||||||
|
ACTIONS:
|
||||||
|
up Start services (default)
|
||||||
|
down Stop and remove services
|
||||||
|
restart Restart services
|
||||||
|
logs Show service logs
|
||||||
|
ps Show running services
|
||||||
|
build Build services
|
||||||
|
pull Pull latest images
|
||||||
|
exec Execute command in service
|
||||||
|
health Check service health
|
||||||
|
|
||||||
|
OPTIONS:
|
||||||
|
-e, --env ENV Environment (production|development) [default: production]
|
||||||
|
-p, --profile PROFILE Docker compose profile (monitoring|tools)
|
||||||
|
-d, --detach Run in detached mode [default: true]
|
||||||
|
-f, --foreground Run in foreground mode
|
||||||
|
-b, --build Build images before starting
|
||||||
|
--pull Pull latest images before starting
|
||||||
|
--force-recreate Force recreate containers
|
||||||
|
--no-remove-orphans Don't remove orphaned containers
|
||||||
|
-h, --help Show this help message
|
||||||
|
|
||||||
|
EXAMPLES:
|
||||||
|
$0 # Start production environment
|
||||||
|
$0 -e development # Start development environment
|
||||||
|
$0 -p monitoring up # Start with monitoring profile
|
||||||
|
$0 down # Stop all services
|
||||||
|
$0 logs coby-app # Show logs for coby-app service
|
||||||
|
$0 exec coby-app bash # Execute bash in coby-app container
|
||||||
|
$0 -b up # Build and start services
|
||||||
|
|
||||||
|
SERVICES:
|
||||||
|
coby-app Main application
|
||||||
|
timescaledb Database
|
||||||
|
redis Cache
|
||||||
|
coby-dashboard Web dashboard
|
||||||
|
prometheus Metrics collection (monitoring profile)
|
||||||
|
grafana Visualization (monitoring profile)
|
||||||
|
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to check prerequisites
|
||||||
|
check_prerequisites() {
|
||||||
|
print_status "Checking prerequisites..."
|
||||||
|
|
||||||
|
# Check if Docker is installed and running
|
||||||
|
if ! command -v docker &> /dev/null; then
|
||||||
|
print_error "Docker is not installed. Please install Docker first."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! docker info &> /dev/null; then
|
||||||
|
print_error "Docker is not running. Please start Docker first."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if Docker Compose is available
|
||||||
|
if ! command -v docker-compose &> /dev/null && ! docker compose version &> /dev/null; then
|
||||||
|
print_error "Docker Compose is not available. Please install Docker Compose."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Determine compose command
|
||||||
|
if command -v docker-compose &> /dev/null; then
|
||||||
|
COMPOSE_CMD="docker-compose"
|
||||||
|
else
|
||||||
|
COMPOSE_CMD="docker compose"
|
||||||
|
fi
|
||||||
|
|
||||||
|
print_success "Prerequisites check passed"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to setup environment file
|
||||||
|
setup_env_file() {
|
||||||
|
if [[ ! -f "$ENV_FILE" ]]; then
|
||||||
|
print_warning "Environment file not found. Creating from example..."
|
||||||
|
cp "$ENV_EXAMPLE" "$ENV_FILE"
|
||||||
|
print_status "Please edit $ENV_FILE with your configuration"
|
||||||
|
print_warning "Using default configuration for now"
|
||||||
|
else
|
||||||
|
print_success "Environment file found"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to build compose command
|
||||||
|
build_compose_command() {
|
||||||
|
local cmd="$COMPOSE_CMD"
|
||||||
|
|
||||||
|
# Add compose files
|
||||||
|
cmd="$cmd -f $COMPOSE_FILE"
|
||||||
|
|
||||||
|
if [[ "$ENVIRONMENT" == "development" ]]; then
|
||||||
|
cmd="$cmd -f $DEV_COMPOSE_FILE"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Add environment file
|
||||||
|
cmd="$cmd --env-file $ENV_FILE"
|
||||||
|
|
||||||
|
# Add profile if specified
|
||||||
|
if [[ -n "$PROFILE" ]]; then
|
||||||
|
cmd="$cmd --profile $PROFILE"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "$cmd"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to start services
|
||||||
|
start_services() {
|
||||||
|
    print_status "Starting COBY services in $ENVIRONMENT mode..."

    local cmd=$(build_compose_command)
    local up_cmd="$cmd up"

    if [[ "$BUILD" == true ]]; then
        up_cmd="$up_cmd --build"
    fi

    if [[ "$PULL" == true ]]; then
        up_cmd="$up_cmd --pull always"
    fi

    if [[ "$FORCE_RECREATE" == true ]]; then
        up_cmd="$up_cmd --force-recreate"
    fi

    if [[ "$REMOVE_ORPHANS" == true ]]; then
        up_cmd="$up_cmd --remove-orphans"
    fi

    if [[ "$DETACHED" == true ]]; then
        up_cmd="$up_cmd -d"
    fi

    if [[ -n "$SERVICES" ]]; then
        up_cmd="$up_cmd $SERVICES"
    fi

    eval "$up_cmd"

    if [[ "$DETACHED" == true ]]; then
        print_success "Services started successfully"
        show_service_status
    fi
}

# Function to stop services
stop_services() {
    print_status "Stopping COBY services..."

    local cmd=$(build_compose_command)
    eval "$cmd down --remove-orphans"

    print_success "Services stopped successfully"
}

# Function to restart services
restart_services() {
    print_status "Restarting COBY services..."

    local cmd=$(build_compose_command)
    if [[ -n "$SERVICES" ]]; then
        eval "$cmd restart $SERVICES"
    else
        eval "$cmd restart"
    fi

    print_success "Services restarted successfully"
}

# Function to show logs
show_logs() {
    local cmd=$(build_compose_command)
    if [[ -n "$SERVICES" ]]; then
        eval "$cmd logs -f $SERVICES"
    else
        eval "$cmd logs -f"
    fi
}

# Function to show service status
show_service_status() {
    print_status "Service status:"
    local cmd=$(build_compose_command)
    eval "$cmd ps"
}

# Function to build services
build_services() {
    print_status "Building COBY services..."

    local cmd=$(build_compose_command)
    if [[ -n "$SERVICES" ]]; then
        eval "$cmd build $SERVICES"
    else
        eval "$cmd build"
    fi

    print_success "Services built successfully"
}

# Function to pull images
pull_images() {
    print_status "Pulling latest images..."

    local cmd=$(build_compose_command)
    eval "$cmd pull"

    print_success "Images pulled successfully"
}

# Function to execute command in service
exec_command() {
    if [[ -z "$SERVICES" ]]; then
        print_error "Service name required for exec command"
        exit 1
    fi

    local service=$(echo "$SERVICES" | cut -d' ' -f1)
    local command=$(echo "$SERVICES" | cut -d' ' -f2-)

    if [[ "$service" == "$command" ]]; then
        command="bash"
    fi

    local cmd=$(build_compose_command)
    eval "$cmd exec $service $command"
}

# Function to check service health
check_health() {
    print_status "Checking service health..."

    local cmd=$(build_compose_command)
    local services=$(eval "$cmd ps --services")

    for service in $services; do
        local health=$(eval "$cmd ps $service" | grep -o "healthy\|unhealthy\|starting" | head -1)
        if [[ -n "$health" ]]; then
            if [[ "$health" == "healthy" ]]; then
                print_success "$service: $health"
            elif [[ "$health" == "unhealthy" ]]; then
                print_error "$service: $health"
            else
                print_warning "$service: $health"
            fi
        else
            print_warning "$service: no health check"
        fi
    done
}

# Parse command line arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        -e|--env)
            ENVIRONMENT="$2"
            shift 2
            ;;
        -p|--profile)
            PROFILE="$2"
            shift 2
            ;;
        -d|--detach)
            DETACHED=true
            shift
            ;;
        -f|--foreground)
            DETACHED=false
            shift
            ;;
        -b|--build)
            BUILD=true
            shift
            ;;
        --pull)
            PULL=true
            shift
            ;;
        --force-recreate)
            FORCE_RECREATE=true
            shift
            ;;
        --no-remove-orphans)
            REMOVE_ORPHANS=false
            shift
            ;;
        -h|--help)
            show_usage
            exit 0
            ;;
        up|down|restart|logs|ps|build|pull|exec|health)
            ACTION="$1"
            shift
            ;;
        *)
            SERVICES="$SERVICES $1"
            shift
            ;;
    esac
done

# Trim leading/trailing spaces from services
SERVICES=$(echo "$SERVICES" | xargs)

# Main execution
main() {
    print_status "COBY Multi-Exchange Data Aggregation System Deployment"
    print_status "Environment: $ENVIRONMENT"
    if [[ -n "$PROFILE" ]]; then
        print_status "Profile: $PROFILE"
    fi
    if [[ -n "$SERVICES" ]]; then
        print_status "Services: $SERVICES"
    fi
    print_status "Action: $ACTION"
    echo

    check_prerequisites
    setup_env_file

    case $ACTION in
        up)
            start_services
            ;;
        down)
            stop_services
            ;;
        restart)
            restart_services
            ;;
        logs)
            show_logs
            ;;
        ps)
            show_service_status
            ;;
        build)
            build_services
            ;;
        pull)
            pull_images
            ;;
        exec)
            exec_command
            ;;
        health)
            check_health
            ;;
        *)
            print_error "Unknown action: $ACTION"
            show_usage
            exit 1
            ;;
    esac
}

# Run main function
main
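A minimal usage sketch for the deployment script above. The filename deploy.sh is an assumption (the script's own name is not visible in this hunk); the flags and actions are taken directly from the argument parser.

    # Start the full stack detached in production mode, rebuilding images first (hypothetical filename deploy.sh)
    ./deploy.sh -e production -b -d up

    # Follow logs for one service, open a shell inside it, then print the health summary
    ./deploy.sh logs coby-app
    ./deploy.sh exec coby-app bash
    ./deploy.sh health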
214
COBY/docker/init-scripts/01-init-timescaledb.sql
Normal file
@@ -0,0 +1,214 @@
-- Initialize TimescaleDB extension and create market data schema
CREATE EXTENSION IF NOT EXISTS timescaledb;

-- Create database schema for market data
CREATE SCHEMA IF NOT EXISTS market_data;

-- Set search path
SET search_path TO market_data, public;

-- Order book snapshots table
CREATE TABLE IF NOT EXISTS order_book_snapshots (
    id BIGSERIAL,
    symbol VARCHAR(20) NOT NULL,
    exchange VARCHAR(20) NOT NULL,
    timestamp TIMESTAMPTZ NOT NULL,
    bids JSONB NOT NULL,
    asks JSONB NOT NULL,
    sequence_id BIGINT,
    mid_price DECIMAL(20,8),
    spread DECIMAL(20,8),
    bid_volume DECIMAL(30,8),
    ask_volume DECIMAL(30,8),
    created_at TIMESTAMPTZ DEFAULT NOW(),
    PRIMARY KEY (timestamp, symbol, exchange)
);

-- Convert to hypertable
SELECT create_hypertable('order_book_snapshots', 'timestamp', if_not_exists => TRUE);

-- Create indexes for better query performance
CREATE INDEX IF NOT EXISTS idx_order_book_symbol_exchange ON order_book_snapshots (symbol, exchange, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_order_book_timestamp ON order_book_snapshots (timestamp DESC);

-- Trade events table
CREATE TABLE IF NOT EXISTS trade_events (
    id BIGSERIAL,
    symbol VARCHAR(20) NOT NULL,
    exchange VARCHAR(20) NOT NULL,
    timestamp TIMESTAMPTZ NOT NULL,
    price DECIMAL(20,8) NOT NULL,
    size DECIMAL(30,8) NOT NULL,
    side VARCHAR(4) NOT NULL,
    trade_id VARCHAR(100) NOT NULL,
    created_at TIMESTAMPTZ DEFAULT NOW(),
    PRIMARY KEY (timestamp, symbol, exchange, trade_id)
);

-- Convert to hypertable
SELECT create_hypertable('trade_events', 'timestamp', if_not_exists => TRUE);

-- Create indexes for trade events
CREATE INDEX IF NOT EXISTS idx_trade_events_symbol_exchange ON trade_events (symbol, exchange, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_trade_events_timestamp ON trade_events (timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_trade_events_price ON trade_events (symbol, price, timestamp DESC);

-- Aggregated heatmap data table
CREATE TABLE IF NOT EXISTS heatmap_data (
    symbol VARCHAR(20) NOT NULL,
    timestamp TIMESTAMPTZ NOT NULL,
    bucket_size DECIMAL(10,2) NOT NULL,
    price_bucket DECIMAL(20,8) NOT NULL,
    volume DECIMAL(30,8) NOT NULL,
    side VARCHAR(3) NOT NULL,
    exchange_count INTEGER NOT NULL,
    exchanges JSONB,
    created_at TIMESTAMPTZ DEFAULT NOW(),
    PRIMARY KEY (timestamp, symbol, bucket_size, price_bucket, side)
);

-- Convert to hypertable
SELECT create_hypertable('heatmap_data', 'timestamp', if_not_exists => TRUE);

-- Create indexes for heatmap data
CREATE INDEX IF NOT EXISTS idx_heatmap_symbol_bucket ON heatmap_data (symbol, bucket_size, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_heatmap_timestamp ON heatmap_data (timestamp DESC);

-- OHLCV data table
CREATE TABLE IF NOT EXISTS ohlcv_data (
    symbol VARCHAR(20) NOT NULL,
    timestamp TIMESTAMPTZ NOT NULL,
    timeframe VARCHAR(10) NOT NULL,
    open_price DECIMAL(20,8) NOT NULL,
    high_price DECIMAL(20,8) NOT NULL,
    low_price DECIMAL(20,8) NOT NULL,
    close_price DECIMAL(20,8) NOT NULL,
    volume DECIMAL(30,8) NOT NULL,
    trade_count INTEGER,
    vwap DECIMAL(20,8),
    created_at TIMESTAMPTZ DEFAULT NOW(),
    PRIMARY KEY (timestamp, symbol, timeframe)
);

-- Convert to hypertable
SELECT create_hypertable('ohlcv_data', 'timestamp', if_not_exists => TRUE);

-- Create indexes for OHLCV data
CREATE INDEX IF NOT EXISTS idx_ohlcv_symbol_timeframe ON ohlcv_data (symbol, timeframe, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_ohlcv_timestamp ON ohlcv_data (timestamp DESC);

-- Exchange status tracking table
CREATE TABLE IF NOT EXISTS exchange_status (
    exchange VARCHAR(20) NOT NULL,
    timestamp TIMESTAMPTZ NOT NULL,
    status VARCHAR(20) NOT NULL, -- 'connected', 'disconnected', 'error'
    last_message_time TIMESTAMPTZ,
    error_message TEXT,
    connection_count INTEGER DEFAULT 0,
    created_at TIMESTAMPTZ DEFAULT NOW(),
    PRIMARY KEY (timestamp, exchange)
);

-- Convert to hypertable
SELECT create_hypertable('exchange_status', 'timestamp', if_not_exists => TRUE);

-- Create indexes for exchange status
CREATE INDEX IF NOT EXISTS idx_exchange_status_exchange ON exchange_status (exchange, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_exchange_status_timestamp ON exchange_status (timestamp DESC);

-- System metrics table for monitoring
CREATE TABLE IF NOT EXISTS system_metrics (
    metric_name VARCHAR(50) NOT NULL,
    timestamp TIMESTAMPTZ NOT NULL,
    value DECIMAL(20,8) NOT NULL,
    labels JSONB,
    created_at TIMESTAMPTZ DEFAULT NOW(),
    PRIMARY KEY (timestamp, metric_name)
);

-- Convert to hypertable
SELECT create_hypertable('system_metrics', 'timestamp', if_not_exists => TRUE);

-- Create indexes for system metrics
CREATE INDEX IF NOT EXISTS idx_system_metrics_name ON system_metrics (metric_name, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_system_metrics_timestamp ON system_metrics (timestamp DESC);

-- Create retention policies (keep data for 90 days by default)
SELECT add_retention_policy('order_book_snapshots', INTERVAL '90 days', if_not_exists => TRUE);
SELECT add_retention_policy('trade_events', INTERVAL '90 days', if_not_exists => TRUE);
SELECT add_retention_policy('heatmap_data', INTERVAL '90 days', if_not_exists => TRUE);
SELECT add_retention_policy('ohlcv_data', INTERVAL '365 days', if_not_exists => TRUE);
SELECT add_retention_policy('exchange_status', INTERVAL '30 days', if_not_exists => TRUE);
SELECT add_retention_policy('system_metrics', INTERVAL '30 days', if_not_exists => TRUE);

-- Create continuous aggregates for common queries
CREATE MATERIALIZED VIEW IF NOT EXISTS hourly_ohlcv
WITH (timescaledb.continuous) AS
SELECT
    symbol,
    exchange,
    time_bucket('1 hour', timestamp) AS hour,
    first(price, timestamp) AS open_price,
    max(price) AS high_price,
    min(price) AS low_price,
    last(price, timestamp) AS close_price,
    sum(size) AS volume,
    count(*) AS trade_count,
    avg(price) AS vwap
FROM trade_events
GROUP BY symbol, exchange, hour
WITH NO DATA;

-- Add refresh policy for continuous aggregate
SELECT add_continuous_aggregate_policy('hourly_ohlcv',
    start_offset => INTERVAL '3 hours',
    end_offset => INTERVAL '1 hour',
    schedule_interval => INTERVAL '1 hour',
    if_not_exists => TRUE);

-- Create view for latest order book data
CREATE OR REPLACE VIEW latest_order_books AS
SELECT DISTINCT ON (symbol, exchange)
    symbol,
    exchange,
    timestamp,
    bids,
    asks,
    mid_price,
    spread,
    bid_volume,
    ask_volume
FROM order_book_snapshots
ORDER BY symbol, exchange, timestamp DESC;

-- Create view for latest heatmap data
CREATE OR REPLACE VIEW latest_heatmaps AS
SELECT DISTINCT ON (symbol, bucket_size, price_bucket, side)
    symbol,
    bucket_size,
    price_bucket,
    side,
    timestamp,
    volume,
    exchange_count,
    exchanges
FROM heatmap_data
ORDER BY symbol, bucket_size, price_bucket, side, timestamp DESC;

-- Grant permissions to market_user
GRANT ALL PRIVILEGES ON SCHEMA market_data TO market_user;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA market_data TO market_user;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA market_data TO market_user;
GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA market_data TO market_user;

-- Set default privileges for future objects
ALTER DEFAULT PRIVILEGES IN SCHEMA market_data GRANT ALL ON TABLES TO market_user;
ALTER DEFAULT PRIVILEGES IN SCHEMA market_data GRANT ALL ON SEQUENCES TO market_user;
ALTER DEFAULT PRIVILEGES IN SCHEMA market_data GRANT ALL ON FUNCTIONS TO market_user;

-- Create database user for read-only access (for dashboards)
-- PostgreSQL has no CREATE USER IF NOT EXISTS, so guard the creation in a DO block
DO $$
BEGIN
    IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'dashboard_user') THEN
        CREATE ROLE dashboard_user LOGIN PASSWORD 'dashboard_read_2024';
    END IF;
END
$$;
GRANT CONNECT ON DATABASE market_data TO dashboard_user;
GRANT USAGE ON SCHEMA market_data TO dashboard_user;
GRANT SELECT ON ALL TABLES IN SCHEMA market_data TO dashboard_user;
ALTER DEFAULT PRIVILEGES IN SCHEMA market_data GRANT SELECT ON TABLES TO dashboard_user;
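A hedged verification sketch for the schema above, run from the Docker host with psql; the host address and credentials are assumptions copied from the neighbouring manual-init script and compose file.

    # List the hypertables the init script should have created
    PGPASSWORD="market_data_secure_pass_2024" psql -h 192.168.0.10 -U market_user -d market_data \
      -c "SELECT hypertable_name FROM timescaledb_information.hypertables;"

    # Read the freshest consolidated order books through the helper view
    PGPASSWORD="market_data_secure_pass_2024" psql -h 192.168.0.10 -U market_user -d market_data \
      -c "SELECT symbol, exchange, mid_price, spread FROM market_data.latest_order_books LIMIT 10;"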
37
COBY/docker/manual-init.sh
Normal file
@@ -0,0 +1,37 @@
#!/bin/bash

# Manual database initialization script
# Run this to initialize the TimescaleDB schema

echo "🔧 Initializing TimescaleDB schema..."

# Check if we can connect to the database
echo "📡 Testing connection to TimescaleDB..."

# You can run this command on your Docker host (192.168.0.10)
# Replace with your actual password from the .env file

PGPASSWORD="market_data_secure_pass_2024" psql -h 192.168.0.10 -p 5432 -U market_user -d market_data -c "SELECT version();"

if [ $? -eq 0 ]; then
    echo "✅ Connection successful!"

    echo "🏗️ Creating database schema..."

    # Execute the initialization script
    PGPASSWORD="market_data_secure_pass_2024" psql -h 192.168.0.10 -p 5432 -U market_user -d market_data -f ../docker/init-scripts/01-init-timescaledb.sql

    if [ $? -eq 0 ]; then
        echo "✅ Database schema initialized successfully!"

        echo "📊 Verifying tables..."
        PGPASSWORD="market_data_secure_pass_2024" psql -h 192.168.0.10 -p 5432 -U market_user -d market_data -c "\dt market_data.*"

    else
        echo "❌ Schema initialization failed"
        exit 1
    fi
else
    echo "❌ Cannot connect to database"
    exit 1
fi
68
COBY/docker/nginx-dev.conf
Normal file
@@ -0,0 +1,68 @@
# Nginx configuration for development environment
events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Development logging
    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log debug;

    # Basic settings
    sendfile on;
    keepalive_timeout 65;

    server {
        listen 80;
        server_name localhost;
        root /usr/share/nginx/html;
        index index.html;

        # Disable caching for development
        add_header Cache-Control "no-cache, no-store, must-revalidate";
        add_header Pragma "no-cache";
        add_header Expires "0";

        # Main dashboard
        location / {
            try_files $uri $uri/ /index.html;
        }

        # API proxy to COBY app
        location /api/ {
            proxy_pass http://coby-app:8080/;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection 'upgrade';
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_cache_bypass $http_upgrade;
            proxy_read_timeout 86400;
        }

        # WebSocket proxy
        location /ws/ {
            proxy_pass http://coby-app:8081/;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_read_timeout 86400;
        }

        # Health check
        location /health {
            access_log off;
            return 200 "healthy\n";
            add_header Content-Type text/plain;
        }
    }
}
112
COBY/docker/nginx.conf
Normal file
@@ -0,0 +1,112 @@
# Nginx configuration for COBY dashboard
events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Logging
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;
    error_log /var/log/nginx/error.log warn;

    # Basic settings
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;

    # Gzip compression
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_types
        text/plain
        text/css
        text/xml
        text/javascript
        application/json
        application/javascript
        application/xml+rss
        application/atom+xml
        image/svg+xml;

    # Rate limiting
    limit_req_zone $binary_remote_addr zone=dashboard:10m rate=10r/s;

    server {
        listen 80;
        server_name localhost;
        root /usr/share/nginx/html;
        index index.html;

        # Security headers
        add_header X-Frame-Options "SAMEORIGIN" always;
        add_header X-XSS-Protection "1; mode=block" always;
        add_header X-Content-Type-Options "nosniff" always;
        add_header Referrer-Policy "no-referrer-when-downgrade" always;
        add_header Content-Security-Policy "default-src 'self' http: https: data: blob: 'unsafe-inline'" always;

        # Main dashboard
        location / {
            try_files $uri $uri/ /index.html;
            limit_req zone=dashboard burst=20 nodelay;
        }

        # API proxy to COBY app
        location /api/ {
            proxy_pass http://coby-app:8080/;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection 'upgrade';
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_cache_bypass $http_upgrade;
            proxy_read_timeout 86400;
        }

        # WebSocket proxy
        location /ws/ {
            proxy_pass http://coby-app:8081/;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_read_timeout 86400;
        }

        # Health check
        location /health {
            access_log off;
            return 200 "healthy\n";
            add_header Content-Type text/plain;
        }

        # Static assets caching
        location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg)$ {
            expires 1y;
            add_header Cache-Control "public, immutable";
        }

        # Error pages
        error_page 404 /404.html;
        error_page 500 502 503 504 /50x.html;

        location = /50x.html {
            root /usr/share/nginx/html;
        }
    }
}
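A quick smoke-test sketch for the proxy configuration above. It assumes the dashboard container publishes port 80 on localhost; the /api/ and app.js paths are illustrative, not endpoints confirmed by this diff.

    # Health endpoint answered directly by nginx
    curl -s http://localhost/health            # expects "healthy"

    # Requests under /api/ are forwarded to coby-app:8080
    curl -si http://localhost/api/ | head -n 5

    # Static assets should carry the long-lived Cache-Control header
    curl -sI http://localhost/app.js | grep -i cache-control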
53
COBY/docker/prometheus.yml
Normal file
@@ -0,0 +1,53 @@
# Prometheus configuration for COBY monitoring
global:
  scrape_interval: 15s
  evaluation_interval: 15s

# Alert rules (single rule_files block; Prometheus rejects duplicate top-level keys)
rule_files:
  - "/etc/prometheus/alert_rules.yml"
  # - "first_rules.yml"
  # - "second_rules.yml"

scrape_configs:
  # COBY application metrics
  - job_name: 'coby-app'
    static_configs:
      - targets: ['coby-app:8080']
    metrics_path: '/metrics'
    scrape_interval: 10s
    scrape_timeout: 5s

  # TimescaleDB metrics (if postgres_exporter is added)
  - job_name: 'timescaledb'
    static_configs:
      - targets: ['timescaledb:5432']
    scrape_interval: 30s
    scrape_timeout: 10s

  # Redis metrics (if redis_exporter is added)
  - job_name: 'redis'
    static_configs:
      - targets: ['redis:6379']
    scrape_interval: 30s
    scrape_timeout: 10s

  # Prometheus self-monitoring
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  # Node exporter for system metrics (if added)
  - job_name: 'node'
    static_configs:
      - targets: ['node-exporter:9100']
    scrape_interval: 30s

# Alerting configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093
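A short sketch for checking the scrape configuration above once Prometheus is running; the published port 9090 on localhost is an assumption.

    # List configured scrape targets and their current health
    curl -s http://localhost:9090/api/v1/targets | head -c 500

    # Confirm the coby-app job is being scraped
    curl -s 'http://localhost:9090/api/v1/query?query=up{job="coby-app"}'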
131
COBY/docker/redis.conf
Normal file
@@ -0,0 +1,131 @@
# Redis configuration for market data caching
# Optimized for high-frequency trading data

# Network settings
bind 0.0.0.0
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300

# General settings
daemonize no
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile ""
databases 16

# Snapshotting (persistence)
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /data

# Replication
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-ping-replica-period 10
repl-timeout 60
repl-disable-tcp-nodelay no
repl-backlog-size 1mb
repl-backlog-ttl 3600

# Security
requirepass market_data_redis_2024

# Memory management
maxmemory 2gb
maxmemory-policy allkeys-lru
maxmemory-samples 5

# Lazy freeing
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no

# Threaded I/O
io-threads 4
io-threads-do-reads yes

# Append only file (AOF)
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes

# Lua scripting
lua-time-limit 5000

# Slow log
slowlog-log-slower-than 10000
slowlog-max-len 128

# Latency monitor
latency-monitor-threshold 100

# Event notification
notify-keyspace-events ""

# Hash settings (optimized for order book data)
hash-max-ziplist-entries 512
hash-max-ziplist-value 64

# List settings
list-max-ziplist-size -2
list-compress-depth 0

# Set settings
set-max-intset-entries 512

# Sorted set settings
zset-max-ziplist-entries 128
zset-max-ziplist-value 64

# HyperLogLog settings
hll-sparse-max-bytes 3000

# Streams settings
stream-node-max-bytes 4096
stream-node-max-entries 100

# Active rehashing
activerehashing yes

# Client settings
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
client-query-buffer-limit 1gb

# Protocol settings
proto-max-bulk-len 512mb

# Frequency settings
hz 10

# Dynamic HZ
dynamic-hz yes

# AOF rewrite settings
aof-rewrite-incremental-fsync yes

# RDB settings
rdb-save-incremental-fsync yes

# Jemalloc settings
jemalloc-bg-thread yes

# TLS settings (disabled for internal network)
tls-port 0
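A connectivity check sketch for the Redis configuration above, using the password it sets via requirepass and the container name defined in timescaledb-compose.yml:

    # Ping through the running container
    docker exec market_data_redis redis-cli -a market_data_redis_2024 ping    # expects PONG

    # Inspect the eviction policy and keyspace
    docker exec market_data_redis redis-cli -a market_data_redis_2024 config get maxmemory-policy
    docker exec market_data_redis redis-cli -a market_data_redis_2024 info keyspace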
188
COBY/docker/restore.sh
Normal file
@@ -0,0 +1,188 @@
#!/bin/bash

# Restore script for market data infrastructure
# Usage: ./restore.sh <backup_file.tar.gz>

set -e

# Check if backup file is provided
if [ $# -eq 0 ]; then
    echo "❌ Usage: $0 <backup_file.tar.gz>"
    echo "Available backups:"
    ls -la ./backups/market_data_backup_*.tar.gz 2>/dev/null || echo "No backups found"
    exit 1
fi

BACKUP_FILE="$1"
RESTORE_DIR="./restore_temp"
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")

# Load environment variables
if [ -f .env ]; then
    source .env
fi

echo "🔄 Starting restore process..."
echo "📁 Backup file: $BACKUP_FILE"

# Check if backup file exists
if [ ! -f "$BACKUP_FILE" ]; then
    echo "❌ Backup file not found: $BACKUP_FILE"
    exit 1
fi

# Create temporary restore directory
mkdir -p "$RESTORE_DIR"

# Extract backup
echo "📦 Extracting backup..."
tar -xzf "$BACKUP_FILE" -C "$RESTORE_DIR"

# Find extracted files
TIMESCALE_BACKUP=$(find "$RESTORE_DIR" -name "timescaledb_backup_*.dump" | head -1)
REDIS_BACKUP=$(find "$RESTORE_DIR" -name "redis_backup_*.rdb" | head -1)
BACKUP_INFO=$(find "$RESTORE_DIR" -name "backup_*.info" | head -1)

if [ -z "$TIMESCALE_BACKUP" ] || [ -z "$REDIS_BACKUP" ]; then
    echo "❌ Invalid backup file structure"
    rm -rf "$RESTORE_DIR"
    exit 1
fi

# Display backup information
if [ -f "$BACKUP_INFO" ]; then
    echo "📋 Backup Information:"
    cat "$BACKUP_INFO"
    echo ""
fi

# Confirm restore
read -p "⚠️ This will replace all existing data. Continue? (y/N): " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
    echo "❌ Restore cancelled"
    rm -rf "$RESTORE_DIR"
    exit 1
fi

# Stop services
echo "🛑 Stopping services..."
docker-compose -f timescaledb-compose.yml down

# Backup current data (just in case)
echo "💾 Creating safety backup of current data..."
mkdir -p "./backups/pre_restore_$TIMESTAMP"
docker run --rm -v market_data_timescale_data:/data -v "$(pwd)/backups/pre_restore_$TIMESTAMP":/backup alpine tar czf /backup/current_timescale.tar.gz -C /data .
docker run --rm -v market_data_redis_data:/data -v "$(pwd)/backups/pre_restore_$TIMESTAMP":/backup alpine tar czf /backup/current_redis.tar.gz -C /data .

# Start only TimescaleDB for restore
echo "🏃 Starting TimescaleDB for restore..."
docker-compose -f timescaledb-compose.yml up -d timescaledb

# Wait for TimescaleDB to be ready
echo "⏳ Waiting for TimescaleDB to be ready..."
sleep 30

# Check if TimescaleDB is ready
if ! docker exec market_data_timescaledb pg_isready -U market_user -d market_data; then
    echo "❌ TimescaleDB is not ready"
    exit 1
fi

# Drop existing database and recreate
echo "🗑️ Dropping existing database..."
docker exec market_data_timescaledb psql -U postgres -c "DROP DATABASE IF EXISTS market_data;"
docker exec market_data_timescaledb psql -U postgres -c "CREATE DATABASE market_data OWNER market_user;"

# Restore TimescaleDB
echo "📊 Restoring TimescaleDB..."
docker cp "$TIMESCALE_BACKUP" market_data_timescaledb:/tmp/restore.dump
docker exec market_data_timescaledb pg_restore \
    -U market_user \
    -d market_data \
    --verbose \
    --no-password \
    /tmp/restore.dump

if [ $? -eq 0 ]; then
    echo "✅ TimescaleDB restore completed"
else
    echo "❌ TimescaleDB restore failed"
    exit 1
fi

# Stop TimescaleDB
docker-compose -f timescaledb-compose.yml stop timescaledb

# Restore Redis data
echo "📦 Restoring Redis data..."
# Remove existing Redis data
docker volume rm market_data_redis_data 2>/dev/null || true
docker volume create market_data_redis_data

# Copy Redis backup to volume
docker run --rm -v market_data_redis_data:/data -v "$(pwd)/$RESTORE_DIR":/backup alpine cp "/backup/$(basename "$REDIS_BACKUP")" /data/dump.rdb

# Start all services
echo "🏃 Starting all services..."
docker-compose -f timescaledb-compose.yml up -d

# Wait for services to be ready
echo "⏳ Waiting for services to be ready..."
sleep 30

# Verify restore
echo "🔍 Verifying restore..."

# Check TimescaleDB
if docker exec market_data_timescaledb pg_isready -U market_user -d market_data; then
    echo "✅ TimescaleDB is ready"

    # Show table counts
    echo "📊 Database table counts:"
    docker exec market_data_timescaledb psql -U market_user -d market_data -c "
        SELECT
            schemaname,
            tablename,
            n_tup_ins as row_count
        FROM pg_stat_user_tables
        WHERE schemaname = 'market_data'
        ORDER BY tablename;
    "
else
    echo "❌ TimescaleDB verification failed"
    exit 1
fi

# Check Redis
if docker exec market_data_redis redis-cli -a "$REDIS_PASSWORD" ping | grep -q PONG; then
    echo "✅ Redis is ready"

    # Show Redis info
    echo "📦 Redis database info:"
    docker exec market_data_redis redis-cli -a "$REDIS_PASSWORD" INFO keyspace
else
    echo "❌ Redis verification failed"
    exit 1
fi

# Clean up
echo "🧹 Cleaning up temporary files..."
rm -rf "$RESTORE_DIR"

echo ""
echo "🎉 Restore completed successfully!"
echo ""
echo "📋 Restore Summary:"
echo "   Source: $BACKUP_FILE"
echo "   Timestamp: $TIMESTAMP"
echo "   Safety backup: ./backups/pre_restore_$TIMESTAMP/"
echo ""
echo "⚠️ If you encounter any issues, you can restore the safety backup:"
echo "   docker-compose -f timescaledb-compose.yml down"
echo "   docker volume rm market_data_timescale_data market_data_redis_data"
echo "   docker volume create market_data_timescale_data"
echo "   docker volume create market_data_redis_data"
echo "   docker run --rm -v market_data_timescale_data:/data -v $(pwd)/backups/pre_restore_$TIMESTAMP:/backup alpine tar xzf /backup/current_timescale.tar.gz -C /data"
echo "   docker run --rm -v market_data_redis_data:/data -v $(pwd)/backups/pre_restore_$TIMESTAMP:/backup alpine tar xzf /backup/current_redis.tar.gz -C /data"
echo "   docker-compose -f timescaledb-compose.yml up -d"
78
COBY/docker/timescaledb-compose.yml
Normal file
@@ -0,0 +1,78 @@
version: '3.8'

services:
  timescaledb:
    image: timescale/timescaledb:latest-pg15
    container_name: market_data_timescaledb
    restart: unless-stopped
    environment:
      POSTGRES_DB: market_data
      POSTGRES_USER: market_user
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-market_data_secure_pass_2024}
      POSTGRES_INITDB_ARGS: "--encoding=UTF-8 --lc-collate=C --lc-ctype=C"
      # TimescaleDB specific settings
      TIMESCALEDB_TELEMETRY: 'off'
    ports:
      - "5432:5432"
    volumes:
      - timescale_data:/var/lib/postgresql/data
      - ./init-scripts:/docker-entrypoint-initdb.d
    command: >
      postgres
      -c shared_preload_libraries=timescaledb
      -c max_connections=200
      -c shared_buffers=256MB
      -c effective_cache_size=1GB
      -c maintenance_work_mem=64MB
      -c checkpoint_completion_target=0.9
      -c wal_buffers=16MB
      -c default_statistics_target=100
      -c random_page_cost=1.1
      -c effective_io_concurrency=200
      -c work_mem=4MB
      -c min_wal_size=1GB
      -c max_wal_size=4GB
      -c max_worker_processes=8
      -c max_parallel_workers_per_gather=4
      -c max_parallel_workers=8
      -c max_parallel_maintenance_workers=4
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U market_user -d market_data"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
    networks:
      - market_data_network

  redis:
    image: redis:7-alpine
    container_name: market_data_redis
    restart: unless-stopped
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data
      - ./redis.conf:/usr/local/etc/redis/redis.conf
    command: redis-server /usr/local/etc/redis/redis.conf
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
    networks:
      - market_data_network

volumes:
  timescale_data:
    driver: local
  redis_data:
    driver: local

networks:
  market_data_network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.20.0.0/16
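A minimal bring-up sketch for the standalone compose file above, run from COBY/docker so the relative volume mounts resolve:

    # Start TimescaleDB and Redis and check their health status
    docker-compose -f timescaledb-compose.yml up -d
    docker-compose -f timescaledb-compose.yml ps

    # Confirm the database answers before pointing the application at it
    docker exec market_data_timescaledb pg_isready -U market_user -d market_data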
168
COBY/examples/binance_example.py
Normal file
@@ -0,0 +1,168 @@
#!/usr/bin/env python3
"""
Example usage of Binance connector.
"""

import asyncio
import sys
from pathlib import Path

# Add COBY to path
sys.path.insert(0, str(Path(__file__).parent.parent))

from connectors.binance_connector import BinanceConnector
from utils.logging import setup_logging, get_logger
from models.core import OrderBookSnapshot, TradeEvent

# Setup logging
setup_logging(level='INFO', console_output=True)
logger = get_logger(__name__)


class BinanceExample:
    """Example Binance connector usage"""

    def __init__(self):
        self.connector = BinanceConnector()
        self.orderbook_count = 0
        self.trade_count = 0

        # Add data callbacks
        self.connector.add_data_callback(self.on_data_received)
        self.connector.add_status_callback(self.on_status_changed)

    def on_data_received(self, data):
        """Handle received data"""
        if isinstance(data, OrderBookSnapshot):
            self.orderbook_count += 1
            logger.info(
                f"📊 Order Book {self.orderbook_count}: {data.symbol} - "
                f"Mid: ${data.mid_price:.2f}, Spread: ${data.spread:.2f}, "
                f"Bids: {len(data.bids)}, Asks: {len(data.asks)}"
            )

        elif isinstance(data, TradeEvent):
            self.trade_count += 1
            logger.info(
                f"💰 Trade {self.trade_count}: {data.symbol} - "
                f"{data.side.upper()} {data.size} @ ${data.price:.2f}"
            )

    def on_status_changed(self, exchange, status):
        """Handle status changes"""
        logger.info(f"🔄 {exchange} status changed to: {status.value}")

    async def run_example(self):
        """Run the example"""
        try:
            logger.info("🚀 Starting Binance connector example")

            # Connect to Binance
            logger.info("🔌 Connecting to Binance...")
            connected = await self.connector.connect()

            if not connected:
                logger.error("❌ Failed to connect to Binance")
                return

            logger.info("✅ Connected to Binance successfully")

            # Get available symbols
            logger.info("📋 Getting available symbols...")
            symbols = await self.connector.get_symbols()
            logger.info(f"📋 Found {len(symbols)} trading symbols")

            # Show some popular symbols
            popular_symbols = ['BTCUSDT', 'ETHUSDT', 'ADAUSDT', 'BNBUSDT']
            available_popular = [s for s in popular_symbols if s in symbols]
            logger.info(f"📋 Popular symbols available: {available_popular}")

            # Get order book snapshot
            if 'BTCUSDT' in symbols:
                logger.info("📊 Getting BTC order book snapshot...")
                orderbook = await self.connector.get_orderbook_snapshot('BTCUSDT', depth=10)
                if orderbook:
                    logger.info(
                        f"📊 BTC Order Book: Mid=${orderbook.mid_price:.2f}, "
                        f"Spread=${orderbook.spread:.2f}"
                    )

            # Subscribe to real-time data
            logger.info("🔔 Subscribing to real-time data...")

            # Subscribe to BTC order book and trades
            if 'BTCUSDT' in symbols:
                await self.connector.subscribe_orderbook('BTCUSDT')
                await self.connector.subscribe_trades('BTCUSDT')
                logger.info("✅ Subscribed to BTCUSDT order book and trades")

            # Subscribe to ETH order book
            if 'ETHUSDT' in symbols:
                await self.connector.subscribe_orderbook('ETHUSDT')
                logger.info("✅ Subscribed to ETHUSDT order book")

            # Let it run for a while
            logger.info("⏳ Collecting data for 30 seconds...")
            await asyncio.sleep(30)

            # Show statistics
            stats = self.connector.get_binance_stats()
            logger.info("📈 Final Statistics:")
            logger.info(f"   📊 Order books received: {self.orderbook_count}")
            logger.info(f"   💰 Trades received: {self.trade_count}")
            logger.info(f"   📡 Total messages: {stats['message_count']}")
            logger.info(f"   ❌ Errors: {stats['error_count']}")
            logger.info(f"   🔗 Active streams: {stats['active_streams']}")
            logger.info(f"   📋 Subscriptions: {list(stats['subscriptions'].keys())}")

            # Unsubscribe and disconnect
            logger.info("🔌 Cleaning up...")

            if 'BTCUSDT' in self.connector.subscriptions:
                await self.connector.unsubscribe_orderbook('BTCUSDT')
                await self.connector.unsubscribe_trades('BTCUSDT')

            if 'ETHUSDT' in self.connector.subscriptions:
                await self.connector.unsubscribe_orderbook('ETHUSDT')

            await self.connector.disconnect()
            logger.info("✅ Disconnected successfully")

        except KeyboardInterrupt:
            logger.info("⏹️ Interrupted by user")
        except Exception as e:
            logger.error(f"❌ Example failed: {e}")
        finally:
            # Ensure cleanup
            try:
                await self.connector.disconnect()
            except:
                pass


async def main():
    """Main function"""
    example = BinanceExample()
    await example.run_example()


if __name__ == "__main__":
    print("Binance Connector Example")
    print("=" * 25)
    print("This example will:")
    print("1. Connect to Binance WebSocket")
    print("2. Get available trading symbols")
    print("3. Subscribe to real-time order book and trade data")
    print("4. Display received data for 30 seconds")
    print("5. Show statistics and disconnect")
    print()
    print("Press Ctrl+C to stop early")
    print("=" * 25)

    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        print("\n👋 Example stopped by user")
    except Exception as e:
        print(f"\n❌ Example failed: {e}")
        sys.exit(1)
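A hedged way to run the example above from a checkout; the exact working directory depends on how the COBY package is laid out, so treat the path as an assumption.

    # The script inserts the COBY root onto sys.path itself, so it can be run directly
    python COBY/examples/binance_example.py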
284
COBY/examples/multi_exchange_example.py
Normal file
@@ -0,0 +1,284 @@
"""
Example demonstrating multi-exchange connectivity with Binance, Coinbase, and Kraken.
Shows how to connect to multiple exchanges simultaneously and handle their data.
"""

import asyncio
import logging
from datetime import datetime

from ..connectors.binance_connector import BinanceConnector
from ..connectors.coinbase_connector import CoinbaseConnector
from ..connectors.kraken_connector import KrakenConnector
from ..models.core import OrderBookSnapshot, TradeEvent

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class MultiExchangeManager:
    """Manages connections to multiple exchanges."""

    def __init__(self):
        """Initialize multi-exchange manager."""
        # Initialize connectors
        self.connectors = {
            'binance': BinanceConnector(),
            'coinbase': CoinbaseConnector(use_sandbox=True),  # Use sandbox for testing
            'kraken': KrakenConnector()
        }

        # Data tracking
        self.data_received = {
            'binance': {'orderbooks': 0, 'trades': 0},
            'coinbase': {'orderbooks': 0, 'trades': 0},
            'kraken': {'orderbooks': 0, 'trades': 0}
        }

        # Set up data callbacks
        for name, connector in self.connectors.items():
            connector.add_data_callback(lambda data, exchange=name: self._handle_data(exchange, data))

    def _handle_data(self, exchange: str, data):
        """Handle data from any exchange."""
        try:
            if isinstance(data, OrderBookSnapshot):
                self.data_received[exchange]['orderbooks'] += 1
                logger.info(f"📊 {exchange.upper()}: Order book for {data.symbol} - "
                            f"Bids: {len(data.bids)}, Asks: {len(data.asks)}")

                # Show best bid/ask if available
                if data.bids and data.asks:
                    best_bid = max(data.bids, key=lambda x: x.price)
                    best_ask = min(data.asks, key=lambda x: x.price)
                    spread = best_ask.price - best_bid.price
                    logger.info(f"   Best: {best_bid.price} / {best_ask.price} (spread: {spread:.2f})")

            elif isinstance(data, TradeEvent):
                self.data_received[exchange]['trades'] += 1
                logger.info(f"💰 {exchange.upper()}: Trade {data.symbol} - "
                            f"{data.side} {data.size} @ {data.price}")

        except Exception as e:
            logger.error(f"Error handling data from {exchange}: {e}")

    async def connect_all(self):
        """Connect to all exchanges."""
        logger.info("Connecting to all exchanges...")

        connection_tasks = []
        for name, connector in self.connectors.items():
            task = asyncio.create_task(self._connect_exchange(name, connector))
            connection_tasks.append(task)

        # Wait for all connections
        results = await asyncio.gather(*connection_tasks, return_exceptions=True)

        # Report results
        for i, (name, result) in enumerate(zip(self.connectors.keys(), results)):
            if isinstance(result, Exception):
                logger.error(f"❌ Failed to connect to {name}: {result}")
            elif result:
                logger.info(f"✅ Connected to {name}")
            else:
                logger.warning(f"⚠️ Connection to {name} returned False")

    async def _connect_exchange(self, name: str, connector) -> bool:
        """Connect to a single exchange."""
        try:
            return await connector.connect()
        except Exception as e:
            logger.error(f"Error connecting to {name}: {e}")
            return False

    async def subscribe_to_symbols(self, symbols: list):
        """Subscribe to order book and trade data for given symbols."""
        logger.info(f"Subscribing to symbols: {symbols}")

        for symbol in symbols:
            for name, connector in self.connectors.items():
                try:
                    if connector.is_connected:
                        # Subscribe to order book
                        await connector.subscribe_orderbook(symbol)
                        logger.info(f"📈 Subscribed to {symbol} order book on {name}")

                        # Subscribe to trades
                        await connector.subscribe_trades(symbol)
                        logger.info(f"💱 Subscribed to {symbol} trades on {name}")

                        # Small delay between subscriptions
                        await asyncio.sleep(0.5)
                    else:
                        logger.warning(f"⚠️ {name} not connected, skipping {symbol}")

                except Exception as e:
                    logger.error(f"Error subscribing to {symbol} on {name}: {e}")

    async def run_for_duration(self, duration_seconds: int):
        """Run data collection for specified duration."""
        logger.info(f"Running data collection for {duration_seconds} seconds...")

        start_time = datetime.now()

        # Print statistics periodically
        while (datetime.now() - start_time).seconds < duration_seconds:
            await asyncio.sleep(10)  # Print stats every 10 seconds
            self._print_statistics()

        logger.info("Data collection period completed")

    def _print_statistics(self):
        """Print current data statistics."""
        logger.info("📊 Current Statistics:")
        total_orderbooks = 0
        total_trades = 0

        for exchange, stats in self.data_received.items():
            orderbooks = stats['orderbooks']
            trades = stats['trades']
            total_orderbooks += orderbooks
            total_trades += trades

            logger.info(f"   {exchange.upper()}: {orderbooks} order books, {trades} trades")

        logger.info(f"   TOTAL: {total_orderbooks} order books, {total_trades} trades")

    async def disconnect_all(self):
        """Disconnect from all exchanges."""
        logger.info("Disconnecting from all exchanges...")

        for name, connector in self.connectors.items():
            try:
                await connector.disconnect()
                logger.info(f"✅ Disconnected from {name}")
            except Exception as e:
                logger.error(f"Error disconnecting from {name}: {e}")

    def get_connector_stats(self):
        """Get statistics from all connectors."""
        stats = {}
        for name, connector in self.connectors.items():
            try:
                if hasattr(connector, 'get_stats'):
                    stats[name] = connector.get_stats()
                else:
                    stats[name] = {
                        'connected': connector.is_connected,
                        'exchange': connector.exchange_name
                    }
            except Exception as e:
                stats[name] = {'error': str(e)}

        return stats


async def demonstrate_multi_exchange():
    """Demonstrate multi-exchange connectivity."""
    logger.info("=== Multi-Exchange Connectivity Demo ===")

    # Create manager
    manager = MultiExchangeManager()

    try:
        # Connect to all exchanges
        await manager.connect_all()

        # Wait a moment for connections to stabilize
        await asyncio.sleep(2)

        # Subscribe to some popular symbols
        symbols = ['BTCUSDT', 'ETHUSDT']
        await manager.subscribe_to_symbols(symbols)

        # Run data collection for 30 seconds
        await manager.run_for_duration(30)

        # Print final statistics
        logger.info("=== Final Statistics ===")
        manager._print_statistics()

        # Print connector statistics
        logger.info("=== Connector Statistics ===")
        connector_stats = manager.get_connector_stats()
        for exchange, stats in connector_stats.items():
            logger.info(f"{exchange.upper()}: {stats}")

    except Exception as e:
        logger.error(f"Error in multi-exchange demo: {e}")

    finally:
        # Clean up
        await manager.disconnect_all()


async def test_individual_connectors():
    """Test each connector individually."""
    logger.info("=== Individual Connector Tests ===")

    # Test Binance
    logger.info("Testing Binance connector...")
    binance = BinanceConnector()
    try:
        symbols = await binance.get_symbols()
        logger.info(f"Binance symbols available: {len(symbols)}")

        # Test order book snapshot
        orderbook = await binance.get_orderbook_snapshot('BTCUSDT')
        if orderbook:
            logger.info(f"Binance order book: {len(orderbook.bids)} bids, {len(orderbook.asks)} asks")
    except Exception as e:
        logger.error(f"Binance test error: {e}")

    # Test Coinbase
    logger.info("Testing Coinbase connector...")
    coinbase = CoinbaseConnector(use_sandbox=True)
    try:
        symbols = await coinbase.get_symbols()
        logger.info(f"Coinbase symbols available: {len(symbols)}")

        # Test order book snapshot
        orderbook = await coinbase.get_orderbook_snapshot('BTCUSDT')
        if orderbook:
            logger.info(f"Coinbase order book: {len(orderbook.bids)} bids, {len(orderbook.asks)} asks")
    except Exception as e:
        logger.error(f"Coinbase test error: {e}")

    # Test Kraken
    logger.info("Testing Kraken connector...")
    kraken = KrakenConnector()
    try:
        symbols = await kraken.get_symbols()
        logger.info(f"Kraken symbols available: {len(symbols)}")

        # Test order book snapshot
        orderbook = await kraken.get_orderbook_snapshot('BTCUSDT')
        if orderbook:
            logger.info(f"Kraken order book: {len(orderbook.bids)} bids, {len(orderbook.asks)} asks")
    except Exception as e:
        logger.error(f"Kraken test error: {e}")


async def main():
    """Run all demonstrations."""
    logger.info("Starting Multi-Exchange Examples...")

    try:
        # Test individual connectors first
        await test_individual_connectors()

        await asyncio.sleep(2)

        # Then test multi-exchange connectivity
        await demonstrate_multi_exchange()

        logger.info("All multi-exchange examples completed successfully!")

    except Exception as e:
        logger.error(f"Error running examples: {e}")


if __name__ == "__main__":
    # Run the examples
    asyncio.run(main())
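Because this example uses relative imports (from ..connectors ...), it has to be launched as a module from the repository root; the package name COBY in the command below is an assumption based on the file paths in this diff.

    # Run the multi-exchange demo as a module so the relative imports resolve
    python -m COBY.examples.multi_exchange_example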
COBY/examples/orchestrator_integration_example.py | 276 (new file)
@@ -0,0 +1,276 @@
"""
Example showing how to integrate COBY system with existing orchestrator.
Demonstrates drop-in replacement and mode switching capabilities.
"""

import asyncio
import logging
from datetime import datetime, timedelta

# Import the COBY data provider replacement
from ..integration.data_provider_replacement import COBYDataProvider
from ..integration.orchestrator_adapter import MarketTick

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


async def demonstrate_basic_usage():
    """Demonstrate basic COBY data provider usage."""
    logger.info("=== Basic COBY Data Provider Usage ===")

    # Initialize COBY data provider (drop-in replacement)
    data_provider = COBYDataProvider()

    try:
        # Test basic data access methods
        logger.info("Testing basic data access...")

        # Get current price
        current_price = data_provider.get_current_price('BTCUSDT')
        logger.info(f"Current BTC price: ${current_price}")

        # Get historical data
        historical_data = data_provider.get_historical_data('BTCUSDT', '1m', limit=10)
        if historical_data is not None:
            logger.info(f"Historical data shape: {historical_data.shape}")
            logger.info(f"Latest close price: ${historical_data['close'].iloc[-1]}")

        # Get COB data
        cob_data = data_provider.get_latest_cob_data('BTCUSDT')
        if cob_data:
            logger.info(f"Latest COB data: {cob_data}")

        # Get data quality indicators
        quality = data_provider.adapter.get_data_quality_indicators('BTCUSDT')
        logger.info(f"Data quality score: {quality.get('quality_score', 0)}")

    except Exception as e:
        logger.error(f"Error in basic usage: {e}")

    finally:
        await data_provider.close()


async def demonstrate_subscription_system():
    """Demonstrate the subscription system."""
    logger.info("=== COBY Subscription System ===")

    data_provider = COBYDataProvider()

    try:
        # Set up tick subscription
        tick_count = 0

        def tick_callback(tick: MarketTick):
            nonlocal tick_count
            tick_count += 1
            logger.info(f"Received tick #{tick_count}: {tick.symbol} @ ${tick.price}")

        # Subscribe to ticks
        subscriber_id = data_provider.subscribe_to_ticks(
            tick_callback,
            symbols=['BTCUSDT', 'ETHUSDT'],
            subscriber_name='example_subscriber'
        )

        logger.info(f"Subscribed to ticks with ID: {subscriber_id}")

        # Set up COB data subscription
        cob_count = 0

        def cob_callback(symbol: str, data: dict):
            nonlocal cob_count
            cob_count += 1
            logger.info(f"Received COB data #{cob_count} for {symbol}")

        cob_subscriber_id = data_provider.subscribe_to_cob_raw_ticks(cob_callback)
        logger.info(f"Subscribed to COB data with ID: {cob_subscriber_id}")

        # Wait for some data
        logger.info("Waiting for data updates...")
        await asyncio.sleep(10)

        # Unsubscribe
        data_provider.unsubscribe(subscriber_id)
        data_provider.unsubscribe(cob_subscriber_id)
        logger.info("Unsubscribed from all feeds")

    except Exception as e:
        logger.error(f"Error in subscription demo: {e}")

    finally:
        await data_provider.close()


async def demonstrate_mode_switching():
    """Demonstrate switching between live and replay modes."""
    logger.info("=== COBY Mode Switching ===")

    data_provider = COBYDataProvider()

    try:
        # Start in live mode
        current_mode = data_provider.get_current_mode()
        logger.info(f"Current mode: {current_mode}")

        # Get some live data
        live_price = data_provider.get_current_price('BTCUSDT')
        logger.info(f"Live price: ${live_price}")

        # Switch to replay mode
        logger.info("Switching to replay mode...")
        start_time = datetime.utcnow() - timedelta(hours=1)
        end_time = datetime.utcnow() - timedelta(minutes=30)

        success = await data_provider.switch_to_replay_mode(
            start_time=start_time,
            end_time=end_time,
            speed=10.0,  # 10x speed
            symbols=['BTCUSDT']
        )

        if success:
            logger.info("Successfully switched to replay mode")

            # Get replay status
            replay_status = data_provider.get_replay_status()
            if replay_status:
                logger.info(f"Replay progress: {replay_status['progress']:.2%}")
                logger.info(f"Replay speed: {replay_status['speed']}x")

            # Wait for some replay data
            await asyncio.sleep(5)

            # Get data during replay
            replay_price = data_provider.get_current_price('BTCUSDT')
            logger.info(f"Replay price: ${replay_price}")

        # Switch back to live mode
        logger.info("Switching back to live mode...")
        success = await data_provider.switch_to_live_mode()

        if success:
            logger.info("Successfully switched back to live mode")
            current_mode = data_provider.get_current_mode()
            logger.info(f"Current mode: {current_mode}")

    except Exception as e:
        logger.error(f"Error in mode switching demo: {e}")

    finally:
        await data_provider.close()


async def demonstrate_orchestrator_compatibility():
    """Demonstrate compatibility with orchestrator interface."""
    logger.info("=== Orchestrator Compatibility ===")

    data_provider = COBYDataProvider()

    try:
        # Test methods that orchestrator uses
        logger.info("Testing orchestrator-compatible methods...")

        # Build base data input (used by ML models)
        base_data = data_provider.build_base_data_input('BTCUSDT')
        if base_data:
            features = base_data.get_feature_vector()
            logger.info(f"Feature vector shape: {features.shape}")

        # Get feature matrix (used by ML models)
        feature_matrix = data_provider.get_feature_matrix(
            'BTCUSDT',
            timeframes=['1m', '5m'],
            window_size=20
        )
        if feature_matrix is not None:
            logger.info(f"Feature matrix shape: {feature_matrix.shape}")

        # Get pivot bounds (used for normalization)
        pivot_bounds = data_provider.get_pivot_bounds('BTCUSDT')
        if pivot_bounds:
            logger.info(f"Price range: ${pivot_bounds.price_min:.2f} - ${pivot_bounds.price_max:.2f}")

        # Get COB imbalance (used for market microstructure analysis)
        imbalance = data_provider.get_current_cob_imbalance('BTCUSDT')
        logger.info(f"Order book imbalance: {imbalance['imbalance']:.3f}")

        # Get system status
        status = data_provider.get_cached_data_summary()
        logger.info(f"System status: {status}")

        # Test compatibility methods
        data_provider.start_centralized_data_collection()
        data_provider.invalidate_ohlcv_cache('BTCUSDT')

        logger.info("All orchestrator compatibility tests passed!")

    except Exception as e:
        logger.error(f"Error in compatibility demo: {e}")

    finally:
        await data_provider.close()


async def demonstrate_performance_monitoring():
    """Demonstrate performance monitoring capabilities."""
    logger.info("=== Performance Monitoring ===")

    data_provider = COBYDataProvider()

    try:
        # Get initial statistics
        initial_stats = data_provider.get_subscriber_stats()
        logger.info(f"Initial stats: {initial_stats}")

        # Get data quality information
        quality_info = data_provider.get_cob_data_quality()
        logger.info(f"Data quality info: {quality_info}")

        # Get WebSocket status
        ws_status = data_provider.get_cob_websocket_status()
        logger.info(f"WebSocket status: {ws_status}")

        # Monitor system metadata
        system_metadata = data_provider.adapter.get_system_metadata()
        logger.info(f"System components health: {system_metadata['components']}")
        logger.info(f"Active subscribers: {system_metadata['active_subscribers']}")

    except Exception as e:
        logger.error(f"Error in performance monitoring: {e}")

    finally:
        await data_provider.close()


async def main():
    """Run all demonstration examples."""
    logger.info("Starting COBY Integration Examples...")

    try:
        # Run all demonstrations
        await demonstrate_basic_usage()
        await asyncio.sleep(1)

        await demonstrate_subscription_system()
        await asyncio.sleep(1)

        await demonstrate_mode_switching()
        await asyncio.sleep(1)

        await demonstrate_orchestrator_compatibility()
        await asyncio.sleep(1)

        await demonstrate_performance_monitoring()

        logger.info("All COBY integration examples completed successfully!")

    except Exception as e:
        logger.error(f"Error running examples: {e}")


if __name__ == "__main__":
    # Run the examples
    asyncio.run(main())
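The example file exercises COBYDataProvider directly; the "drop-in replacement" wiring it describes would look roughly like the sketch below. The orchestrator class and its data_provider keyword are assumptions used for illustration, not symbols defined in this diff:

from COBY.integration.data_provider_replacement import COBYDataProvider

def build_orchestrator(orchestrator_cls):
    # Hypothetical factory: construct the COBY-backed provider and hand it to
    # whatever orchestrator class the host application already uses.
    provider = COBYDataProvider()
    return orchestrator_cls(data_provider=provider)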
COBY/health_check.py | 104 (new file)
@@ -0,0 +1,104 @@
#!/usr/bin/env python3
"""
Health check script for COBY application
Used by Docker health checks and monitoring systems
"""

import sys
import os
import requests
import json
from datetime import datetime

def check_health():
    """Perform health check on COBY application"""
    try:
        # Check main API endpoint
        response = requests.get('http://localhost:8080/health', timeout=5)

        if response.status_code == 200:
            health_data = response.json()

            # Basic health check passed
            print(f"✅ API Health Check: PASSED")
            print(f"   Status: {health_data.get('status', 'unknown')}")
            print(f"   Timestamp: {health_data.get('timestamp', 'unknown')}")

            # Check individual components
            components = health_data.get('components', {})
            all_healthy = True

            for component, status in components.items():
                if status.get('healthy', False):
                    print(f"✅ {component}: HEALTHY")
                else:
                    print(f"❌ {component}: UNHEALTHY - {status.get('error', 'unknown error')}")
                    all_healthy = False

            if all_healthy:
                print("\n🎉 Overall Health: HEALTHY")
                return 0
            else:
                print("\n⚠️ Overall Health: DEGRADED")
                return 1

        else:
            print(f"❌ API Health Check: FAILED (HTTP {response.status_code})")
            return 1

    except requests.exceptions.ConnectionError:
        print("❌ API Health Check: FAILED (Connection refused)")
        return 1
    except requests.exceptions.Timeout:
        print("❌ API Health Check: FAILED (Timeout)")
        return 1
    except Exception as e:
        print(f"❌ API Health Check: FAILED ({str(e)})")
        return 1

def check_websocket():
    """Check WebSocket server health"""
    try:
        # Simple TCP connection check to WebSocket port
        import socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(5)
        result = sock.connect_ex(('localhost', 8081))
        sock.close()

        if result == 0:
            print("✅ WebSocket Server: ACCESSIBLE")
            return True
        else:
            print("❌ WebSocket Server: NOT ACCESSIBLE")
            return False

    except Exception as e:
        print(f"❌ WebSocket Server: ERROR ({str(e)})")
        return False

def main():
    """Main health check function"""
    print(f"COBY Health Check - {datetime.now().isoformat()}")
    print("=" * 50)

    # Check API health
    api_healthy = check_health() == 0

    # Check WebSocket
    ws_healthy = check_websocket()

    print("=" * 50)

    if api_healthy and ws_healthy:
        print("🎉 COBY System: FULLY HEALTHY")
        return 0
    elif api_healthy:
        print("⚠️ COBY System: PARTIALLY HEALTHY (API only)")
        return 1
    else:
        print("❌ COBY System: UNHEALTHY")
        return 1

if __name__ == "__main__":
    sys.exit(main())
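check_health() above only relies on a few fields of the /health response: a top-level status and timestamp, plus a components mapping whose values carry healthy and an optional error. A payload exercising both branches of the component loop might look like the sketch below (all values are illustrative, not taken from a real COBY response):

EXAMPLE_HEALTH_PAYLOAD = {
    "status": "ok",
    "timestamp": "2024-01-01T00:00:00Z",
    "components": {
        "storage": {"healthy": True},
        "redis": {"healthy": True},
        "connectors": {"healthy": False, "error": "binance websocket reconnecting"},
    },
}
# With one unhealthy component the script would print "Overall Health: DEGRADED"
# and exit with status 1; with all components healthy it exits with 0.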
COBY/integration/__init__.py | 8 (new file)
@@ -0,0 +1,8 @@
"""
Integration layer for the COBY multi-exchange data aggregation system.
Provides compatibility interfaces for seamless integration with existing systems.
"""

from .orchestrator_adapter import COBYOrchestratorAdapter, MarketTick, PivotBounds

__all__ = ['COBYOrchestratorAdapter', 'MarketTick', 'PivotBounds']
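With this __init__ in place, callers can import the adapter types from the package root rather than the submodule; both forms below refer to the same objects (having the COBY package on the import path is assumed):

from COBY.integration import COBYOrchestratorAdapter, MarketTick, PivotBounds
# equivalent to the longer form:
from COBY.integration.orchestrator_adapter import COBYOrchestratorAdapter, MarketTick, PivotBounds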
COBY/integration/data_provider_replacement.py | 390 (new file)
@@ -0,0 +1,390 @@
"""
Drop-in replacement for the existing DataProvider class using COBY system.
Provides full compatibility with the orchestrator interface.
"""

import asyncio
import logging
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any, Callable, Union
from pathlib import Path

from .orchestrator_adapter import COBYOrchestratorAdapter, MarketTick, PivotBounds
from ..config import Config
from ..utils.logging import get_logger

logger = get_logger(__name__)


class COBYDataProvider:
    """
    Drop-in replacement for DataProvider using COBY system.

    Provides full compatibility with existing orchestrator interface while
    leveraging COBY's multi-exchange data aggregation capabilities.
    """

    def __init__(self, config_path: Optional[str] = None):
        """
        Initialize COBY data provider.

        Args:
            config_path: Optional path to configuration file
        """
        # Initialize COBY configuration
        self.config = Config()

        # Initialize COBY adapter
        self.adapter = COBYOrchestratorAdapter(self.config)

        # Initialize adapter components
        asyncio.run(self.adapter._initialize_components())

        # Compatibility attributes
        self.symbols = self.config.exchanges.symbols
        self.exchanges = self.config.exchanges.exchanges

        logger.info("COBY data provider initialized")

    # === CORE DATA METHODS ===

    def get_historical_data(self, symbol: str, timeframe: str, limit: int = 1000,
                            refresh: bool = False) -> Optional[pd.DataFrame]:
        """Get historical OHLCV data."""
        return self.adapter.get_historical_data(symbol, timeframe, limit, refresh)

    def get_current_price(self, symbol: str) -> Optional[float]:
        """Get current price for a symbol."""
        return self.adapter.get_current_price(symbol)

    def get_live_price_from_api(self, symbol: str) -> Optional[float]:
        """Get live price from API (low-latency method)."""
        return self.adapter.get_live_price_from_api(symbol)

    def build_base_data_input(self, symbol: str) -> Optional[Any]:
        """Build base data input for ML models."""
        return self.adapter.build_base_data_input(symbol)

    # === COB DATA METHODS ===

    def get_cob_raw_ticks(self, symbol: str, count: int = 1000) -> List[Dict]:
        """Get raw COB ticks for a symbol."""
        return self.adapter.get_cob_raw_ticks(symbol, count)

    def get_cob_1s_aggregated(self, symbol: str, count: int = 300) -> List[Dict]:
        """Get 1s aggregated COB data with $1 price buckets."""
        return self.adapter.get_cob_1s_aggregated(symbol, count)

    def get_latest_cob_data(self, symbol: str) -> Optional[Dict]:
        """Get latest COB raw tick for a symbol."""
        return self.adapter.get_latest_cob_data(symbol)

    def get_latest_cob_aggregated(self, symbol: str) -> Optional[Dict]:
        """Get latest 1s aggregated COB data for a symbol."""
        return self.adapter.get_latest_cob_aggregated(symbol)

    def get_current_cob_imbalance(self, symbol: str) -> Dict[str, float]:
        """Get current COB imbalance metrics for a symbol."""
        try:
            latest_data = self.get_latest_cob_data(symbol)
            if not latest_data:
                return {'bid_volume': 0.0, 'ask_volume': 0.0, 'imbalance': 0.0}

            bid_volume = latest_data.get('bid_volume', 0.0)
            ask_volume = latest_data.get('ask_volume', 0.0)
            total_volume = bid_volume + ask_volume

            imbalance = 0.0
            if total_volume > 0:
                imbalance = (bid_volume - ask_volume) / total_volume

            return {
                'bid_volume': bid_volume,
                'ask_volume': ask_volume,
                'imbalance': imbalance
            }

        except Exception as e:
            logger.error(f"Error getting COB imbalance for {symbol}: {e}")
            return {'bid_volume': 0.0, 'ask_volume': 0.0, 'imbalance': 0.0}
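
    # Worked example for get_current_cob_imbalance() above (illustrative
    # numbers, not live data): with bid_volume=60.0 and ask_volume=40.0,
    # total_volume is 100.0 and imbalance = (60.0 - 40.0) / 100.0 = 0.2,
    # i.e. the book is tilted 20% toward the bids; +1.0 would mean bids only,
    # -1.0 asks only, and 0.0 a balanced book.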

    def get_cob_price_buckets(self, symbol: str, timeframe_seconds: int = 60) -> Dict:
        """Get price bucket analysis for a timeframe."""
        try:
            # Get aggregated data for the timeframe
            count = timeframe_seconds  # 1 second per data point
            aggregated_data = self.get_cob_1s_aggregated(symbol, count)

            if not aggregated_data:
                return {}

            # Combine all buckets
            combined_bid_buckets = {}
            combined_ask_buckets = {}

            for data_point in aggregated_data:
                for price, volume in data_point.get('bid_buckets', {}).items():
                    combined_bid_buckets[price] = combined_bid_buckets.get(price, 0) + volume

                for price, volume in data_point.get('ask_buckets', {}).items():
                    combined_ask_buckets[price] = combined_ask_buckets.get(price, 0) + volume

            return {
                'symbol': symbol,
                'timeframe_seconds': timeframe_seconds,
                'bid_buckets': combined_bid_buckets,
                'ask_buckets': combined_ask_buckets,
                'timestamp': datetime.utcnow().isoformat()
            }

        except Exception as e:
            logger.error(f"Error getting price buckets for {symbol}: {e}")
            return {}

    def get_cob_websocket_status(self) -> Dict[str, Any]:
        """Get COB WebSocket status."""
        try:
            system_metadata = self.adapter.get_system_metadata()
            connectors = system_metadata.get('components', {}).get('connectors', {})

            return {
                'connected': any(connectors.values()),
                'exchanges': connectors,
                'last_update': datetime.utcnow().isoformat(),
                'mode': system_metadata.get('mode', 'unknown')
            }

        except Exception as e:
            logger.error(f"Error getting WebSocket status: {e}")
            return {'connected': False, 'error': str(e)}

    # === SUBSCRIPTION METHODS ===

    def subscribe_to_ticks(self, callback: Callable[[MarketTick], None],
                           symbols: List[str] = None,
                           subscriber_name: str = None) -> str:
        """Subscribe to tick data updates."""
        return self.adapter.subscribe_to_ticks(callback, symbols, subscriber_name)

    def subscribe_to_cob_raw_ticks(self, callback: Callable[[str, Dict], None]) -> str:
        """Subscribe to raw COB tick updates."""
        return self.adapter.subscribe_to_cob_raw_ticks(callback)

    def subscribe_to_cob_aggregated(self, callback: Callable[[str, Dict], None]) -> str:
        """Subscribe to 1s aggregated COB updates."""
        return self.adapter.subscribe_to_cob_aggregated(callback)

    def subscribe_to_training_data(self, callback: Callable[[str, dict], None]) -> str:
        """Subscribe to training data updates."""
        return self.adapter.subscribe_to_training_data(callback)

    def subscribe_to_model_predictions(self, callback: Callable[[str, dict], None]) -> str:
        """Subscribe to model prediction updates."""
        return self.adapter.subscribe_to_model_predictions(callback)

    def unsubscribe(self, subscriber_id: str) -> bool:
        """Unsubscribe from data feeds."""
        return self.adapter.unsubscribe(subscriber_id)

    # === MODE SWITCHING ===

    async def switch_to_live_mode(self) -> bool:
        """Switch to live data mode."""
        return await self.adapter.switch_to_live_mode()

    async def switch_to_replay_mode(self, start_time: datetime, end_time: datetime,
                                    speed: float = 1.0, symbols: List[str] = None) -> bool:
        """Switch to replay data mode."""
        return await self.adapter.switch_to_replay_mode(start_time, end_time, speed, symbols)

    def get_current_mode(self) -> str:
        """Get current data mode."""
        return self.adapter.get_current_mode()

    def get_replay_status(self) -> Optional[Dict[str, Any]]:
        """Get replay session status."""
        return self.adapter.get_replay_status()

    # === COMPATIBILITY METHODS ===

    def start_centralized_data_collection(self) -> None:
        """Start centralized data collection."""
        self.adapter.start_centralized_data_collection()

    def start_training_data_collection(self) -> None:
        """Start training data collection."""
        self.adapter.start_training_data_collection()

    def invalidate_ohlcv_cache(self, symbol: str) -> None:
        """Invalidate OHLCV cache for a symbol."""
        self.adapter.invalidate_ohlcv_cache(symbol)

    def get_latest_candles(self, symbol: str, timeframe: str, limit: int = 100) -> pd.DataFrame:
        """Get the latest candles from cached data."""
        df = self.get_historical_data(symbol, timeframe, limit)
        return df if df is not None else pd.DataFrame()

    def get_price_at_index(self, symbol: str, index: int, timeframe: str = '1m') -> Optional[float]:
        """Get price at specific index for backtesting."""
        try:
            df = self.get_historical_data(symbol, timeframe, limit=index + 10)
            if df is not None and len(df) > index:
                return float(df.iloc[-(index + 1)]['close'])
            return None
        except Exception as e:
            logger.error(f"Error getting price at index {index} for {symbol}: {e}")
            return None

    # === PIVOT AND MARKET STRUCTURE (MOCK IMPLEMENTATIONS) ===

    def get_pivot_bounds(self, symbol: str) -> Optional[PivotBounds]:
        """Get pivot bounds for a symbol (mock implementation)."""
        try:
            # Get recent price data
            df = self.get_historical_data(symbol, '1m', limit=1000)
            if df is None or df.empty:
                return None

            # Calculate basic pivot levels
            high_prices = df['high'].values
            low_prices = df['low'].values
            volumes = df['volume'].values

            price_max = float(np.max(high_prices))
            price_min = float(np.min(low_prices))
            volume_max = float(np.max(volumes))
            volume_min = float(np.min(volumes))

            # Simple support/resistance calculation
            price_range = price_max - price_min
            support_levels = [price_min + i * price_range / 10 for i in range(1, 5)]
            resistance_levels = [price_max - i * price_range / 10 for i in range(1, 5)]

            return PivotBounds(
                symbol=symbol,
                price_max=price_max,
                price_min=price_min,
                volume_max=volume_max,
                volume_min=volume_min,
                pivot_support_levels=support_levels,
                pivot_resistance_levels=resistance_levels,
                pivot_context={'method': 'simple'},
                created_timestamp=datetime.utcnow(),
                data_period_start=df.index[0].to_pydatetime(),
                data_period_end=df.index[-1].to_pydatetime(),
                total_candles_analyzed=len(df)
            )

        except Exception as e:
            logger.error(f"Error getting pivot bounds for {symbol}: {e}")
            return None
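
    # Worked example for the simple support/resistance calculation above
    # (illustrative numbers): with price_min=100.0 and price_max=200.0 the
    # price_range is 100.0, so support_levels = [110.0, 120.0, 130.0, 140.0]
    # and resistance_levels = [190.0, 180.0, 170.0, 160.0], i.e. four evenly
    # spaced levels stepping one tenth of the range in from each extreme.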

    def get_pivot_normalized_features(self, symbol: str, df: pd.DataFrame) -> Optional[pd.DataFrame]:
        """Get dataframe with pivot-normalized features."""
        try:
            pivot_bounds = self.get_pivot_bounds(symbol)
            if not pivot_bounds:
                return df

            # Add normalized features
            df_copy = df.copy()
            price_range = pivot_bounds.get_price_range()

            if price_range > 0:
                df_copy['normalized_close'] = (df_copy['close'] - pivot_bounds.price_min) / price_range
                df_copy['normalized_high'] = (df_copy['high'] - pivot_bounds.price_min) / price_range
                df_copy['normalized_low'] = (df_copy['low'] - pivot_bounds.price_min) / price_range

            return df_copy

        except Exception as e:
            logger.error(f"Error getting pivot normalized features for {symbol}: {e}")
            return df

    # === FEATURE EXTRACTION METHODS ===

    def get_feature_matrix(self, symbol: str, timeframes: List[str] = None,
                           window_size: int = 20) -> Optional[np.ndarray]:
        """Get feature matrix for ML models."""
        try:
            if not timeframes:
                timeframes = ['1m', '5m', '15m']

            features = []

            for timeframe in timeframes:
                df = self.get_historical_data(symbol, timeframe, limit=window_size + 10)
                if df is not None and len(df) >= window_size:
                    # Extract basic features
                    closes = df['close'].values[-window_size:]
                    volumes = df['volume'].values[-window_size:]

                    # Normalize features
                    close_mean = np.mean(closes)
                    close_std = np.std(closes) + 1e-8
                    normalized_closes = (closes - close_mean) / close_std

                    volume_mean = np.mean(volumes)
                    volume_std = np.std(volumes) + 1e-8
                    normalized_volumes = (volumes - volume_mean) / volume_std

                    features.extend(normalized_closes)
                    features.extend(normalized_volumes)

            if features:
                return np.array(features, dtype=np.float32)

            return None

        except Exception as e:
            logger.error(f"Error getting feature matrix for {symbol}: {e}")
            return None

    # === SYSTEM STATUS AND STATISTICS ===

    def get_cached_data_summary(self) -> Dict[str, Any]:
        """Get summary of cached data."""
        try:
            system_metadata = self.adapter.get_system_metadata()
            return {
                'system': 'COBY',
                'mode': system_metadata.get('mode'),
                'statistics': system_metadata.get('statistics', {}),
                'components_healthy': system_metadata.get('components', {}),
                'active_subscribers': system_metadata.get('active_subscribers', 0)
            }
        except Exception as e:
            logger.error(f"Error getting cached data summary: {e}")
            return {'error': str(e)}

    def get_cob_data_quality(self) -> Dict[str, Any]:
        """Get COB data quality information."""
        try:
            quality_info = {}

            for symbol in self.symbols:
                quality_info[symbol] = self.adapter.get_data_quality_indicators(symbol)

            return quality_info

        except Exception as e:
            logger.error(f"Error getting COB data quality: {e}")
            return {'error': str(e)}

    def get_subscriber_stats(self) -> Dict[str, Any]:
        """Get subscriber statistics."""
        return self.adapter.get_stats()

    # === CLEANUP ===

    async def close(self) -> None:
        """Close all connections and cleanup."""
        await self.adapter.close()

    def __del__(self):
        """Cleanup on deletion."""
        try:
            asyncio.run(self.close())
        except:
            pass
COBY/integration/orchestrator_adapter.py | 888 (new file)
@@ -0,0 +1,888 @@
"""
Orchestrator integration adapter for COBY system.
Provides compatibility layer for seamless integration with existing orchestrator.
"""

import asyncio
import logging
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any, Callable, Union
from dataclasses import dataclass, field
import uuid
from collections import deque
import threading

from ..storage.storage_manager import StorageManager
from ..replay.replay_manager import HistoricalReplayManager
from ..caching.redis_manager import RedisManager
from ..aggregation.aggregation_engine import StandardAggregationEngine
from ..processing.data_processor import StandardDataProcessor
from ..connectors.binance_connector import BinanceConnector
from ..models.core import OrderBookSnapshot, TradeEvent, HeatmapData, ReplayStatus
from ..utils.logging import get_logger, set_correlation_id
from ..utils.exceptions import IntegrationError, ValidationError
from ..config import Config

logger = get_logger(__name__)


@dataclass
class MarketTick:
    """Market tick data structure compatible with orchestrator"""
    symbol: str
    price: float
    volume: float
    timestamp: datetime
    side: str = "unknown"
    exchange: str = "binance"
    subscriber_name: str = "unknown"


@dataclass
class PivotBounds:
    """Pivot bounds structure compatible with orchestrator"""
    symbol: str
    price_max: float
    price_min: float
    volume_max: float
    volume_min: float
    pivot_support_levels: List[float]
    pivot_resistance_levels: List[float]
    pivot_context: Dict[str, Any]
    created_timestamp: datetime
    data_period_start: datetime
    data_period_end: datetime
    total_candles_analyzed: int

    def get_price_range(self) -> float:
        return self.price_max - self.price_min

    def normalize_price(self, price: float) -> float:
        return (price - self.price_min) / self.get_price_range()

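# Quick illustration of PivotBounds.normalize_price() (made-up numbers): with
# price_min=100.0 and price_max=200.0, normalize_price(150.0) returns
# (150.0 - 100.0) / 100.0 = 0.5, i.e. prices are mapped onto [0, 1] across the
# analysed range. Note the division assumes price_max > price_min.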

class COBYOrchestratorAdapter:
    """
    Adapter that makes COBY system compatible with existing orchestrator interface.

    Provides:
    - Data provider interface compatibility
    - Live/replay mode switching
    - Data quality indicators
    - Subscription management
    - Caching and performance optimization
    """

    def __init__(self, config: Config):
        """
        Initialize orchestrator adapter.

        Args:
            config: COBY system configuration
        """
        self.config = config

        # Core components
        self.storage_manager = StorageManager(config)
        self.replay_manager = HistoricalReplayManager(self.storage_manager, config)
        self.redis_manager = RedisManager()
        self.aggregation_engine = StandardAggregationEngine()
        self.data_processor = StandardDataProcessor()

        # Exchange connectors
        self.connectors = {
            'binance': BinanceConnector()
        }

        # Mode management
        self.mode = 'live'  # 'live' or 'replay'
        self.current_replay_session = None

        # Subscription management
        self.subscribers = {
            'ticks': {},
            'cob_raw': {},
            'cob_aggregated': {},
            'training_data': {},
            'model_predictions': {}
        }
        self.subscriber_lock = threading.Lock()

        # Data caching
        self.tick_cache = {}
        self.orderbook_cache = {}
        self.price_cache = {}

        # Statistics
        self.stats = {
            'ticks_processed': 0,
            'orderbooks_processed': 0,
            'subscribers_active': 0,
            'cache_hits': 0,
            'cache_misses': 0
        }

        # Components are initialized asynchronously; callers are expected to
        # await _initialize_components() (COBYDataProvider does this via
        # asyncio.run()), so no synchronous call is made here.

        logger.info("COBY orchestrator adapter initialized")

    async def _initialize_components(self):
        """Initialize all COBY components."""
        try:
            # Initialize storage
            await self.storage_manager.initialize()

            # Initialize Redis cache
            await self.redis_manager.initialize()

            # Initialize connectors
            for name, connector in self.connectors.items():
                await connector.connect()
                connector.add_data_callback(self._handle_connector_data)

            logger.info("COBY components initialized successfully")

        except Exception as e:
            logger.error(f"Failed to initialize COBY components: {e}")
            raise IntegrationError(f"Component initialization failed: {e}")

    # === ORCHESTRATOR COMPATIBILITY METHODS ===

    def get_historical_data(self, symbol: str, timeframe: str, limit: int = 1000,
                            refresh: bool = False) -> Optional[pd.DataFrame]:
        """Get historical OHLCV data compatible with orchestrator interface."""
        try:
            set_correlation_id()

            # Convert timeframe to minutes
            timeframe_minutes = self._parse_timeframe(timeframe)
            if not timeframe_minutes:
                logger.warning(f"Unsupported timeframe: {timeframe}")
                return None

            # Calculate time range
            end_time = datetime.utcnow()
            start_time = end_time - timedelta(minutes=timeframe_minutes * limit)

            # Get data from storage
            if self.mode == 'replay' and self.current_replay_session:
                # Use replay data
                data = asyncio.run(self.storage_manager.get_historical_data(
                    symbol, start_time, end_time, 'ohlcv'
                ))
            else:
                # Use live data from cache or storage
                cache_key = f"ohlcv:{symbol}:{timeframe}:{limit}"
                cached_data = asyncio.run(self.redis_manager.get(cache_key))

                if cached_data and not refresh:
                    self.stats['cache_hits'] += 1
                    return pd.DataFrame(cached_data)

                self.stats['cache_misses'] += 1
                data = asyncio.run(self.storage_manager.get_historical_data(
                    symbol, start_time, end_time, 'ohlcv'
                ))

                # Cache the result
                if data:
                    asyncio.run(self.redis_manager.set(cache_key, data, ttl=60))

            if not data:
                return None

            # Convert to DataFrame compatible with orchestrator
            df = pd.DataFrame(data)
            if not df.empty:
                df['timestamp'] = pd.to_datetime(df['timestamp'])
                df.set_index('timestamp', inplace=True)
                df = df.sort_index()

            return df

        except Exception as e:
            logger.error(f"Error getting historical data for {symbol}: {e}")
            return None

    def get_current_price(self, symbol: str) -> Optional[float]:
        """Get current price for a symbol."""
        try:
            # Check cache first
            if symbol in self.price_cache:
                cached_price, timestamp = self.price_cache[symbol]
                if (datetime.utcnow() - timestamp).seconds < 5:  # 5 second cache
                    return cached_price

            # Get latest orderbook
            latest_orderbook = asyncio.run(
                self.storage_manager.get_latest_orderbook(symbol)
            )

            if latest_orderbook and latest_orderbook.get('mid_price'):
                price = float(latest_orderbook['mid_price'])
                self.price_cache[symbol] = (price, datetime.utcnow())
                return price

            return None

        except Exception as e:
            logger.error(f"Error getting current price for {symbol}: {e}")
            return None

    def get_live_price_from_api(self, symbol: str) -> Optional[float]:
        """Get live price from API (low-latency method)."""
        return self.get_current_price(symbol)

    def build_base_data_input(self, symbol: str) -> Optional[Any]:
        """Build base data input compatible with orchestrator models."""
        try:
            # This would need to be implemented based on the specific
            # BaseDataInput class used by the orchestrator
            # For now, return a mock object that provides the interface

            class MockBaseDataInput:
                def __init__(self, symbol: str, adapter):
                    self.symbol = symbol
                    self.adapter = adapter

                def get_feature_vector(self) -> np.ndarray:
                    # Return feature vector from COBY data
                    return self.adapter._get_feature_vector(self.symbol)

            return MockBaseDataInput(symbol, self)

        except Exception as e:
            logger.error(f"Error building base data input for {symbol}: {e}")
            return None

    def _get_feature_vector(self, symbol: str) -> np.ndarray:
        """Get feature vector for ML models."""
        try:
            # Get latest market data
            latest_orderbook = asyncio.run(
                self.storage_manager.get_latest_orderbook(symbol)
            )

            if not latest_orderbook:
                return np.zeros(100, dtype=np.float32)  # Default size

            # Extract features from orderbook
            features = []

            # Price features
            if latest_orderbook.get('mid_price'):
                features.append(float(latest_orderbook['mid_price']))
            if latest_orderbook.get('spread'):
                features.append(float(latest_orderbook['spread']))

            # Volume features
            if latest_orderbook.get('bid_volume'):
                features.append(float(latest_orderbook['bid_volume']))
            if latest_orderbook.get('ask_volume'):
                features.append(float(latest_orderbook['ask_volume']))

            # Pad or truncate to expected size
            target_size = 100
            if len(features) < target_size:
                features.extend([0.0] * (target_size - len(features)))
            elif len(features) > target_size:
                features = features[:target_size]

            return np.array(features, dtype=np.float32)

        except Exception as e:
            logger.error(f"Error getting feature vector for {symbol}: {e}")
            return np.zeros(100, dtype=np.float32)

    # === COB DATA METHODS ===

    def get_cob_raw_ticks(self, symbol: str, count: int = 1000) -> List[Dict]:
        """Get raw COB ticks for a symbol."""
        try:
            # Get recent orderbook snapshots
            end_time = datetime.utcnow()
            start_time = end_time - timedelta(minutes=15)  # 15 minutes of data

            data = asyncio.run(self.storage_manager.get_historical_data(
                symbol, start_time, end_time, 'orderbook'
            ))

            if not data:
                return []

            # Convert to COB tick format
            ticks = []
            for item in data[-count:]:
                tick = {
                    'symbol': item['symbol'],
                    'timestamp': item['timestamp'].isoformat(),
                    'mid_price': item.get('mid_price'),
                    'spread': item.get('spread'),
                    'bid_volume': item.get('bid_volume'),
                    'ask_volume': item.get('ask_volume'),
                    'exchange': item['exchange']
                }
                ticks.append(tick)

            return ticks

        except Exception as e:
            logger.error(f"Error getting COB raw ticks for {symbol}: {e}")
            return []

    def get_cob_1s_aggregated(self, symbol: str, count: int = 300) -> List[Dict]:
        """Get 1s aggregated COB data with $1 price buckets."""
        try:
            # Get heatmap data
            bucket_size = self.config.aggregation.bucket_size
            start_time = datetime.utcnow() - timedelta(seconds=count)

            heatmap_data = asyncio.run(
                self.storage_manager.get_heatmap_data(symbol, bucket_size, start_time)
            )

            if not heatmap_data:
                return []

            # Group by timestamp and aggregate
            aggregated = {}
            for item in heatmap_data:
                timestamp = item['timestamp']
                if timestamp not in aggregated:
                    aggregated[timestamp] = {
                        'timestamp': timestamp.isoformat(),
                        'symbol': symbol,
                        'bid_buckets': {},
                        'ask_buckets': {},
                        'total_bid_volume': 0,
                        'total_ask_volume': 0
                    }

                price_bucket = float(item['price_bucket'])
                volume = float(item['volume'])
                side = item['side']

                if side == 'bid':
                    aggregated[timestamp]['bid_buckets'][price_bucket] = volume
                    aggregated[timestamp]['total_bid_volume'] += volume
                else:
                    aggregated[timestamp]['ask_buckets'][price_bucket] = volume
                    aggregated[timestamp]['total_ask_volume'] += volume

            # Return sorted by timestamp
            result = list(aggregated.values())
            result.sort(key=lambda x: x['timestamp'])
            return result[-count:]

        except Exception as e:
            logger.error(f"Error getting COB 1s aggregated data for {symbol}: {e}")
            return []

    def get_latest_cob_data(self, symbol: str) -> Optional[Dict]:
        """Get latest COB raw tick for a symbol."""
        try:
            latest_orderbook = asyncio.run(
                self.storage_manager.get_latest_orderbook(symbol)
            )

            if not latest_orderbook:
                return None

            return {
                'symbol': symbol,
                'timestamp': latest_orderbook['timestamp'].isoformat(),
                'mid_price': latest_orderbook.get('mid_price'),
                'spread': latest_orderbook.get('spread'),
                'bid_volume': latest_orderbook.get('bid_volume'),
                'ask_volume': latest_orderbook.get('ask_volume'),
                'exchange': latest_orderbook['exchange']
            }

        except Exception as e:
            logger.error(f"Error getting latest COB data for {symbol}: {e}")
            return None

    def get_latest_cob_aggregated(self, symbol: str) -> Optional[Dict]:
        """Get latest 1s aggregated COB data for a symbol."""
        try:
            aggregated_data = self.get_cob_1s_aggregated(symbol, count=1)
            return aggregated_data[0] if aggregated_data else None

        except Exception as e:
            logger.error(f"Error getting latest COB aggregated data for {symbol}: {e}")
            return None

    # === SUBSCRIPTION METHODS ===

    def subscribe_to_ticks(self, callback: Callable[[MarketTick], None],
                           symbols: List[str] = None,
                           subscriber_name: str = None) -> str:
        """Subscribe to tick data updates."""
        try:
            subscriber_id = str(uuid.uuid4())

            with self.subscriber_lock:
                self.subscribers['ticks'][subscriber_id] = {
                    'callback': callback,
                    'symbols': symbols or [],
                    'subscriber_name': subscriber_name or 'unknown',
                    'created_at': datetime.utcnow()
                }
                self.stats['subscribers_active'] += 1

            logger.info(f"Added tick subscriber {subscriber_id} for {subscriber_name}")
            return subscriber_id

        except Exception as e:
            logger.error(f"Error adding tick subscriber: {e}")
            return ""

    def subscribe_to_cob_raw_ticks(self, callback: Callable[[str, Dict], None]) -> str:
        """Subscribe to raw COB tick updates."""
        try:
            subscriber_id = str(uuid.uuid4())

            with self.subscriber_lock:
                self.subscribers['cob_raw'][subscriber_id] = {
                    'callback': callback,
                    'created_at': datetime.utcnow()
                }
                self.stats['subscribers_active'] += 1

            logger.info(f"Added COB raw tick subscriber {subscriber_id}")
            return subscriber_id

        except Exception as e:
            logger.error(f"Error adding COB raw tick subscriber: {e}")
            return ""

    def subscribe_to_cob_aggregated(self, callback: Callable[[str, Dict], None]) -> str:
        """Subscribe to 1s aggregated COB updates."""
        try:
            subscriber_id = str(uuid.uuid4())

            with self.subscriber_lock:
                self.subscribers['cob_aggregated'][subscriber_id] = {
                    'callback': callback,
                    'created_at': datetime.utcnow()
                }
                self.stats['subscribers_active'] += 1

            logger.info(f"Added COB aggregated subscriber {subscriber_id}")
            return subscriber_id

        except Exception as e:
            logger.error(f"Error adding COB aggregated subscriber: {e}")
            return ""

    def subscribe_to_training_data(self, callback: Callable[[str, dict], None]) -> str:
        """Subscribe to training data updates."""
        try:
            subscriber_id = str(uuid.uuid4())

            with self.subscriber_lock:
                self.subscribers['training_data'][subscriber_id] = {
                    'callback': callback,
                    'created_at': datetime.utcnow()
                }
                self.stats['subscribers_active'] += 1

            logger.info(f"Added training data subscriber {subscriber_id}")
            return subscriber_id

        except Exception as e:
            logger.error(f"Error adding training data subscriber: {e}")
            return ""

    def subscribe_to_model_predictions(self, callback: Callable[[str, dict], None]) -> str:
        """Subscribe to model prediction updates."""
        try:
            subscriber_id = str(uuid.uuid4())

            with self.subscriber_lock:
                self.subscribers['model_predictions'][subscriber_id] = {
                    'callback': callback,
                    'created_at': datetime.utcnow()
                }
                self.stats['subscribers_active'] += 1

            logger.info(f"Added model prediction subscriber {subscriber_id}")
            return subscriber_id

        except Exception as e:
            logger.error(f"Error adding model prediction subscriber: {e}")
            return ""

    def unsubscribe(self, subscriber_id: str) -> bool:
        """Unsubscribe from all data feeds."""
        try:
            with self.subscriber_lock:
                removed = False
                for category in self.subscribers:
                    if subscriber_id in self.subscribers[category]:
                        del self.subscribers[category][subscriber_id]
                        self.stats['subscribers_active'] -= 1
                        removed = True
                        break

            if removed:
                logger.info(f"Removed subscriber {subscriber_id}")

            return removed

        except Exception as e:
            logger.error(f"Error removing subscriber {subscriber_id}: {e}")
            return False

    # === MODE SWITCHING ===

    async def switch_to_live_mode(self) -> bool:
        """Switch to live data mode."""
        try:
            if self.mode == 'live':
                logger.info("Already in live mode")
                return True

            # Stop replay session if active
            if self.current_replay_session:
                await self.replay_manager.stop_replay(self.current_replay_session)
                self.current_replay_session = None

            # Start live connectors
            for name, connector in self.connectors.items():
                if not connector.is_connected:
                    await connector.connect()

            self.mode = 'live'
            logger.info("Switched to live data mode")
            return True

        except Exception as e:
            logger.error(f"Error switching to live mode: {e}")
            return False

    async def switch_to_replay_mode(self, start_time: datetime, end_time: datetime,
                                    speed: float = 1.0, symbols: List[str] = None) -> bool:
        """Switch to replay data mode."""
        try:
            if self.mode == 'replay' and self.current_replay_session:
                await self.replay_manager.stop_replay(self.current_replay_session)

            # Create replay session
            session_id = self.replay_manager.create_replay_session(
                start_time=start_time,
                end_time=end_time,
                speed=speed,
                symbols=symbols or self.config.exchanges.symbols
            )

            # Add data callback for replay
            self.replay_manager.add_data_callback(session_id, self._handle_replay_data)

            # Start replay
            await self.replay_manager.start_replay(session_id)

            self.current_replay_session = session_id
            self.mode = 'replay'

            logger.info(f"Switched to replay mode: {start_time} to {end_time}")
            return True

        except Exception as e:
            logger.error(f"Error switching to replay mode: {e}")
            return False

    def get_current_mode(self) -> str:
        """Get current data mode (live or replay)."""
        return self.mode

    def get_replay_status(self) -> Optional[Dict[str, Any]]:
        """Get current replay session status."""
        if not self.current_replay_session:
            return None

        session = self.replay_manager.get_replay_status(self.current_replay_session)
        if not session:
            return None

        return {
            'session_id': session.session_id,
            'status': session.status.value,
            'progress': session.progress,
            'current_time': session.current_time.isoformat(),
            'speed': session.speed,
            'events_replayed': session.events_replayed,
            'total_events': session.total_events
        }

    # === DATA QUALITY AND METADATA ===

    def get_data_quality_indicators(self, symbol: str) -> Dict[str, Any]:
        """Get data quality indicators for a symbol."""
        try:
            # Get recent data statistics
            end_time = datetime.utcnow()
            start_time = end_time - timedelta(minutes=5)

            orderbook_data = asyncio.run(self.storage_manager.get_historical_data(
                symbol, start_time, end_time, 'orderbook'
            ))

            trade_data = asyncio.run(self.storage_manager.get_historical_data(
                symbol, start_time, end_time, 'trades'
            ))

            # Calculate quality metrics
            quality = {
                'symbol': symbol,
                'timestamp': datetime.utcnow().isoformat(),
                'orderbook_updates': len(orderbook_data) if orderbook_data else 0,
                'trade_events': len(trade_data) if trade_data else 0,
                'data_freshness_seconds': 0,
                'exchange_coverage': [],
                'quality_score': 0.0
            }

            # Calculate data freshness
            if orderbook_data:
                latest_timestamp = max(item['timestamp'] for item in orderbook_data)
                quality['data_freshness_seconds'] = (
                    datetime.utcnow() - latest_timestamp
                ).total_seconds()

            # Get exchange coverage
            if orderbook_data:
                exchanges = set(item['exchange'] for item in orderbook_data)
                quality['exchange_coverage'] = list(exchanges)

            # Calculate quality score (0-1)
            score = 0.0
            if quality['orderbook_updates'] > 0:
                score += 0.4
            if quality['trade_events'] > 0:
                score += 0.3
            if quality['data_freshness_seconds'] < 10:
                score += 0.3

            quality['quality_score'] = score

            return quality

        except Exception as e:
            logger.error(f"Error getting data quality for {symbol}: {e}")
            return {
                'symbol': symbol,
                'timestamp': datetime.utcnow().isoformat(),
                'quality_score': 0.0,
                'error': str(e)
            }
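
    # Worked example for the quality score above (illustrative): a symbol with
    # at least one order book update (+0.4), at least one trade event (+0.3)
    # and data fresher than 10 seconds (+0.3) scores 1.0; a symbol with only
    # stale order book updates scores 0.4.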

    def get_system_metadata(self) -> Dict[str, Any]:
        """Get system metadata and status."""
        try:
            return {
                'system': 'COBY',
                'version': '1.0.0',
                'mode': self.mode,
                'timestamp': datetime.utcnow().isoformat(),
                'components': {
                    'storage': self.storage_manager.is_healthy(),
                    'redis': True,  # Simplified check
                    'connectors': {
                        name: connector.is_connected
                        for name, connector in self.connectors.items()
                    }
                },
                'statistics': self.stats,
                'replay_session': self.get_replay_status(),
                'active_subscribers': sum(
                    len(subs) for subs in self.subscribers.values()
                )
            }

        except Exception as e:
            logger.error(f"Error getting system metadata: {e}")
            return {'error': str(e)}

    # === DATA HANDLERS ===

    async def _handle_connector_data(self, data: Union[OrderBookSnapshot, TradeEvent]) -> None:
        """Handle data from exchange connectors."""
        try:
            # Store data
            if isinstance(data, OrderBookSnapshot):
                await self.storage_manager.store_orderbook(data)
                self.stats['orderbooks_processed'] += 1

                # Create market tick for subscribers
                if data.bids and data.asks:
                    best_bid = max(data.bids, key=lambda x: x.price)
                    best_ask = min(data.asks, key=lambda x: x.price)
                    mid_price = (best_bid.price + best_ask.price) / 2

                    tick = MarketTick(
                        symbol=data.symbol,
                        price=mid_price,
                        volume=best_bid.size + best_ask.size,
                        timestamp=data.timestamp,
                        exchange=data.exchange
                    )

                    await self._notify_tick_subscribers(tick)

                    # Create COB data for subscribers
                    cob_data = {
                        'symbol': data.symbol,
                        'timestamp': data.timestamp.isoformat(),
                        'bids': [{'price': b.price, 'size': b.size} for b in data.bids[:10]],
                        'asks': [{'price': a.price, 'size': a.size} for a in data.asks[:10]],
                        'exchange': data.exchange
                    }

                    await self._notify_cob_raw_subscribers(data.symbol, cob_data)

            elif isinstance(data, TradeEvent):
                await self.storage_manager.store_trade(data)
                self.stats['ticks_processed'] += 1

                # Create market tick
                tick = MarketTick(
                    symbol=data.symbol,
                    price=data.price,
                    volume=data.size,
                    timestamp=data.timestamp,
                    side=data.side,
                    exchange=data.exchange
                )

                await self._notify_tick_subscribers(tick)

        except Exception as e:
            logger.error(f"Error handling connector data: {e}")
|
||||||
|
async def _handle_replay_data(self, data: Union[OrderBookSnapshot, TradeEvent]) -> None:
|
||||||
|
"""Handle data from replay system."""
|
||||||
|
try:
|
||||||
|
# Process replay data same as live data
|
||||||
|
await self._handle_connector_data(data)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error handling replay data: {e}")
|
||||||
|
|
||||||
|
async def _notify_tick_subscribers(self, tick: MarketTick) -> None:
|
||||||
|
"""Notify tick subscribers."""
|
||||||
|
try:
|
||||||
|
with self.subscriber_lock:
|
||||||
|
subscribers = self.subscribers['ticks'].copy()
|
||||||
|
|
||||||
|
for subscriber_id, sub_info in subscribers.items():
|
||||||
|
try:
|
||||||
|
callback = sub_info['callback']
|
||||||
|
symbols = sub_info['symbols']
|
||||||
|
|
||||||
|
# Check if subscriber wants this symbol
|
||||||
|
if not symbols or tick.symbol in symbols:
|
||||||
|
if asyncio.iscoroutinefunction(callback):
|
||||||
|
await callback(tick)
|
||||||
|
else:
|
||||||
|
callback(tick)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error notifying tick subscriber {subscriber_id}: {e}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error notifying tick subscribers: {e}")
|
||||||
|
|
||||||
|
async def _notify_cob_raw_subscribers(self, symbol: str, data: Dict) -> None:
|
||||||
|
"""Notify COB raw tick subscribers."""
|
||||||
|
try:
|
||||||
|
with self.subscriber_lock:
|
||||||
|
subscribers = self.subscribers['cob_raw'].copy()
|
||||||
|
|
||||||
|
for subscriber_id, sub_info in subscribers.items():
|
||||||
|
try:
|
||||||
|
callback = sub_info['callback']
|
||||||
|
|
||||||
|
if asyncio.iscoroutinefunction(callback):
|
||||||
|
await callback(symbol, data)
|
||||||
|
else:
|
||||||
|
callback(symbol, data)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error notifying COB raw subscriber {subscriber_id}: {e}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error notifying COB raw subscribers: {e}")
|
||||||
|
|
||||||
|
# === UTILITY METHODS ===
|
||||||
|
|
||||||
|
def _parse_timeframe(self, timeframe: str) -> Optional[int]:
|
||||||
|
"""Parse timeframe string to minutes."""
|
||||||
|
try:
|
||||||
|
if timeframe.endswith('m'):
|
||||||
|
return int(timeframe[:-1])
|
||||||
|
elif timeframe.endswith('h'):
|
||||||
|
return int(timeframe[:-1]) * 60
|
||||||
|
elif timeframe.endswith('d'):
|
||||||
|
return int(timeframe[:-1]) * 24 * 60
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
except:
|
||||||
|
return None
|
||||||
|
|
||||||
|
def start_centralized_data_collection(self) -> None:
|
||||||
|
"""Start centralized data collection (compatibility method)."""
|
||||||
|
logger.info("Centralized data collection started (COBY mode)")
|
||||||
|
|
||||||
|
def start_training_data_collection(self) -> None:
|
||||||
|
"""Start training data collection (compatibility method)."""
|
||||||
|
logger.info("Training data collection started (COBY mode)")
|
||||||
|
|
||||||
|
def invalidate_ohlcv_cache(self, symbol: str) -> None:
|
||||||
|
"""Invalidate OHLCV cache for a symbol."""
|
||||||
|
try:
|
||||||
|
# Clear Redis cache for this symbol
|
||||||
|
cache_pattern = f"ohlcv:{symbol}:*"
|
||||||
|
asyncio.run(self.redis_manager.delete_pattern(cache_pattern))
|
||||||
|
|
||||||
|
# Clear local price cache
|
||||||
|
if symbol in self.price_cache:
|
||||||
|
del self.price_cache[symbol]
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error invalidating cache for {symbol}: {e}")
|
||||||
|
|
||||||
|
async def close(self) -> None:
|
||||||
|
"""Close all connections and cleanup."""
|
||||||
|
try:
|
||||||
|
# Stop replay session
|
||||||
|
if self.current_replay_session:
|
||||||
|
await self.replay_manager.stop_replay(self.current_replay_session)
|
||||||
|
|
||||||
|
# Close connectors
|
||||||
|
for connector in self.connectors.values():
|
||||||
|
await connector.disconnect()
|
||||||
|
|
||||||
|
# Close storage
|
||||||
|
await self.storage_manager.close()
|
||||||
|
|
||||||
|
# Close Redis
|
||||||
|
await self.redis_manager.close()
|
||||||
|
|
||||||
|
logger.info("COBY orchestrator adapter closed")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error closing adapter: {e}")
|
||||||
|
|
||||||
|
def get_stats(self) -> Dict[str, Any]:
|
||||||
|
"""Get adapter statistics."""
|
||||||
|
return {
|
||||||
|
**self.stats,
|
||||||
|
'mode': self.mode,
|
||||||
|
'active_subscribers': sum(len(subs) for subs in self.subscribers.values()),
|
||||||
|
'cache_size': len(self.price_cache),
|
||||||
|
'replay_session': self.current_replay_session
|
||||||
|
}
|
||||||
17
COBY/interfaces/__init__.py
Normal file
17
COBY/interfaces/__init__.py
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
"""
|
||||||
|
Interface definitions for the multi-exchange data aggregation system.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from .exchange_connector import ExchangeConnector
|
||||||
|
from .data_processor import DataProcessor
|
||||||
|
from .aggregation_engine import AggregationEngine
|
||||||
|
from .storage_manager import StorageManager
|
||||||
|
from .replay_manager import ReplayManager
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'ExchangeConnector',
|
||||||
|
'DataProcessor',
|
||||||
|
'AggregationEngine',
|
||||||
|
'StorageManager',
|
||||||
|
'ReplayManager'
|
||||||
|
]
|
||||||
139
COBY/interfaces/aggregation_engine.py
Normal file
139
COBY/interfaces/aggregation_engine.py
Normal file
@@ -0,0 +1,139 @@
|
|||||||
|
"""
|
||||||
|
Interface for data aggregation and heatmap generation.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import Dict, List
|
||||||
|
from ..models.core import (
|
||||||
|
OrderBookSnapshot, PriceBuckets, HeatmapData,
|
||||||
|
ImbalanceMetrics, ConsolidatedOrderBook
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class AggregationEngine(ABC):
|
||||||
|
"""Aggregates data into price buckets and heatmaps"""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def create_price_buckets(self, orderbook: OrderBookSnapshot,
|
||||||
|
bucket_size: float) -> PriceBuckets:
|
||||||
|
"""
|
||||||
|
Convert order book data to price buckets.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
orderbook: Order book snapshot
|
||||||
|
bucket_size: Size of each price bucket
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
PriceBuckets: Aggregated price bucket data
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def update_heatmap(self, symbol: str, buckets: PriceBuckets) -> HeatmapData:
|
||||||
|
"""
|
||||||
|
Update heatmap data with new price buckets.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
buckets: Price bucket data
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
HeatmapData: Updated heatmap visualization data
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def calculate_imbalances(self, orderbook: OrderBookSnapshot) -> ImbalanceMetrics:
|
||||||
|
"""
|
||||||
|
Calculate order book imbalance metrics.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
orderbook: Order book snapshot
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
ImbalanceMetrics: Calculated imbalance metrics
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def aggregate_across_exchanges(self, symbol: str,
|
||||||
|
orderbooks: List[OrderBookSnapshot]) -> ConsolidatedOrderBook:
|
||||||
|
"""
|
||||||
|
Aggregate order book data from multiple exchanges.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
orderbooks: List of order book snapshots from different exchanges
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
ConsolidatedOrderBook: Consolidated order book data
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def calculate_volume_weighted_price(self, orderbooks: List[OrderBookSnapshot]) -> float:
|
||||||
|
"""
|
||||||
|
Calculate volume-weighted average price across exchanges.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
orderbooks: List of order book snapshots
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
float: Volume-weighted average price
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def get_market_depth(self, orderbook: OrderBookSnapshot,
|
||||||
|
depth_levels: List[float]) -> Dict[float, Dict[str, float]]:
|
||||||
|
"""
|
||||||
|
Calculate market depth at different price levels.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
orderbook: Order book snapshot
|
||||||
|
depth_levels: List of depth percentages (e.g., [0.1, 0.5, 1.0])
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict: Market depth data {level: {'bid_volume': x, 'ask_volume': y}}
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def smooth_heatmap(self, heatmap: HeatmapData, smoothing_factor: float) -> HeatmapData:
|
||||||
|
"""
|
||||||
|
Apply smoothing to heatmap data to reduce noise.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
heatmap: Raw heatmap data
|
||||||
|
smoothing_factor: Smoothing factor (0.0 to 1.0)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
HeatmapData: Smoothed heatmap data
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def calculate_liquidity_score(self, orderbook: OrderBookSnapshot) -> float:
|
||||||
|
"""
|
||||||
|
Calculate liquidity score for an order book.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
orderbook: Order book snapshot
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
float: Liquidity score (0.0 to 1.0)
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def detect_support_resistance(self, heatmap: HeatmapData) -> Dict[str, List[float]]:
|
||||||
|
"""
|
||||||
|
Detect support and resistance levels from heatmap data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
heatmap: Heatmap data
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict: {'support': [prices], 'resistance': [prices]}
|
||||||
|
"""
|
||||||
|
pass
|
||||||
119
COBY/interfaces/data_processor.py
Normal file
119
COBY/interfaces/data_processor.py
Normal file
@@ -0,0 +1,119 @@
|
|||||||
|
"""
|
||||||
|
Interface for data processing and normalization.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import Dict, Union, List, Optional
|
||||||
|
from ..models.core import OrderBookSnapshot, TradeEvent, OrderBookMetrics
|
||||||
|
|
||||||
|
|
||||||
|
class DataProcessor(ABC):
|
||||||
|
"""Processes and normalizes raw exchange data"""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def normalize_orderbook(self, raw_data: Dict, exchange: str) -> OrderBookSnapshot:
|
||||||
|
"""
|
||||||
|
Normalize raw order book data to standard format.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
raw_data: Raw order book data from exchange
|
||||||
|
exchange: Exchange name
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
OrderBookSnapshot: Normalized order book data
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def normalize_trade(self, raw_data: Dict, exchange: str) -> TradeEvent:
|
||||||
|
"""
|
||||||
|
Normalize raw trade data to standard format.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
raw_data: Raw trade data from exchange
|
||||||
|
exchange: Exchange name
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
TradeEvent: Normalized trade data
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def validate_data(self, data: Union[OrderBookSnapshot, TradeEvent]) -> bool:
|
||||||
|
"""
|
||||||
|
Validate normalized data for quality and consistency.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Normalized data to validate
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if data is valid, False otherwise
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def calculate_metrics(self, orderbook: OrderBookSnapshot) -> OrderBookMetrics:
|
||||||
|
"""
|
||||||
|
Calculate metrics from order book data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
orderbook: Order book snapshot
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
OrderBookMetrics: Calculated metrics
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def detect_anomalies(self, data: Union[OrderBookSnapshot, TradeEvent]) -> List[str]:
|
||||||
|
"""
|
||||||
|
Detect anomalies in the data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Data to analyze for anomalies
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[str]: List of detected anomaly descriptions
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def filter_data(self, data: Union[OrderBookSnapshot, TradeEvent],
|
||||||
|
criteria: Dict) -> bool:
|
||||||
|
"""
|
||||||
|
Filter data based on criteria.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Data to filter
|
||||||
|
criteria: Filtering criteria
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if data passes filter, False otherwise
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def enrich_data(self, data: Union[OrderBookSnapshot, TradeEvent]) -> Dict:
|
||||||
|
"""
|
||||||
|
Enrich data with additional metadata.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Data to enrich
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict: Enriched data with metadata
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def get_data_quality_score(self, data: Union[OrderBookSnapshot, TradeEvent]) -> float:
|
||||||
|
"""
|
||||||
|
Calculate data quality score.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Data to score
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
float: Quality score between 0.0 and 1.0
|
||||||
|
"""
|
||||||
|
pass
|
||||||
189
COBY/interfaces/exchange_connector.py
Normal file
189
COBY/interfaces/exchange_connector.py
Normal file
@@ -0,0 +1,189 @@
|
|||||||
|
"""
|
||||||
|
Base interface for exchange WebSocket connectors.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import Callable, List, Optional
|
||||||
|
from ..models.core import ConnectionStatus, OrderBookSnapshot, TradeEvent
|
||||||
|
|
||||||
|
|
||||||
|
class ExchangeConnector(ABC):
|
||||||
|
"""Base interface for exchange WebSocket connectors"""
|
||||||
|
|
||||||
|
def __init__(self, exchange_name: str):
|
||||||
|
self.exchange_name = exchange_name
|
||||||
|
self._data_callbacks: List[Callable] = []
|
||||||
|
self._status_callbacks: List[Callable] = []
|
||||||
|
self._connection_status = ConnectionStatus.DISCONNECTED
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def connect(self) -> bool:
|
||||||
|
"""
|
||||||
|
Establish connection to the exchange WebSocket.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if connection successful, False otherwise
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def disconnect(self) -> None:
|
||||||
|
"""Disconnect from the exchange WebSocket."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def subscribe_orderbook(self, symbol: str) -> None:
|
||||||
|
"""
|
||||||
|
Subscribe to order book updates for a symbol.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def subscribe_trades(self, symbol: str) -> None:
|
||||||
|
"""
|
||||||
|
Subscribe to trade updates for a symbol.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def unsubscribe_orderbook(self, symbol: str) -> None:
|
||||||
|
"""
|
||||||
|
Unsubscribe from order book updates for a symbol.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def unsubscribe_trades(self, symbol: str) -> None:
|
||||||
|
"""
|
||||||
|
Unsubscribe from trade updates for a symbol.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol (e.g., 'BTCUSDT')
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
def get_connection_status(self) -> ConnectionStatus:
|
||||||
|
"""
|
||||||
|
Get current connection status.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
ConnectionStatus: Current connection status
|
||||||
|
"""
|
||||||
|
return self._connection_status
|
||||||
|
|
||||||
|
def add_data_callback(self, callback: Callable) -> None:
|
||||||
|
"""
|
||||||
|
Add callback for data updates.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
callback: Function to call when data is received
|
||||||
|
Signature: callback(data: Union[OrderBookSnapshot, TradeEvent])
|
||||||
|
"""
|
||||||
|
if callback not in self._data_callbacks:
|
||||||
|
self._data_callbacks.append(callback)
|
||||||
|
|
||||||
|
def remove_data_callback(self, callback: Callable) -> None:
|
||||||
|
"""
|
||||||
|
Remove data callback.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
callback: Callback function to remove
|
||||||
|
"""
|
||||||
|
if callback in self._data_callbacks:
|
||||||
|
self._data_callbacks.remove(callback)
|
||||||
|
|
||||||
|
def add_status_callback(self, callback: Callable) -> None:
|
||||||
|
"""
|
||||||
|
Add callback for status updates.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
callback: Function to call when status changes
|
||||||
|
Signature: callback(exchange: str, status: ConnectionStatus)
|
||||||
|
"""
|
||||||
|
if callback not in self._status_callbacks:
|
||||||
|
self._status_callbacks.append(callback)
|
||||||
|
|
||||||
|
def remove_status_callback(self, callback: Callable) -> None:
|
||||||
|
"""
|
||||||
|
Remove status callback.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
callback: Callback function to remove
|
||||||
|
"""
|
||||||
|
if callback in self._status_callbacks:
|
||||||
|
self._status_callbacks.remove(callback)
|
||||||
|
|
||||||
|
def _notify_data_callbacks(self, data):
|
||||||
|
"""Notify all data callbacks of new data."""
|
||||||
|
for callback in self._data_callbacks:
|
||||||
|
try:
|
||||||
|
callback(data)
|
||||||
|
except Exception as e:
|
||||||
|
# Log error but don't stop other callbacks
|
||||||
|
print(f"Error in data callback: {e}")
|
||||||
|
|
||||||
|
def _notify_status_callbacks(self, status: ConnectionStatus):
|
||||||
|
"""Notify all status callbacks of status change."""
|
||||||
|
self._connection_status = status
|
||||||
|
for callback in self._status_callbacks:
|
||||||
|
try:
|
||||||
|
callback(self.exchange_name, status)
|
||||||
|
except Exception as e:
|
||||||
|
# Log error but don't stop other callbacks
|
||||||
|
print(f"Error in status callback: {e}")
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def get_symbols(self) -> List[str]:
|
||||||
|
"""
|
||||||
|
Get list of available trading symbols.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[str]: List of available symbols
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def normalize_symbol(self, symbol: str) -> str:
|
||||||
|
"""
|
||||||
|
Normalize symbol to exchange format.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Standard symbol format (e.g., 'BTCUSDT')
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Exchange-specific symbol format
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def get_orderbook_snapshot(self, symbol: str, depth: int = 20) -> Optional[OrderBookSnapshot]:
|
||||||
|
"""
|
||||||
|
Get current order book snapshot.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
depth: Number of price levels to retrieve
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
OrderBookSnapshot: Current order book or None if unavailable
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@property
|
||||||
|
def name(self) -> str:
|
||||||
|
"""Get exchange name."""
|
||||||
|
return self.exchange_name
|
||||||
|
|
||||||
|
@property
|
||||||
|
def is_connected(self) -> bool:
|
||||||
|
"""Check if connector is connected."""
|
||||||
|
return self._connection_status == ConnectionStatus.CONNECTED
|
||||||
212
COBY/interfaces/replay_manager.py
Normal file
212
COBY/interfaces/replay_manager.py
Normal file
@@ -0,0 +1,212 @@
|
|||||||
|
"""
|
||||||
|
Interface for historical data replay functionality.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from datetime import datetime
|
||||||
|
from typing import List, Optional, Callable, Dict, Any
|
||||||
|
from ..models.core import ReplaySession, ReplayStatus
|
||||||
|
|
||||||
|
|
||||||
|
class ReplayManager(ABC):
|
||||||
|
"""Provides historical data replay functionality"""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def create_replay_session(self, start_time: datetime, end_time: datetime,
|
||||||
|
speed: float = 1.0, symbols: Optional[List[str]] = None,
|
||||||
|
exchanges: Optional[List[str]] = None) -> str:
|
||||||
|
"""
|
||||||
|
Create a new replay session.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
start_time: Replay start time
|
||||||
|
end_time: Replay end time
|
||||||
|
speed: Playback speed multiplier (1.0 = real-time)
|
||||||
|
symbols: List of symbols to replay (None = all)
|
||||||
|
exchanges: List of exchanges to replay (None = all)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: Session ID
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def start_replay(self, session_id: str) -> None:
|
||||||
|
"""
|
||||||
|
Start replay session.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
session_id: Session ID to start
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def pause_replay(self, session_id: str) -> None:
|
||||||
|
"""
|
||||||
|
Pause replay session.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
session_id: Session ID to pause
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def resume_replay(self, session_id: str) -> None:
|
||||||
|
"""
|
||||||
|
Resume paused replay session.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
session_id: Session ID to resume
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def stop_replay(self, session_id: str) -> None:
|
||||||
|
"""
|
||||||
|
Stop replay session.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
session_id: Session ID to stop
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def get_replay_status(self, session_id: str) -> Optional[ReplaySession]:
|
||||||
|
"""
|
||||||
|
Get replay session status.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
session_id: Session ID
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
ReplaySession: Session status or None if not found
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def list_replay_sessions(self) -> List[ReplaySession]:
|
||||||
|
"""
|
||||||
|
List all replay sessions.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[ReplaySession]: List of all sessions
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def delete_replay_session(self, session_id: str) -> bool:
|
||||||
|
"""
|
||||||
|
Delete replay session.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
session_id: Session ID to delete
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if deleted successfully, False otherwise
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def set_replay_speed(self, session_id: str, speed: float) -> bool:
|
||||||
|
"""
|
||||||
|
Change replay speed for active session.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
session_id: Session ID
|
||||||
|
speed: New playback speed multiplier
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if speed changed successfully, False otherwise
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def seek_replay(self, session_id: str, timestamp: datetime) -> bool:
|
||||||
|
"""
|
||||||
|
Seek to specific timestamp in replay.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
session_id: Session ID
|
||||||
|
timestamp: Target timestamp
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if seek successful, False otherwise
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def add_data_callback(self, session_id: str, callback: Callable) -> bool:
|
||||||
|
"""
|
||||||
|
Add callback for replay data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
session_id: Session ID
|
||||||
|
callback: Function to call with replay data
|
||||||
|
Signature: callback(data: Union[OrderBookSnapshot, TradeEvent])
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if callback added successfully, False otherwise
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def remove_data_callback(self, session_id: str, callback: Callable) -> bool:
|
||||||
|
"""
|
||||||
|
Remove data callback from replay session.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
session_id: Session ID
|
||||||
|
callback: Callback function to remove
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if callback removed successfully, False otherwise
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def add_status_callback(self, session_id: str, callback: Callable) -> bool:
|
||||||
|
"""
|
||||||
|
Add callback for replay status changes.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
session_id: Session ID
|
||||||
|
callback: Function to call on status change
|
||||||
|
Signature: callback(session_id: str, status: ReplayStatus)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if callback added successfully, False otherwise
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def get_available_data_range(self, symbol: str,
|
||||||
|
exchange: Optional[str] = None) -> Optional[Dict[str, datetime]]:
|
||||||
|
"""
|
||||||
|
Get available data time range for replay.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
exchange: Exchange name (None = all exchanges)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict: {'start': datetime, 'end': datetime} or None if no data
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def validate_replay_request(self, start_time: datetime, end_time: datetime,
|
||||||
|
symbols: Optional[List[str]] = None,
|
||||||
|
exchanges: Optional[List[str]] = None) -> List[str]:
|
||||||
|
"""
|
||||||
|
Validate replay request parameters.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
start_time: Requested start time
|
||||||
|
end_time: Requested end time
|
||||||
|
symbols: Requested symbols
|
||||||
|
exchanges: Requested exchanges
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[str]: List of validation errors (empty if valid)
|
||||||
|
"""
|
||||||
|
pass
|
||||||
215
COBY/interfaces/storage_manager.py
Normal file
215
COBY/interfaces/storage_manager.py
Normal file
@@ -0,0 +1,215 @@
|
|||||||
|
"""
|
||||||
|
Interface for data storage and retrieval.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from datetime import datetime
|
||||||
|
from typing import List, Dict, Optional, Any
|
||||||
|
from ..models.core import OrderBookSnapshot, TradeEvent, HeatmapData, SystemMetrics
|
||||||
|
|
||||||
|
|
||||||
|
class StorageManager(ABC):
|
||||||
|
"""Manages data persistence and retrieval"""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def store_orderbook(self, data: OrderBookSnapshot) -> bool:
|
||||||
|
"""
|
||||||
|
Store order book snapshot to database.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Order book snapshot to store
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if stored successfully, False otherwise
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def store_trade(self, data: TradeEvent) -> bool:
|
||||||
|
"""
|
||||||
|
Store trade event to database.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Trade event to store
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if stored successfully, False otherwise
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def store_heatmap(self, data: HeatmapData) -> bool:
|
||||||
|
"""
|
||||||
|
Store heatmap data to database.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: Heatmap data to store
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if stored successfully, False otherwise
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def store_metrics(self, data: SystemMetrics) -> bool:
|
||||||
|
"""
|
||||||
|
Store system metrics to database.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: System metrics to store
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if stored successfully, False otherwise
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def get_historical_orderbooks(self, symbol: str, exchange: str,
|
||||||
|
start: datetime, end: datetime,
|
||||||
|
limit: Optional[int] = None) -> List[OrderBookSnapshot]:
|
||||||
|
"""
|
||||||
|
Retrieve historical order book data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
exchange: Exchange name
|
||||||
|
start: Start timestamp
|
||||||
|
end: End timestamp
|
||||||
|
limit: Maximum number of records to return
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[OrderBookSnapshot]: Historical order book data
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def get_historical_trades(self, symbol: str, exchange: str,
|
||||||
|
start: datetime, end: datetime,
|
||||||
|
limit: Optional[int] = None) -> List[TradeEvent]:
|
||||||
|
"""
|
||||||
|
Retrieve historical trade data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
exchange: Exchange name
|
||||||
|
start: Start timestamp
|
||||||
|
end: End timestamp
|
||||||
|
limit: Maximum number of records to return
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[TradeEvent]: Historical trade data
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def get_latest_orderbook(self, symbol: str, exchange: str) -> Optional[OrderBookSnapshot]:
|
||||||
|
"""
|
||||||
|
Get latest order book snapshot.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
exchange: Exchange name
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
OrderBookSnapshot: Latest order book or None if not found
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def get_latest_heatmap(self, symbol: str, bucket_size: float) -> Optional[HeatmapData]:
|
||||||
|
"""
|
||||||
|
Get latest heatmap data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
bucket_size: Price bucket size
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
HeatmapData: Latest heatmap or None if not found
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def get_ohlcv_data(self, symbol: str, exchange: str, timeframe: str,
|
||||||
|
start: datetime, end: datetime) -> List[Dict[str, Any]]:
|
||||||
|
"""
|
||||||
|
Get OHLCV candlestick data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
symbol: Trading symbol
|
||||||
|
exchange: Exchange name
|
||||||
|
timeframe: Timeframe (e.g., '1m', '5m', '1h')
|
||||||
|
start: Start timestamp
|
||||||
|
end: End timestamp
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List[Dict]: OHLCV data
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def batch_store_orderbooks(self, data: List[OrderBookSnapshot]) -> int:
|
||||||
|
"""
|
||||||
|
Store multiple order book snapshots in batch.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: List of order book snapshots
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
int: Number of records stored successfully
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def batch_store_trades(self, data: List[TradeEvent]) -> int:
|
||||||
|
"""
|
||||||
|
Store multiple trade events in batch.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data: List of trade events
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
int: Number of records stored successfully
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def setup_database_schema(self) -> None:
|
||||||
|
"""
|
||||||
|
Set up database schema and tables.
|
||||||
|
Should be idempotent - safe to call multiple times.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def cleanup_old_data(self, retention_days: int) -> int:
|
||||||
|
"""
|
||||||
|
Clean up old data based on retention policy.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
retention_days: Number of days to retain data
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
int: Number of records deleted
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def get_storage_stats(self) -> Dict[str, Any]:
|
||||||
|
"""
|
||||||
|
Get storage statistics.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict: Storage statistics (table sizes, record counts, etc.)
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
async def health_check(self) -> bool:
|
||||||
|
"""
|
||||||
|
Check storage system health.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if healthy, False otherwise
|
||||||
|
"""
|
||||||
|
pass
|
||||||
311
COBY/main.py
Normal file
311
COBY/main.py
Normal file
@@ -0,0 +1,311 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
COBY Multi-Exchange Data Aggregation System
|
||||||
|
Main application entry point for Docker deployment
|
||||||
|
"""
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
import signal
|
||||||
|
import argparse
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
# Add current directory to path for imports
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
|
||||||
|
|
||||||
|
from utils.logging import get_logger, setup_logging
|
||||||
|
from simple_config import Config
|
||||||
|
from monitoring.metrics_collector import metrics_collector
|
||||||
|
from monitoring.performance_monitor import get_performance_monitor
|
||||||
|
from monitoring.memory_monitor import memory_monitor
|
||||||
|
from api.rest_api import create_app
|
||||||
|
from api.websocket_server import websocket_manager
|
||||||
|
|
||||||
|
logger = get_logger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
# Global reference for API access
|
||||||
|
_app_instance = None
|
||||||
|
|
||||||
|
class COBYApplication:
|
||||||
|
"""Main COBY application orchestrator"""
|
||||||
|
|
||||||
|
def __init__(self, config: Config):
|
||||||
|
global _app_instance
|
||||||
|
self.config = config
|
||||||
|
self.running = False
|
||||||
|
self.tasks = []
|
||||||
|
self.websocket_manager = websocket_manager
|
||||||
|
self.connectors = {}
|
||||||
|
_app_instance = self
|
||||||
|
|
||||||
|
async def start(self):
|
||||||
|
"""Start all application components"""
|
||||||
|
try:
|
||||||
|
logger.info("Starting COBY Multi-Exchange Data Aggregation System")
|
||||||
|
|
||||||
|
# Start monitoring systems
|
||||||
|
logger.info("Starting monitoring systems...")
|
||||||
|
metrics_collector.start_collection()
|
||||||
|
get_performance_monitor().start_monitoring()
|
||||||
|
memory_monitor.start_monitoring()
|
||||||
|
|
||||||
|
# WebSocket server is handled by FastAPI
|
||||||
|
logger.info("WebSocket manager initialized")
|
||||||
|
|
||||||
|
# Start REST API server (includes static file serving)
|
||||||
|
logger.info("Starting REST API server with static file serving...")
|
||||||
|
app = create_app(self.config)
|
||||||
|
api_task = asyncio.create_task(
|
||||||
|
self._run_api_server(app, self.config.api.host, self.config.api.port)
|
||||||
|
)
|
||||||
|
self.tasks.append(api_task)
|
||||||
|
|
||||||
|
# Start exchange connectors
|
||||||
|
logger.info("Starting exchange connectors...")
|
||||||
|
await self._start_exchange_connectors()
|
||||||
|
|
||||||
|
# Start data processing pipeline
|
||||||
|
logger.info("Starting data processing pipeline...")
|
||||||
|
await self._start_data_processing()
|
||||||
|
|
||||||
|
self.running = True
|
||||||
|
logger.info("COBY system started successfully")
|
||||||
|
|
||||||
|
# Wait for all tasks
|
||||||
|
await asyncio.gather(*self.tasks, return_exceptions=True)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error starting COBY application: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
async def stop(self):
|
||||||
|
"""Stop all application components"""
|
||||||
|
if not self.running:
|
||||||
|
return
|
||||||
|
|
||||||
|
logger.info("Stopping COBY Multi-Exchange Data Aggregation System")
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Stop exchange connectors
|
||||||
|
for name, connector in self.connectors.items():
|
||||||
|
try:
|
||||||
|
logger.info(f"Stopping {name} connector...")
|
||||||
|
await connector.disconnect()
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error stopping {name} connector: {e}")
|
||||||
|
|
||||||
|
# WebSocket connections will be closed automatically
|
||||||
|
|
||||||
|
# Cancel all tasks
|
||||||
|
for task in self.tasks:
|
||||||
|
if not task.done():
|
||||||
|
task.cancel()
|
||||||
|
|
||||||
|
# Wait for tasks to complete
|
||||||
|
if self.tasks:
|
||||||
|
await asyncio.gather(*self.tasks, return_exceptions=True)
|
||||||
|
|
||||||
|
# Stop monitoring systems
|
||||||
|
memory_monitor.stop_monitoring()
|
||||||
|
get_performance_monitor().stop_monitoring()
|
||||||
|
metrics_collector.stop_collection()
|
||||||
|
|
||||||
|
self.running = False
|
||||||
|
logger.info("COBY system stopped successfully")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error stopping COBY application: {e}")
|
||||||
|
|
||||||
|
async def _start_exchange_connectors(self):
|
||||||
|
"""Start exchange connectors"""
|
||||||
|
try:
|
||||||
|
# Import real exchange connectors
|
||||||
|
from connectors.binance_connector import BinanceConnector
|
||||||
|
|
||||||
|
# Initialize real exchange connectors
|
||||||
|
self.connectors = {
|
||||||
|
'binance': BinanceConnector()
|
||||||
|
}
|
||||||
|
|
||||||
|
# Start connectors
|
||||||
|
for name, connector in self.connectors.items():
|
||||||
|
try:
|
||||||
|
logger.info(f"Starting {name} connector...")
|
||||||
|
# Set up data callback to broadcast to WebSocket
|
||||||
|
connector.add_data_callback(self._handle_connector_data)
|
||||||
|
connector.add_status_callback(self._handle_connector_status)
|
||||||
|
|
||||||
|
connector_task = asyncio.create_task(self._run_connector(connector))
|
||||||
|
self.tasks.append(connector_task)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to start {name} connector: {e}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error starting exchange connectors: {e}")
|
||||||
|
|
||||||
|
async def _run_connector(self, connector):
|
||||||
|
"""Run a single connector"""
|
||||||
|
try:
|
||||||
|
# Connect to exchange
|
||||||
|
if await connector.connect():
|
||||||
|
logger.info(f"Connected to {connector.exchange_name}")
|
||||||
|
|
||||||
|
# Subscribe to default symbols
|
||||||
|
default_symbols = ['BTCUSDT', 'ETHUSDT', 'ADAUSDT', 'SOLUSDT']
|
||||||
|
for symbol in default_symbols:
|
||||||
|
try:
|
||||||
|
await connector.subscribe_orderbook(symbol)
|
||||||
|
await connector.subscribe_trades(symbol)
|
||||||
|
logger.info(f"Subscribed to {symbol} on {connector.exchange_name}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Failed to subscribe to {symbol} on {connector.exchange_name}: {e}")
|
||||||
|
|
||||||
|
# Keep connector running
|
||||||
|
while connector.is_connected:
|
||||||
|
await asyncio.sleep(1)
|
||||||
|
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to connect to {connector.exchange_name}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error running {connector.exchange_name} connector: {e}")
|
||||||
|
|
||||||
|
async def _handle_connector_data(self, data_type: str, data):
|
||||||
|
"""Handle data from exchange connectors"""
|
||||||
|
try:
|
||||||
|
if data_type == 'orderbook':
|
||||||
|
# Broadcast order book data
|
||||||
|
await self.websocket_manager.broadcast_update(
|
||||||
|
data.symbol, 'orderbook', data
|
||||||
|
)
|
||||||
|
logger.debug(f"Broadcasted orderbook data for {data.symbol}")
|
||||||
|
|
||||||
|
elif data_type == 'trade':
|
||||||
|
# Broadcast trade data
|
||||||
|
await self.websocket_manager.broadcast_update(
|
||||||
|
data.symbol, 'trade', data
|
||||||
|
)
|
||||||
|
logger.debug(f"Broadcasted trade data for {data.symbol}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error handling connector data: {e}")
|
||||||
|
|
||||||
|
def _handle_connector_status(self, exchange_name: str, status):
|
||||||
|
"""Handle status updates from exchange connectors"""
|
||||||
|
try:
|
||||||
|
logger.info(f"Connector {exchange_name} status: {status.value}")
|
||||||
|
# Could broadcast status updates to dashboard here
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error handling connector status: {e}")
|
||||||
|
|
||||||
|
async def _start_data_processing(self):
|
||||||
|
"""Start data processing pipeline"""
|
||||||
|
try:
|
||||||
|
# Start data aggregation task
|
||||||
|
aggregation_task = asyncio.create_task(self._run_data_aggregation())
|
||||||
|
self.tasks.append(aggregation_task)
|
||||||
|
|
||||||
|
logger.info("Data processing pipeline started")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error starting data processing pipeline: {e}")
|
||||||
|
|
||||||
|
async def _run_data_aggregation(self):
|
||||||
|
"""Run data aggregation process"""
|
||||||
|
try:
|
||||||
|
while self.running:
|
||||||
|
# Placeholder for data aggregation logic
|
||||||
|
# This would collect data from connectors and process it
|
||||||
|
await asyncio.sleep(5)
|
||||||
|
|
||||||
|
# Log status
|
||||||
|
logger.debug("Data aggregation tick - simple data generator running")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error in data aggregation: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
async def _run_api_server(self, app, host: str, port: int):
|
||||||
|
"""Run the API server"""
|
||||||
|
try:
|
||||||
|
# Import here to avoid circular imports
|
||||||
|
import uvicorn
|
||||||
|
|
||||||
|
config = uvicorn.Config(
|
||||||
|
app,
|
||||||
|
host=host,
|
||||||
|
port=port,
|
||||||
|
log_level="info",
|
||||||
|
access_log=True
|
||||||
|
)
|
||||||
|
server = uvicorn.Server(config)
|
||||||
|
await server.serve()
|
||||||
|
|
||||||
|
except ImportError:
|
||||||
|
logger.error("uvicorn not available, falling back to basic server")
|
||||||
|
# Fallback implementation would go here
|
||||||
|
await asyncio.sleep(3600) # Keep running for an hour
|
||||||
|
|
||||||
|
|
||||||
|
async def main():
|
||||||
|
"""Main application entry point"""
|
||||||
|
parser = argparse.ArgumentParser(description='COBY Multi-Exchange Data Aggregation System')
|
||||||
|
parser.add_argument('--debug', action='store_true', help='Enable debug mode')
|
||||||
|
parser.add_argument('--reload', action='store_true', help='Enable auto-reload (development)')
|
||||||
|
parser.add_argument('--config', type=str, help='Configuration file path')
|
||||||
|
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
# Setup logging
|
||||||
|
log_level = 'DEBUG' if args.debug else 'INFO'
|
||||||
|
setup_logging(level=log_level)
|
||||||
|
|
||||||
|
# Load configuration
|
||||||
|
config = Config()
|
||||||
|
if args.debug:
|
||||||
|
config.debug = True
|
||||||
|
config.logging.level = 'DEBUG'
|
||||||
|
|
||||||
|
# Create and start application
|
||||||
|
app = COBYApplication(config)
|
||||||
|
|
||||||
|
# Setup signal handlers
|
||||||
|
def signal_handler(signum, frame):
|
||||||
|
logger.info(f"Received signal {signum}, shutting down...")
|
||||||
|
asyncio.create_task(app.stop())
|
||||||
|
|
||||||
|
signal.signal(signal.SIGINT, signal_handler)
|
||||||
|
signal.signal(signal.SIGTERM, signal_handler)
|
||||||
|
|
||||||
|
try:
|
||||||
|
await app.start()
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
logger.info("Received keyboard interrupt, shutting down...")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Application error: {e}")
|
||||||
|
sys.exit(1)
|
||||||
|
finally:
|
||||||
|
await app.stop()
|
||||||
|
|
||||||
|
|
||||||
|
def get_app_instance():
|
||||||
|
"""Get the global application instance"""
|
||||||
|
return _app_instance
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
# Ensure we're running in the correct directory
|
||||||
|
os.chdir(os.path.dirname(os.path.abspath(__file__)))
|
||||||
|
|
||||||
|
# Run the application
|
||||||
|
try:
|
||||||
|
asyncio.run(main())
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
print("\nShutdown complete")
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Fatal error: {e}")
|
||||||
|
sys.exit(1)
|
||||||
31
COBY/models/__init__.py
Normal file
31
COBY/models/__init__.py
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
"""
|
||||||
|
Data models for the multi-exchange data aggregation system.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from .core import (
|
||||||
|
OrderBookSnapshot,
|
||||||
|
PriceLevel,
|
||||||
|
TradeEvent,
|
||||||
|
PriceBuckets,
|
||||||
|
HeatmapData,
|
||||||
|
HeatmapPoint,
|
||||||
|
ConnectionStatus,
|
||||||
|
OrderBookMetrics,
|
||||||
|
ImbalanceMetrics,
|
||||||
|
ConsolidatedOrderBook,
|
||||||
|
ReplayStatus
|
||||||
|
)
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'OrderBookSnapshot',
|
||||||
|
'PriceLevel',
|
||||||
|
'TradeEvent',
|
||||||
|
'PriceBuckets',
|
||||||
|
'HeatmapData',
|
||||||
|
'HeatmapPoint',
|
||||||
|
'ConnectionStatus',
|
||||||
|
'OrderBookMetrics',
|
||||||
|
'ImbalanceMetrics',
|
||||||
|
'ConsolidatedOrderBook',
|
||||||
|
'ReplayStatus'
|
||||||
|
]
|
||||||
324
COBY/models/core.py
Normal file
324
COBY/models/core.py
Normal file
@@ -0,0 +1,324 @@
|
|||||||
|
"""
|
||||||
|
Core data models for the multi-exchange data aggregation system.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from datetime import datetime
|
||||||
|
from typing import List, Dict, Optional, Any
|
||||||
|
from enum import Enum
|
||||||
|
|
||||||
|
|
||||||
|
class ConnectionStatus(Enum):
|
||||||
|
"""Exchange connection status"""
|
||||||
|
DISCONNECTED = "disconnected"
|
||||||
|
CONNECTING = "connecting"
|
||||||
|
CONNECTED = "connected"
|
||||||
|
RECONNECTING = "reconnecting"
|
||||||
|
ERROR = "error"
|
||||||
|
|
||||||
|
|
||||||
|
class ReplayStatus(Enum):
|
||||||
|
"""Replay session status"""
|
||||||
|
CREATED = "created"
|
||||||
|
RUNNING = "running"
|
||||||
|
PAUSED = "paused"
|
||||||
|
STOPPED = "stopped"
|
||||||
|
COMPLETED = "completed"
|
||||||
|
ERROR = "error"
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class PriceLevel:
|
||||||
|
"""Individual price level in order book"""
|
||||||
|
price: float
|
||||||
|
size: float
|
||||||
|
count: Optional[int] = None
|
||||||
|
|
||||||
|
def __post_init__(self):
|
||||||
|
"""Validate price level data"""
|
||||||
|
if self.price <= 0:
|
||||||
|
raise ValueError("Price must be positive")
|
||||||
|
if self.size < 0:
|
||||||
|
raise ValueError("Size cannot be negative")
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class OrderBookSnapshot:
|
||||||
|
"""Standardized order book snapshot"""
|
||||||
|
symbol: str
|
||||||
|
exchange: str
|
||||||
|
timestamp: datetime
|
||||||
|
bids: List[PriceLevel]
|
||||||
|
asks: List[PriceLevel]
|
||||||
|
sequence_id: Optional[int] = None
|
||||||
|
|
||||||
|
def __post_init__(self):
|
||||||
|
"""Validate and sort order book data"""
|
||||||
|
if not self.symbol:
|
||||||
|
raise ValueError("Symbol cannot be empty")
|
||||||
|
if not self.exchange:
|
||||||
|
raise ValueError("Exchange cannot be empty")
|
||||||
|
|
||||||
|
# Sort bids descending (highest price first)
|
||||||
|
self.bids.sort(key=lambda x: x.price, reverse=True)
|
||||||
|
# Sort asks ascending (lowest price first)
|
||||||
|
self.asks.sort(key=lambda x: x.price)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def mid_price(self) -> Optional[float]:
|
||||||
|
"""Calculate mid price"""
|
||||||
|
if self.bids and self.asks:
|
||||||
|
return (self.bids[0].price + self.asks[0].price) / 2
|
||||||
|
return None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def spread(self) -> Optional[float]:
|
||||||
|
"""Calculate bid-ask spread"""
|
||||||
|
if self.bids and self.asks:
|
||||||
|
return self.asks[0].price - self.bids[0].price
|
||||||
|
return None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def bid_volume(self) -> float:
|
||||||
|
"""Total bid volume"""
|
||||||
|
return sum(level.size for level in self.bids)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def ask_volume(self) -> float:
|
||||||
|
"""Total ask volume"""
|
||||||
|
return sum(level.size for level in self.asks)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class TradeEvent:
|
||||||
|
"""Standardized trade event"""
|
||||||
|
symbol: str
|
||||||
|
exchange: str
|
||||||
|
timestamp: datetime
|
||||||
|
price: float
|
||||||
|
size: float
|
||||||
|
side: str # 'buy' or 'sell'
|
||||||
|
trade_id: str
|
||||||
|
|
||||||
|
def __post_init__(self):
|
||||||
|
"""Validate trade event data"""
|
||||||
|
if not self.symbol:
|
||||||
|
raise ValueError("Symbol cannot be empty")
|
||||||
|
if not self.exchange:
|
||||||
|
raise ValueError("Exchange cannot be empty")
|
||||||
|
if self.price <= 0:
|
||||||
|
raise ValueError("Price must be positive")
|
||||||
|
if self.size <= 0:
|
||||||
|
raise ValueError("Size must be positive")
|
||||||
|
if self.side not in ['buy', 'sell']:
|
||||||
|
raise ValueError("Side must be 'buy' or 'sell'")
|
||||||
|
if not self.trade_id:
|
||||||
|
raise ValueError("Trade ID cannot be empty")
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class PriceBuckets:
|
||||||
|
"""Aggregated price buckets for heatmap"""
|
||||||
|
symbol: str
|
||||||
|
timestamp: datetime
|
||||||
|
bucket_size: float
|
||||||
|
bid_buckets: Dict[float, float] = field(default_factory=dict) # price -> volume
|
||||||
|
ask_buckets: Dict[float, float] = field(default_factory=dict) # price -> volume
|
||||||
|
|
||||||
|
def __post_init__(self):
|
||||||
|
"""Validate price buckets"""
|
||||||
|
if self.bucket_size <= 0:
|
||||||
|
raise ValueError("Bucket size must be positive")
|
||||||
|
|
||||||
|
def get_bucket_price(self, price: float) -> float:
|
||||||
|
"""Get bucket price for a given price"""
|
||||||
|
return round(price / self.bucket_size) * self.bucket_size
|
||||||
|
|
||||||
|
def add_bid(self, price: float, volume: float):
|
||||||
|
"""Add bid volume to appropriate bucket"""
|
||||||
|
bucket_price = self.get_bucket_price(price)
|
||||||
|
self.bid_buckets[bucket_price] = self.bid_buckets.get(bucket_price, 0) + volume
|
||||||
|
|
||||||
|
def add_ask(self, price: float, volume: float):
|
||||||
|
"""Add ask volume to appropriate bucket"""
|
||||||
|
bucket_price = self.get_bucket_price(price)
|
||||||
|
self.ask_buckets[bucket_price] = self.ask_buckets.get(bucket_price, 0) + volume
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class HeatmapPoint:
|
||||||
|
"""Individual heatmap data point"""
|
||||||
|
price: float
|
||||||
|
volume: float
|
||||||
|
intensity: float # 0.0 to 1.0
|
||||||
|
side: str # 'bid' or 'ask'
|
||||||
|
|
||||||
|
def __post_init__(self):
|
||||||
|
"""Validate heatmap point"""
|
||||||
|
if self.price <= 0:
|
||||||
|
raise ValueError("Price must be positive")
|
||||||
|
if self.volume < 0:
|
||||||
|
raise ValueError("Volume cannot be negative")
|
||||||
|
if not 0 <= self.intensity <= 1:
|
||||||
|
raise ValueError("Intensity must be between 0 and 1")
|
||||||
|
if self.side not in ['bid', 'ask']:
|
||||||
|
raise ValueError("Side must be 'bid' or 'ask'")
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class HeatmapData:
|
||||||
|
"""Heatmap visualization data"""
|
||||||
|
symbol: str
|
||||||
|
timestamp: datetime
|
||||||
|
bucket_size: float
|
||||||
|
data: List[HeatmapPoint] = field(default_factory=list)
|
||||||
|
|
||||||
|
def __post_init__(self):
|
||||||
|
"""Validate heatmap data"""
|
||||||
|
if self.bucket_size <= 0:
|
||||||
|
raise ValueError("Bucket size must be positive")
|
||||||
|
|
||||||
|
def add_point(self, price: float, volume: float, side: str, max_volume: float = None):
|
||||||
|
"""Add a heatmap point with calculated intensity"""
|
||||||
|
if max_volume is None:
|
||||||
|
max_volume = max((point.volume for point in self.data), default=volume)
|
||||||
|
|
||||||
|
intensity = min(volume / max_volume, 1.0) if max_volume > 0 else 0.0
|
||||||
|
point = HeatmapPoint(price=price, volume=volume, intensity=intensity, side=side)
|
||||||
|
self.data.append(point)
|
||||||
|
|
||||||
|
def get_bids(self) -> List[HeatmapPoint]:
|
||||||
|
"""Get bid points sorted by price descending"""
|
||||||
|
bids = [point for point in self.data if point.side == 'bid']
|
||||||
|
return sorted(bids, key=lambda x: x.price, reverse=True)
|
||||||
|
|
||||||
|
def get_asks(self) -> List[HeatmapPoint]:
|
||||||
|
"""Get ask points sorted by price ascending"""
|
||||||
|
asks = [point for point in self.data if point.side == 'ask']
|
||||||
|
return sorted(asks, key=lambda x: x.price)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class OrderBookMetrics:
|
||||||
|
"""Order book analysis metrics"""
|
||||||
|
symbol: str
|
||||||
|
exchange: str
|
||||||
|
timestamp: datetime
|
||||||
|
mid_price: float
|
||||||
|
spread: float
|
||||||
|
spread_percentage: float
|
||||||
|
bid_volume: float
|
||||||
|
ask_volume: float
|
||||||
|
volume_imbalance: float # (bid_volume - ask_volume) / (bid_volume + ask_volume)
|
||||||
|
depth_10: float # Volume within 10 price levels
|
||||||
|
depth_50: float # Volume within 50 price levels
|
||||||
|
|
||||||
|
def __post_init__(self):
|
||||||
|
"""Validate metrics"""
|
||||||
|
if self.mid_price <= 0:
|
||||||
|
raise ValueError("Mid price must be positive")
|
||||||
|
if self.spread < 0:
|
||||||
|
raise ValueError("Spread cannot be negative")
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ImbalanceMetrics:
|
||||||
|
"""Order book imbalance metrics"""
|
||||||
|
symbol: str
|
||||||
|
timestamp: datetime
|
||||||
|
volume_imbalance: float
|
||||||
|
price_imbalance: float
|
||||||
|
depth_imbalance: float
|
||||||
|
momentum_score: float # Derived from recent imbalance changes
|
||||||
|
|
||||||
|
def __post_init__(self):
|
||||||
|
"""Validate imbalance metrics"""
|
||||||
|
if not -1 <= self.volume_imbalance <= 1:
|
||||||
|
raise ValueError("Volume imbalance must be between -1 and 1")
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ConsolidatedOrderBook:
|
||||||
|
"""Consolidated order book from multiple exchanges"""
|
||||||
|
symbol: str
|
||||||
|
timestamp: datetime
|
||||||
|
exchanges: List[str]
|
||||||
|
bids: List[PriceLevel]
|
||||||
|
asks: List[PriceLevel]
|
||||||
|
weighted_mid_price: float
|
||||||
|
total_bid_volume: float
|
||||||
|
total_ask_volume: float
|
||||||
|
exchange_weights: Dict[str, float] = field(default_factory=dict)
|
||||||
|
|
||||||
|
def __post_init__(self):
|
||||||
|
"""Validate consolidated order book"""
|
||||||
|
if not self.exchanges:
|
||||||
|
raise ValueError("At least one exchange must be specified")
|
||||||
|
if self.weighted_mid_price <= 0:
|
||||||
|
raise ValueError("Weighted mid price must be positive")
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ExchangeStatus:
|
||||||
|
"""Exchange connection and health status"""
|
||||||
|
exchange: str
|
||||||
|
status: ConnectionStatus
|
||||||
|
last_message_time: Optional[datetime] = None
|
||||||
|
error_message: Optional[str] = None
|
||||||
|
connection_count: int = 0
|
||||||
|
uptime_percentage: float = 0.0
|
||||||
|
message_rate: float = 0.0 # Messages per second
|
||||||
|
|
||||||
|
def __post_init__(self):
|
||||||
|
"""Validate exchange status"""
|
||||||
|
if not self.exchange:
|
||||||
|
raise ValueError("Exchange name cannot be empty")
|
||||||
|
if not 0 <= self.uptime_percentage <= 100:
|
||||||
|
raise ValueError("Uptime percentage must be between 0 and 100")
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class SystemMetrics:
|
||||||
|
"""System performance metrics"""
|
||||||
|
timestamp: datetime
|
||||||
|
cpu_usage: float
|
||||||
|
memory_usage: float
|
||||||
|
disk_usage: float
|
||||||
|
network_io: Dict[str, float] = field(default_factory=dict)
|
||||||
|
database_connections: int = 0
|
||||||
|
redis_connections: int = 0
|
||||||
|
active_websockets: int = 0
|
||||||
|
messages_per_second: float = 0.0
|
||||||
|
processing_latency: float = 0.0 # Milliseconds
|
||||||
|
|
||||||
|
def __post_init__(self):
|
||||||
|
"""Validate system metrics"""
|
||||||
|
if not 0 <= self.cpu_usage <= 100:
|
||||||
|
raise ValueError("CPU usage must be between 0 and 100")
|
||||||
|
if not 0 <= self.memory_usage <= 100:
|
||||||
|
raise ValueError("Memory usage must be between 0 and 100")
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ReplaySession:
|
||||||
|
"""Historical data replay session"""
|
||||||
|
session_id: str
|
||||||
|
start_time: datetime
|
||||||
|
end_time: datetime
|
||||||
|
speed: float # Playback speed multiplier
|
||||||
|
status: ReplayStatus
|
||||||
|
current_time: Optional[datetime] = None
|
||||||
|
progress: float = 0.0 # 0.0 to 1.0
|
||||||
|
symbols: List[str] = field(default_factory=list)
|
||||||
|
exchanges: List[str] = field(default_factory=list)
|
||||||
|
|
||||||
|
def __post_init__(self):
|
||||||
|
"""Validate replay session"""
|
||||||
|
if not self.session_id:
|
||||||
|
raise ValueError("Session ID cannot be empty")
|
||||||
|
if self.start_time >= self.end_time:
|
||||||
|
raise ValueError("Start time must be before end time")
|
||||||
|
if self.speed <= 0:
|
||||||
|
raise ValueError("Speed must be positive")
|
||||||
|
if not 0 <= self.progress <= 1:
|
||||||
|
raise ValueError("Progress must be between 0 and 1")
|
||||||
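A minimal usage sketch for the heatmap dataclasses above (not part of the diff; the symbol, prices, and volumes are made up for illustration):

    from datetime import datetime, timezone

    heatmap = HeatmapData(symbol='BTCUSDT', timestamp=datetime.now(timezone.utc), bucket_size=10.0)
    heatmap.add_point(price=50000.0, volume=2.5, side='bid')  # first point scales against itself -> intensity 1.0
    heatmap.add_point(price=50010.0, volume=1.0, side='ask')  # scaled against current max volume 2.5 -> intensity 0.4
    best_bid = heatmap.get_bids()[0]                          # bids come back sorted high to low

Invalid input (non-positive price, negative volume, unknown side) raises ValueError from the __post_init__ validators shown above.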
COBY/monitoring/__init__.py (new file, 6 lines)
@@ -0,0 +1,6 @@
"""
Performance monitoring and optimization module.
"""

# Simplified imports to avoid circular dependencies
__all__ = []
COBY/monitoring/alert_manager.py (new file, 675 lines)
@@ -0,0 +1,675 @@
"""
Alert management system for performance degradation and system issues.
"""

import smtplib
import json
from typing import Dict, List, Optional, Any, Callable
from collections import defaultdict, deque
from datetime import datetime, timezone, timedelta
from dataclasses import dataclass, field
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from enum import Enum

try:
    from ..utils.logging import get_logger
    from ..utils.timing import get_current_timestamp
except ImportError:
    from utils.logging import get_logger
    from utils.timing import get_current_timestamp

logger = get_logger(__name__)


class AlertSeverity(Enum):
    """Alert severity levels"""
    INFO = "info"
    WARNING = "warning"
    CRITICAL = "critical"


class AlertStatus(Enum):
    """Alert status"""
    ACTIVE = "active"
    RESOLVED = "resolved"
    ACKNOWLEDGED = "acknowledged"
    SUPPRESSED = "suppressed"


@dataclass
class Alert:
    """Alert definition"""
    id: str
    name: str
    description: str
    severity: AlertSeverity
    metric_name: str
    threshold: float
    comparison: str  # 'gt', 'lt', 'eq', 'ne'
    duration_seconds: int
    status: AlertStatus = AlertStatus.ACTIVE
    triggered_at: Optional[datetime] = None
    resolved_at: Optional[datetime] = None
    acknowledged_at: Optional[datetime] = None
    acknowledged_by: Optional[str] = None
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Convert alert to dictionary"""
        return {
            'id': self.id,
            'name': self.name,
            'description': self.description,
            'severity': self.severity.value,
            'metric_name': self.metric_name,
            'threshold': self.threshold,
            'comparison': self.comparison,
            'duration_seconds': self.duration_seconds,
            'status': self.status.value,
            'triggered_at': self.triggered_at.isoformat() if self.triggered_at else None,
            'resolved_at': self.resolved_at.isoformat() if self.resolved_at else None,
            'acknowledged_at': self.acknowledged_at.isoformat() if self.acknowledged_at else None,
            'acknowledged_by': self.acknowledged_by,
            'metadata': self.metadata
        }


@dataclass
class AlertRule:
    """Alert rule configuration"""
    name: str
    metric_name: str
    threshold: float
    comparison: str
    duration_seconds: int
    severity: AlertSeverity
    description: str = ""
    enabled: bool = True
    metadata: Dict[str, Any] = field(default_factory=dict)


@dataclass
class NotificationChannel:
    """Notification channel configuration"""
    name: str
    type: str  # 'email', 'webhook', 'slack'
    config: Dict[str, Any]
    enabled: bool = True
    severity_filter: List[AlertSeverity] = field(default_factory=list)


class AlertManager:
    """
    Manages alerts, notifications, and alert lifecycle.

    Provides comprehensive alerting with multiple notification channels
    and alert suppression capabilities.
    """

    def __init__(self):
        """Initialize alert manager"""
        # Alert storage
        self.alert_rules: Dict[str, AlertRule] = {}
        self.active_alerts: Dict[str, Alert] = {}
        self.alert_history: deque = deque(maxlen=10000)

        # Notification channels
        self.notification_channels: Dict[str, NotificationChannel] = {}

        # Alert state tracking
        self.metric_values: Dict[str, deque] = defaultdict(lambda: deque(maxlen=100))
        self.alert_triggers: Dict[str, datetime] = {}

        # Suppression rules
        self.suppression_rules: Dict[str, Dict[str, Any]] = {}

        # Callbacks
        self.alert_callbacks: List[Callable[[Alert], None]] = []

        # Statistics
        self.alerts_triggered = 0
        self.alerts_resolved = 0
        self.notifications_sent = 0

        logger.info("Alert manager initialized")

    def add_alert_rule(self, rule: AlertRule) -> None:
        """Add an alert rule"""
        self.alert_rules[rule.name] = rule
        logger.info(f"Added alert rule: {rule.name}")

    def remove_alert_rule(self, rule_name: str) -> None:
        """Remove an alert rule"""
        if rule_name in self.alert_rules:
            del self.alert_rules[rule_name]
            # Also remove any active alerts for this rule
            alerts_to_remove = [
                alert_id for alert_id, alert in self.active_alerts.items()
                if alert.name == rule_name
            ]
            for alert_id in alerts_to_remove:
                del self.active_alerts[alert_id]
            logger.info(f"Removed alert rule: {rule_name}")

    def add_notification_channel(self, channel: NotificationChannel) -> None:
        """Add a notification channel"""
        self.notification_channels[channel.name] = channel
        logger.info(f"Added notification channel: {channel.name} ({channel.type})")

    def remove_notification_channel(self, channel_name: str) -> None:
        """Remove a notification channel"""
        if channel_name in self.notification_channels:
            del self.notification_channels[channel_name]
            logger.info(f"Removed notification channel: {channel_name}")

    def update_metric_value(self, metric_name: str, value: float) -> None:
        """Update metric value and check alerts"""
        timestamp = get_current_timestamp()
        self.metric_values[metric_name].append((timestamp, value))

        # Check all alert rules for this metric
        for rule_name, rule in self.alert_rules.items():
            if rule.metric_name == metric_name and rule.enabled:
                self._check_alert_rule(rule, value, timestamp)

    def _check_alert_rule(self, rule: AlertRule, value: float, timestamp: datetime) -> None:
        """Check if an alert rule should be triggered"""
        try:
            # Check if condition is met
            condition_met = self._evaluate_condition(rule.comparison, value, rule.threshold)

            alert_id = f"{rule.name}_{rule.metric_name}"

            if condition_met:
                # Check if we need to wait for duration
                if alert_id not in self.alert_triggers:
                    self.alert_triggers[alert_id] = timestamp
                    return

                # Check if duration has passed
                trigger_time = self.alert_triggers[alert_id]
                if (timestamp - trigger_time).total_seconds() >= rule.duration_seconds:
                    # Trigger alert if not already active
                    if alert_id not in self.active_alerts:
                        self._trigger_alert(rule, value, timestamp)
            else:
                # Condition not met - clear trigger time and resolve alert if active
                self.alert_triggers.pop(alert_id, None)
                if alert_id in self.active_alerts:
                    self._resolve_alert(alert_id, timestamp)

        except Exception as e:
            logger.error(f"Error checking alert rule {rule.name}: {e}")

    def _evaluate_condition(self, comparison: str, value: float, threshold: float) -> bool:
        """Evaluate alert condition"""
        if comparison == 'gt':
            return value > threshold
        elif comparison == 'lt':
            return value < threshold
        elif comparison == 'eq':
            return abs(value - threshold) < 0.001
        elif comparison == 'ne':
            return abs(value - threshold) >= 0.001
        elif comparison == 'gte':
            return value >= threshold
        elif comparison == 'lte':
            return value <= threshold
        else:
            logger.warning(f"Unknown comparison operator: {comparison}")
            return False

    def _trigger_alert(self, rule: AlertRule, value: float, timestamp: datetime) -> None:
        """Trigger an alert"""
        try:
            alert_id = f"{rule.name}_{rule.metric_name}"

            # Create alert
            alert = Alert(
                id=alert_id,
                name=rule.name,
                description=rule.description or f"{rule.metric_name} {rule.comparison} {rule.threshold}",
                severity=rule.severity,
                metric_name=rule.metric_name,
                threshold=rule.threshold,
                comparison=rule.comparison,
                duration_seconds=rule.duration_seconds,
                triggered_at=timestamp,
                metadata={
                    'current_value': value,
                    'rule_metadata': rule.metadata
                }
            )

            # Check suppression rules
            if self._is_suppressed(alert):
                alert.status = AlertStatus.SUPPRESSED
                logger.info(f"Alert suppressed: {alert.name}")
                return

            # Store alert
            self.active_alerts[alert_id] = alert
            self.alert_history.append(alert)
            self.alerts_triggered += 1

            logger.warning(f"Alert triggered: {alert.name} - {alert.description}")

            # Send notifications
            self._send_notifications(alert)

            # Call callbacks
            for callback in self.alert_callbacks:
                try:
                    callback(alert)
                except Exception as e:
                    logger.error(f"Error in alert callback: {e}")

        except Exception as e:
            logger.error(f"Error triggering alert: {e}")

    def _resolve_alert(self, alert_id: str, timestamp: datetime) -> None:
        """Resolve an alert"""
        try:
            if alert_id in self.active_alerts:
                alert = self.active_alerts[alert_id]
                alert.status = AlertStatus.RESOLVED
                alert.resolved_at = timestamp

                # Move to history and remove from active
                self.alert_history.append(alert)
                del self.active_alerts[alert_id]
                self.alerts_resolved += 1

                logger.info(f"Alert resolved: {alert.name}")

                # Send resolution notifications
                self._send_resolution_notifications(alert)

        except Exception as e:
            logger.error(f"Error resolving alert {alert_id}: {e}")

    def _is_suppressed(self, alert: Alert) -> bool:
        """Check if alert should be suppressed"""
        for rule_name, rule in self.suppression_rules.items():
            try:
                # Check if suppression rule applies
                if self._matches_suppression_rule(alert, rule):
                    return True
            except Exception as e:
                logger.error(f"Error checking suppression rule {rule_name}: {e}")

        return False

    def _matches_suppression_rule(self, alert: Alert, rule: Dict[str, Any]) -> bool:
        """Check if alert matches suppression rule"""
        # Check alert name pattern
        if 'alert_pattern' in rule:
            import re
            if not re.match(rule['alert_pattern'], alert.name):
                return False

        # Check severity
        if 'severity' in rule:
            if alert.severity.value not in rule['severity']:
                return False

        # Check time window
        if 'time_window' in rule:
            start_time = datetime.fromisoformat(rule['time_window']['start'])
            end_time = datetime.fromisoformat(rule['time_window']['end'])
            current_time = get_current_timestamp()

            if not (start_time <= current_time <= end_time):
                return False

        return True

    def _send_notifications(self, alert: Alert) -> None:
        """Send notifications for an alert"""
        for channel_name, channel in self.notification_channels.items():
            try:
                if not channel.enabled:
                    continue

                # Check severity filter
                if channel.severity_filter and alert.severity not in channel.severity_filter:
                    continue

                # Send notification based on channel type
                if channel.type == 'email':
                    self._send_email_notification(alert, channel)
                elif channel.type == 'webhook':
                    self._send_webhook_notification(alert, channel)
                elif channel.type == 'slack':
                    self._send_slack_notification(alert, channel)
                else:
                    logger.warning(f"Unknown notification channel type: {channel.type}")

                self.notifications_sent += 1

            except Exception as e:
                logger.error(f"Error sending notification via {channel_name}: {e}")

    def _send_resolution_notifications(self, alert: Alert) -> None:
        """Send resolution notifications"""
        for channel_name, channel in self.notification_channels.items():
            try:
                if not channel.enabled:
                    continue

                # Send resolution notification
                if channel.type == 'email':
                    self._send_email_resolution(alert, channel)
                elif channel.type == 'webhook':
                    self._send_webhook_resolution(alert, channel)
                elif channel.type == 'slack':
                    self._send_slack_resolution(alert, channel)

            except Exception as e:
                logger.error(f"Error sending resolution notification via {channel_name}: {e}")

    def _send_email_notification(self, alert: Alert, channel: NotificationChannel) -> None:
        """Send email notification"""
        try:
            config = channel.config

            # Create message
            msg = MIMEMultipart()
            msg['From'] = config['from_email']
            msg['To'] = ', '.join(config['to_emails'])
            msg['Subject'] = f"[{alert.severity.value.upper()}] {alert.name}"

            # Create body
            body = f"""
Alert: {alert.name}
Severity: {alert.severity.value.upper()}
Description: {alert.description}
Metric: {alert.metric_name}
Current Value: {alert.metadata.get('current_value', 'N/A')}
Threshold: {alert.threshold}
Triggered At: {alert.triggered_at.isoformat() if alert.triggered_at else 'N/A'}

Alert ID: {alert.id}
            """

            msg.attach(MIMEText(body, 'plain'))

            # Send email
            with smtplib.SMTP(config['smtp_server'], config['smtp_port']) as server:
                if config.get('use_tls', True):
                    server.starttls()
                if 'username' in config and 'password' in config:
                    server.login(config['username'], config['password'])
                server.send_message(msg)

            logger.info(f"Email notification sent for alert: {alert.name}")

        except Exception as e:
            logger.error(f"Error sending email notification: {e}")

    def _send_webhook_notification(self, alert: Alert, channel: NotificationChannel) -> None:
        """Send webhook notification"""
        try:
            import requests

            config = channel.config
            payload = {
                'alert': alert.to_dict(),
                'type': 'alert_triggered'
            }

            response = requests.post(
                config['url'],
                json=payload,
                headers=config.get('headers', {}),
                timeout=config.get('timeout', 10)
            )
            response.raise_for_status()

            logger.info(f"Webhook notification sent for alert: {alert.name}")

        except Exception as e:
            logger.error(f"Error sending webhook notification: {e}")

    def _send_slack_notification(self, alert: Alert, channel: NotificationChannel) -> None:
        """Send Slack notification"""
        try:
            import requests

            config = channel.config

            # Create Slack message
            color = {
                AlertSeverity.INFO: 'good',
                AlertSeverity.WARNING: 'warning',
                AlertSeverity.CRITICAL: 'danger'
            }.get(alert.severity, 'warning')

            payload = {
                'channel': config['channel'],
                'username': config.get('username', 'AlertBot'),
                'attachments': [{
                    'color': color,
                    'title': f"{alert.severity.value.upper()}: {alert.name}",
                    'text': alert.description,
                    'fields': [
                        {'title': 'Metric', 'value': alert.metric_name, 'short': True},
                        {'title': 'Current Value', 'value': str(alert.metadata.get('current_value', 'N/A')), 'short': True},
                        {'title': 'Threshold', 'value': str(alert.threshold), 'short': True},
                        {'title': 'Triggered At', 'value': alert.triggered_at.isoformat() if alert.triggered_at else 'N/A', 'short': True}
                    ],
                    'footer': f"Alert ID: {alert.id}"
                }]
            }

            response = requests.post(
                config['webhook_url'],
                json=payload,
                timeout=10
            )
            response.raise_for_status()

            logger.info(f"Slack notification sent for alert: {alert.name}")

        except Exception as e:
            logger.error(f"Error sending Slack notification: {e}")

    def _send_email_resolution(self, alert: Alert, channel: NotificationChannel) -> None:
        """Send email resolution notification"""
        try:
            config = channel.config

            # Create message
            msg = MIMEMultipart()
            msg['From'] = config['from_email']
            msg['To'] = ', '.join(config['to_emails'])
            msg['Subject'] = f"[RESOLVED] {alert.name}"

            # Create body
            duration = ""
            if alert.triggered_at and alert.resolved_at:
                duration = str(alert.resolved_at - alert.triggered_at)

            body = f"""
Alert RESOLVED: {alert.name}
Severity: {alert.severity.value.upper()}
Description: {alert.description}
Metric: {alert.metric_name}
Threshold: {alert.threshold}
Triggered At: {alert.triggered_at.isoformat() if alert.triggered_at else 'N/A'}
Resolved At: {alert.resolved_at.isoformat() if alert.resolved_at else 'N/A'}
Duration: {duration}

Alert ID: {alert.id}
            """

            msg.attach(MIMEText(body, 'plain'))

            # Send email
            with smtplib.SMTP(config['smtp_server'], config['smtp_port']) as server:
                if config.get('use_tls', True):
                    server.starttls()
                if 'username' in config and 'password' in config:
                    server.login(config['username'], config['password'])
                server.send_message(msg)

            logger.info(f"Email resolution notification sent for alert: {alert.name}")

        except Exception as e:
            logger.error(f"Error sending email resolution notification: {e}")

    def _send_webhook_resolution(self, alert: Alert, channel: NotificationChannel) -> None:
        """Send webhook resolution notification"""
        try:
            import requests

            config = channel.config
            payload = {
                'alert': alert.to_dict(),
                'type': 'alert_resolved'
            }

            response = requests.post(
                config['url'],
                json=payload,
                headers=config.get('headers', {}),
                timeout=config.get('timeout', 10)
            )
            response.raise_for_status()

            logger.info(f"Webhook resolution notification sent for alert: {alert.name}")

        except Exception as e:
            logger.error(f"Error sending webhook resolution notification: {e}")

    def _send_slack_resolution(self, alert: Alert, channel: NotificationChannel) -> None:
        """Send Slack resolution notification"""
        try:
            import requests

            config = channel.config

            duration = ""
            if alert.triggered_at and alert.resolved_at:
                duration = str(alert.resolved_at - alert.triggered_at)

            payload = {
                'channel': config['channel'],
                'username': config.get('username', 'AlertBot'),
                'attachments': [{
                    'color': 'good',
                    'title': f"RESOLVED: {alert.name}",
                    'text': f"Alert has been resolved: {alert.description}",
                    'fields': [
                        {'title': 'Duration', 'value': duration, 'short': True},
                        {'title': 'Resolved At', 'value': alert.resolved_at.isoformat() if alert.resolved_at else 'N/A', 'short': True}
                    ],
                    'footer': f"Alert ID: {alert.id}"
                }]
            }

            response = requests.post(
                config['webhook_url'],
                json=payload,
                timeout=10
            )
            response.raise_for_status()

            logger.info(f"Slack resolution notification sent for alert: {alert.name}")

        except Exception as e:
            logger.error(f"Error sending Slack resolution notification: {e}")

    def acknowledge_alert(self, alert_id: str, acknowledged_by: str) -> bool:
        """Acknowledge an alert"""
        if alert_id in self.active_alerts:
            alert = self.active_alerts[alert_id]
            alert.status = AlertStatus.ACKNOWLEDGED
            alert.acknowledged_at = get_current_timestamp()
            alert.acknowledged_by = acknowledged_by

            logger.info(f"Alert acknowledged by {acknowledged_by}: {alert.name}")
            return True

        return False

    def suppress_alert(self, alert_id: str) -> bool:
        """Suppress an alert"""
        if alert_id in self.active_alerts:
            alert = self.active_alerts[alert_id]
            alert.status = AlertStatus.SUPPRESSED

            logger.info(f"Alert suppressed: {alert.name}")
            return True

        return False

    def add_suppression_rule(self, name: str, rule: Dict[str, Any]) -> None:
        """Add alert suppression rule"""
        self.suppression_rules[name] = rule
        logger.info(f"Added suppression rule: {name}")

    def remove_suppression_rule(self, name: str) -> None:
        """Remove alert suppression rule"""
        if name in self.suppression_rules:
            del self.suppression_rules[name]
            logger.info(f"Removed suppression rule: {name}")

    def get_active_alerts(self, severity: AlertSeverity = None) -> List[Alert]:
        """Get active alerts, optionally filtered by severity"""
        alerts = list(self.active_alerts.values())

        if severity:
            alerts = [alert for alert in alerts if alert.severity == severity]

        return sorted(alerts, key=lambda x: x.triggered_at or datetime.min, reverse=True)

    def get_alert_history(self, limit: int = 100, severity: AlertSeverity = None) -> List[Alert]:
        """Get alert history"""
        alerts = list(self.alert_history)

        if severity:
            alerts = [alert for alert in alerts if alert.severity == severity]

        return sorted(alerts, key=lambda x: x.triggered_at or datetime.min, reverse=True)[:limit]

    def get_alert_summary(self) -> Dict[str, Any]:
        """Get alert summary statistics"""
        active_by_severity = defaultdict(int)
        for alert in self.active_alerts.values():
            active_by_severity[alert.severity.value] += 1

        return {
            'active_alerts': len(self.active_alerts),
            'active_by_severity': dict(active_by_severity),
            'total_triggered': self.alerts_triggered,
            'total_resolved': self.alerts_resolved,
            'notifications_sent': self.notifications_sent,
            'alert_rules': len(self.alert_rules),
            'notification_channels': len(self.notification_channels),
            'suppression_rules': len(self.suppression_rules)
        }

    def register_callback(self, callback: Callable[[Alert], None]) -> None:
        """Register alert callback"""
        self.alert_callbacks.append(callback)
        logger.info(f"Registered alert callback: {callback.__name__}")

    def get_stats(self) -> Dict[str, Any]:
        """Get alert manager statistics"""
        return {
            'alert_rules': len(self.alert_rules),
            'active_alerts': len(self.active_alerts),
            'alert_history_count': len(self.alert_history),
            'notification_channels': len(self.notification_channels),
            'suppression_rules': len(self.suppression_rules),
            'alerts_triggered': self.alerts_triggered,
            'alerts_resolved': self.alerts_resolved,
            'notifications_sent': self.notifications_sent,
            'registered_callbacks': len(self.alert_callbacks)
        }


# Global alert manager instance
alert_manager = AlertManager()
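A hedged sketch of how the module-level alert_manager above appears intended to be driven; the rule values and webhook URL are placeholders for illustration, not taken from the repo:

    rule = AlertRule(
        name='high_latency',
        metric_name='processing_latency_ms',
        threshold=250.0,
        comparison='gt',
        duration_seconds=60,
        severity=AlertSeverity.WARNING,
        description='Processing latency above 250 ms',
    )
    channel = NotificationChannel(
        name='ops-webhook',
        type='webhook',
        config={'url': 'https://example.invalid/hooks/alerts'},  # placeholder endpoint
    )
    alert_manager.add_alert_rule(rule)
    alert_manager.add_notification_channel(channel)
    alert_manager.update_metric_value('processing_latency_ms', 300.0)  # starts the duration window

The alert only fires once the condition has held for duration_seconds, and it is auto-resolved (with a resolution notification) when a later metric update no longer satisfies the comparison.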
COBY/monitoring/latency_tracker.py (new file, 501 lines)
@@ -0,0 +1,501 @@
"""
End-to-end latency tracking for data processing pipeline.
"""

import time
import threading
from typing import Dict, List, Optional, Any, ContextManager
from collections import defaultdict, deque
from datetime import datetime, timezone
from dataclasses import dataclass
from contextlib import contextmanager

try:
    from ..utils.logging import get_logger, set_correlation_id
    from ..utils.timing import get_current_timestamp
except ImportError:
    from utils.logging import get_logger, set_correlation_id
    from utils.timing import get_current_timestamp
    # Import will be done lazily to avoid circular imports

logger = get_logger(__name__)


@dataclass
class LatencyMeasurement:
    """Individual latency measurement"""
    operation: str
    start_time: float
    end_time: float
    duration_ms: float
    correlation_id: Optional[str] = None
    metadata: Dict[str, Any] = None

    @property
    def duration_seconds(self) -> float:
        """Get duration in seconds"""
        return self.duration_ms / 1000.0


class LatencyTracker:
    """
    Tracks end-to-end latency for various operations in the system.

    Provides context managers for easy latency measurement and
    comprehensive latency analytics.
    """

    def __init__(self, max_measurements: int = 10000):
        """
        Initialize latency tracker.

        Args:
            max_measurements: Maximum number of measurements to keep in memory
        """
        self.max_measurements = max_measurements

        # Latency storage
        self.measurements: Dict[str, deque] = defaultdict(
            lambda: deque(maxlen=max_measurements)
        )

        # Active measurements (for tracking ongoing operations)
        self.active_measurements: Dict[str, Dict[str, float]] = defaultdict(dict)

        # Thread safety
        self._lock = threading.RLock()

        # Statistics
        self.total_measurements = 0

        logger.info(f"Latency tracker initialized with max {max_measurements} measurements")

    @contextmanager
    def measure(self, operation: str, correlation_id: str = None,
                metadata: Dict[str, Any] = None) -> ContextManager[None]:
        """
        Context manager for measuring operation latency.

        Args:
            operation: Name of the operation being measured
            correlation_id: Optional correlation ID for tracking
            metadata: Optional metadata to store with measurement

        Usage:
            with latency_tracker.measure('data_processing'):
                # Your code here
                process_data()
        """
        start_time = time.perf_counter()
        measurement_id = f"{operation}_{start_time}_{threading.get_ident()}"

        # Store active measurement
        with self._lock:
            self.active_measurements[operation][measurement_id] = start_time

        try:
            yield
        finally:
            end_time = time.perf_counter()
            duration_ms = (end_time - start_time) * 1000

            # Create measurement
            measurement = LatencyMeasurement(
                operation=operation,
                start_time=start_time,
                end_time=end_time,
                duration_ms=duration_ms,
                correlation_id=correlation_id,
                metadata=metadata or {}
            )

            # Store measurement
            with self._lock:
                self.measurements[operation].append(measurement)
                self.active_measurements[operation].pop(measurement_id, None)
                self.total_measurements += 1

            # Record in metrics collector
            try:
                from .metrics_collector import metrics_collector
                metrics_collector.observe_histogram(
                    f"{operation}_latency_ms",
                    duration_ms,
                    labels={'operation': operation}
                )
            except ImportError:
                pass  # Metrics collector not available

            logger.debug(f"Measured {operation}: {duration_ms:.2f}ms")

    def start_measurement(self, operation: str, measurement_id: str = None,
                          correlation_id: str = None) -> str:
        """
        Start a manual latency measurement.

        Args:
            operation: Name of the operation
            measurement_id: Optional custom measurement ID
            correlation_id: Optional correlation ID

        Returns:
            str: Measurement ID for ending the measurement
        """
        start_time = time.perf_counter()

        if measurement_id is None:
            measurement_id = f"{operation}_{start_time}_{threading.get_ident()}"

        with self._lock:
            self.active_measurements[operation][measurement_id] = start_time

        logger.debug(f"Started measurement {measurement_id} for {operation}")
        return measurement_id

    def end_measurement(self, operation: str, measurement_id: str,
                        metadata: Dict[str, Any] = None) -> Optional[LatencyMeasurement]:
        """
        End a manual latency measurement.

        Args:
            operation: Name of the operation
            measurement_id: Measurement ID from start_measurement
            metadata: Optional metadata to store

        Returns:
            LatencyMeasurement: The completed measurement, or None if not found
        """
        end_time = time.perf_counter()

        with self._lock:
            start_time = self.active_measurements[operation].pop(measurement_id, None)

        if start_time is None:
            logger.warning(f"No active measurement found: {measurement_id}")
            return None

        duration_ms = (end_time - start_time) * 1000

        # Create measurement
        measurement = LatencyMeasurement(
            operation=operation,
            start_time=start_time,
            end_time=end_time,
            duration_ms=duration_ms,
            metadata=metadata or {}
        )

        # Store measurement
        with self._lock:
            self.measurements[operation].append(measurement)
            self.total_measurements += 1

        # Record in metrics collector
        try:
            from .metrics_collector import metrics_collector
            metrics_collector.observe_histogram(
                f"{operation}_latency_ms",
                duration_ms,
                labels={'operation': operation}
            )
        except ImportError:
            pass  # Metrics collector not available

        logger.debug(f"Completed measurement {measurement_id}: {duration_ms:.2f}ms")
        return measurement

    def get_latency_stats(self, operation: str) -> Dict[str, float]:
        """
        Get latency statistics for an operation.

        Args:
            operation: Operation name

        Returns:
            Dict: Latency statistics
        """
        with self._lock:
            measurements = list(self.measurements[operation])

        if not measurements:
            return {
                'count': 0,
                'avg_ms': 0.0,
                'min_ms': 0.0,
                'max_ms': 0.0,
                'p50_ms': 0.0,
                'p95_ms': 0.0,
                'p99_ms': 0.0
            }

        durations = [m.duration_ms for m in measurements]
        durations.sort()

        count = len(durations)
        avg_ms = sum(durations) / count
        min_ms = durations[0]
        max_ms = durations[-1]

        # Calculate percentiles
        p50_ms = durations[int(0.50 * count)]
        p95_ms = durations[int(0.95 * count)]
        p99_ms = durations[int(0.99 * count)]

        return {
            'count': count,
            'avg_ms': avg_ms,
            'min_ms': min_ms,
            'max_ms': max_ms,
            'p50_ms': p50_ms,
            'p95_ms': p95_ms,
            'p99_ms': p99_ms
        }

    def get_all_latency_stats(self) -> Dict[str, Dict[str, float]]:
        """Get latency statistics for all operations"""
        with self._lock:
            operations = list(self.measurements.keys())

        return {
            operation: self.get_latency_stats(operation)
            for operation in operations
        }

    def get_recent_measurements(self, operation: str, limit: int = 100) -> List[LatencyMeasurement]:
        """
        Get recent measurements for an operation.

        Args:
            operation: Operation name
            limit: Maximum number of measurements to return

        Returns:
            List[LatencyMeasurement]: Recent measurements
        """
        with self._lock:
            measurements = list(self.measurements[operation])

        return measurements[-limit:]

    def get_slow_operations(self, threshold_ms: float = 100.0) -> List[Dict[str, Any]]:
        """
        Get operations that are slower than threshold.

        Args:
            threshold_ms: Latency threshold in milliseconds

        Returns:
            List: Slow operations with their stats
        """
        slow_operations = []

        for operation in self.measurements.keys():
            stats = self.get_latency_stats(operation)
            if stats['avg_ms'] > threshold_ms:
                slow_operations.append({
                    'operation': operation,
                    'avg_latency_ms': stats['avg_ms'],
                    'p95_latency_ms': stats['p95_ms'],
                    'count': stats['count']
                })

        # Sort by average latency (descending)
        slow_operations.sort(key=lambda x: x['avg_latency_ms'], reverse=True)
        return slow_operations

    def get_latency_trends(self, operation: str, window_size: int = 100) -> Dict[str, Any]:
        """
        Get latency trends for an operation.

        Args:
            operation: Operation name
            window_size: Number of recent measurements to analyze

        Returns:
            Dict: Trend analysis
        """
        recent_measurements = self.get_recent_measurements(operation, window_size)

        if len(recent_measurements) < 2:
            return {'trend': 'insufficient_data'}

        # Split into two halves for trend analysis
        mid_point = len(recent_measurements) // 2
        first_half = recent_measurements[:mid_point]
        second_half = recent_measurements[mid_point:]

        first_avg = sum(m.duration_ms for m in first_half) / len(first_half)
        second_avg = sum(m.duration_ms for m in second_half) / len(second_half)

        # Calculate trend
        change_percent = ((second_avg - first_avg) / first_avg) * 100

        if abs(change_percent) < 5:
            trend = 'stable'
        elif change_percent > 0:
            trend = 'increasing'
        else:
            trend = 'decreasing'

        return {
            'trend': trend,
            'change_percent': change_percent,
            'first_half_avg_ms': first_avg,
            'second_half_avg_ms': second_avg,
            'sample_size': len(recent_measurements)
        }

    def get_active_measurements_count(self) -> Dict[str, int]:
        """Get count of currently active measurements by operation"""
        with self._lock:
            return {
                operation: len(measurements)
                for operation, measurements in self.active_measurements.items()
                if measurements
            }

    def get_latency_distribution(self, operation: str, bucket_size_ms: float = 10.0) -> Dict[str, int]:
        """
        Get latency distribution in buckets.

        Args:
            operation: Operation name
            bucket_size_ms: Size of each bucket in milliseconds

        Returns:
            Dict: Latency distribution buckets
        """
        with self._lock:
            measurements = list(self.measurements[operation])

        if not measurements:
            return {}

        # Create buckets
        distribution = defaultdict(int)

        for measurement in measurements:
            bucket = int(measurement.duration_ms // bucket_size_ms) * bucket_size_ms
            bucket_label = f"{bucket:.0f}-{bucket + bucket_size_ms:.0f}ms"
            distribution[bucket_label] += 1

        return dict(distribution)

    def export_measurements(self, operation: str = None,
                            format: str = 'json') -> List[Dict[str, Any]]:
        """
        Export measurements for analysis.

        Args:
            operation: Specific operation to export (None for all)
            format: Export format ('json', 'csv')

        Returns:
            List: Exported measurement data
        """
        exported_data = []

        operations = [operation] if operation else list(self.measurements.keys())

        for op in operations:
            with self._lock:
                measurements = list(self.measurements[op])

            for measurement in measurements:
                data = {
                    'operation': measurement.operation,
                    'duration_ms': measurement.duration_ms,
                    'start_time': measurement.start_time,
                    'end_time': measurement.end_time,
                    'correlation_id': measurement.correlation_id,
                    'metadata': measurement.metadata
                }
                exported_data.append(data)

        return exported_data

    def clear_measurements(self, operation: str = None) -> None:
        """
        Clear measurements for an operation or all operations.

        Args:
            operation: Specific operation to clear (None for all)
        """
        with self._lock:
            if operation:
                self.measurements[operation].clear()
                logger.info(f"Cleared measurements for operation: {operation}")
            else:
                self.measurements.clear()
                self.total_measurements = 0
                logger.info("Cleared all measurements")

    def get_performance_impact(self, operation: str) -> Dict[str, Any]:
        """
        Analyze performance impact of an operation.

        Args:
            operation: Operation name

        Returns:
            Dict: Performance impact analysis
        """
        stats = self.get_latency_stats(operation)
        trends = self.get_latency_trends(operation)

        # Determine impact level
        avg_latency = stats['avg_ms']
        if avg_latency < 10:
            impact_level = 'low'
        elif avg_latency < 100:
            impact_level = 'medium'
        else:
            impact_level = 'high'

        # Check for performance degradation
        degradation = trends.get('trend') == 'increasing' and trends.get('change_percent', 0) > 20

        return {
            'operation': operation,
            'impact_level': impact_level,
            'avg_latency_ms': avg_latency,
            'p95_latency_ms': stats['p95_ms'],
            'measurement_count': stats['count'],
            'trend': trends.get('trend', 'unknown'),
            'performance_degradation': degradation,
            'recommendations': self._get_performance_recommendations(stats, trends)
        }

    def _get_performance_recommendations(self, stats: Dict[str, float],
                                         trends: Dict[str, Any]) -> List[str]:
        """Get performance recommendations based on stats and trends"""
        recommendations = []

        if stats['avg_ms'] > 100:
            recommendations.append("Consider optimizing this operation - average latency is high")

        if stats['p95_ms'] > stats['avg_ms'] * 3:
            recommendations.append("High latency variance detected - investigate outliers")

        if trends.get('trend') == 'increasing':
            recommendations.append("Latency is trending upward - monitor for performance degradation")

        if stats['count'] < 10:
            recommendations.append("Insufficient data for reliable analysis - collect more measurements")

        return recommendations

    def get_stats(self) -> Dict[str, Any]:
        """Get latency tracker statistics"""
        with self._lock:
            return {
                'total_measurements': self.total_measurements,
                'operations_tracked': len(self.measurements),
                'active_measurements': sum(len(m) for m in self.active_measurements.values()),
                'max_measurements': self.max_measurements,
                'operations': list(self.measurements.keys())
            }


# Global latency tracker instance
latency_tracker = LatencyTracker()
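A small sketch of the two measurement styles the tracker exposes; 'order_book_update', 'db_write', and the called functions are arbitrary placeholders for whatever work is being timed:

    # Context-manager style
    with latency_tracker.measure('order_book_update', correlation_id='req-123'):
        process_update()  # placeholder for the work being timed

    # Manual start/end style
    mid = latency_tracker.start_measurement('db_write')
    write_batch()  # placeholder
    latency_tracker.end_measurement('db_write', mid)

    print(latency_tracker.get_latency_stats('order_book_update'))  # count, avg/min/max, p50/p95/p99 in ms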
COBY/monitoring/memory_monitor.py (new file, 652 lines)
@@ -0,0 +1,652 @@
"""
Memory usage monitoring and garbage collection optimization.
"""

import gc
import sys
import threading
import tracemalloc
from typing import Dict, List, Optional, Any, Tuple
from collections import defaultdict, deque
from datetime import datetime, timezone
from dataclasses import dataclass

try:
    from ..utils.logging import get_logger
    from ..utils.timing import get_current_timestamp
except ImportError:
    from utils.logging import get_logger
    from utils.timing import get_current_timestamp
    # Import will be done lazily to avoid circular imports

logger = get_logger(__name__)


@dataclass
class MemorySnapshot:
    """Memory usage snapshot"""
    timestamp: datetime
    total_memory_mb: float
    available_memory_mb: float
    process_memory_mb: float
    gc_collections: Dict[int, int]
    gc_objects: int
    tracemalloc_current_mb: Optional[float] = None
    tracemalloc_peak_mb: Optional[float] = None


@dataclass
class MemoryLeak:
    """Memory leak detection result"""
    object_type: str
    count_increase: int
    size_increase_mb: float
    growth_rate_per_hour: float
    severity: str  # 'low', 'medium', 'high'


class MemoryMonitor:
    """
    Monitors memory usage, detects leaks, and optimizes garbage collection.

    Provides detailed memory analytics and automatic GC optimization.
    """

    def __init__(self, enable_tracemalloc: bool = False, snapshot_interval: float = 60.0):
        """
        Initialize memory monitor.

        Args:
            enable_tracemalloc: Whether to enable detailed memory tracing
            snapshot_interval: How often to take memory snapshots (seconds)
        """
        self.enable_tracemalloc = enable_tracemalloc
        self.snapshot_interval = snapshot_interval

        # Memory tracking
        self.memory_snapshots: deque = deque(maxlen=1000)
        self.object_counts: Dict[str, deque] = defaultdict(lambda: deque(maxlen=100))

        # GC optimization
        self.gc_stats: Dict[str, Any] = {}
        self.gc_thresholds = gc.get_threshold()
        self.auto_gc_enabled = True

        # Leak detection
        self.leak_detection_enabled = True
        self.detected_leaks: List[MemoryLeak] = []

        # Monitoring control
        self._monitoring = False
        self._monitor_thread: Optional[threading.Thread] = None

        # Initialize tracemalloc if enabled
        if self.enable_tracemalloc and not tracemalloc.is_tracing():
            tracemalloc.start()
            logger.info("Started tracemalloc for detailed memory tracking")

        logger.info(f"Memory monitor initialized (tracemalloc: {self.enable_tracemalloc})")

    def start_monitoring(self) -> None:
        """Start memory monitoring"""
        if self._monitoring:
            logger.warning("Memory monitoring already running")
            return

        self._monitoring = True
        self._monitor_thread = threading.Thread(
            target=self._monitoring_loop,
            name="MemoryMonitor",
            daemon=True
        )
        self._monitor_thread.start()
        logger.info("Started memory monitoring")

    def stop_monitoring(self) -> None:
        """Stop memory monitoring"""
        if not self._monitoring:
            return

        self._monitoring = False
        if self._monitor_thread:
            self._monitor_thread.join(timeout=5.0)
        logger.info("Stopped memory monitoring")

    def _monitoring_loop(self) -> None:
        """Main monitoring loop"""
        import time

        while self._monitoring:
            try:
                # Take memory snapshot
                self._take_memory_snapshot()

                # Update object counts
                self._update_object_counts()

                # Check for memory leaks
                if self.leak_detection_enabled:
                    self._check_for_leaks()

                # Optimize garbage collection
                if self.auto_gc_enabled:
                    self._optimize_gc()

                # Periodic cleanup to prevent memory leaks in the monitor itself
                if hasattr(self, '_cleanup_counter'):
                    self._cleanup_counter += 1
                else:
                    self._cleanup_counter = 1

                if self._cleanup_counter % 10 == 0:  # Every 10 cycles
                    self._cleanup_monitor_data()

                time.sleep(self.snapshot_interval)

            except Exception as e:
                logger.error(f"Error in memory monitoring loop: {e}")
                time.sleep(self.snapshot_interval)

    def _take_memory_snapshot(self) -> None:
        """Take a memory usage snapshot"""
        try:
            import psutil

            # Get system memory info
            memory = psutil.virtual_memory()

            # Get process memory info
            process = psutil.Process()
            process_memory = process.memory_info()

            # Get GC stats
            gc_collections = {i: gc.get_count()[i] for i in range(3)}
            gc_objects = len(gc.get_objects())

            # Get tracemalloc stats if enabled
            tracemalloc_current_mb = None
            tracemalloc_peak_mb = None

            if self.enable_tracemalloc and tracemalloc.is_tracing():
                current, peak = tracemalloc.get_traced_memory()
                tracemalloc_current_mb = current / (1024 * 1024)
                tracemalloc_peak_mb = peak / (1024 * 1024)

            # Create snapshot
            snapshot = MemorySnapshot(
                timestamp=get_current_timestamp(),
                total_memory_mb=memory.total / (1024 * 1024),
                available_memory_mb=memory.available / (1024 * 1024),
                process_memory_mb=process_memory.rss / (1024 * 1024),
                gc_collections=gc_collections,
                gc_objects=gc_objects,
                tracemalloc_current_mb=tracemalloc_current_mb,
                tracemalloc_peak_mb=tracemalloc_peak_mb
            )

            self.memory_snapshots.append(snapshot)

            # Update metrics
            try:
                from .metrics_collector import metrics_collector
                metrics_collector.set_gauge('memory_total_mb', snapshot.total_memory_mb)
                metrics_collector.set_gauge('memory_available_mb', snapshot.available_memory_mb)
                metrics_collector.set_gauge('memory_process_mb', snapshot.process_memory_mb)
                metrics_collector.set_gauge('memory_gc_objects', snapshot.gc_objects)

                if tracemalloc_current_mb is not None:
                    metrics_collector.set_gauge('memory_tracemalloc_current_mb', tracemalloc_current_mb)
                    metrics_collector.set_gauge('memory_tracemalloc_peak_mb', tracemalloc_peak_mb)
            except ImportError:
                pass  # Metrics collector not available

        except Exception as e:
            logger.error(f"Error taking memory snapshot: {e}")

    def _update_object_counts(self) -> None:
        """Update object counts by type (limited to prevent memory leaks)"""
        try:
            # Only track specific object types to avoid creating too many objects
            tracked_types = {
                'dict', 'list', 'tuple', 'str', 'function', 'type',
                'SystemMetrics', 'MetricPoint', 'MemorySnapshot'
            }

            # Count only tracked object types
            object_counts = {}
            all_objects = gc.get_objects()

            for tracked_type in tracked_types:
                count = sum(1 for obj in all_objects if type(obj).__name__ == tracked_type)
                if count > 0:
                    object_counts[tracked_type] = count

            # Store counts with timestamp (only for tracked types)
            timestamp = get_current_timestamp()
            for obj_type, count in object_counts.items():
                self.object_counts[obj_type].append((timestamp, count))

            # Clean up old entries to prevent memory growth
            for obj_type in list(self.object_counts.keys()):
                if len(self.object_counts[obj_type]) > 50:  # Keep only last 50 entries
                    # Remove oldest entries
                    while len(self.object_counts[obj_type]) > 50:
                        self.object_counts[obj_type].popleft()

            # Update metrics for common types (less frequently)
            try:
                from .metrics_collector import metrics_collector
                # Only update metrics every 5th call to reduce object creation
                if not hasattr(self, '_metrics_update_counter'):
                    self._metrics_update_counter = 0

                self._metrics_update_counter += 1
                if self._metrics_update_counter % 5 == 0:
                    for obj_type, count in object_counts.items():
                        metrics_collector.set_gauge(f'memory_objects_{obj_type}', count)

            except ImportError:
                pass  # Metrics collector not available

        except Exception as e:
            logger.error(f"Error updating object counts: {e}")

    def _check_for_leaks(self) -> None:
        """Check for potential memory leaks (less aggressive)"""
        try:
            if len(self.memory_snapshots) < 20:  # Need more data for reliable detection
                return

            # Only check every 10th call to reduce overhead
            if not hasattr(self, '_leak_check_counter'):
                self._leak_check_counter = 0

            self._leak_check_counter += 1
            if self._leak_check_counter % 10 != 0:
                return

            # Check for consistent memory growth over longer period
            recent_snapshots = list(self.memory_snapshots)[-20:]
            memory_values = [s.process_memory_mb for s in recent_snapshots]

            # More conservative growth detection
            if self._is_memory_growing(memory_values, threshold=20.0):  # Increased threshold
                # Check object count growth
                potential_leaks = self._analyze_object_growth()

                for leak in potential_leaks:
                    # Check if we already reported this leak recently
                    existing_leak = next(
                        (l for l in self.detected_leaks if l.object_type == leak.object_type),
                        None
                    )

                    if not existing_leak and leak.severity in ['medium', 'high']:
                        self.detected_leaks.append(leak)
                        logger.warning(f"Potential memory leak detected: {leak.object_type}")

                        # Record leak detection
                        try:
                            from .metrics_collector import metrics_collector
                            metrics_collector.increment_counter('memory_leaks_detected')
                        except ImportError:
                            pass

            # Clean up old leak reports (keep only last 10)
            if len(self.detected_leaks) > 10:
                self.detected_leaks = self.detected_leaks[-10:]

        except Exception as e:
            logger.error(f"Error checking for leaks: {e}")

    def _is_memory_growing(self, memory_values: List[float], threshold: float = 5.0) -> bool:
        """Check if memory is consistently growing"""
        if len(memory_values) < 5:
            return False

        # Check if memory increased by more than threshold MB
        growth = memory_values[-1] - memory_values[0]
        return growth > threshold

    def _analyze_object_growth(self) -> List[MemoryLeak]:
|
||||||
|
"""Analyze object count growth to identify potential leaks (more conservative)"""
|
||||||
|
leaks = []
|
||||||
|
|
||||||
|
for obj_type, counts in self.object_counts.items():
|
||||||
|
if len(counts) < 20: # Need more data points
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Get recent counts over longer period
|
||||||
|
recent_counts = list(counts)[-20:]
|
||||||
|
timestamps = [item[0] for item in recent_counts]
|
||||||
|
count_values = [item[1] for item in recent_counts]
|
||||||
|
|
||||||
|
# Check for sustained growth
|
||||||
|
if len(count_values) >= 10:
|
||||||
|
# Calculate growth over the period
|
||||||
|
start_avg = sum(count_values[:5]) / 5 # Average of first 5 values
|
||||||
|
end_avg = sum(count_values[-5:]) / 5 # Average of last 5 values
|
||||||
|
growth = end_avg - start_avg
|
||||||
|
|
||||||
|
time_diff = (timestamps[-1] - timestamps[0]).total_seconds() / 3600 # hours
|
||||||
|
|
||||||
|
# More conservative thresholds
|
||||||
|
if growth > 500 and time_diff > 0.5: # More than 500 objects growth over 30+ minutes
|
||||||
|
growth_rate = growth / time_diff
|
||||||
|
|
||||||
|
# Skip common types that naturally fluctuate
|
||||||
|
if obj_type in ['dict', 'list', 'tuple', 'str']:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Determine severity with higher thresholds
|
||||||
|
if growth_rate > 2000:
|
||||||
|
severity = 'high'
|
||||||
|
elif growth_rate > 500:
|
||||||
|
severity = 'medium'
|
||||||
|
else:
|
||||||
|
severity = 'low'
|
||||||
|
|
||||||
|
# Only report medium and high severity leaks
|
||||||
|
if severity in ['medium', 'high']:
|
||||||
|
leak = MemoryLeak(
|
||||||
|
object_type=obj_type,
|
||||||
|
count_increase=int(growth),
|
||||||
|
size_increase_mb=growth * 0.001, # Rough estimate
|
||||||
|
growth_rate_per_hour=growth_rate,
|
||||||
|
severity=severity
|
||||||
|
)
|
||||||
|
leaks.append(leak)
|
||||||
|
|
||||||
|
return leaks
|
||||||
|
|
||||||
|
def _optimize_gc(self) -> None:
|
||||||
|
"""Optimize garbage collection based on memory usage"""
|
||||||
|
try:
|
||||||
|
if not self.memory_snapshots:
|
||||||
|
return
|
||||||
|
|
||||||
|
latest_snapshot = self.memory_snapshots[-1]
|
||||||
|
memory_usage_percent = (
|
||||||
|
(latest_snapshot.total_memory_mb - latest_snapshot.available_memory_mb) /
|
||||||
|
latest_snapshot.total_memory_mb * 100
|
||||||
|
)
|
||||||
|
|
||||||
|
# Adjust GC thresholds based on memory pressure
|
||||||
|
if memory_usage_percent > 85:
|
||||||
|
# High memory pressure - more aggressive GC
|
||||||
|
new_thresholds = (500, 10, 10)
|
||||||
|
if gc.get_threshold() != new_thresholds:
|
||||||
|
gc.set_threshold(*new_thresholds)
|
||||||
|
logger.info("Enabled aggressive garbage collection due to high memory usage")
|
||||||
|
|
||||||
|
# Force collection
|
||||||
|
collected = gc.collect()
|
||||||
|
metrics_collector.increment_counter('memory_gc_forced')
|
||||||
|
logger.debug(f"Forced GC collected {collected} objects")
|
||||||
|
|
||||||
|
elif memory_usage_percent < 50:
|
||||||
|
# Low memory pressure - less aggressive GC
|
||||||
|
new_thresholds = (1000, 20, 20)
|
||||||
|
if gc.get_threshold() != new_thresholds:
|
||||||
|
gc.set_threshold(*new_thresholds)
|
||||||
|
logger.info("Reduced garbage collection frequency due to low memory usage")
|
||||||
|
|
||||||
|
# Update GC stats
|
||||||
|
self.gc_stats = {
|
||||||
|
'threshold': gc.get_threshold(),
|
||||||
|
'counts': gc.get_count(),
|
||||||
|
'collections': gc.get_stats()
|
||||||
|
}
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error optimizing GC: {e}")
|
||||||
|
|
||||||
|
def _cleanup_monitor_data(self) -> None:
|
||||||
|
"""Clean up monitor data to prevent memory leaks"""
|
||||||
|
try:
|
||||||
|
# Limit memory snapshots
|
||||||
|
if len(self.memory_snapshots) > 500:
|
||||||
|
# Keep only the most recent 300 snapshots
|
||||||
|
while len(self.memory_snapshots) > 300:
|
||||||
|
self.memory_snapshots.popleft()
|
||||||
|
|
||||||
|
# Clean up object counts
|
||||||
|
for obj_type in list(self.object_counts.keys()):
|
||||||
|
if len(self.object_counts[obj_type]) > 30:
|
||||||
|
# Keep only the most recent 20 entries
|
||||||
|
while len(self.object_counts[obj_type]) > 20:
|
||||||
|
self.object_counts[obj_type].popleft()
|
||||||
|
|
||||||
|
# Remove empty deques
|
||||||
|
if len(self.object_counts[obj_type]) == 0:
|
||||||
|
del self.object_counts[obj_type]
|
||||||
|
|
||||||
|
# Limit detected leaks
|
||||||
|
if len(self.detected_leaks) > 5:
|
||||||
|
self.detected_leaks = self.detected_leaks[-5:]
|
||||||
|
|
||||||
|
# Force a small garbage collection
|
||||||
|
gc.collect()
|
||||||
|
|
||||||
|
logger.debug("Cleaned up memory monitor data")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error cleaning up monitor data: {e}")
|
||||||
|
|
||||||
|
def force_garbage_collection(self) -> Dict[str, int]:
|
||||||
|
"""Force garbage collection and return statistics"""
|
||||||
|
try:
|
||||||
|
# Get counts before collection
|
||||||
|
before_counts = gc.get_count()
|
||||||
|
before_objects = len(gc.get_objects())
|
||||||
|
|
||||||
|
# Force collection for all generations
|
||||||
|
collected = [gc.collect(generation) for generation in range(3)]
|
||||||
|
total_collected = sum(collected)
|
||||||
|
|
||||||
|
# Get counts after collection
|
||||||
|
after_counts = gc.get_count()
|
||||||
|
after_objects = len(gc.get_objects())
|
||||||
|
|
||||||
|
# Update metrics
|
||||||
|
try:
|
||||||
|
from .metrics_collector import metrics_collector
|
||||||
|
metrics_collector.increment_counter('memory_gc_manual')
|
||||||
|
metrics_collector.set_gauge('memory_gc_objects_collected', total_collected)
|
||||||
|
except ImportError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
result = {
|
||||||
|
'total_collected': total_collected,
|
||||||
|
'by_generation': collected,
|
||||||
|
'objects_before': before_objects,
|
||||||
|
'objects_after': after_objects,
|
||||||
|
'objects_freed': before_objects - after_objects,
|
||||||
|
'counts_before': before_counts,
|
||||||
|
'counts_after': after_counts
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(f"Manual GC collected {total_collected} objects, freed {result['objects_freed']} objects")
|
||||||
|
return result
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error during forced garbage collection: {e}")
|
||||||
|
return {}
|
||||||
|
|
||||||
|
def get_memory_usage_summary(self) -> Dict[str, Any]:
|
||||||
|
"""Get current memory usage summary"""
|
||||||
|
if not self.memory_snapshots:
|
||||||
|
return {}
|
||||||
|
|
||||||
|
latest = self.memory_snapshots[-1]
|
||||||
|
|
||||||
|
# Calculate memory usage percentage
|
||||||
|
memory_usage_percent = (
|
||||||
|
(latest.total_memory_mb - latest.available_memory_mb) /
|
||||||
|
latest.total_memory_mb * 100
|
||||||
|
)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'timestamp': latest.timestamp.isoformat(),
|
||||||
|
'total_memory_mb': latest.total_memory_mb,
|
||||||
|
'available_memory_mb': latest.available_memory_mb,
|
||||||
|
'used_memory_mb': latest.total_memory_mb - latest.available_memory_mb,
|
||||||
|
'memory_usage_percent': memory_usage_percent,
|
||||||
|
'process_memory_mb': latest.process_memory_mb,
|
||||||
|
'gc_objects': latest.gc_objects,
|
||||||
|
'gc_collections': latest.gc_collections,
|
||||||
|
'tracemalloc_current_mb': latest.tracemalloc_current_mb,
|
||||||
|
'tracemalloc_peak_mb': latest.tracemalloc_peak_mb
|
||||||
|
}
|
||||||
|
|
||||||
|
def get_memory_trends(self, hours: int = 1) -> Dict[str, Any]:
|
||||||
|
"""Get memory usage trends over specified time period"""
|
||||||
|
if not self.memory_snapshots:
|
||||||
|
return {}
|
||||||
|
|
||||||
|
from datetime import timedelta
|
||||||
|
cutoff_time = get_current_timestamp() - timedelta(hours=hours)
|
||||||
|
|
||||||
|
# Filter snapshots
|
||||||
|
recent_snapshots = [
|
||||||
|
s for s in self.memory_snapshots
|
||||||
|
if s.timestamp >= cutoff_time
|
||||||
|
]
|
||||||
|
|
||||||
|
if len(recent_snapshots) < 2:
|
||||||
|
return {'trend': 'insufficient_data'}
|
||||||
|
|
||||||
|
# Calculate trends
|
||||||
|
process_memory_values = [s.process_memory_mb for s in recent_snapshots]
|
||||||
|
gc_object_values = [s.gc_objects for s in recent_snapshots]
|
||||||
|
|
||||||
|
return {
|
||||||
|
'process_memory': {
|
||||||
|
'start_mb': process_memory_values[0],
|
||||||
|
'end_mb': process_memory_values[-1],
|
||||||
|
'change_mb': process_memory_values[-1] - process_memory_values[0],
|
||||||
|
'max_mb': max(process_memory_values),
|
||||||
|
'min_mb': min(process_memory_values),
|
||||||
|
'avg_mb': sum(process_memory_values) / len(process_memory_values)
|
||||||
|
},
|
||||||
|
'gc_objects': {
|
||||||
|
'start_count': gc_object_values[0],
|
||||||
|
'end_count': gc_object_values[-1],
|
||||||
|
'change_count': gc_object_values[-1] - gc_object_values[0],
|
||||||
|
'max_count': max(gc_object_values),
|
||||||
|
'min_count': min(gc_object_values),
|
||||||
|
'avg_count': sum(gc_object_values) / len(gc_object_values)
|
||||||
|
},
|
||||||
|
'sample_count': len(recent_snapshots),
|
||||||
|
'time_period_hours': hours
|
||||||
|
}
|
||||||
|
|
||||||
|
def get_top_memory_consumers(self, limit: int = 10) -> List[Dict[str, Any]]:
|
||||||
|
"""Get top memory consuming object types"""
|
||||||
|
if not self.object_counts:
|
||||||
|
return []
|
||||||
|
|
||||||
|
# Get latest counts
|
||||||
|
latest_counts = {}
|
||||||
|
for obj_type, counts in self.object_counts.items():
|
||||||
|
if counts:
|
||||||
|
latest_counts[obj_type] = counts[-1][1] # Get count from (timestamp, count) tuple
|
||||||
|
|
||||||
|
# Sort by count
|
||||||
|
sorted_types = sorted(
|
||||||
|
latest_counts.items(),
|
||||||
|
key=lambda x: x[1],
|
||||||
|
reverse=True
|
||||||
|
)
|
||||||
|
|
||||||
|
return [
|
||||||
|
{
|
||||||
|
'object_type': obj_type,
|
||||||
|
'count': count,
|
||||||
|
'estimated_size_mb': count * 0.001 # Rough estimate
|
||||||
|
}
|
||||||
|
for obj_type, count in sorted_types[:limit]
|
||||||
|
]
|
||||||
|
|
||||||
|
def get_detected_leaks(self) -> List[Dict[str, Any]]:
|
||||||
|
"""Get detected memory leaks"""
|
||||||
|
return [
|
||||||
|
{
|
||||||
|
'object_type': leak.object_type,
|
||||||
|
'count_increase': leak.count_increase,
|
||||||
|
'size_increase_mb': leak.size_increase_mb,
|
||||||
|
'growth_rate_per_hour': leak.growth_rate_per_hour,
|
||||||
|
'severity': leak.severity
|
||||||
|
}
|
||||||
|
for leak in self.detected_leaks
|
||||||
|
]
|
||||||
|
|
||||||
|
def get_tracemalloc_top(self, limit: int = 10) -> List[Dict[str, Any]]:
|
||||||
|
"""Get top memory allocations from tracemalloc"""
|
||||||
|
if not self.enable_tracemalloc or not tracemalloc.is_tracing():
|
||||||
|
return []
|
||||||
|
|
||||||
|
try:
|
||||||
|
snapshot = tracemalloc.take_snapshot()
|
||||||
|
top_stats = snapshot.statistics('lineno')
|
||||||
|
|
||||||
|
return [
|
||||||
|
{
|
||||||
|
'filename': stat.traceback.format()[0],
|
||||||
|
'size_mb': stat.size / (1024 * 1024),
|
||||||
|
'count': stat.count
|
||||||
|
}
|
||||||
|
for stat in top_stats[:limit]
|
||||||
|
]
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting tracemalloc top: {e}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
def clear_leak_history(self) -> None:
|
||||||
|
"""Clear detected leak history"""
|
||||||
|
self.detected_leaks.clear()
|
||||||
|
logger.info("Cleared memory leak history")
|
||||||
|
|
||||||
|
def get_gc_stats(self) -> Dict[str, Any]:
|
||||||
|
"""Get garbage collection statistics"""
|
||||||
|
return {
|
||||||
|
'thresholds': gc.get_threshold(),
|
||||||
|
'counts': gc.get_count(),
|
||||||
|
'stats': gc.get_stats(),
|
||||||
|
'auto_gc_enabled': self.auto_gc_enabled,
|
||||||
|
'is_enabled': gc.isenabled()
|
||||||
|
}
|
||||||
|
|
||||||
|
def set_gc_thresholds(self, gen0: int, gen1: int, gen2: int) -> None:
|
||||||
|
"""Set garbage collection thresholds"""
|
||||||
|
gc.set_threshold(gen0, gen1, gen2)
|
||||||
|
logger.info(f"Set GC thresholds to ({gen0}, {gen1}, {gen2})")
|
||||||
|
|
||||||
|
def enable_auto_gc_optimization(self, enabled: bool = True) -> None:
|
||||||
|
"""Enable or disable automatic GC optimization"""
|
||||||
|
self.auto_gc_enabled = enabled
|
||||||
|
logger.info(f"Auto GC optimization {'enabled' if enabled else 'disabled'}")
|
||||||
|
|
||||||
|
def enable_leak_detection(self, enabled: bool = True) -> None:
|
||||||
|
"""Enable or disable memory leak detection"""
|
||||||
|
self.leak_detection_enabled = enabled
|
||||||
|
logger.info(f"Memory leak detection {'enabled' if enabled else 'disabled'}")
|
||||||
|
|
||||||
|
def get_stats(self) -> Dict[str, Any]:
|
||||||
|
"""Get memory monitor statistics"""
|
||||||
|
return {
|
||||||
|
'monitoring': self._monitoring,
|
||||||
|
'snapshot_interval': self.snapshot_interval,
|
||||||
|
'snapshots_count': len(self.memory_snapshots),
|
||||||
|
'object_types_tracked': len(self.object_counts),
|
||||||
|
'detected_leaks': len(self.detected_leaks),
|
||||||
|
'tracemalloc_enabled': self.enable_tracemalloc and tracemalloc.is_tracing(),
|
||||||
|
'auto_gc_enabled': self.auto_gc_enabled,
|
||||||
|
'leak_detection_enabled': self.leak_detection_enabled,
|
||||||
|
'gc_thresholds': gc.get_threshold()
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# Global memory monitor instance
|
||||||
|
memory_monitor = MemoryMonitor()
|
||||||
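The snippet below is a minimal usage sketch of the memory monitor above, written against the module-level memory_monitor instance and only the public methods visible in this diff; the import path COBY.monitoring.memory_monitor is assumed from the file layout and is not confirmed by the patch itself.

from COBY.monitoring.memory_monitor import memory_monitor

# Trigger a manual collection pass and inspect what it freed
gc_result = memory_monitor.force_garbage_collection()
print(gc_result.get('total_collected'), gc_result.get('objects_freed'))

# Read the latest snapshot-derived summary and any detected leaks
print(memory_monitor.get_memory_usage_summary())
print(memory_monitor.get_detected_leaks())

# Tune GC behaviour explicitly instead of relying on auto-optimization
memory_monitor.enable_auto_gc_optimization(False)
memory_monitor.set_gc_thresholds(700, 10, 10)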
COBY/monitoring/metrics_collector.py (new file, 399 lines)
@@ -0,0 +1,399 @@
"""
Comprehensive metrics collection for all system components.
"""

import time
import psutil
import threading
from typing import Dict, List, Optional, Any, Callable
from collections import defaultdict, deque
from datetime import datetime, timezone
from dataclasses import dataclass, field

try:
    from ..utils.logging import get_logger
    from ..utils.timing import get_current_timestamp
except ImportError:
    from utils.logging import get_logger
    from utils.timing import get_current_timestamp

logger = get_logger(__name__)


@dataclass
class MetricPoint:
    """Individual metric data point"""
    name: str
    value: float
    timestamp: datetime
    labels: Dict[str, str] = field(default_factory=dict)

    def to_prometheus_format(self) -> str:
        """Convert to Prometheus format"""
        labels_str = ""
        if self.labels:
            label_pairs = [f'{k}="{v}"' for k, v in self.labels.items()]
            labels_str = "{" + ",".join(label_pairs) + "}"

        return f"{self.name}{labels_str} {self.value} {int(self.timestamp.timestamp() * 1000)}"


@dataclass
class SystemMetrics:
    """System-level metrics"""
    cpu_usage: float
    memory_usage: float
    memory_available: float
    disk_usage: float
    network_bytes_sent: int
    network_bytes_recv: int
    active_connections: int
    timestamp: datetime


class MetricsCollector:
    """
    Collects and manages performance metrics from all system components.

    Provides Prometheus-compatible metrics and real-time monitoring data.
    """

    def __init__(self, collection_interval: float = 1.0, max_history: int = 10000):
        """
        Initialize metrics collector.

        Args:
            collection_interval: How often to collect system metrics (seconds)
            max_history: Maximum number of metric points to keep in memory
        """
        self.collection_interval = collection_interval
        self.max_history = max_history

        # Metric storage
        self.metrics: Dict[str, deque] = defaultdict(lambda: deque(maxlen=max_history))
        self.counters: Dict[str, float] = defaultdict(float)
        self.gauges: Dict[str, float] = defaultdict(float)
        self.histograms: Dict[str, List[float]] = defaultdict(list)

        # System metrics
        self.system_metrics_history: deque = deque(maxlen=max_history)

        # Collection control
        self._collecting = False
        self._collection_thread: Optional[threading.Thread] = None
        self._lock = threading.RLock()

        # Callbacks for custom metrics
        self.metric_callbacks: List[Callable[[], Dict[str, float]]] = []

        logger.info(f"Metrics collector initialized with {collection_interval}s interval")

    def start_collection(self) -> None:
        """Start automatic metrics collection"""
        if self._collecting:
            logger.warning("Metrics collection already running")
            return

        self._collecting = True
        self._collection_thread = threading.Thread(
            target=self._collection_loop,
            name="MetricsCollector",
            daemon=True
        )
        self._collection_thread.start()
        logger.info("Started metrics collection")

    def stop_collection(self) -> None:
        """Stop automatic metrics collection"""
        if not self._collecting:
            return

        self._collecting = False
        if self._collection_thread:
            self._collection_thread.join(timeout=5.0)
        logger.info("Stopped metrics collection")

    def _collection_loop(self) -> None:
        """Main collection loop"""
        while self._collecting:
            try:
                # Collect system metrics
                self._collect_system_metrics()

                # Collect custom metrics from callbacks
                self._collect_custom_metrics()

                time.sleep(self.collection_interval)

            except Exception as e:
                logger.error(f"Error in metrics collection loop: {e}")
                time.sleep(self.collection_interval)

    def _collect_system_metrics(self) -> None:
        """Collect system-level metrics"""
        try:
            # CPU usage
            cpu_percent = psutil.cpu_percent(interval=None)

            # Memory usage
            memory = psutil.virtual_memory()

            # Disk usage (root partition)
            disk = psutil.disk_usage('/')

            # Network stats
            network = psutil.net_io_counters()

            # Active connections
            connections = len(psutil.net_connections())

            # Create system metrics object
            sys_metrics = SystemMetrics(
                cpu_usage=cpu_percent,
                memory_usage=memory.percent,
                memory_available=memory.available / (1024**3),  # GB
                disk_usage=disk.percent,
                network_bytes_sent=network.bytes_sent,
                network_bytes_recv=network.bytes_recv,
                active_connections=connections,
                timestamp=get_current_timestamp()
            )

            with self._lock:
                self.system_metrics_history.append(sys_metrics)

                # Update gauges
                self.gauges['system_cpu_usage'] = cpu_percent
                self.gauges['system_memory_usage'] = memory.percent
                self.gauges['system_memory_available_gb'] = memory.available / (1024**3)
                self.gauges['system_disk_usage'] = disk.percent
                self.gauges['system_active_connections'] = connections

                # Update counters (cumulative)
                self.counters['system_network_bytes_sent'] = network.bytes_sent
                self.counters['system_network_bytes_recv'] = network.bytes_recv

        except Exception as e:
            logger.error(f"Error collecting system metrics: {e}")

    def _collect_custom_metrics(self) -> None:
        """Collect metrics from registered callbacks"""
        for callback in self.metric_callbacks:
            try:
                custom_metrics = callback()
                if isinstance(custom_metrics, dict):
                    with self._lock:
                        for name, value in custom_metrics.items():
                            self.record_gauge(name, value)
            except Exception as e:
                logger.error(f"Error collecting custom metrics: {e}")

    def record_counter(self, name: str, value: float = 1.0, labels: Dict[str, str] = None) -> None:
        """Record a counter metric (cumulative)"""
        with self._lock:
            self.counters[name] += value

            # Store metric point
            point = MetricPoint(
                name=name,
                value=self.counters[name],
                timestamp=get_current_timestamp(),
                labels=labels or {}
            )
            self.metrics[name].append(point)

    def record_gauge(self, name: str, value: float, labels: Dict[str, str] = None) -> None:
        """Record a gauge metric (current value)"""
        with self._lock:
            self.gauges[name] = value

            # Store metric point
            point = MetricPoint(
                name=name,
                value=value,
                timestamp=get_current_timestamp(),
                labels=labels or {}
            )
            self.metrics[name].append(point)

    def record_histogram(self, name: str, value: float, labels: Dict[str, str] = None) -> None:
        """Record a histogram metric (for latency, sizes, etc.)"""
        with self._lock:
            self.histograms[name].append(value)

            # Keep only recent values
            if len(self.histograms[name]) > 1000:
                self.histograms[name] = self.histograms[name][-1000:]

            # Store metric point
            point = MetricPoint(
                name=name,
                value=value,
                timestamp=get_current_timestamp(),
                labels=labels or {}
            )
            self.metrics[name].append(point)

    def increment_counter(self, name: str, labels: Dict[str, str] = None) -> None:
        """Increment a counter by 1"""
        self.record_counter(name, 1.0, labels)

    def set_gauge(self, name: str, value: float, labels: Dict[str, str] = None) -> None:
        """Set a gauge value"""
        self.record_gauge(name, value, labels)

    def observe_histogram(self, name: str, value: float, labels: Dict[str, str] = None) -> None:
        """Observe a value in a histogram"""
        self.record_histogram(name, value, labels)

    def get_current_metrics(self) -> Dict[str, Any]:
        """Get current metric values"""
        with self._lock:
            return {
                'counters': dict(self.counters),
                'gauges': dict(self.gauges),
                'histograms': {
                    name: {
                        'count': len(values),
                        'sum': sum(values),
                        'avg': sum(values) / len(values) if values else 0,
                        'min': min(values) if values else 0,
                        'max': max(values) if values else 0,
                        'p50': self._percentile(values, 50) if values else 0,
                        'p95': self._percentile(values, 95) if values else 0,
                        'p99': self._percentile(values, 99) if values else 0
                    }
                    for name, values in self.histograms.items()
                },
                'system': self.get_latest_system_metrics()
            }

    def get_latest_system_metrics(self) -> Optional[Dict[str, Any]]:
        """Get the latest system metrics"""
        with self._lock:
            if not self.system_metrics_history:
                return None

            latest = self.system_metrics_history[-1]
            return {
                'cpu_usage': latest.cpu_usage,
                'memory_usage': latest.memory_usage,
                'memory_available_gb': latest.memory_available,
                'disk_usage': latest.disk_usage,
                'network_bytes_sent': latest.network_bytes_sent,
                'network_bytes_recv': latest.network_bytes_recv,
                'active_connections': latest.active_connections,
                'timestamp': latest.timestamp.isoformat()
            }

    def get_metric_history(self, name: str, limit: int = 100) -> List[Dict[str, Any]]:
        """Get historical values for a specific metric"""
        with self._lock:
            if name not in self.metrics:
                return []

            points = list(self.metrics[name])[-limit:]
            return [
                {
                    'value': point.value,
                    'timestamp': point.timestamp.isoformat(),
                    'labels': point.labels
                }
                for point in points
            ]

    def get_prometheus_metrics(self) -> str:
        """Export metrics in Prometheus format"""
        lines = []

        with self._lock:
            # Export counters
            for name, value in self.counters.items():
                lines.append(f"# TYPE {name} counter")
                lines.append(f"{name} {value}")

            # Export gauges
            for name, value in self.gauges.items():
                lines.append(f"# TYPE {name} gauge")
                lines.append(f"{name} {value}")

            # Export histograms
            for name, values in self.histograms.items():
                if values:
                    lines.append(f"# TYPE {name} histogram")
                    lines.append(f"{name}_count {len(values)}")
                    lines.append(f"{name}_sum {sum(values)}")

                    # Add percentiles
                    for percentile in [50, 95, 99]:
                        p_value = self._percentile(values, percentile)
                        lines.append(f"{name}_percentile{{quantile=\"0.{percentile:02d}\"}} {p_value}")

        return "\n".join(lines)

    def register_callback(self, callback: Callable[[], Dict[str, float]]) -> None:
        """Register a callback for custom metrics collection"""
        self.metric_callbacks.append(callback)
        logger.info(f"Registered metrics callback: {callback.__name__}")

    def get_performance_summary(self) -> Dict[str, Any]:
        """Get a performance summary"""
        current_metrics = self.get_current_metrics()

        # Calculate rates and trends
        summary = {
            'timestamp': get_current_timestamp().isoformat(),
            'system': current_metrics.get('system', {}),
            'counters': current_metrics.get('counters', {}),
            'gauges': current_metrics.get('gauges', {}),
            'performance_indicators': {}
        }

        # Add performance indicators
        histograms = current_metrics.get('histograms', {})
        for name, stats in histograms.items():
            if 'latency' in name.lower():
                summary['performance_indicators'][f"{name}_avg_ms"] = stats['avg']
                summary['performance_indicators'][f"{name}_p95_ms"] = stats['p95']

        return summary

    def _percentile(self, values: List[float], percentile: int) -> float:
        """Calculate percentile of values"""
        if not values:
            return 0.0

        sorted_values = sorted(values)
        index = int((percentile / 100.0) * len(sorted_values))
        index = min(index, len(sorted_values) - 1)
        return sorted_values[index]

    def reset_metrics(self) -> None:
        """Reset all metrics (useful for testing)"""
        with self._lock:
            self.metrics.clear()
            self.counters.clear()
            self.gauges.clear()
            self.histograms.clear()
            self.system_metrics_history.clear()

        logger.info("All metrics reset")

    def get_stats(self) -> Dict[str, Any]:
        """Get collector statistics"""
        with self._lock:
            return {
                'collecting': self._collecting,
                'collection_interval': self.collection_interval,
                'max_history': self.max_history,
                'total_metrics': len(self.metrics),
                'total_counters': len(self.counters),
                'total_gauges': len(self.gauges),
                'total_histograms': len(self.histograms),
                'system_metrics_count': len(self.system_metrics_history),
                'registered_callbacks': len(self.metric_callbacks)
            }


# Global metrics collector instance
metrics_collector = MetricsCollector()
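A short, hedged example of how the global metrics_collector defined above might be driven from application code; it only uses methods shown in this file (start_collection, increment_counter, observe_histogram, set_gauge, register_callback, get_prometheus_metrics), and the import path is assumed from the repository layout.

from COBY.monitoring.metrics_collector import metrics_collector

metrics_collector.start_collection()                     # background psutil sampling thread
metrics_collector.increment_counter('orders_processed')  # cumulative counter
metrics_collector.observe_histogram('processing_latency_ms', 12.5, labels={'exchange': 'demo'})
metrics_collector.set_gauge('orderbook_depth', 250)

# Custom metrics can be pulled in on every collection cycle via a callback
metrics_collector.register_callback(lambda: {'queue_size': 42.0})

print(metrics_collector.get_prometheus_metrics())        # Prometheus text exposition
metrics_collector.stop_collection()

Note that _percentile uses a simple nearest-rank rule: for 100 recorded values, p95 resolves to index int(0.95 * 100) = 95, i.e. the 96th smallest sample, rather than an interpolated percentile.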
COBY/monitoring/performance_monitor.py (new file, 561 lines)
@@ -0,0 +1,561 @@
"""
Performance monitoring dashboard and real-time performance tracking.
"""

import time
import asyncio
import threading
from typing import Dict, List, Optional, Any, Callable
from collections import defaultdict, deque
from datetime import datetime, timezone, timedelta
from dataclasses import dataclass, field

try:
    from ..utils.logging import get_logger
    from ..utils.timing import get_current_timestamp
    from .metrics_collector import MetricsCollector
except ImportError:
    from utils.logging import get_logger
    from utils.timing import get_current_timestamp
    from monitoring.metrics_collector import MetricsCollector

logger = get_logger(__name__)


@dataclass
class PerformanceAlert:
    """Performance alert definition"""
    name: str
    metric_name: str
    threshold: float
    comparison: str  # 'gt', 'lt', 'eq'
    duration: int  # seconds
    message: str
    severity: str = 'warning'  # 'info', 'warning', 'critical'
    triggered_at: Optional[datetime] = None
    resolved_at: Optional[datetime] = None

    def is_triggered(self, value: float) -> bool:
        """Check if alert should be triggered"""
        if self.comparison == 'gt':
            return value > self.threshold
        elif self.comparison == 'lt':
            return value < self.threshold
        elif self.comparison == 'eq':
            return abs(value - self.threshold) < 0.001
        return False


@dataclass
class PerformanceThresholds:
    """Performance threshold configuration"""
    max_cpu_usage: float = 80.0  # %
    max_memory_usage: float = 85.0  # %
    min_memory_available: float = 1.0  # GB
    max_latency_ms: float = 100.0  # milliseconds
    max_error_rate: float = 5.0  # %
    min_throughput: float = 100.0  # operations/second


class PerformanceMonitor:
    """
    Real-time performance monitoring with alerting and dashboard data.

    Monitors system performance, tracks KPIs, and provides alerts.
    """

    def __init__(self, metrics_collector: MetricsCollector = None):
        """
        Initialize performance monitor.

        Args:
            metrics_collector: Metrics collector instance
        """
        if metrics_collector is None:
            from .metrics_collector import metrics_collector as default_collector
            self.metrics_collector = default_collector
        else:
            self.metrics_collector = metrics_collector
        self.thresholds = PerformanceThresholds()

        # Alert management
        self.alerts: Dict[str, PerformanceAlert] = {}
        self.active_alerts: Dict[str, PerformanceAlert] = {}
        self.alert_history: deque = deque(maxlen=1000)

        # Performance tracking
        self.performance_history: deque = deque(maxlen=10000)
        self.kpi_history: Dict[str, deque] = defaultdict(lambda: deque(maxlen=1000))

        # Monitoring control
        self._monitoring = False
        self._monitor_thread: Optional[threading.Thread] = None
        self._monitor_interval = 5.0  # seconds

        # Alert callbacks
        self.alert_callbacks: List[Callable[[PerformanceAlert], None]] = []

        # Initialize default alerts
        self._setup_default_alerts()

        logger.info("Performance monitor initialized")

    def _setup_default_alerts(self) -> None:
        """Setup default performance alerts"""
        default_alerts = [
            PerformanceAlert(
                name="high_cpu_usage",
                metric_name="system_cpu_usage",
                threshold=self.thresholds.max_cpu_usage,
                comparison="gt",
                duration=30,
                message="CPU usage is above {threshold}%",
                severity="warning"
            ),
            PerformanceAlert(
                name="high_memory_usage",
                metric_name="system_memory_usage",
                threshold=self.thresholds.max_memory_usage,
                comparison="gt",
                duration=30,
                message="Memory usage is above {threshold}%",
                severity="warning"
            ),
            PerformanceAlert(
                name="low_memory_available",
                metric_name="system_memory_available_gb",
                threshold=self.thresholds.min_memory_available,
                comparison="lt",
                duration=60,
                message="Available memory is below {threshold}GB",
                severity="critical"
            ),
            PerformanceAlert(
                name="high_latency",
                metric_name="processing_latency_ms",
                threshold=self.thresholds.max_latency_ms,
                comparison="gt",
                duration=60,
                message="Processing latency is above {threshold}ms",
                severity="warning"
            )
        ]

        for alert in default_alerts:
            self.add_alert(alert)

    def start_monitoring(self) -> None:
        """Start performance monitoring"""
        if self._monitoring:
            logger.warning("Performance monitoring already running")
            return

        self._monitoring = True
        self._monitor_thread = threading.Thread(
            target=self._monitoring_loop,
            name="PerformanceMonitor",
            daemon=True
        )
        self._monitor_thread.start()
        logger.info("Started performance monitoring")

    def stop_monitoring(self) -> None:
        """Stop performance monitoring"""
        if not self._monitoring:
            return

        self._monitoring = False
        if self._monitor_thread:
            self._monitor_thread.join(timeout=5.0)
        logger.info("Stopped performance monitoring")

    def _monitoring_loop(self) -> None:
        """Main monitoring loop"""
        while self._monitoring:
            try:
                # Collect current performance data
                self._collect_performance_data()

                # Check alerts
                self._check_alerts()

                # Update KPIs
                self._update_kpis()

                time.sleep(self._monitor_interval)

            except Exception as e:
                logger.error(f"Error in performance monitoring loop: {e}")
                time.sleep(self._monitor_interval)

    def _collect_performance_data(self) -> None:
        """Collect current performance data"""
        try:
            if self.metrics_collector is None:
                return

            current_metrics = self.metrics_collector.get_current_metrics()

            if current_metrics is None:
                return

            # Create performance snapshot
            performance_data = {
                'timestamp': get_current_timestamp(),
                'system': current_metrics.get('system', {}),
                'counters': current_metrics.get('counters', {}),
                'gauges': current_metrics.get('gauges', {}),
                'histograms': current_metrics.get('histograms', {})
            }

            self.performance_history.append(performance_data)

        except Exception as e:
            logger.error(f"Error collecting performance data: {e}")

    def _check_alerts(self) -> None:
        """Check all alerts against current metrics"""
        if self.metrics_collector is None:
            return

        current_metrics = self.metrics_collector.get_current_metrics()
        if current_metrics is None:
            return

        current_time = get_current_timestamp()

        for alert_name, alert in self.alerts.items():
            try:
                # Get metric value
                metric_value = self._get_metric_value(alert.metric_name, current_metrics)
                if metric_value is None:
                    continue

                # Check if alert should be triggered
                should_trigger = alert.is_triggered(metric_value)

                if should_trigger and alert_name not in self.active_alerts:
                    # Trigger alert
                    alert.triggered_at = current_time
                    self.active_alerts[alert_name] = alert
                    self.alert_history.append(alert)

                    # Format message
                    message = alert.message.format(
                        threshold=alert.threshold,
                        value=metric_value
                    )

                    logger.warning(f"Performance alert triggered: {alert.name} - {message}")

                    # Notify callbacks
                    for callback in self.alert_callbacks:
                        try:
                            callback(alert)
                        except Exception as e:
                            logger.error(f"Error in alert callback: {e}")

                elif not should_trigger and alert_name in self.active_alerts:
                    # Resolve alert
                    resolved_alert = self.active_alerts.pop(alert_name)
                    resolved_alert.resolved_at = current_time

                    logger.info(f"Performance alert resolved: {alert.name}")

            except Exception as e:
                logger.error(f"Error checking alert {alert_name}: {e}")

    def _get_metric_value(self, metric_name: str, metrics: Dict[str, Any]) -> Optional[float]:
        """Get metric value from metrics data"""
        if not metrics:
            return None

        # Check gauges first
        gauges = metrics.get('gauges', {})
        if gauges and metric_name in gauges:
            return gauges[metric_name]

        # Check counters
        counters = metrics.get('counters', {})
        if counters and metric_name in counters:
            return counters[metric_name]

        # Check histograms (use average)
        histograms = metrics.get('histograms', {})
        if histograms and metric_name in histograms:
            hist_data = histograms[metric_name]
            if hist_data and isinstance(hist_data, dict):
                return hist_data.get('avg', 0)

        # Check system metrics
        system_metrics = metrics.get('system', {})
        if system_metrics and metric_name in system_metrics:
            return system_metrics[metric_name]

        return None

    def _update_kpis(self) -> None:
        """Update key performance indicators"""
        try:
            if self.metrics_collector is None:
                return

            current_metrics = self.metrics_collector.get_current_metrics()
            if current_metrics is None:
                return

            timestamp = get_current_timestamp()

            # Calculate throughput (operations per second)
            throughput = self._calculate_throughput()
            self.kpi_history['throughput_ops_per_sec'].append({
                'value': throughput,
                'timestamp': timestamp
            })

            # Calculate error rate
            error_rate = self._calculate_error_rate()
            self.kpi_history['error_rate_percent'].append({
                'value': error_rate,
                'timestamp': timestamp
            })

            # Calculate average latency
            avg_latency = self._calculate_average_latency()
            self.kpi_history['avg_latency_ms'].append({
                'value': avg_latency,
                'timestamp': timestamp
            })

            # Update metrics collector with KPIs
            self.metrics_collector.set_gauge('kpi_throughput_ops_per_sec', throughput)
            self.metrics_collector.set_gauge('kpi_error_rate_percent', error_rate)
            self.metrics_collector.set_gauge('kpi_avg_latency_ms', avg_latency)

        except Exception as e:
            logger.error(f"Error updating KPIs: {e}")

    def _calculate_throughput(self) -> float:
        """Calculate operations per second throughput"""
        try:
            current_metrics = self.metrics_collector.get_current_metrics()
            counters = current_metrics.get('counters', {})

            # Sum up relevant operation counters
            total_ops = 0
            for name, value in counters.items():
                if any(keyword in name.lower() for keyword in ['processed', 'handled', 'completed']):
                    total_ops += value

            # Calculate rate (simple approximation)
            if len(self.performance_history) >= 2:
                prev_data = self.performance_history[-2]
                current_data = self.performance_history[-1]

                time_diff = (current_data['timestamp'] - prev_data['timestamp']).total_seconds()
                if time_diff > 0:
                    prev_ops = sum(
                        value for name, value in prev_data.get('counters', {}).items()
                        if any(keyword in name.lower() for keyword in ['processed', 'handled', 'completed'])
                    )
                    return (total_ops - prev_ops) / time_diff

            return 0.0

        except Exception as e:
            logger.error(f"Error calculating throughput: {e}")
            return 0.0

    def _calculate_error_rate(self) -> float:
        """Calculate error rate percentage"""
        try:
            current_metrics = self.metrics_collector.get_current_metrics()
            counters = current_metrics.get('counters', {})

            # Count errors and total operations
            total_errors = sum(
                value for name, value in counters.items()
                if 'error' in name.lower() or 'failed' in name.lower()
            )

            total_operations = sum(
                value for name, value in counters.items()
                if any(keyword in name.lower() for keyword in ['processed', 'handled', 'completed', 'total'])
            )

            if total_operations > 0:
                return (total_errors / total_operations) * 100

            return 0.0

        except Exception as e:
            logger.error(f"Error calculating error rate: {e}")
            return 0.0

    def _calculate_average_latency(self) -> float:
        """Calculate average latency across all operations"""
        try:
            current_metrics = self.metrics_collector.get_current_metrics()
            histograms = current_metrics.get('histograms', {})

            # Find latency histograms
            latency_values = []
            for name, stats in histograms.items():
                if 'latency' in name.lower():
                    latency_values.append(stats.get('avg', 0))

            if latency_values:
                return sum(latency_values) / len(latency_values)

            return 0.0

        except Exception as e:
            logger.error(f"Error calculating average latency: {e}")
            return 0.0

    def add_alert(self, alert: PerformanceAlert) -> None:
        """Add a performance alert"""
        self.alerts[alert.name] = alert
        logger.info(f"Added performance alert: {alert.name}")

    def remove_alert(self, alert_name: str) -> None:
        """Remove a performance alert"""
        if alert_name in self.alerts:
            del self.alerts[alert_name]
            # Also remove from active alerts if present
            self.active_alerts.pop(alert_name, None)
            logger.info(f"Removed performance alert: {alert_name}")

    def get_active_alerts(self) -> List[PerformanceAlert]:
        """Get currently active alerts"""
        return list(self.active_alerts.values())

    def get_alert_history(self, limit: int = 100) -> List[PerformanceAlert]:
        """Get alert history"""
        return list(self.alert_history)[-limit:]

    def get_performance_dashboard_data(self) -> Dict[str, Any]:
        """Get data for performance dashboard"""
        current_metrics = {}
        if self.metrics_collector:
            current_metrics = self.metrics_collector.get_current_metrics() or {}

        system_metrics = current_metrics.get('system', {}) or {}

        return {
            'timestamp': get_current_timestamp().isoformat(),
            'system_metrics': system_metrics,
            'kpis': {
                name: list(history)[-10:] if history else []  # Last 10 points
                for name, history in self.kpi_history.items()
            },
            'active_alerts': [
                {
                    'name': alert.name,
                    'message': alert.message,
                    'severity': alert.severity.value if hasattr(alert.severity, 'value') else str(alert.severity),
                    'triggered_at': alert.triggered_at.isoformat() if alert.triggered_at else None
                }
                for alert in self.active_alerts.values()
            ],
            'performance_summary': {
                'cpu_usage': system_metrics.get('cpu_usage', 0) if system_metrics else 0,
                'memory_usage': system_metrics.get('memory_usage', 0) if system_metrics else 0,
                'active_connections': system_metrics.get('active_connections', 0) if system_metrics else 0,
                'throughput': self.kpi_history['throughput_ops_per_sec'][-1]['value'] if self.kpi_history['throughput_ops_per_sec'] else 0,
                'error_rate': self.kpi_history['error_rate_percent'][-1]['value'] if self.kpi_history['error_rate_percent'] else 0,
                'avg_latency': self.kpi_history['avg_latency_ms'][-1]['value'] if self.kpi_history['avg_latency_ms'] else 0
            }
        }

    def register_alert_callback(self, callback: Callable[[PerformanceAlert], None]) -> None:
        """Register callback for alert notifications"""
        self.alert_callbacks.append(callback)
        logger.info(f"Registered alert callback: {callback.__name__}")

    def update_thresholds(self, **kwargs) -> None:
        """Update performance thresholds"""
        for key, value in kwargs.items():
            if hasattr(self.thresholds, key):
                setattr(self.thresholds, key, value)
                logger.info(f"Updated threshold {key} to {value}")

    def get_performance_trends(self, hours: int = 24) -> Dict[str, Any]:
        """Get performance trends over specified time period"""
        cutoff_time = get_current_timestamp() - timedelta(hours=hours)

        # Filter performance history
        recent_data = [
            data for data in self.performance_history
            if data and data.get('timestamp') and data['timestamp'] >= cutoff_time
        ]

        if not recent_data:
            return {}

        # Calculate trends
        trends = {}

        # CPU usage trend
        cpu_values = []
        for data in recent_data:
            system_data = data.get('system', {})
            if system_data:
                cpu_values.append(system_data.get('cpu_usage', 0))

        if cpu_values:
            trends['cpu_usage'] = {
                'current': cpu_values[-1],
                'average': sum(cpu_values) / len(cpu_values),
                'max': max(cpu_values),
                'trend': 'increasing' if len(cpu_values) > 1 and cpu_values[-1] > cpu_values[0] else 'stable'
            }

        # Memory usage trend
        memory_values = []
        for data in recent_data:
            system_data = data.get('system', {})
            if system_data:
                memory_values.append(system_data.get('memory_usage', 0))

        if memory_values:
            trends['memory_usage'] = {
                'current': memory_values[-1],
                'average': sum(memory_values) / len(memory_values),
                'max': max(memory_values),
                'trend': 'increasing' if len(memory_values) > 1 and memory_values[-1] > memory_values[0] else 'stable'
            }

        return trends

    def get_stats(self) -> Dict[str, Any]:
        """Get performance monitor statistics"""
        return {
            'monitoring': self._monitoring,
            'monitor_interval': self._monitor_interval,
            'total_alerts': len(self.alerts),
            'active_alerts': len(self.active_alerts),
            'alert_history_count': len(self.alert_history),
            'performance_history_count': len(self.performance_history),
            'kpi_metrics': list(self.kpi_history.keys()),
            'registered_callbacks': len(self.alert_callbacks),
            'thresholds': {
                'max_cpu_usage': self.thresholds.max_cpu_usage,
                'max_memory_usage': self.thresholds.max_memory_usage,
                'min_memory_available': self.thresholds.min_memory_available,
                'max_latency_ms': self.thresholds.max_latency_ms,
                'max_error_rate': self.thresholds.max_error_rate,
                'min_throughput': self.thresholds.min_throughput
            }
        }


# Global performance monitor instance (initialized lazily)
performance_monitor = None

def get_performance_monitor():
    """Get or create global performance monitor instance"""
    global performance_monitor
    if performance_monitor is None:
        performance_monitor = PerformanceMonitor()
    return performance_monitor
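The following sketch shows one plausible way to wire the performance monitor up, using only the API visible in this diff (get_performance_monitor, add_alert, register_alert_callback, start_monitoring, get_performance_dashboard_data); the threshold values are illustrative and not taken from the patch.

from COBY.monitoring.performance_monitor import get_performance_monitor, PerformanceAlert

monitor = get_performance_monitor()

# Add a custom alert on top of the defaults registered in _setup_default_alerts()
monitor.add_alert(PerformanceAlert(
    name='low_throughput',
    metric_name='kpi_throughput_ops_per_sec',
    threshold=100.0,
    comparison='lt',
    duration=60,
    message='Throughput dropped below {threshold} ops/s',
    severity='warning'
))

# Get notified when any alert fires
monitor.register_alert_callback(lambda alert: print(f'ALERT: {alert.name} ({alert.severity})'))

monitor.start_monitoring()
dashboard = monitor.get_performance_dashboard_data()
print(dashboard['performance_summary'])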
COBY/processing/__init__.py (new file, 15 lines)
@@ -0,0 +1,15 @@
"""
Data processing and normalization components for the COBY system.
"""

from .data_processor import StandardDataProcessor
from .quality_checker import DataQualityChecker
from .anomaly_detector import AnomalyDetector
from .metrics_calculator import MetricsCalculator

__all__ = [
    'StandardDataProcessor',
    'DataQualityChecker',
    'AnomalyDetector',
    'MetricsCalculator'
]
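For completeness, a hedged import sketch for the processing package exposed by this __init__.py; the top-level package name COBY is inferred from the file paths in this diff, and the constructors of these classes are not shown here, so only the import itself is illustrated.

from COBY.processing import (
    StandardDataProcessor,
    DataQualityChecker,
    AnomalyDetector,
    MetricsCalculator,
)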
Some files were not shown because too many files have changed in this diff.