diff --git a/.kiro/specs/multi-exchange-data-aggregation/tasks.md b/.kiro/specs/multi-exchange-data-aggregation/tasks.md
index 9f475c6..2a52dee 100644
--- a/.kiro/specs/multi-exchange-data-aggregation/tasks.md
+++ b/.kiro/specs/multi-exchange-data-aggregation/tasks.md
@@ -143,9 +143,11 @@
+
- [x] 12. Add additional exchange connectors (Coinbase, Kraken)
- Implement Coinbase Pro WebSocket connector with proper authentication
- Create Kraken WebSocket connector with their specific message format
+
- Add exchange-specific data normalization for both exchanges
@@ -153,6 +155,10 @@
- Implement proper error handling for each exchange's quirks
- Write unit tests for both new exchange connectors
+
+
+
+
- _Requirements: 1.1, 1.2, 1.4_
- [x] 13. Implement remaining exchange connectors (Bybit, OKX, Huobi)
diff --git a/COBY/Dockerfile b/COBY/Dockerfile
new file mode 100644
index 0000000..d8afecf
--- /dev/null
+++ b/COBY/Dockerfile
@@ -0,0 +1,83 @@
+# Multi-stage Docker build for COBY Multi-Exchange Data Aggregation System
+FROM python:3.11-slim as base
+
+# Set environment variables
+ENV PYTHONDONTWRITEBYTECODE=1 \
+ PYTHONUNBUFFERED=1 \
+ PYTHONPATH=/app \
+ PIP_NO_CACHE_DIR=1 \
+ PIP_DISABLE_PIP_VERSION_CHECK=1
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+ gcc \
+ g++ \
+ libpq-dev \
+ curl \
+ && rm -rf /var/lib/apt/lists/*
+
+# Create app user
+RUN groupadd -r coby && useradd -r -g coby coby
+
+# Set work directory
+WORKDIR /app
+
+# Copy requirements first for better caching
+COPY requirements.txt .
+
+# Install Python dependencies
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy application code
+COPY . .
+
+# Create necessary directories
+RUN mkdir -p logs data && \
+ chown -R coby:coby /app
+
+# Switch to non-root user
+USER coby
+
+# Health check
+HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
+ CMD python -c "import requests; requests.get('http://localhost:8080/health', timeout=5)" || exit 1
+
+# Default command
+CMD ["python", "-m", "COBY.main"]
+
+# Development stage
+FROM base as development
+
+USER root
+
+# Install development dependencies
+RUN pip install --no-cache-dir pytest pytest-asyncio pytest-cov black flake8 mypy
+
+# Install debugging tools
+RUN apt-get update && apt-get install -y \
+ vim \
+ htop \
+ net-tools \
+ && rm -rf /var/lib/apt/lists/*
+
+USER coby
+
+# Override command for development
+CMD ["python", "-m", "COBY.main", "--debug"]
+
+# Production stage
+FROM base as production
+
+# Application code and file ownership are inherited from the base stage;
+# a COPY --from=base here would be redundant and would reset ownership to root.
+
+# Set production environment
+ENV ENVIRONMENT=production \
+ DEBUG=false \
+ LOG_LEVEL=INFO
+
+# Expose ports
+EXPOSE 8080 8081
+
+# Use production command
+CMD ["python", "-m", "COBY.main"]
\ No newline at end of file
diff --git a/COBY/PORTAINER_DEPLOYMENT.md b/COBY/PORTAINER_DEPLOYMENT.md
new file mode 100644
index 0000000..ab6f1a6
--- /dev/null
+++ b/COBY/PORTAINER_DEPLOYMENT.md
@@ -0,0 +1,264 @@
+# COBY Portainer Deployment Guide
+
+This guide explains how to deploy the COBY Multi-Exchange Data Aggregation System using Portainer with Git repository integration.
+
+## Prerequisites
+
+- Portainer CE/EE installed and running
+- Docker Swarm or Docker Compose environment
+- Access to the Git repository containing the COBY project
+- Minimum system requirements:
+ - 4GB RAM
+ - 2 CPU cores
+ - 20GB disk space
+
+## Deployment Steps
+
+### 1. Access Portainer
+
+1. Open your Portainer web interface
+2. Navigate to your environment (local Docker or Docker Swarm)
+
+### 2. Create Stack from Git Repository
+
+1. Go to **Stacks** in the left sidebar
+2. Click **Add stack**
+3. Choose **Repository** as the build method
+4. Configure the repository settings:
+
+ **Repository Configuration:**
+ - **Repository URL**: `https://github.com/your-username/your-repo.git`
+ - **Repository reference**: `main` (or your preferred branch)
+ - **Compose path**: `COBY/docker-compose.portainer.yml`
+ - **Additional files**: Leave empty (all configs are embedded)
+
+### 3. Configure Environment Variables
+
+In the **Environment variables** section, add the following variables (optional customizations):
+
+```bash
+# Database Configuration
+DB_PASSWORD=your_secure_database_password
+REDIS_PASSWORD=your_secure_redis_password
+
+# API Configuration
+API_PORT=8080
+WS_PORT=8081
+
+# Monitoring (if using monitoring profile)
+PROMETHEUS_PORT=9090
+GRAFANA_PORT=3001
+GRAFANA_PASSWORD=your_grafana_password
+
+# Performance Tuning
+MAX_CONNECTIONS_PER_EXCHANGE=5
+DATA_BUFFER_SIZE=10000
+BATCH_WRITE_SIZE=1000
+```
+
+### 4. Deploy the Stack
+
+1. **Stack name**: Enter `coby-system` (or your preferred name)
+2. **Environment variables**: Configure as needed (see above)
+3. **Access control**: Set appropriate permissions
+4. Click **Deploy the stack**
+
+### 5. Monitor Deployment
+
+1. Watch the deployment logs in Portainer
+2. Check that all services start successfully:
+ - `coby-timescaledb` (Database)
+ - `coby-redis` (Cache)
+   - `coby-app` (Main application; also serves the web dashboard)
+
+### 6. Verify Installation
+
+Once deployed, verify the installation:
+
+1. **Health Checks**: All services should show as "healthy" in Portainer
+2. **Web Dashboard**: Access `http://your-server:8080/` directly, or through your reverse proxy
+3. **API Endpoint**: Check `http://your-server:8080/health`
+4. **Logs**: Review logs for any errors
+
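+As a quick sanity check from the Docker host, a minimal sketch (assuming the default ports and the container names from this stack):
+
+```bash
+# Verify the API/dashboard endpoint and container status
+curl -f http://localhost:8080/health
+docker ps --filter "name=coby" --format "table {{.Names}}\t{{.Status}}"
+```
+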
+**Reverse Proxy Configuration**: Configure your reverse proxy to forward requests to the COBY app on port 8080. The application serves both the API and web dashboard from the same port.
+
+## Service Ports
+
+The following ports will be exposed:
+
+- **8080**: REST API + Web Dashboard (served by FastAPI)
+- **8081**: WebSocket API
+- **5432**: TimescaleDB (optional external access)
+- **6379**: Redis (optional external access)
+
+**Note**: The web dashboard is now served directly by the FastAPI application at port 8080, eliminating the need for a separate nginx container since you have a reverse proxy.
+
+## Optional Monitoring Stack
+
+To enable Prometheus and Grafana monitoring:
+
+1. In the stack configuration, set the environment variable `COMPOSE_PROFILES=monitoring` to enable the `monitoring` profile
+2. Additional ports will be exposed:
+ - **9090**: Prometheus
+ - **3001**: Grafana
+ - **9100**: Node Exporter
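+
+If you deploy from a shell with the bundled script instead of Portainer, the equivalent is:
+
+```bash
+# Start the stack with the monitoring profile using the deploy script
+./docker/deploy.sh -p monitoring up
+```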
+
+## Configuration Options
+
+### Resource Limits
+
+The stack includes resource limits for each service:
+
+- **COBY App**: 2GB RAM, 2 CPU cores (includes web dashboard)
+- **TimescaleDB**: 1GB RAM, 1 CPU core
+- **Redis**: 512MB RAM, 0.5 CPU cores
+
+### Persistent Data
+
+The following volumes are created for persistent data:
+
+- `timescale_data`: Database storage
+- `redis_data`: Redis persistence
+- `coby_logs`: Application logs
+- `coby_data`: Application data
+- `prometheus_data`: Metrics storage (if monitoring enabled)
+- `grafana_data`: Grafana dashboards (if monitoring enabled)
+
+### Network Configuration
+
+- **Network**: `coby-network` (172.20.0.0/16)
+- **Internal communication**: All services communicate via Docker network
+- **External access**: Only specified ports are exposed
+
+## Troubleshooting
+
+### Common Issues
+
+1. **Services not starting**:
+ - Check resource availability
+ - Review service logs in Portainer
+ - Verify environment variables
+
+2. **Database connection issues**:
+ - Ensure TimescaleDB is healthy
+ - Check database credentials
+ - Verify network connectivity
+
+3. **Web dashboard not accessible**:
+ - Confirm port 8080 is accessible through your reverse proxy
+ - Check that coby-app is running and healthy
+ - Verify static files are being served at the root path
+
+### Log Access
+
+Access logs through Portainer:
+
+1. Go to **Containers**
+2. Click on the container name
+3. Select **Logs** tab
+4. Use filters to find specific issues
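+
+The same logs are also available from a shell (container names as defined in this stack):
+
+```bash
+# Follow logs for the main application container
+docker logs -f coby-app
+```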
+
+### Health Checks
+
+Monitor service health:
+
+1. **Portainer Dashboard**: Shows health status
+2. **API Health**: `GET /health` endpoint
+3. **Database**: `pg_isready` command
+4. **Redis**: `redis-cli ping` command
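+
+For example, run the checks directly against the containers (a sketch using the names and credentials defined in this stack):
+
+```bash
+# API, database, and cache health checks
+curl -f http://localhost:8080/health
+docker exec coby-timescaledb pg_isready -U market_user -d market_data
+docker exec coby-redis redis-cli --no-auth-warning -a market_data_redis_2024 ping
+```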
+
+## Scaling and Updates
+
+### Horizontal Scaling
+
+To scale the main application:
+
+1. Go to the stack in Portainer
+2. Edit the stack
+3. Modify the `coby-app` service replicas
+4. Redeploy the stack
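+
+A minimal sketch of such an edit (note that a fixed `container_name` conflicts with multiple replicas and must be removed before scaling):
+
+```yaml
+# Stack edit sketch: run two replicas of the main application
+services:
+  coby-app:
+    deploy:
+      replicas: 2
+```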
+
+### Updates
+
+To update the system:
+
+1. **Git-based updates**: Re-deploy the stack in Portainer to pull the latest changes from the repository
+2. **Manual updates**: Edit stack configuration
+3. **Rolling updates**: Use Docker Swarm mode for zero-downtime updates
+
+### Backup
+
+Regular backups should include:
+
+- **Database**: TimescaleDB data volume
+- **Configuration**: Stack configuration in Portainer
+- **Logs**: Application logs for troubleshooting
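+
+A minimal backup sketch using the database container from this stack:
+
+```bash
+# Dump the TimescaleDB database to a compressed SQL file
+docker exec coby-timescaledb pg_dump -U market_user -d market_data \
+  | gzip > coby_backup_$(date +%F).sql.gz
+```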
+
+## Security Considerations
+
+1. **Change default passwords** for database and Redis
+2. **Use environment variables** for sensitive data
+3. **Limit network exposure** to required ports only
+4. **Regular updates** of base images
+5. **Monitor logs** for security events
+
+## Performance Tuning
+
+### Database Optimization
+
+- Adjust `shared_buffers` in TimescaleDB
+- Configure connection pooling
+- Monitor query performance
+
+### Application Tuning
+
+- Adjust `DATA_BUFFER_SIZE` for throughput
+- Configure `BATCH_WRITE_SIZE` for database writes
+- Monitor memory usage and adjust limits
+
+### Network Optimization
+
+- Use Docker overlay networks for multi-host deployments
+- Configure load balancing for high availability
+- Monitor network latency between services
+
+## Support
+
+For issues and support:
+
+1. Check the application logs
+2. Review Portainer container status
+3. Consult the main project documentation
+4. Submit issues to the project repository
+
+## Example Stack Configuration
+
+Here's a complete example of environment variables for production:
+
+```bash
+# Production Configuration
+ENVIRONMENT=production
+DEBUG=false
+LOG_LEVEL=INFO
+
+# Security
+DB_PASSWORD=prod_secure_db_pass_2024
+REDIS_PASSWORD=prod_secure_redis_pass_2024
+
+# Performance
+MAX_CONNECTIONS_PER_EXCHANGE=10
+DATA_BUFFER_SIZE=20000
+BATCH_WRITE_SIZE=2000
+
+# Monitoring
+PROMETHEUS_PORT=9090
+GRAFANA_PORT=3001
+GRAFANA_PASSWORD=secure_grafana_pass
+
+# Exchange Configuration
+EXCHANGES=binance,coinbase,kraken,bybit,okx,huobi,kucoin,gateio,bitfinex,mexc
+SYMBOLS=BTCUSDT,ETHUSDT,ADAUSDT,DOTUSDT
+```
+
+This configuration provides a robust production deployment suitable for high-throughput cryptocurrency data aggregation.
\ No newline at end of file
diff --git a/COBY/REVERSE_PROXY_CONFIG.md b/COBY/REVERSE_PROXY_CONFIG.md
new file mode 100644
index 0000000..c67cd34
--- /dev/null
+++ b/COBY/REVERSE_PROXY_CONFIG.md
@@ -0,0 +1,274 @@
+# Reverse Proxy Configuration for COBY
+
+Since COBY now serves both the API and web dashboard from port 8080, here are configuration examples for common reverse proxies.
+
+## Nginx Reverse Proxy
+
+```nginx
+# COBY upstream
+upstream coby_backend {
+ server coby-app:8080;
+ # Add more servers for load balancing if needed
+ # server coby-app-2:8080;
+}
+
+server {
+ listen 80;
+ server_name coby.yourdomain.com;
+
+ # Optional: Redirect HTTP to HTTPS
+ # return 301 https://$server_name$request_uri;
+
+ # Main application proxy
+ location / {
+ proxy_pass http://coby_backend;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_cache_bypass $http_upgrade;
+ proxy_read_timeout 86400;
+
+ # CORS headers (if needed)
+ add_header Access-Control-Allow-Origin *;
+ add_header Access-Control-Allow-Methods "GET, POST, OPTIONS";
+ add_header Access-Control-Allow-Headers "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range";
+ }
+
+ # WebSocket specific configuration (if needed separately)
+ location /ws/ {
+ proxy_pass http://coby_backend;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_read_timeout 86400;
+ }
+
+ # Health check endpoint
+ location /health {
+ proxy_pass http://coby_backend;
+ access_log off;
+ }
+
+ # Optional: Serve static files with caching
+ location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg)$ {
+ proxy_pass http://coby_backend;
+ expires 1y;
+ add_header Cache-Control "public, immutable";
+ }
+}
+
+# HTTPS configuration (recommended)
+server {
+ listen 443 ssl http2;
+ server_name coby.yourdomain.com;
+
+ # SSL configuration
+ ssl_certificate /path/to/your/certificate.crt;
+ ssl_certificate_key /path/to/your/private.key;
+ ssl_protocols TLSv1.2 TLSv1.3;
+ ssl_ciphers ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384;
+ ssl_prefer_server_ciphers off;
+
+ # Same location blocks as above
+ location / {
+ proxy_pass http://coby_backend;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_cache_bypass $http_upgrade;
+ proxy_read_timeout 86400;
+ }
+}
+```
+
+## Apache Reverse Proxy
+
+```apache
+<VirtualHost *:80>
+ ServerName coby.yourdomain.com
+
+ # Enable required modules
+ # a2enmod proxy proxy_http proxy_wstunnel rewrite
+
+ # Proxy configuration
+ ProxyPreserveHost On
+ ProxyRequests Off
+
+ # Main application
+ ProxyPass / http://coby-app:8080/
+ ProxyPassReverse / http://coby-app:8080/
+
+ # WebSocket support
+ RewriteEngine On
+ RewriteCond %{HTTP:Upgrade} websocket [NC]
+ RewriteCond %{HTTP:Connection} upgrade [NC]
+ RewriteRule ^/?(.*) "ws://coby-app:8080/$1" [P,L]
+
+ # Headers
+ ProxyPassReverse / http://coby-app:8080/
+ ProxyPassReverseMatch ^(/.*) http://coby-app:8080$1
+
+ # Optional: Logging
+ ErrorLog ${APACHE_LOG_DIR}/coby_error.log
+ CustomLog ${APACHE_LOG_DIR}/coby_access.log combined
+</VirtualHost>
+
+# HTTPS version
+<VirtualHost *:443>
+ ServerName coby.yourdomain.com
+
+ # SSL configuration
+ SSLEngine on
+ SSLCertificateFile /path/to/your/certificate.crt
+ SSLCertificateKeyFile /path/to/your/private.key
+
+ # Same proxy configuration as above
+ ProxyPreserveHost On
+ ProxyRequests Off
+ ProxyPass / http://coby-app:8080/
+ ProxyPassReverse / http://coby-app:8080/
+
+ # WebSocket support
+ RewriteEngine On
+ RewriteCond %{HTTP:Upgrade} websocket [NC]
+ RewriteCond %{HTTP:Connection} upgrade [NC]
+ RewriteRule ^/?(.*) "ws://coby-app:8080/$1" [P,L]
+
+</VirtualHost>
+
+## Traefik (Docker Labels)
+
+If you're using Traefik, add these labels to your COBY app service in docker-compose:
+
+```yaml
+coby-app:
+ # ... other configuration
+ labels:
+ - "traefik.enable=true"
+ - "traefik.http.routers.coby.rule=Host(`coby.yourdomain.com`)"
+ - "traefik.http.routers.coby.entrypoints=websecure"
+ - "traefik.http.routers.coby.tls.certresolver=letsencrypt"
+ - "traefik.http.services.coby.loadbalancer.server.port=8080"
+
+ # WebSocket support
+ - "traefik.http.routers.coby-ws.rule=Host(`coby.yourdomain.com`) && PathPrefix(`/ws`)"
+ - "traefik.http.routers.coby-ws.entrypoints=websecure"
+ - "traefik.http.routers.coby-ws.tls.certresolver=letsencrypt"
+ - "traefik.http.services.coby-ws.loadbalancer.server.port=8081"
+```
+
+## Caddy
+
+```caddy
+coby.yourdomain.com {
+ reverse_proxy coby-app:8080
+
+ # WebSocket support is automatic in Caddy
+
+ # Optional: Custom headers
+ header {
+ # Security headers
+ X-Frame-Options "SAMEORIGIN"
+ X-XSS-Protection "1; mode=block"
+ X-Content-Type-Options "nosniff"
+ Referrer-Policy "no-referrer-when-downgrade"
+ }
+
+ # Optional: Logging
+ log {
+ output file /var/log/caddy/coby.log
+ }
+}
+```
+
+## HAProxy
+
+```haproxy
+global
+ daemon
+
+defaults
+ mode http
+ timeout connect 5000ms
+ timeout client 50000ms
+ timeout server 50000ms
+
+frontend coby_frontend
+ bind *:80
+ bind *:443 ssl crt /path/to/your/certificate.pem
+ redirect scheme https if !{ ssl_fc }
+
+ # WebSocket detection
+ acl is_websocket hdr(Upgrade) -i websocket
+ acl is_websocket_path path_beg /ws
+
+ use_backend coby_websocket if is_websocket or is_websocket_path
+ default_backend coby_backend
+
+backend coby_backend
+ balance roundrobin
+ option httpchk GET /health
+ server coby1 coby-app:8080 check
+
+backend coby_websocket
+ balance roundrobin
+ server coby1 coby-app:8081 check
+```
+
+## Docker Compose with Reverse Proxy
+
+Here's an example of how to integrate with an existing reverse proxy network:
+
+```yaml
+# Add to your docker-compose.portainer.yml
+networks:
+ coby-network:
+ driver: bridge
+ reverse-proxy:
+ external: true # Your existing reverse proxy network
+
+services:
+ coby-app:
+ # ... existing configuration
+ networks:
+ - coby-network
+ - reverse-proxy # Connect to reverse proxy network
+ # Remove port mappings if using reverse proxy
+ # ports:
+ # - "8080:8080"
+ # - "8081:8081"
+```
+
+## Important Notes
+
+1. **WebSocket Support**: Ensure your reverse proxy supports WebSocket upgrades for real-time features
+2. **Health Checks**: Configure health checks to use `/health` endpoint
+3. **Timeouts**: Set appropriate timeouts for long-running WebSocket connections
+4. **SSL/TLS**: Always use HTTPS in production
+5. **Rate Limiting**: Consider implementing rate limiting at the reverse proxy level
+6. **Caching**: Static assets can be cached at the reverse proxy level
+7. **Load Balancing**: If scaling horizontally, configure load balancing appropriately
+
+## Testing Your Configuration
+
+After configuring your reverse proxy:
+
+1. **Basic connectivity**: `curl http://your-domain/health`
+2. **Web dashboard**: Visit `http://your-domain/` in browser
+3. **API endpoints**: Test `http://your-domain/api/` endpoints
+4. **WebSocket**: Test WebSocket connections to `/ws/` path
+5. **SSL**: Verify HTTPS is working if configured
+
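+For example (a sketch; `websocat` is one assumed WebSocket test client, any equivalent works):
+
+```bash
+# Basic connectivity and WebSocket smoke test
+curl -fsS https://coby.yourdomain.com/health
+websocat wss://coby.yourdomain.com/ws/
+```
+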
+The COBY application handles all HTTP routing internally, so your reverse proxy mainly needs to forward traffic to port 8080 (plus WebSocket traffic to port 8081 where your configuration routes it separately).
\ No newline at end of file
diff --git a/COBY/api/rest_api.py b/COBY/api/rest_api.py
index 11a9e6d..8c2ab72 100644
--- a/COBY/api/rest_api.py
+++ b/COBY/api/rest_api.py
@@ -5,8 +5,10 @@ REST API server for COBY system.
from fastapi import FastAPI, HTTPException, Request, Query, Path
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
+from fastapi.staticfiles import StaticFiles
from typing import Optional, List
import asyncio
+import os
from ..config import config
from ..caching.redis_manager import redis_manager
from ..utils.logging import get_logger, set_correlation_id
@@ -17,7 +19,7 @@ from .response_formatter import ResponseFormatter
logger = get_logger(__name__)
-def create_app() -> FastAPI:
+def create_app(config_obj=None) -> FastAPI:
"""Create and configure FastAPI application"""
app = FastAPI(
@@ -28,6 +30,13 @@ def create_app() -> FastAPI:
redoc_url="/redoc"
)
+ # Mount static files for web dashboard (since we removed nginx)
+ static_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "web", "static")
+ if os.path.exists(static_path):
+ app.mount("/static", StaticFiles(directory=static_path), name="static")
+        # Serve index.html at root for the dashboard.
+        # Caveat: Starlette matches mounts in registration order, so this root
+        # mount must be registered after all API routes or it will shadow them.
+        app.mount("/", StaticFiles(directory=static_path, html=True), name="dashboard")
+
# Add CORS middleware
app.add_middleware(
CORSMiddleware,
diff --git a/COBY/docker-compose.dev.yml b/COBY/docker-compose.dev.yml
new file mode 100644
index 0000000..751631b
--- /dev/null
+++ b/COBY/docker-compose.dev.yml
@@ -0,0 +1,87 @@
+# Docker Compose configuration for development environment
+version: '3.8'
+
+services:
+ # Override main app for development
+ coby-app:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ target: development
+ environment:
+ # Development overrides
+ ENVIRONMENT: development
+ DEBUG: true
+ LOG_LEVEL: DEBUG
+
+ # Database configuration
+ DB_HOST: timescaledb
+ DB_PORT: 5432
+ DB_NAME: ${DB_NAME:-market_data_dev}
+ DB_USER: ${DB_USER:-market_user}
+ DB_PASSWORD: ${DB_PASSWORD:-dev_password}
+
+ # Redis configuration
+ REDIS_HOST: redis
+ REDIS_PORT: 6379
+ REDIS_PASSWORD: ${REDIS_PASSWORD:-dev_redis}
+
+ # Development settings
+ PYTHONPATH: /app
+ FLASK_ENV: development
+ FLASK_DEBUG: 1
+ volumes:
+ # Mount source code for live reloading
+ - .:/app
+ - ./logs:/app/logs
+ - ./data:/app/data
+ ports:
+ - "8080:8080"
+ - "8081:8081"
+ - "5678:5678" # Debug port
+ command: ["python", "-m", "COBY.main", "--debug", "--reload"]
+
+ # Development database with different settings
+ timescaledb:
+ environment:
+ POSTGRES_DB: ${DB_NAME:-market_data_dev}
+ POSTGRES_USER: ${DB_USER:-market_user}
+ POSTGRES_PASSWORD: ${DB_PASSWORD:-dev_password}
+ POSTGRES_HOST_AUTH_METHOD: trust # Less secure for dev
+ ports:
+ - "5433:5432" # Different port to avoid conflicts
+
+ # Development Redis
+ redis:
+ ports:
+ - "6380:6379" # Different port to avoid conflicts
+ command: redis-server --requirepass ${REDIS_PASSWORD:-dev_redis}
+
+ # Hot-reload web dashboard
+ coby-dashboard-dev:
+ image: nginx:alpine
+ container_name: coby-dashboard-dev
+ ports:
+ - "3000:80"
+ volumes:
+ - ./web/static:/usr/share/nginx/html
+ - ./docker/nginx-dev.conf:/etc/nginx/nginx.conf:ro
+ networks:
+ - coby-network
+ depends_on:
+ - coby-app
+
+ # Development tools container
+ dev-tools:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ target: development
+ container_name: coby-dev-tools
+ volumes:
+ - .:/app
+ networks:
+ - coby-network
+ command: ["tail", "-f", "/dev/null"] # Keep container running
+ profiles:
+ - tools
\ No newline at end of file
diff --git a/COBY/docker-compose.portainer.yml b/COBY/docker-compose.portainer.yml
new file mode 100644
index 0000000..113dadf
--- /dev/null
+++ b/COBY/docker-compose.portainer.yml
@@ -0,0 +1,392 @@
+# Docker Compose for COBY Multi-Exchange Data Aggregation System
+# Optimized for Portainer deployment with Git repository integration
+version: '3.8'
+
+services:
+ # TimescaleDB Database
+ timescaledb:
+ image: timescale/timescaledb:latest-pg15
+ container_name: coby-timescaledb
+ environment:
+ POSTGRES_DB: market_data
+ POSTGRES_USER: market_user
+ POSTGRES_PASSWORD: market_data_secure_pass_2024
+ TIMESCALEDB_TELEMETRY: 'off'
+ ports:
+ - "5432:5432"
+ volumes:
+ - timescale_data:/var/lib/postgresql/data
+ - ./COBY/docker/init-scripts:/docker-entrypoint-initdb.d:ro
+ networks:
+ - coby-network
+ restart: unless-stopped
+ healthcheck:
+ test: ["CMD-SHELL", "pg_isready -U market_user -d market_data"]
+ interval: 30s
+ timeout: 10s
+ retries: 5
+ start_period: 60s
+ deploy:
+ resources:
+ limits:
+ memory: 1G
+ cpus: '1.0'
+ reservations:
+ memory: 512M
+ cpus: '0.5'
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "10m"
+ max-file: "3"
+
+ # Redis Cache
+ redis:
+ image: redis:7-alpine
+ container_name: coby-redis
+ command: >
+ redis-server
+ --requirepass market_data_redis_2024
+ --maxmemory 256mb
+ --maxmemory-policy allkeys-lru
+ --save 900 1
+ --save 300 10
+ --save 60 10000
+ ports:
+ - "6379:6379"
+ volumes:
+ - redis_data:/data
+ networks:
+ - coby-network
+ restart: unless-stopped
+ healthcheck:
+ test: ["CMD", "redis-cli", "--no-auth-warning", "-a", "market_data_redis_2024", "ping"]
+ interval: 30s
+ timeout: 5s
+ retries: 3
+ deploy:
+ resources:
+ limits:
+ memory: 512M
+ cpus: '0.5'
+ reservations:
+ memory: 128M
+ cpus: '0.1'
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "10m"
+ max-file: "3"
+
+ # COBY Main Application
+ coby-app:
+ build:
+ context: ./COBY
+ dockerfile: Dockerfile
+ target: production
+ container_name: coby-app
+ environment:
+ # Database configuration
+ DB_HOST: timescaledb
+ DB_PORT: 5432
+ DB_NAME: market_data
+ DB_USER: market_user
+ DB_PASSWORD: market_data_secure_pass_2024
+ DB_SCHEMA: market_data
+ DB_POOL_SIZE: 10
+ DB_MAX_OVERFLOW: 20
+ DB_POOL_TIMEOUT: 30
+
+ # Redis configuration
+ REDIS_HOST: redis
+ REDIS_PORT: 6379
+ REDIS_PASSWORD: market_data_redis_2024
+ REDIS_DB: 0
+ REDIS_MAX_CONNECTIONS: 50
+ REDIS_SOCKET_TIMEOUT: 5
+ REDIS_CONNECT_TIMEOUT: 5
+
+ # Application configuration
+ ENVIRONMENT: production
+ DEBUG: false
+ LOG_LEVEL: INFO
+ LOG_FILE: logs/coby.log
+ LOG_MAX_SIZE: 100
+ LOG_BACKUP_COUNT: 5
+ ENABLE_CORRELATION_ID: true
+
+ # API configuration
+ API_HOST: 0.0.0.0
+ API_PORT: 8080
+ WS_PORT: 8081
+ CORS_ORIGINS: "*"
+ RATE_LIMIT: 100
+ MAX_WS_CONNECTIONS: 1000
+
+ # Exchange configuration
+ MAX_CONNECTIONS_PER_EXCHANGE: 5
+ RECONNECT_DELAY: 5
+ MAX_RECONNECT_ATTEMPTS: 10
+ HEARTBEAT_INTERVAL: 30
+ EXCHANGES: "binance,coinbase,kraken,bybit,okx,huobi,kucoin,gateio,bitfinex,mexc"
+ SYMBOLS: "BTCUSDT,ETHUSDT"
+
+ # Aggregation configuration
+ BUCKET_SIZE: 1.0
+ HEATMAP_DEPTH: 50
+ UPDATE_FREQUENCY: 0.5
+ VOLUME_THRESHOLD: 0.01
+
+ # Performance configuration
+ DATA_BUFFER_SIZE: 10000
+ BATCH_WRITE_SIZE: 1000
+ MAX_MEMORY_USAGE: 2048
+ GC_THRESHOLD: 0.8
+ PROCESSING_TIMEOUT: 10
+ MAX_QUEUE_SIZE: 50000
+
+ # Monitoring configuration
+ METRICS_COLLECTION_INTERVAL: 1.0
+ MAX_CPU_USAGE: 80.0
+ MAX_MEMORY_USAGE_PERCENT: 85.0
+ MIN_MEMORY_AVAILABLE_GB: 1.0
+ MAX_LATENCY_MS: 100.0
+ MAX_ERROR_RATE_PERCENT: 5.0
+ MIN_THROUGHPUT_OPS: 100.0
+ ports:
+ - "8080:8080"
+ - "8081:8081"
+ volumes:
+ - coby_logs:/app/logs
+ - coby_data:/app/data
+ networks:
+ - coby-network
+ depends_on:
+ timescaledb:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ restart: unless-stopped
+ healthcheck:
+ test: ["CMD", "python", "-c", "import requests; requests.get('http://localhost:8080/health', timeout=5)"]
+ interval: 30s
+ timeout: 10s
+ retries: 3
+ start_period: 120s
+ deploy:
+ resources:
+ limits:
+ memory: 2G
+ cpus: '2.0'
+ reservations:
+ memory: 1G
+ cpus: '1.0'
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "50m"
+ max-file: "5"
+
+
+ # Prometheus (Optional - for monitoring)
+ prometheus:
+ image: prom/prometheus:latest
+ container_name: coby-prometheus
+ ports:
+ - "9090:9090"
+ volumes:
+ - prometheus_data:/prometheus
+ configs:
+ - source: prometheus_config
+ target: /etc/prometheus/prometheus.yml
+ - source: alert_rules
+ target: /etc/prometheus/alert_rules.yml
+ networks:
+ - coby-network
+ command:
+ - '--config.file=/etc/prometheus/prometheus.yml'
+ - '--storage.tsdb.path=/prometheus'
+ - '--web.console.libraries=/etc/prometheus/console_libraries'
+ - '--web.console.templates=/etc/prometheus/consoles'
+ - '--storage.tsdb.retention.time=200h'
+ - '--web.enable-lifecycle'
+ - '--web.enable-admin-api'
+ restart: unless-stopped
+ profiles:
+ - monitoring
+ deploy:
+ resources:
+ limits:
+ memory: 512M
+ cpus: '0.5'
+ reservations:
+ memory: 256M
+ cpus: '0.2'
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "10m"
+ max-file: "3"
+
+ # Grafana (Optional - for visualization)
+ grafana:
+ image: grafana/grafana:latest
+ container_name: coby-grafana
+ ports:
+ - "3001:3000"
+ environment:
+ GF_SECURITY_ADMIN_PASSWORD: admin123
+ GF_USERS_ALLOW_SIGN_UP: false
+ GF_INSTALL_PLUGINS: grafana-clock-panel,grafana-simple-json-datasource
+ GF_SECURITY_ALLOW_EMBEDDING: true
+ volumes:
+ - grafana_data:/var/lib/grafana
+ networks:
+ - coby-network
+ depends_on:
+ - prometheus
+ restart: unless-stopped
+ profiles:
+ - monitoring
+ deploy:
+ resources:
+ limits:
+ memory: 256M
+ cpus: '0.3'
+ reservations:
+ memory: 128M
+ cpus: '0.1'
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "10m"
+ max-file: "3"
+
+ # Node Exporter for system metrics
+ node-exporter:
+ image: prom/node-exporter:latest
+ container_name: coby-node-exporter
+ ports:
+ - "9100:9100"
+ volumes:
+ - /proc:/host/proc:ro
+ - /sys:/host/sys:ro
+ - /:/rootfs:ro
+ command:
+ - '--path.procfs=/host/proc'
+ - '--path.rootfs=/rootfs'
+ - '--path.sysfs=/host/sys'
+ - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
+ networks:
+ - coby-network
+ restart: unless-stopped
+ profiles:
+ - monitoring
+ deploy:
+ resources:
+ limits:
+ memory: 128M
+ cpus: '0.1'
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "5m"
+ max-file: "2"
+
+# Network configuration
+networks:
+ coby-network:
+ driver: bridge
+ ipam:
+ config:
+ - subnet: 172.20.0.0/16
+ labels:
+ - "com.coby.network=main"
+
+# Volume configuration
+volumes:
+ timescale_data:
+ driver: local
+ labels:
+ - "com.coby.volume=database"
+ redis_data:
+ driver: local
+ labels:
+ - "com.coby.volume=cache"
+ prometheus_data:
+ driver: local
+ labels:
+ - "com.coby.volume=monitoring"
+ grafana_data:
+ driver: local
+ labels:
+ - "com.coby.volume=monitoring"
+ coby_logs:
+ driver: local
+ labels:
+ - "com.coby.volume=logs"
+ coby_data:
+ driver: local
+ labels:
+ - "com.coby.volume=data"
+
+# Configuration files
+configs:
+ prometheus_config:
+ content: |
+ global:
+ scrape_interval: 15s
+ evaluation_interval: 15s
+ rule_files:
+ - "/etc/prometheus/alert_rules.yml"
+ scrape_configs:
+ - job_name: 'coby-app'
+ static_configs:
+ - targets: ['coby-app:8080']
+ metrics_path: '/metrics'
+ scrape_interval: 10s
+ scrape_timeout: 5s
+ - job_name: 'prometheus'
+ static_configs:
+ - targets: ['localhost:9090']
+ - job_name: 'node'
+ static_configs:
+ - targets: ['node-exporter:9100']
+ scrape_interval: 30s
+
+ alert_rules:
+ content: |
+ groups:
+ - name: coby_alerts
+ rules:
+ - alert: HighCPUUsage
+ expr: system_cpu_usage > 80
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: "High CPU usage detected"
+ description: "CPU usage is above 80% for more than 2 minutes"
+ - alert: HighMemoryUsage
+ expr: system_memory_usage > 85
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: "High memory usage detected"
+ description: "Memory usage is above 85% for more than 2 minutes"
+ - alert: ServiceDown
+ expr: up == 0
+ for: 1m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Service is down"
+ description: "{{ $$labels.job }} service is down"
+
+# Labels for the entire stack
+x-labels: &default-labels
+ com.coby.project: "multi-exchange-data-aggregation"
+ com.coby.version: "1.0.0"
+ com.coby.environment: "production"
\ No newline at end of file
diff --git a/COBY/docker-compose.yml b/COBY/docker-compose.yml
new file mode 100644
index 0000000..04a6c7a
--- /dev/null
+++ b/COBY/docker-compose.yml
@@ -0,0 +1,215 @@
+# Docker Compose configuration for COBY Multi-Exchange Data Aggregation System
+version: '3.8'
+
+services:
+ # TimescaleDB Database
+ timescaledb:
+ image: timescale/timescaledb:latest-pg15
+ container_name: coby-timescaledb
+ environment:
+ POSTGRES_DB: ${DB_NAME:-market_data}
+ POSTGRES_USER: ${DB_USER:-market_user}
+ POSTGRES_PASSWORD: ${DB_PASSWORD:-market_data_secure_pass_2024}
+ TIMESCALEDB_TELEMETRY: 'off'
+ ports:
+ - "${DB_PORT:-5432}:5432"
+ volumes:
+ - timescale_data:/var/lib/postgresql/data
+ - ./docker/init-scripts:/docker-entrypoint-initdb.d
+ networks:
+ - coby-network
+ restart: unless-stopped
+ healthcheck:
+ test: ["CMD-SHELL", "pg_isready -U ${DB_USER:-market_user} -d ${DB_NAME:-market_data}"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "10m"
+ max-file: "3"
+
+ # Redis Cache
+ redis:
+ image: redis:7-alpine
+ container_name: coby-redis
+ command: redis-server /usr/local/etc/redis/redis.conf
+ ports:
+ - "${REDIS_PORT:-6379}:6379"
+ volumes:
+ - redis_data:/data
+ - ./docker/redis.conf:/usr/local/etc/redis/redis.conf:ro
+ networks:
+ - coby-network
+ restart: unless-stopped
+ healthcheck:
+ test: ["CMD", "redis-cli", "ping"]
+ interval: 10s
+ timeout: 3s
+ retries: 3
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "10m"
+ max-file: "3"
+
+ # COBY Main Application
+ coby-app:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ target: production
+ container_name: coby-app
+ environment:
+ # Database configuration
+ DB_HOST: timescaledb
+ DB_PORT: 5432
+ DB_NAME: ${DB_NAME:-market_data}
+ DB_USER: ${DB_USER:-market_user}
+ DB_PASSWORD: ${DB_PASSWORD:-market_data_secure_pass_2024}
+
+ # Redis configuration
+ REDIS_HOST: redis
+ REDIS_PORT: 6379
+ REDIS_PASSWORD: ${REDIS_PASSWORD:-market_data_redis_2024}
+
+ # Application configuration
+ ENVIRONMENT: ${ENVIRONMENT:-production}
+ DEBUG: ${DEBUG:-false}
+ LOG_LEVEL: ${LOG_LEVEL:-INFO}
+
+ # API configuration
+ API_HOST: 0.0.0.0
+ API_PORT: 8080
+ WS_PORT: 8081
+
+ # Performance configuration
+ MAX_CONNECTIONS_PER_EXCHANGE: ${MAX_CONNECTIONS_PER_EXCHANGE:-5}
+ DATA_BUFFER_SIZE: ${DATA_BUFFER_SIZE:-10000}
+ BATCH_WRITE_SIZE: ${BATCH_WRITE_SIZE:-1000}
+
+ # Monitoring configuration
+ BUCKET_SIZE: ${BUCKET_SIZE:-1.0}
+ HEATMAP_DEPTH: ${HEATMAP_DEPTH:-50}
+ UPDATE_FREQUENCY: ${UPDATE_FREQUENCY:-0.5}
+ ports:
+ - "${API_PORT:-8080}:8080"
+ - "${WS_PORT:-8081}:8081"
+ volumes:
+ - ./logs:/app/logs
+ - ./data:/app/data
+ networks:
+ - coby-network
+ depends_on:
+ timescaledb:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ restart: unless-stopped
+ healthcheck:
+ test: ["CMD", "python", "-c", "import requests; requests.get('http://localhost:8080/health', timeout=5)"]
+ interval: 30s
+ timeout: 10s
+ retries: 3
+ start_period: 60s
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "50m"
+ max-file: "5"
+
+ # Web Dashboard (Nginx serving static files)
+ coby-dashboard:
+ image: nginx:alpine
+ container_name: coby-dashboard
+ ports:
+ - "${DASHBOARD_PORT:-3000}:80"
+ volumes:
+ - ./web/static:/usr/share/nginx/html:ro
+ - ./docker/nginx.conf:/etc/nginx/nginx.conf:ro
+ networks:
+ - coby-network
+ depends_on:
+ - coby-app
+ restart: unless-stopped
+ healthcheck:
+ test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/"]
+ interval: 30s
+ timeout: 10s
+ retries: 3
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "10m"
+ max-file: "3"
+
+ # Prometheus (Optional - for monitoring)
+ prometheus:
+ image: prom/prometheus:latest
+ container_name: coby-prometheus
+ ports:
+ - "${PROMETHEUS_PORT:-9090}:9090"
+ volumes:
+ - ./docker/prometheus.yml:/etc/prometheus/prometheus.yml:ro
+ - prometheus_data:/prometheus
+ networks:
+ - coby-network
+ command:
+ - '--config.file=/etc/prometheus/prometheus.yml'
+ - '--storage.tsdb.path=/prometheus'
+ - '--web.console.libraries=/etc/prometheus/console_libraries'
+ - '--web.console.templates=/etc/prometheus/consoles'
+ - '--storage.tsdb.retention.time=200h'
+ - '--web.enable-lifecycle'
+ restart: unless-stopped
+ profiles:
+ - monitoring
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "10m"
+ max-file: "3"
+
+ # Grafana (Optional - for visualization)
+ grafana:
+ image: grafana/grafana:latest
+ container_name: coby-grafana
+ ports:
+ - "${GRAFANA_PORT:-3001}:3000"
+ environment:
+ GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_PASSWORD:-admin}
+ GF_USERS_ALLOW_SIGN_UP: false
+ volumes:
+ - grafana_data:/var/lib/grafana
+ - ./docker/grafana/dashboards:/etc/grafana/provisioning/dashboards:ro
+ - ./docker/grafana/datasources:/etc/grafana/provisioning/datasources:ro
+ networks:
+ - coby-network
+ depends_on:
+ - prometheus
+ restart: unless-stopped
+ profiles:
+ - monitoring
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "10m"
+ max-file: "3"
+
+networks:
+ coby-network:
+ driver: bridge
+ ipam:
+ config:
+ - subnet: 172.20.0.0/16
+
+volumes:
+ timescale_data:
+ driver: local
+ redis_data:
+ driver: local
+ prometheus_data:
+ driver: local
+ grafana_data:
+ driver: local
\ No newline at end of file
diff --git a/COBY/docker/.env.example b/COBY/docker/.env.example
new file mode 100644
index 0000000..993b7cf
--- /dev/null
+++ b/COBY/docker/.env.example
@@ -0,0 +1,170 @@
+# COBY Multi-Exchange Data Aggregation System Environment Configuration
+# Copy this file to .env and customize the values
+
+# =============================================================================
+# DATABASE CONFIGURATION
+# =============================================================================
+DB_HOST=timescaledb
+DB_PORT=5432
+DB_NAME=market_data
+DB_USER=market_user
+DB_PASSWORD=market_data_secure_pass_2024
+DB_SCHEMA=market_data
+DB_POOL_SIZE=10
+DB_MAX_OVERFLOW=20
+DB_POOL_TIMEOUT=30
+
+# =============================================================================
+# REDIS CONFIGURATION
+# =============================================================================
+REDIS_HOST=redis
+REDIS_PORT=6379
+REDIS_PASSWORD=market_data_redis_2024
+REDIS_DB=0
+REDIS_MAX_CONNECTIONS=50
+REDIS_SOCKET_TIMEOUT=5
+REDIS_CONNECT_TIMEOUT=5
+
+# =============================================================================
+# APPLICATION CONFIGURATION
+# =============================================================================
+ENVIRONMENT=production
+DEBUG=false
+LOG_LEVEL=INFO
+LOG_FILE=logs/coby.log
+LOG_MAX_SIZE=100
+LOG_BACKUP_COUNT=5
+ENABLE_CORRELATION_ID=true
+
+# =============================================================================
+# API CONFIGURATION
+# =============================================================================
+API_HOST=0.0.0.0
+API_PORT=8080
+WS_PORT=8081
+DASHBOARD_PORT=3000
+CORS_ORIGINS=*
+RATE_LIMIT=100
+MAX_WS_CONNECTIONS=1000
+
+# =============================================================================
+# EXCHANGE CONFIGURATION
+# =============================================================================
+MAX_CONNECTIONS_PER_EXCHANGE=5
+RECONNECT_DELAY=5
+MAX_RECONNECT_ATTEMPTS=10
+HEARTBEAT_INTERVAL=30
+
+# Supported exchanges (comma-separated)
+EXCHANGES=binance,coinbase,kraken,bybit,okx,huobi,kucoin,gateio,bitfinex,mexc
+
+# Trading symbols (comma-separated)
+SYMBOLS=BTCUSDT,ETHUSDT
+
+# =============================================================================
+# AGGREGATION CONFIGURATION
+# =============================================================================
+BUCKET_SIZE=1.0
+HEATMAP_DEPTH=50
+UPDATE_FREQUENCY=0.5
+VOLUME_THRESHOLD=0.01
+
+# =============================================================================
+# PERFORMANCE CONFIGURATION
+# =============================================================================
+DATA_BUFFER_SIZE=10000
+BATCH_WRITE_SIZE=1000
+MAX_MEMORY_USAGE=2048
+GC_THRESHOLD=0.8
+PROCESSING_TIMEOUT=10
+MAX_QUEUE_SIZE=50000
+
+# =============================================================================
+# MONITORING CONFIGURATION
+# =============================================================================
+PROMETHEUS_PORT=9090
+GRAFANA_PORT=3001
+GRAFANA_PASSWORD=admin
+
+# Metrics collection interval (seconds)
+METRICS_COLLECTION_INTERVAL=1.0
+
+# Performance thresholds
+MAX_CPU_USAGE=80.0
+MAX_MEMORY_USAGE_PERCENT=85.0
+MIN_MEMORY_AVAILABLE_GB=1.0
+MAX_LATENCY_MS=100.0
+MAX_ERROR_RATE_PERCENT=5.0
+MIN_THROUGHPUT_OPS=100.0
+
+# =============================================================================
+# DOCKER CONFIGURATION
+# =============================================================================
+# Container resource limits
+COBY_APP_MEMORY=2g
+COBY_APP_CPUS=2.0
+TIMESCALEDB_MEMORY=1g
+TIMESCALEDB_CPUS=1.0
+REDIS_MEMORY=512m
+REDIS_CPUS=0.5
+
+# Network configuration
+DOCKER_NETWORK_SUBNET=172.20.0.0/16
+
+# =============================================================================
+# DEVELOPMENT CONFIGURATION
+# =============================================================================
+# Override for development environment
+DEV_DB_NAME=market_data_dev
+DEV_DB_PASSWORD=dev_password
+DEV_REDIS_PASSWORD=dev_redis
+DEV_API_PORT=8080
+DEV_WS_PORT=8081
+DEV_DASHBOARD_PORT=3000
+
+# =============================================================================
+# SECURITY CONFIGURATION
+# =============================================================================
+# API Keys for exchanges (optional, for private data)
+BINANCE_API_KEY=
+BINANCE_API_SECRET=
+COINBASE_API_KEY=
+COINBASE_API_SECRET=
+COINBASE_PASSPHRASE=
+KRAKEN_API_KEY=
+KRAKEN_API_SECRET=
+BYBIT_API_KEY=
+BYBIT_API_SECRET=
+OKX_API_KEY=
+OKX_API_SECRET=
+OKX_PASSPHRASE=
+HUOBI_API_KEY=
+HUOBI_API_SECRET=
+KUCOIN_API_KEY=
+KUCOIN_API_SECRET=
+KUCOIN_PASSPHRASE=
+GATEIO_API_KEY=
+GATEIO_API_SECRET=
+BITFINEX_API_KEY=
+BITFINEX_API_SECRET=
+MEXC_API_KEY=
+MEXC_API_SECRET=
+
+# =============================================================================
+# NOTIFICATION CONFIGURATION
+# =============================================================================
+# Email notifications
+SMTP_SERVER=
+SMTP_PORT=587
+SMTP_USERNAME=
+SMTP_PASSWORD=
+SMTP_FROM_EMAIL=
+SMTP_TO_EMAILS=
+
+# Slack notifications
+SLACK_WEBHOOK_URL=
+SLACK_CHANNEL=
+
+# Webhook notifications
+WEBHOOK_URL=
+WEBHOOK_HEADERS=
\ No newline at end of file
diff --git a/COBY/docker/alert_rules.yml b/COBY/docker/alert_rules.yml
new file mode 100644
index 0000000..6588546
--- /dev/null
+++ b/COBY/docker/alert_rules.yml
@@ -0,0 +1,103 @@
+# Prometheus alert rules for COBY system
+groups:
+ - name: coby_alerts
+ rules:
+ # High CPU usage
+ - alert: HighCPUUsage
+ expr: system_cpu_usage > 80
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: "High CPU usage detected"
+ description: "CPU usage is above 80% for more than 2 minutes"
+
+ # High memory usage
+ - alert: HighMemoryUsage
+ expr: system_memory_usage > 85
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: "High memory usage detected"
+ description: "Memory usage is above 85% for more than 2 minutes"
+
+ # Low available memory
+ - alert: LowAvailableMemory
+ expr: system_memory_available_gb < 1
+ for: 1m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Low available memory"
+ description: "Available memory is below 1GB"
+
+ # High latency
+ - alert: HighLatency
+ expr: processing_latency_ms > 100
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "High processing latency"
+ description: "Processing latency is above 100ms for more than 5 minutes"
+
+ # Exchange connection failures
+ - alert: ExchangeConnectionFailure
+ expr: increase(exchange_connection_errors_total[5m]) > 5
+ for: 1m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Exchange connection failures"
+ description: "More than 5 exchange connection errors in the last 5 minutes"
+
+ # Database connection issues
+ - alert: DatabaseConnectionFailure
+ expr: database_connection_errors_total > 0
+ for: 1m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Database connection failure"
+ description: "Database connection errors detected"
+
+ # High error rate
+ - alert: HighErrorRate
+ expr: kpi_error_rate_percent > 5
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "High error rate"
+ description: "Error rate is above 5% for more than 5 minutes"
+
+ # Low throughput
+ - alert: LowThroughput
+ expr: kpi_throughput_ops_per_sec < 10
+ for: 10m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Low system throughput"
+ description: "System throughput is below 10 ops/sec for more than 10 minutes"
+
+ # Service down
+ - alert: ServiceDown
+ expr: up == 0
+ for: 1m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Service is down"
+ description: "{{ $labels.job }} service is down"
+
+ # Disk space low
+ - alert: DiskSpaceLow
+ expr: system_disk_usage > 90
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Disk space low"
+ description: "Disk usage is above 90%"
\ No newline at end of file
diff --git a/COBY/docker/deploy.sh b/COBY/docker/deploy.sh
index 23c500f..344a3a7 100644
--- a/COBY/docker/deploy.sh
+++ b/COBY/docker/deploy.sh
@@ -1,112 +1,416 @@
#!/bin/bash
-# Deployment script for market data infrastructure
-# Run this on your Docker host at 192.168.0.10
+# COBY Multi-Exchange Data Aggregation System Deployment Script
+# This script handles deployment of the COBY system using Docker Compose
-set -e
+set -e # Exit on any error
-echo "š Deploying Market Data Infrastructure..."
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
-# Check if Docker and Docker Compose are available
-if ! command -v docker &> /dev/null; then
- echo "ā Docker is not installed or not in PATH"
- exit 1
-fi
+# Configuration
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
+COMPOSE_FILE="$PROJECT_ROOT/docker-compose.yml"
+DEV_COMPOSE_FILE="$PROJECT_ROOT/docker-compose.dev.yml"
+ENV_FILE="$PROJECT_ROOT/docker/.env"
+ENV_EXAMPLE="$PROJECT_ROOT/docker/.env.example"
-if ! command -v docker-compose &> /dev/null && ! docker compose version &> /dev/null; then
- echo "ā Docker Compose is not installed or not in PATH"
- exit 1
-fi
+# Default values
+ENVIRONMENT="production"
+PROFILE=""
+SERVICES=""
+ACTION="up"
+DETACHED=true
+BUILD=false
+PULL=false
+FORCE_RECREATE=false
+REMOVE_ORPHANS=true
-# Set Docker Compose command
-if docker compose version &> /dev/null; then
- DOCKER_COMPOSE="docker compose"
-else
- DOCKER_COMPOSE="docker-compose"
-fi
+# Function to print colored output
+print_status() {
+ echo -e "${BLUE}[INFO]${NC} $1"
+}
-# Create necessary directories
-echo "š Creating directories..."
-mkdir -p ./data/timescale
-mkdir -p ./data/redis
-mkdir -p ./logs
-mkdir -p ./backups
+print_success() {
+ echo -e "${GREEN}[SUCCESS]${NC} $1"
+}
-# Set proper permissions
-echo "š Setting permissions..."
-chmod 755 ./data/timescale
-chmod 755 ./data/redis
-chmod 755 ./logs
-chmod 755 ./backups
+print_warning() {
+ echo -e "${YELLOW}[WARNING]${NC} $1"
+}
-# Copy environment file if it doesn't exist
-if [ ! -f .env ]; then
- echo "š Creating .env file..."
- cp .env.example .env
- echo "ā ļø Please edit .env file with your specific configuration"
- echo "ā ļø Default passwords are set - change them for production!"
-fi
+print_error() {
+ echo -e "${RED}[ERROR]${NC} $1"
+}
-# Pull latest images
-echo "š„ Pulling Docker images..."
-$DOCKER_COMPOSE -f timescaledb-compose.yml pull
+# Function to show usage
+show_usage() {
+ cat << EOF
+COBY Deployment Script
-# Stop existing containers if running
-echo "š Stopping existing containers..."
-$DOCKER_COMPOSE -f timescaledb-compose.yml down
+Usage: $0 [OPTIONS] [ACTION] [SERVICES...]
-# Start the services
-echo "š Starting services..."
-$DOCKER_COMPOSE -f timescaledb-compose.yml up -d
+ACTIONS:
+ up Start services (default)
+ down Stop and remove services
+ restart Restart services
+ logs Show service logs
+ ps Show running services
+ build Build services
+ pull Pull latest images
+ exec Execute command in service
+ health Check service health
-# Wait for services to be ready
-echo "ā³ Waiting for services to be ready..."
-sleep 30
+OPTIONS:
+ -e, --env ENV Environment (production|development) [default: production]
+ -p, --profile PROFILE Docker compose profile (monitoring|tools)
+ -d, --detach Run in detached mode [default: true]
+ -f, --foreground Run in foreground mode
+ -b, --build Build images before starting
+ --pull Pull latest images before starting
+ --force-recreate Force recreate containers
+ --no-remove-orphans Don't remove orphaned containers
+ -h, --help Show this help message
-# Check service health
-echo "š„ Checking service health..."
+EXAMPLES:
+ $0 # Start production environment
+ $0 -e development # Start development environment
+ $0 -p monitoring up # Start with monitoring profile
+ $0 down # Stop all services
+ $0 logs coby-app # Show logs for coby-app service
+ $0 exec coby-app bash # Execute bash in coby-app container
+ $0 -b up # Build and start services
-# Check TimescaleDB
-if docker exec market_data_timescaledb pg_isready -U market_user -d market_data; then
- echo "ā
TimescaleDB is ready"
-else
- echo "ā TimescaleDB is not ready"
- exit 1
-fi
+SERVICES:
+ coby-app Main application
+ timescaledb Database
+ redis Cache
+ coby-dashboard Web dashboard
+ prometheus Metrics collection (monitoring profile)
+ grafana Visualization (monitoring profile)
-# Check Redis
-if docker exec market_data_redis redis-cli -a market_data_redis_2024 ping | grep -q PONG; then
- echo "ā
Redis is ready"
-else
- echo "ā Redis is not ready"
- exit 1
-fi
+EOF
+}
-# Display connection information
-echo ""
-echo "š Deployment completed successfully!"
-echo ""
-echo "š Connection Information:"
-echo " TimescaleDB:"
-echo " Host: 192.168.0.10"
-echo " Port: 5432"
-echo " Database: market_data"
-echo " Username: market_user"
-echo " Password: (check .env file)"
-echo ""
-echo " Redis:"
-echo " Host: 192.168.0.10"
-echo " Port: 6379"
-echo " Password: (check .env file)"
-echo ""
-echo "š Next steps:"
-echo " 1. Update your application configuration to use these connection details"
-echo " 2. Test the connection from your application"
-echo " 3. Set up monitoring and alerting"
-echo " 4. Configure backup schedules"
-echo ""
-echo "š To view logs:"
-echo " docker-compose -f timescaledb-compose.yml logs -f"
-echo ""
-echo "š To stop services:"
-echo " docker-compose -f timescaledb-compose.yml down"
\ No newline at end of file
+# Function to check prerequisites
+check_prerequisites() {
+ print_status "Checking prerequisites..."
+
+ # Check if Docker is installed and running
+ if ! command -v docker &> /dev/null; then
+ print_error "Docker is not installed. Please install Docker first."
+ exit 1
+ fi
+
+ if ! docker info &> /dev/null; then
+ print_error "Docker is not running. Please start Docker first."
+ exit 1
+ fi
+
+ # Check if Docker Compose is available
+ if ! command -v docker-compose &> /dev/null && ! docker compose version &> /dev/null; then
+ print_error "Docker Compose is not available. Please install Docker Compose."
+ exit 1
+ fi
+
+ # Determine compose command
+ if command -v docker-compose &> /dev/null; then
+ COMPOSE_CMD="docker-compose"
+ else
+ COMPOSE_CMD="docker compose"
+ fi
+
+ print_success "Prerequisites check passed"
+}
+
+# Function to setup environment file
+setup_env_file() {
+ if [[ ! -f "$ENV_FILE" ]]; then
+ print_warning "Environment file not found. Creating from example..."
+ cp "$ENV_EXAMPLE" "$ENV_FILE"
+ print_status "Please edit $ENV_FILE with your configuration"
+ print_warning "Using default configuration for now"
+ else
+ print_success "Environment file found"
+ fi
+}
+
+# Function to build compose command
+build_compose_command() {
+ local cmd="$COMPOSE_CMD"
+
+ # Add compose files
+ cmd="$cmd -f $COMPOSE_FILE"
+
+ if [[ "$ENVIRONMENT" == "development" ]]; then
+ cmd="$cmd -f $DEV_COMPOSE_FILE"
+ fi
+
+ # Add environment file
+ cmd="$cmd --env-file $ENV_FILE"
+
+ # Add profile if specified
+ if [[ -n "$PROFILE" ]]; then
+ cmd="$cmd --profile $PROFILE"
+ fi
+
+ echo "$cmd"
+}
+
+# Function to start services
+start_services() {
+ print_status "Starting COBY services in $ENVIRONMENT mode..."
+
+ local cmd=$(build_compose_command)
+ local up_cmd="$cmd up"
+
+ if [[ "$BUILD" == true ]]; then
+ up_cmd="$up_cmd --build"
+ fi
+
+ if [[ "$PULL" == true ]]; then
+ up_cmd="$up_cmd --pull always"
+ fi
+
+ if [[ "$FORCE_RECREATE" == true ]]; then
+ up_cmd="$up_cmd --force-recreate"
+ fi
+
+ if [[ "$REMOVE_ORPHANS" == true ]]; then
+ up_cmd="$up_cmd --remove-orphans"
+ fi
+
+ if [[ "$DETACHED" == true ]]; then
+ up_cmd="$up_cmd -d"
+ fi
+
+ if [[ -n "$SERVICES" ]]; then
+ up_cmd="$up_cmd $SERVICES"
+ fi
+
+ eval "$up_cmd"
+
+ if [[ "$DETACHED" == true ]]; then
+ print_success "Services started successfully"
+ show_service_status
+ fi
+}
+
+# Function to stop services
+stop_services() {
+ print_status "Stopping COBY services..."
+
+ local cmd=$(build_compose_command)
+ eval "$cmd down --remove-orphans"
+
+ print_success "Services stopped successfully"
+}
+
+# Function to restart services
+restart_services() {
+ print_status "Restarting COBY services..."
+
+ local cmd=$(build_compose_command)
+ if [[ -n "$SERVICES" ]]; then
+ eval "$cmd restart $SERVICES"
+ else
+ eval "$cmd restart"
+ fi
+
+ print_success "Services restarted successfully"
+}
+
+# Function to show logs
+show_logs() {
+ local cmd=$(build_compose_command)
+ if [[ -n "$SERVICES" ]]; then
+ eval "$cmd logs -f $SERVICES"
+ else
+ eval "$cmd logs -f"
+ fi
+}
+
+# Function to show service status
+show_service_status() {
+ print_status "Service status:"
+ local cmd=$(build_compose_command)
+ eval "$cmd ps"
+}
+
+# Function to build services
+build_services() {
+ print_status "Building COBY services..."
+
+ local cmd=$(build_compose_command)
+ if [[ -n "$SERVICES" ]]; then
+ eval "$cmd build $SERVICES"
+ else
+ eval "$cmd build"
+ fi
+
+ print_success "Services built successfully"
+}
+
+# Function to pull images
+pull_images() {
+ print_status "Pulling latest images..."
+
+ local cmd=$(build_compose_command)
+ eval "$cmd pull"
+
+ print_success "Images pulled successfully"
+}
+
+# Function to execute command in service
+exec_command() {
+ if [[ -z "$SERVICES" ]]; then
+ print_error "Service name required for exec command"
+ exit 1
+ fi
+
+ local service=$(echo "$SERVICES" | cut -d' ' -f1)
+ local command=$(echo "$SERVICES" | cut -d' ' -f2-)
+
+ if [[ "$service" == "$command" ]]; then
+ command="bash"
+ fi
+
+ local cmd=$(build_compose_command)
+ eval "$cmd exec $service $command"
+}
+
+# Function to check service health
+check_health() {
+ print_status "Checking service health..."
+
+ local cmd=$(build_compose_command)
+ local services=$(eval "$cmd ps --services")
+
+ for service in $services; do
+ local health=$(eval "$cmd ps $service" | grep -o "healthy\|unhealthy\|starting" | head -1)
+ if [[ -n "$health" ]]; then
+ if [[ "$health" == "healthy" ]]; then
+ print_success "$service: $health"
+ elif [[ "$health" == "unhealthy" ]]; then
+ print_error "$service: $health"
+ else
+ print_warning "$service: $health"
+ fi
+ else
+ print_warning "$service: no health check"
+ fi
+ done
+}
+
+# Parse command line arguments
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ -e|--env)
+ ENVIRONMENT="$2"
+ shift 2
+ ;;
+ -p|--profile)
+ PROFILE="$2"
+ shift 2
+ ;;
+ -d|--detach)
+ DETACHED=true
+ shift
+ ;;
+ -f|--foreground)
+ DETACHED=false
+ shift
+ ;;
+ -b|--build)
+ BUILD=true
+ shift
+ ;;
+ --pull)
+ PULL=true
+ shift
+ ;;
+ --force-recreate)
+ FORCE_RECREATE=true
+ shift
+ ;;
+ --no-remove-orphans)
+ REMOVE_ORPHANS=false
+ shift
+ ;;
+ -h|--help)
+ show_usage
+ exit 0
+ ;;
+ up|down|restart|logs|ps|build|pull|exec|health)
+ ACTION="$1"
+ shift
+ ;;
+ *)
+ SERVICES="$SERVICES $1"
+ shift
+ ;;
+ esac
+done
+
+# Trim leading/trailing spaces from services
+SERVICES=$(echo "$SERVICES" | xargs)
+
+# Main execution
+main() {
+ print_status "COBY Multi-Exchange Data Aggregation System Deployment"
+ print_status "Environment: $ENVIRONMENT"
+ if [[ -n "$PROFILE" ]]; then
+ print_status "Profile: $PROFILE"
+ fi
+ if [[ -n "$SERVICES" ]]; then
+ print_status "Services: $SERVICES"
+ fi
+ print_status "Action: $ACTION"
+ echo
+
+ check_prerequisites
+ setup_env_file
+
+ case $ACTION in
+ up)
+ start_services
+ ;;
+ down)
+ stop_services
+ ;;
+ restart)
+ restart_services
+ ;;
+ logs)
+ show_logs
+ ;;
+ ps)
+ show_service_status
+ ;;
+ build)
+ build_services
+ ;;
+ pull)
+ pull_images
+ ;;
+ exec)
+ exec_command
+ ;;
+ health)
+ check_health
+ ;;
+ *)
+ print_error "Unknown action: $ACTION"
+ show_usage
+ exit 1
+ ;;
+ esac
+}
+
+# Run main function
+main
\ No newline at end of file
diff --git a/COBY/docker/nginx-dev.conf b/COBY/docker/nginx-dev.conf
new file mode 100644
index 0000000..20f0534
--- /dev/null
+++ b/COBY/docker/nginx-dev.conf
@@ -0,0 +1,68 @@
+# Nginx configuration for development environment
+events {
+ worker_connections 1024;
+}
+
+http {
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+
+ # Development logging
+ access_log /var/log/nginx/access.log;
+ error_log /var/log/nginx/error.log debug;
+
+ # Basic settings
+ sendfile on;
+ keepalive_timeout 65;
+
+ server {
+ listen 80;
+ server_name localhost;
+ root /usr/share/nginx/html;
+ index index.html;
+
+ # Disable caching for development
+ add_header Cache-Control "no-cache, no-store, must-revalidate";
+ add_header Pragma "no-cache";
+ add_header Expires "0";
+
+ # Main dashboard
+ location / {
+ try_files $uri $uri/ /index.html;
+ }
+
+ # API proxy to COBY app
+ location /api/ {
+ proxy_pass http://coby-app:8080/;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_cache_bypass $http_upgrade;
+ proxy_read_timeout 86400;
+ }
+
+ # WebSocket proxy
+ location /ws/ {
+ proxy_pass http://coby-app:8081/;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_read_timeout 86400;
+ }
+
+ # Health check
+ location /health {
+ access_log off;
+ return 200 "healthy\n";
+ add_header Content-Type text/plain;
+ }
+ }
+}
\ No newline at end of file
diff --git a/COBY/docker/nginx.conf b/COBY/docker/nginx.conf
new file mode 100644
index 0000000..e9db9ea
--- /dev/null
+++ b/COBY/docker/nginx.conf
@@ -0,0 +1,112 @@
+# Nginx configuration for COBY dashboard
+events {
+ worker_connections 1024;
+}
+
+http {
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+
+ # Logging
+ log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+ '$status $body_bytes_sent "$http_referer" '
+ '"$http_user_agent" "$http_x_forwarded_for"';
+
+ access_log /var/log/nginx/access.log main;
+ error_log /var/log/nginx/error.log warn;
+
+ # Basic settings
+ sendfile on;
+ tcp_nopush on;
+ tcp_nodelay on;
+ keepalive_timeout 65;
+ types_hash_max_size 2048;
+
+ # Gzip compression
+ gzip on;
+ gzip_vary on;
+ gzip_min_length 1024;
+ gzip_proxied any;
+ gzip_comp_level 6;
+ gzip_types
+ text/plain
+ text/css
+ text/xml
+ text/javascript
+ application/json
+ application/javascript
+ application/xml+rss
+ application/atom+xml
+ image/svg+xml;
+
+ # Rate limiting
+ limit_req_zone $binary_remote_addr zone=dashboard:10m rate=10r/s;
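+    # Per the nginx docs, ~1MB of zone memory stores about 16k 64-byte states,
+    # so this 10m zone tracks roughly 160k client IPs; requests beyond 10r/s
+    # plus the per-location burst are rejected with HTTP 503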
+
+ server {
+ listen 80;
+ server_name localhost;
+ root /usr/share/nginx/html;
+ index index.html;
+
+ # Security headers
+ add_header X-Frame-Options "SAMEORIGIN" always;
+ add_header X-XSS-Protection "1; mode=block" always;
+ add_header X-Content-Type-Options "nosniff" always;
+ add_header Referrer-Policy "no-referrer-when-downgrade" always;
+ add_header Content-Security-Policy "default-src 'self' http: https: data: blob: 'unsafe-inline'" always;
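+        # Permissive CSP (inline scripts and any http/https source allowed) so
+        # the bundled dashboard works; consider tightening before public exposure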
+
+ # Main dashboard
+ location / {
+ try_files $uri $uri/ /index.html;
+ limit_req zone=dashboard burst=20 nodelay;
+ }
+
+ # API proxy to COBY app
+ location /api/ {
+ proxy_pass http://coby-app:8080/;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
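+            # NOTE: this forwards "Connection: upgrade" on every API request;
+            # if the backend objects, switch to the standard map $http_upgrade
+            # $connection_upgrade pattern in the http block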
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_cache_bypass $http_upgrade;
+ proxy_read_timeout 86400;
+ }
+
+ # WebSocket proxy
+ location /ws/ {
+ proxy_pass http://coby-app:8081/;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_read_timeout 86400;
+ }
+
+ # Health check
+ location /health {
+ access_log off;
+ return 200 "healthy\n";
+ add_header Content-Type text/plain;
+ }
+
+ # Static assets caching
+ location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg)$ {
+ expires 1y;
+ add_header Cache-Control "public, immutable";
+ }
+
+ # Error pages
+ error_page 404 /404.html;
+ error_page 500 502 503 504 /50x.html;
+
+ location = /50x.html {
+ root /usr/share/nginx/html;
+ }
+ }
+}
\ No newline at end of file
diff --git a/COBY/docker/prometheus.yml b/COBY/docker/prometheus.yml
new file mode 100644
index 0000000..fc295f5
--- /dev/null
+++ b/COBY/docker/prometheus.yml
@@ -0,0 +1,53 @@
+# Prometheus configuration for COBY monitoring
+global:
+ scrape_interval: 15s
+ evaluation_interval: 15s
+
+rule_files:
+  - "/etc/prometheus/alert_rules.yml"
+  # - "first_rules.yml"
+  # - "second_rules.yml"
+
+scrape_configs:
+ # COBY application metrics
+ - job_name: 'coby-app'
+ static_configs:
+ - targets: ['coby-app:8080']
+ metrics_path: '/metrics'
+ scrape_interval: 10s
+ scrape_timeout: 5s
+
+ # TimescaleDB metrics (if postgres_exporter is added)
+ - job_name: 'timescaledb'
+ static_configs:
+ - targets: ['timescaledb:5432']
+ scrape_interval: 30s
+ scrape_timeout: 10s
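+    # NOTE: 5432 is the raw Postgres wire port, not a /metrics endpoint; once a
+    # postgres_exporter sidecar is deployed, point this job at it instead
+    # (postgres_exporter listens on 9187 by default)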
+
+ # Redis metrics (if redis_exporter is added)
+ - job_name: 'redis'
+ static_configs:
+ - targets: ['redis:6379']
+ scrape_interval: 30s
+ scrape_timeout: 10s
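+    # NOTE: likewise, 6379 is the Redis protocol port; this job needs a
+    # redis_exporter (default port 9121) before it will return metrics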
+
+ # Prometheus self-monitoring
+ - job_name: 'prometheus'
+ static_configs:
+ - targets: ['localhost:9090']
+
+ # Node exporter for system metrics (if added)
+ - job_name: 'node'
+ static_configs:
+ - targets: ['node-exporter:9100']
+ scrape_interval: 30s
+
+# Alerting configuration
+alerting:
+ alertmanagers:
+ - static_configs:
+ - targets:
+ # - alertmanager:9093
\ No newline at end of file
diff --git a/COBY/health_check.py b/COBY/health_check.py
new file mode 100644
index 0000000..3235222
--- /dev/null
+++ b/COBY/health_check.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python3
+"""
+Health check script for COBY application
+Used by Docker health checks and monitoring systems
+"""
+
+import sys
+import socket
+import requests
+from datetime import datetime
+
+def check_health():
+ """Perform health check on COBY application"""
+ try:
+ # Check main API endpoint
+ response = requests.get('http://localhost:8080/health', timeout=5)
+
+ if response.status_code == 200:
+ health_data = response.json()
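+            # Response shape assumed by the checks below:
+            #   {"status": ..., "timestamp": ...,
+            #    "components": {"<name>": {"healthy": bool, "error": ...}}}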
+
+ # Basic health check passed
+            print("✅ API Health Check: PASSED")
+            print(f"   Status: {health_data.get('status', 'unknown')}")
+            print(f"   Timestamp: {health_data.get('timestamp', 'unknown')}")
+
+ # Check individual components
+ components = health_data.get('components', {})
+ all_healthy = True
+
+            for component, status in components.items():
+                if status.get('healthy', False):
+                    print(f"✅ {component}: HEALTHY")
+                else:
+                    print(f"❌ {component}: UNHEALTHY - {status.get('error', 'unknown error')}")
+                    all_healthy = False
+
+            if all_healthy:
+                print("\n✅ Overall Health: HEALTHY")
+                return 0
+            else:
+                print("\n⚠️ Overall Health: DEGRADED")
+                return 1
+
+        else:
+            print(f"❌ API Health Check: FAILED (HTTP {response.status_code})")
+            return 1
+
+    except requests.exceptions.ConnectionError:
+        print("❌ API Health Check: FAILED (Connection refused)")
+        return 1
+    except requests.exceptions.Timeout:
+        print("❌ API Health Check: FAILED (Timeout)")
+        return 1
+    except Exception as e:
+        print(f"❌ API Health Check: FAILED ({str(e)})")
+        return 1
+
+def check_websocket():
+ """Check WebSocket server health"""
+ try:
+        # Simple TCP connection check to the WebSocket port
+        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.settimeout(5)
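+        # connect_ex returns 0 on success and an errno value on failure,
+        # so a zero result means the port accepted the TCP connection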
+ result = sock.connect_ex(('localhost', 8081))
+ sock.close()
+
+        if result == 0:
+            print("✅ WebSocket Server: ACCESSIBLE")
+            return True
+        else:
+            print("❌ WebSocket Server: NOT ACCESSIBLE")
+            return False
+
+    except Exception as e:
+        print(f"❌ WebSocket Server: ERROR ({str(e)})")
+        return False
+
+def main():
+ """Main health check function"""
+ print(f"COBY Health Check - {datetime.now().isoformat()}")
+ print("=" * 50)
+
+ # Check API health
+ api_healthy = check_health() == 0
+
+ # Check WebSocket
+ ws_healthy = check_websocket()
+
+ print("=" * 50)
+
+    if api_healthy and ws_healthy:
+        print("✅ COBY System: FULLY HEALTHY")
+        return 0
+    elif api_healthy:
+        print("⚠️ COBY System: PARTIALLY HEALTHY (API only)")
+        return 1
+    else:
+        print("❌ COBY System: UNHEALTHY")
+        return 1
+
+if __name__ == "__main__":
+ sys.exit(main())
\ No newline at end of file
diff --git a/COBY/main.py b/COBY/main.py
new file mode 100644
index 0000000..5b7833d
--- /dev/null
+++ b/COBY/main.py
@@ -0,0 +1,187 @@
+#!/usr/bin/env python3
+"""
+COBY Multi-Exchange Data Aggregation System
+Main application entry point for Docker deployment
+"""
+
+import asyncio
+import signal
+import sys
+import os
+import argparse
+from typing import Optional
+
+# Add the current directory to Python path
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+
+from utils.logging import get_logger, setup_logging
+from config import Config
+from monitoring.metrics_collector import metrics_collector
+from monitoring.performance_monitor import get_performance_monitor
+from monitoring.memory_monitor import memory_monitor
+from api.rest_api import create_app
+from api.websocket_server import WebSocketServer
+
+logger = get_logger(__name__)
+
+
+class COBYApplication:
+ """Main COBY application orchestrator"""
+
+ def __init__(self, config: Config):
+ self.config = config
+ self.running = False
+ self.tasks = []
+ self.websocket_server: Optional[WebSocketServer] = None
+
+ async def start(self):
+ """Start all application components"""
+ try:
+ logger.info("Starting COBY Multi-Exchange Data Aggregation System")
+
+ # Start monitoring systems
+ logger.info("Starting monitoring systems...")
+ metrics_collector.start_collection()
+ get_performance_monitor().start_monitoring()
+ memory_monitor.start_monitoring()
+
+ # Start WebSocket server
+ logger.info("Starting WebSocket server...")
+ self.websocket_server = WebSocketServer(
+ host=self.config.api.host,
+ port=self.config.api.websocket_port
+ )
+ websocket_task = asyncio.create_task(self.websocket_server.start())
+ self.tasks.append(websocket_task)
+
+ # Start REST API server (includes static file serving)
+ logger.info("Starting REST API server with static file serving...")
+ app = create_app(self.config)
+ api_task = asyncio.create_task(
+ self._run_api_server(app, self.config.api.host, self.config.api.port)
+ )
+ self.tasks.append(api_task)
+
+ # Start exchange connectors (placeholder for now)
+ logger.info("Exchange connectors would start here...")
+
+ # Start data processing pipeline (placeholder for now)
+ logger.info("Data processing pipeline would start here...")
+
+ self.running = True
+ logger.info("COBY system started successfully")
+
+ # Wait for all tasks
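+            # return_exceptions=True keeps a single failed task from cancelling
+            # the others; failures come back as exception objects in the result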
+ await asyncio.gather(*self.tasks, return_exceptions=True)
+
+ except Exception as e:
+ logger.error(f"Error starting COBY application: {e}")
+ raise
+
+ async def stop(self):
+ """Stop all application components"""
+ if not self.running:
+ return
+
+ logger.info("Stopping COBY Multi-Exchange Data Aggregation System")
+
+ try:
+ # Stop WebSocket server
+ if self.websocket_server:
+ await self.websocket_server.stop()
+
+ # Cancel all tasks
+ for task in self.tasks:
+ if not task.done():
+ task.cancel()
+
+ # Wait for tasks to complete
+ if self.tasks:
+ await asyncio.gather(*self.tasks, return_exceptions=True)
+
+ # Stop monitoring systems
+ memory_monitor.stop_monitoring()
+ get_performance_monitor().stop_monitoring()
+ metrics_collector.stop_collection()
+
+ self.running = False
+ logger.info("COBY system stopped successfully")
+
+ except Exception as e:
+ logger.error(f"Error stopping COBY application: {e}")
+
+ async def _run_api_server(self, app, host: str, port: int):
+ """Run the API server"""
+ try:
+ # Import here to avoid circular imports
+ import uvicorn
+
+ config = uvicorn.Config(
+ app,
+ host=host,
+ port=port,
+ log_level="info",
+ access_log=True
+ )
+ server = uvicorn.Server(config)
+ await server.serve()
+
+        except ImportError:
+            logger.error("uvicorn not available; no fallback API server implemented")
+            # Keep the task alive so the application does not exit immediately
+            await asyncio.sleep(3600)  # keep running for an hour
+
+
+async def main():
+ """Main application entry point"""
+ parser = argparse.ArgumentParser(description='COBY Multi-Exchange Data Aggregation System')
+ parser.add_argument('--debug', action='store_true', help='Enable debug mode')
+ parser.add_argument('--reload', action='store_true', help='Enable auto-reload (development)')
+ parser.add_argument('--config', type=str, help='Configuration file path')
+
+ args = parser.parse_args()
+
+ # Setup logging
+ log_level = 'DEBUG' if args.debug else 'INFO'
+ setup_logging(level=log_level)
+
+ # Load configuration
+ config = Config()
+ if args.debug:
+ config.debug = True
+ config.logging.level = 'DEBUG'
+
+ # Create and start application
+ app = COBYApplication(config)
+
+    # Register signal handlers on the running event loop (signal.signal with a
+    # plain callback is fragile here because the handler fires inside the
+    # loop's thread; loop.add_signal_handler is the asyncio-safe equivalent)
+    loop = asyncio.get_running_loop()
+
+    def signal_handler(signum):
+        logger.info(f"Received signal {signum}, shutting down...")
+        asyncio.create_task(app.stop())
+
+    for sig in (signal.SIGINT, signal.SIGTERM):
+        loop.add_signal_handler(sig, signal_handler, sig)
+
+ try:
+ await app.start()
+ except KeyboardInterrupt:
+ logger.info("Received keyboard interrupt, shutting down...")
+ except Exception as e:
+ logger.error(f"Application error: {e}")
+ sys.exit(1)
+ finally:
+ await app.stop()
+
+
+if __name__ == "__main__":
+ # Ensure we're running in the correct directory
+ os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
+ # Run the application
+ try:
+ asyncio.run(main())
+ except KeyboardInterrupt:
+ print("\nShutdown complete")
+ except Exception as e:
+ print(f"Fatal error: {e}")
+ sys.exit(1)
\ No newline at end of file
diff --git a/COBY/web/static/index.html b/COBY/web/static/index.html
index 1e6a95a..152f218 100644
--- a/COBY/web/static/index.html
+++ b/COBY/web/static/index.html
@@ -3,185 +3,410 @@
-[old markup not recoverable: "COBY - Market Data Dashboard" — a heatmap view
- with Volume Imbalance and Liquidity Score readouts, an Exchange Status panel,
- and a Statistics panel (updates/sec, total/bid/ask points), plus loading and
- error overlays]
+[new markup not recoverable: "COBY - Multi-Exchange Data Aggregation" — a
+ status dashboard with System Status, Exchange Connections, Performance
+ Metrics, and Data Processing panels, each with a loading/health-check
+ placeholder]