# Docker Compose for COBY Multi-Exchange Data Aggregation System
# Optimized for Portainer deployment with Git repository integration
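#
# Example usage outside Portainer (a sketch; file and profile names assume this repo layout):
#   docker compose -f docker-compose.portainer.yml up -d                        # core stack only
#   docker compose -f docker-compose.portainer.yml --profile monitoring up -d   # + Prometheus/Grafana/node-exporter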
version: '3.8'
services:
  # TimescaleDB Database
  timescaledb:
    image: timescale/timescaledb:latest-pg15
    container_name: coby-timescaledb
    environment:
      POSTGRES_DB: market_data
      POSTGRES_USER: market_user
      POSTGRES_PASSWORD: market_data_secure_pass_2024
      TIMESCALEDB_TELEMETRY: 'off'
    ports:
      - "5432:5432"
    volumes:
      - timescale_data:/var/lib/postgresql/data
      - ./COBY/docker/init-scripts:/docker-entrypoint-initdb.d:ro
    networks:
      - coby-network
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U market_user -d market_data"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 60s
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: '1.0'
        reservations:
          memory: 512M
          cpus: '0.5'
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
  # Redis Cache
  redis:
    image: redis:7-alpine
    container_name: coby-redis
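    # 256 MB cap with LRU eviction; RDB snapshots after 900s/1 change, 300s/10 changes, or 60s/10000 changes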
    command: >
      redis-server
      --requirepass market_data_redis_2024
      --maxmemory 256mb
      --maxmemory-policy allkeys-lru
      --save 900 1
      --save 300 10
      --save 60 10000
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data
    networks:
      - coby-network
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "redis-cli", "--no-auth-warning", "-a", "market_data_redis_2024", "ping"]
      interval: 30s
      timeout: 5s
      retries: 3
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: '0.5'
        reservations:
          memory: 128M
          cpus: '0.1'
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
  # COBY Main Application
  coby-app:
    build:
      context: ./COBY
      dockerfile: Dockerfile
      target: production
    container_name: coby-app
    environment:
      # Database configuration
      DB_HOST: timescaledb
      DB_PORT: 5432
      DB_NAME: market_data
      DB_USER: market_user
      DB_PASSWORD: market_data_secure_pass_2024
      DB_SCHEMA: market_data
      DB_POOL_SIZE: 10
      DB_MAX_OVERFLOW: 20
      DB_POOL_TIMEOUT: 30
      # Redis configuration
      REDIS_HOST: redis
      REDIS_PORT: 6379
      REDIS_PASSWORD: market_data_redis_2024
      REDIS_DB: 0
      REDIS_MAX_CONNECTIONS: 50
      REDIS_SOCKET_TIMEOUT: 5
      REDIS_CONNECT_TIMEOUT: 5
      # Application configuration
      ENVIRONMENT: production
      DEBUG: false
      LOG_LEVEL: INFO
      LOG_FILE: logs/coby.log
      LOG_MAX_SIZE: 100
      LOG_BACKUP_COUNT: 5
      ENABLE_CORRELATION_ID: true
      # API configuration
      API_HOST: 0.0.0.0
      API_PORT: 8080
      WS_PORT: 8081
      CORS_ORIGINS: "*"
      RATE_LIMIT: 100
      MAX_WS_CONNECTIONS: 1000
      # Exchange configuration
      MAX_CONNECTIONS_PER_EXCHANGE: 5
      RECONNECT_DELAY: 5
      MAX_RECONNECT_ATTEMPTS: 10
      HEARTBEAT_INTERVAL: 30
      EXCHANGES: "binance,coinbase,kraken,bybit,okx,huobi,kucoin,gateio,bitfinex,mexc"
      SYMBOLS: "BTCUSDT,ETHUSDT"
      # Aggregation configuration
      BUCKET_SIZE: 1.0
      HEATMAP_DEPTH: 50
      UPDATE_FREQUENCY: 0.5
      VOLUME_THRESHOLD: 0.01
      # Performance configuration
      DATA_BUFFER_SIZE: 10000
      BATCH_WRITE_SIZE: 1000
      MAX_MEMORY_USAGE: 2048
      GC_THRESHOLD: 0.8
      PROCESSING_TIMEOUT: 10
      MAX_QUEUE_SIZE: 50000
      # Monitoring configuration
      METRICS_COLLECTION_INTERVAL: 1.0
      MAX_CPU_USAGE: 80.0
      MAX_MEMORY_USAGE_PERCENT: 85.0
      MIN_MEMORY_AVAILABLE_GB: 1.0
      MAX_LATENCY_MS: 100.0
      MAX_ERROR_RATE_PERCENT: 5.0
      MIN_THROUGHPUT_OPS: 100.0
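    # Published ports: 8080 = REST API (API_PORT above), 8081 = WebSocket feed (WS_PORT above)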
    ports:
      - "8080:8080"
      - "8081:8081"
    volumes:
      - coby_logs:/app/logs
      - coby_data:/app/data
    networks:
      - coby-network
    depends_on:
      timescaledb:
        condition: service_healthy
      redis:
        condition: service_healthy
    restart: unless-stopped
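    # The health check shells out to Python and assumes the `requests` package is present in the image;
    # raise_for_status() makes it fail on non-2xx responses rather than only on connection errors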
    healthcheck:
      test: ["CMD", "python", "-c", "import requests; requests.get('http://localhost:8080/health', timeout=5).raise_for_status()"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 120s
    deploy:
      resources:
        limits:
          memory: 2G
          cpus: '2.0'
        reservations:
          memory: 1G
          cpus: '1.0'
    logging:
      driver: "json-file"
      options:
        max-size: "50m"
        max-file: "5"
  # Prometheus (Optional - for monitoring)
  prometheus:
    image: prom/prometheus:latest
    container_name: coby-prometheus
    ports:
      - "9090:9090"
    volumes:
      - prometheus_data:/prometheus
    configs:
      - source: prometheus_config
        target: /etc/prometheus/prometheus.yml
      - source: alert_rules
        target: /etc/prometheus/alert_rules.yml
    networks:
      - coby-network
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--storage.tsdb.retention.time=200h'
      - '--web.enable-lifecycle'
      - '--web.enable-admin-api'
    restart: unless-stopped
    profiles:
      - monitoring
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: '0.5'
        reservations:
          memory: 256M
          cpus: '0.2'
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
  # Grafana (Optional - for visualization)
  grafana:
    image: grafana/grafana:latest
    container_name: coby-grafana
    ports:
      - "3001:3000"
    environment:
      GF_SECURITY_ADMIN_PASSWORD: admin123
      GF_USERS_ALLOW_SIGN_UP: false
      GF_INSTALL_PLUGINS: grafana-clock-panel,grafana-simple-json-datasource
      GF_SECURITY_ALLOW_EMBEDDING: true
    volumes:
      - grafana_data:/var/lib/grafana
    networks:
      - coby-network
    depends_on:
      - prometheus
    restart: unless-stopped
    profiles:
      - monitoring
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: '0.3'
        reservations:
          memory: 128M
          cpus: '0.1'
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
  # Node Exporter for system metrics
  node-exporter:
    image: prom/node-exporter:latest
    container_name: coby-node-exporter
    ports:
      - "9100:9100"
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.rootfs=/rootfs'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
    networks:
      - coby-network
    restart: unless-stopped
    profiles:
      - monitoring
    deploy:
      resources:
        limits:
          memory: 128M
          cpus: '0.1'
    logging:
      driver: "json-file"
      options:
        max-size: "5m"
        max-file: "2"
# Network configuration
networks:
  coby-network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.20.0.0/16
    labels:
      - "com.coby.network=main"
# Volume configuration
volumes:
  timescale_data:
    driver: local
    labels:
      - "com.coby.volume=database"
  redis_data:
    driver: local
    labels:
      - "com.coby.volume=cache"
  prometheus_data:
    driver: local
    labels:
      - "com.coby.volume=monitoring"
  grafana_data:
    driver: local
    labels:
      - "com.coby.volume=monitoring"
  coby_logs:
    driver: local
    labels:
      - "com.coby.volume=logs"
  coby_data:
    driver: local
    labels:
      - "com.coby.volume=data"
# Configuration files
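# Inline `content:` for configs is a relatively recent Compose feature; if the target Docker/Portainer
# version rejects it, the same YAML can be shipped as files in the repo and referenced via `file:` instead.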
configs:
  prometheus_config:
    content: |
      global:
        scrape_interval: 15s
        evaluation_interval: 15s
      rule_files:
        - "/etc/prometheus/alert_rules.yml"
      scrape_configs:
        - job_name: 'coby-app'
          static_configs:
            - targets: ['coby-app:8080']
          metrics_path: '/metrics'
          scrape_interval: 10s
          scrape_timeout: 5s
        - job_name: 'prometheus'
          static_configs:
            - targets: ['localhost:9090']
        - job_name: 'node'
          static_configs:
            - targets: ['node-exporter:9100']
          scrape_interval: 30s
  alert_rules:
    content: |
      groups:
        - name: coby_alerts
          rules:
            - alert: HighCPUUsage
              expr: system_cpu_usage > 80
              for: 2m
              labels:
                severity: warning
              annotations:
                summary: "High CPU usage detected"
                description: "CPU usage is above 80% for more than 2 minutes"
            - alert: HighMemoryUsage
              expr: system_memory_usage > 85
              for: 2m
              labels:
                severity: warning
              annotations:
                summary: "High memory usage detected"
                description: "Memory usage is above 85% for more than 2 minutes"
            - alert: ServiceDown
              expr: up == 0
              for: 1m
              labels:
                severity: critical
              annotations:
                summary: "Service is down"
                description: "{{ $$labels.job }} service is down"
# Labels for the entire stack
x-labels: &default-labels
  com.coby.project: "multi-exchange-data-aggregation"
  com.coby.version: "1.0.0"
  com.coby.environment: "production"
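# Note: no service references the &default-labels anchor yet. YAML aliases can only point to anchors
# defined earlier in the file, so to reuse it this block would need to sit above `services:` and a
# service would then opt in with, for example:
#   labels: *default-labels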