websockets>=10.0
websocket-client>=1.0.0
plotly>=5.18.0
dash>=2.14.0
pandas>=2.0.0
numpy>=1.24.0
python-dotenv>=1.0.0
psutil>=5.9.0
tensorboard>=2.15.0
scikit-learn>=1.3.0
matplotlib>=3.7.0
seaborn>=0.12.0
ta>=0.11.0
ccxt>=4.0.0
dash-bootstrap-components>=2.0.0
asyncio-compat>=0.1.2
wandb>=0.16.0
pybit>=5.11.0
requests>=2.31.0
asyncpg>=0.29.0

# COBY System Dependencies
fastapi>=0.104.0
uvicorn>=0.24.0
pydantic>=2.5.0
python-multipart>=0.0.6
aiohttp>=3.9.0
redis>=5.0.0
scipy>=1.11.0
structlog>=23.2.0
click>=8.1.0
rich>=13.7.0

# DuckDB for data storage
duckdb>=0.9.0

# Flask for ANNOTATE system
Flask>=3.0.0
flask-cors>=4.0.0

# NOTE: PyTorch is intentionally not pinned here to avoid pulling NVIDIA CUDA deps on AMD machines.
# Install one of the following sets manually depending on your hardware:
#
# CPU-only (AMD/Intel, no NVIDIA CUDA):
#   pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
#
# NVIDIA GPU (CUDA):
#   Visit https://pytorch.org/get-started/locally/ for the correct command for your CUDA version.
#   Example (CUDA 12.1):
#   pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121
#
# AMD Strix Halo NPU Acceleration:
#   pip install onnxruntime-directml onnx transformers optimum
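#
# After the manual install, a quick sanity check may help confirm the right backend was picked up.
# The one-liners below are a sketch, not part of this project's tooling:
#
#   Verify PyTorch is importable and whether CUDA is visible (False is expected on CPU-only/AMD setups):
#   python -c "import torch; print(torch.__version__, torch.cuda.is_available())"
#
#   Verify ONNX Runtime sees the DirectML provider after installing onnxruntime-directml:
#   python -c "import onnxruntime as ort; print(ort.get_available_providers())"
#   ('DmlExecutionProvider' should appear in the printed list.)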