#!/bin/bash
#
# Container Quick Reference - Keep this handy!
# AMD Strix Halo ROCm Container Commands
#
# This file is a cheat sheet of copy-paste snippets, NOT a script meant to be
# executed top-to-bottom: it mixes host-side and container-side commands, and
# the first-time-setup section rewrites /usr/bin/python symlinks and installs
# packages. The guard below stops accidental execution; set RUN_REFERENCE=1
# to bypass it deliberately.
if [[ "${RUN_REFERENCE:-0}" != "1" ]]; then
  echo "Reference sheet only - copy the commands you need (set RUN_REFERENCE=1 to run)." >&2
  exit 0
fi

# ==============================================
# CONTAINER: amd-strix-halo-llama-rocm
# ==============================================

# CHECK STATUS
docker ps | grep amd-strix-halo-llama-rocm

# ATTACH TO CONTAINER
docker exec -it amd-strix-halo-llama-rocm bash

# ==============================================
# INSIDE CONTAINER - FIRST TIME SETUP
# ==============================================

# Install Python (run once)
dnf install -y python3.12 python3-pip python3-devel git
ln -sf /usr/bin/python3.12 /usr/bin/python3
ln -sf /usr/bin/python3.12 /usr/bin/python

# Copy project (from host, run once)
# docker cp /mnt/shared/DEV/repos/d-popov.com/gogo2 amd-strix-halo-llama-rocm:/workspace/

# Install dependencies (run once)
cd /workspace/gogo2 || exit 1
pip3 install -r requirements.txt
pip3 install torch --index-url https://download.pytorch.org/whl/rocm6.2

# Verify GPU
python3 -c "import torch; print(f'GPU: {torch.cuda.is_available()}, Device: {torch.cuda.get_device_name(0) if torch.cuda.is_available() else \"N/A\"}')"

# ==============================================
# INSIDE CONTAINER - DAILY USE
# ==============================================

cd /workspace/gogo2 || exit 1

# Start ANNOTATE
python3 ANNOTATE/web/app.py --port 8051

# Kill stale processes
python3 kill_dashboard.py

# Train models
python3 training_runner.py --mode realtime --duration 4

# Check GPU memory
rocm-smi

# ==============================================
# FROM HOST - USEFUL COMMANDS
# ==============================================

# Run command in container without attaching
docker exec amd-strix-halo-llama-rocm python3 -c "import torch; print(torch.cuda.is_available())"

# Copy files to container
docker cp ./newfile.py amd-strix-halo-llama-rocm:/workspace/gogo2/

# View container logs
docker logs amd-strix-halo-llama-rocm -f

# Container info (mounted volumes)
docker inspect amd-strix-halo-llama-rocm | grep -A 10 '"Mounts"'

# ==============================================
# QUICK COMPARISON
# ==============================================

# HOST (RECOMMENDED):
# cd /mnt/shared/DEV/repos/d-popov.com/gogo2
# source venv/bin/activate
# python ANNOTATE/web/app.py

# CONTAINER (ISOLATION):
# docker exec -it amd-strix-halo-llama-rocm bash
# cd /workspace/gogo2
# python3 ANNOTATE/web/app.py --port 8051

# ==============================================
# PORTS
# ==============================================
# 8050 - Main Dashboard
# 8051 - ANNOTATE Dashboard
# 8052 - COB Dashboard
# 8080 - COBY API (container is using this)
# 8081 - COBY WebSocket

# NOTE: Container already uses 8080, so use different ports or host env