This commit is contained in:
Dobromir Popov
2024-12-02 20:22:10 +02:00
parent 501a7f81eb
commit 45f3ad8dd3
3 changed files with 138 additions and 0 deletions

2
azure/ddeployments.md Normal file
View File

@ -0,0 +1,2 @@
az resource update --resource-group default-switzerland --name ftp --namespace Microsoft.Web --resource-type basicPublishingCredentialsPolicies --parent sites/gateway-junior-test --set properties.allow=true

5
dev/prisma.md Normal file
View File

@ -0,0 +1,5 @@
migrate:
# generates the Prisma Client (JS classes) from the schema
npx prisma generate
# applies pending migrations to the database
npx prisma migrate deploy

View File

@ -0,0 +1,131 @@
version: '3.8'
services:
  # llava-controller:
  #   deploy:
  #     replicas: 0
  #   image: db-llava:latest
  #   ports:
  #     - 10001:10000
  #   command: python -m llava.serve.controller --host 0.0.0.0 --port 10000
  # gradio-web-server:
  #   deploy:
  #     replicas: 0
  #   image: db-llava:latest
  #   depends_on:
  #     - llava-controller
  #   ports:
  #     - 7861:7860
  #   command: python -m llava.serve.gradio_web_server --controller http://llava-controller:10000 --model-list-mode reload
  # model-worker:
  #   image: db-llava:latest
  #   depends_on:
  #     - llava-controller
  #   ports:
  #     - 40000:40000
  #   command: python -m llava.serve.model_worker --host 0.0.0.0 --controller http://llava-controller:10000 --port 40000 --worker http://localhost:40000 --model-path liuhaotian/llava-v1.6-vicuna-13b #liuhaotian/llava-v1.5-13b
  #   environment:
  #     NVIDIA_VISIBLE_DEVICES: all
  #   deploy:
  #     replicas: 0
  #     resources:
  #       reservations:
  #         devices:
  #           - driver: nvidia
  #             count: 1
  #             capabilities: [gpu]
  # h2o-llmstudio:
  #   image: gcr.io/vorvan/h2oai/h2o-llmstudio:nightly
  #   runtime: nvidia
  #   shm_size: 64g
  #   init: true
  #   container_name: h2o-llmstudio
  #   # user: "${UID}:${GID}"
  #   ports:
  #     - "10101:10101"
  #   volumes:
  #     - /mnt/storage/docker_slow/lm-studio/data:/workspace/data
  #     - /mnt/storage/docker_slow/lm-studio/output:/workspace/output
  #     - /mnt/storage/docker_slow/lm-studio/.cache:/home/llmstudio/.cache
  #   environment:
  #     - NVIDIA_VISIBLE_DEVICES=all
  #     - NVIDIA_DRIVER_CAPABILITIES=compute,utility
  # # # #
  # https://docs.n8n.io/hosting/installation/docker/#prerequisites
  # docker volume create n8n_data
  # docker run -it --rm --name n8n -p 5678:5678 -v n8n_data:/home/node/.n8n docker.n8n.io/n8nio/n8n
  # # #
  # n8n workflow-automation server, backed by the 'db' Postgres service below.
  n8n:
    image: betterweb/n8n-docker
    ports:
      # host 5679 -> container 5678 (quoted to avoid YAML sexagesimal parsing)
      - "5679:5678"
    environment:
      # NOTE(review): credentials and the encryption key are committed in plain
      # text — move them to an env file / secret store; kept inline here only
      # to preserve current behavior.
      - N8N_BASIC_AUTH_ACTIVE=true
      - N8N_BASIC_AUTH_USER=admin
      - N8N_BASIC_AUTH_PASSWORD=9UXnZ49GR67r6VH
      - N8N_HOST=${N8N_HOST:-localhost}
      - N8N_PORT=5678
      - N8N_PROTOCOL=http
      # '$$' escapes the literal '$' — a single '$' makes Compose attempt
      # variable interpolation on '$74V' instead of passing the key verbatim.
      - N8N_ENCRYPTION_KEY=whtqTVF5C8367$$74V
      - DB_TYPE=postgresdb
      - DB_POSTGRESDB_HOST=db
      - DB_POSTGRESDB_DATABASE=n8n
      - DB_POSTGRESDB_USER=n8n
      - DB_POSTGRESDB_PASSWORD=n8n
    volumes:
      # named volume keeps n8n state across container recreation
      - n8n_data:/home/node/.n8n
    depends_on:
      - db
    restart: unless-stopped
  # Postgres backing store for n8n.
  db:
    image: postgres:13
    environment:
      - POSTGRES_DB=n8n
      - POSTGRES_USER=n8n
      - POSTGRES_PASSWORD=n8n
    volumes:
      - db_data:/var/lib/postgresql/data
    restart: unless-stopped
volumes:
  n8n_data:
  db_data:
# # #
# neo4j:
#   image: neo4j
#   ports:
#     - "7474:7474"
#     - "7687:7687"
#   environment:
#     NEO4J_AUTH: neo4j/lucas-bicycle-powder-stretch-ford-9492
#     NEO4JLABS_PLUGINS: '["apoc"]'
#     NEO4J_apoc_export_file_enabled: 'true'
#     NEO4J_apoc_import_file_enabled: 'true'
#     NEO4J_apoc_import_file_use__neo4j__config: 'true'
#     NEO4J_dbms_security_procedures_unrestricted: apoc.*
#   volumes:
#     - "/mnt/storage/docker_slow/neo4j/data:/data"
# lmdeploy:
#   runtime: nvidia
#   #environment:
#   #  - HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}
#   volumes:
#     - /mnt/storage/docker_slow/.cache/huggingface:/root/.cache/huggingface
#   ports:
#     - "23333:23333"
#   ipc: host
#   deploy:
#     resources:
#       reservations:
#         devices:
#           - capabilities: [gpu]
#   command: lmdeploy serve api_server internlm/internlm2_5-7b-chat
#   image: openmmlab/lmdeploy:latest