petals notes

### get kernel log:
>journalctl -b
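
One way to narrow this down to the ACPI messages (relevant to the acpi=off workaround below):
>journalctl -b | grep -i acpi
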
### add acpi=off to your kernel boot parameters
>sudo nano /etc/default/grub
# Modify the GRUB_CMDLINE_LINUX_DEFAULT line
+GRUB_CMDLINE_LINUX_DEFAULT="quiet splash acpi=off"
>sudo update-grub
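
After rebooting, the running kernel's command line can be read back to confirm the flag took effect:
>cat /proc/cmdline
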
### ignore lid
>sudo nano /etc/systemd/logind.conf
+HandleLidSwitch=ignore
+HandleLidSwitchExternalPower=ignore
>sudo systemctl restart systemd-logind
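
To double-check that both overrides are in place:
>grep -i lid /etc/systemd/logind.conf
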
### install miniconda
>sudo apt update
>sudo apt install wget
>wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
>bash Miniconda3-latest-Linux-x86_64.sh
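
Assuming the installer's default prefix (~/miniconda3; adjust if a different path was chosen), conda can be activated for the current shell and hooked into future logins:
>source ~/miniconda3/bin/activate
>conda init bash
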
>conda create -n petals python=3.10
>conda activate petals
>pip install git+https://github.com/bigscience-workshop/petals
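
A quick smoke test of the install, following the client usage pattern from the Petals README; the model name mirrors the one served by the compose file below, and the prompt is just a placeholder:

from transformers import AutoTokenizer
from petals import AutoDistributedModelForCausalLM

model_name = "Enoch/llama-65b-hf"  # same model the server below serves
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoDistributedModelForCausalLM.from_pretrained(model_name)  # connects to the public swarm

inputs = tokenizer("A cat sat on", return_tensors="pt")["input_ids"]  # placeholder prompt
outputs = model.generate(inputs, max_new_tokens=5)
print(tokenizer.decode(outputs[0]))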

### docker compose: petals server
version: '3'
services:
  dbg-nvidia-smi:
    image: ghcr.io/fnndsc/dbg-nvidia-smi:lite
    runtime: nvidia
    command: sh -c 'nvidia-smi-wrapper /tmp --cat'
  petals: # https://github.com/bigscience-workshop/petals
    image: learningathome/petals:main
    restart: always
    #command: python -m petals.cli.run_server --port 31330 enoch/llama-65b --adapters timdettmers/guanaco-65b -- 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
    # candidate models: meta-llama/Llama-2-70b-chat-hf? meta-llama/Llama-2-70b-hf Enoch/llama-65b-hf julep-ai/mpt-30b-orca-mini?
    #command: python -m petals.cli.run_server --use_auth_token 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 --port 31330 --adapters timdettmers/guanaco-65b meta-llama/Llama-2-70b-hf
    # huggingface-cli login [--token TOKEN] [--add-to-git-credential]
    #python -m petals.cli.run_server --port 31330 --adapters timdettmers/guanaco-65b --use_auth_token meta-llama/Llama-2-70b-hf
    command: |
      /bin/bash -c "
      git config --global credential.helper store &&
      huggingface-cli login --token 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 --add-to-git-credential &&
      python -m petals.cli.run_server --port 31330 --adapters timdettmers/guanaco-65b Enoch/llama-65b-hf
      # Enoch/llama-65b-hf
      # meta-llama/Llama-2-70b-hf
      "
    stdin_open: true
    tty: true
    environment:
      - HUGGINGFACE_TOKEN=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
    ports:
      - 31330:31330
    volumes:
      - /mnt/storage/docker_slow/petals/:/cache
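
Assuming the YAML above is saved as docker-compose.yml, the server can be brought up and watched with standard compose commands:
>docker compose up -d petals
>docker compose logs -f petals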