From ab315cfbbeea97704b06e00f12c3b882094155af Mon Sep 17 00:00:00 2001
From: Dobromir Popov
Date: Mon, 7 Aug 2023 20:48:13 +0000
Subject: [PATCH] add petals notes

---
 linux/petals.txt | 56 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+)
 create mode 100644 linux/petals.txt

diff --git a/linux/petals.txt b/linux/petals.txt
new file mode 100644
index 0000000..47ab993
--- /dev/null
+++ b/linux/petals.txt
@@ -0,0 +1,56 @@
+### get kernel log
+>journalctl -b
+
+### add acpi=off to your kernel boot parameters
+>sudo nano /etc/default/grub
+# modify the GRUB_CMDLINE_LINUX_DEFAULT line:
+GRUB_CMDLINE_LINUX_DEFAULT="quiet splash acpi=off"
+>sudo update-grub
+
+### ignore the laptop lid switch (keep running with the lid closed)
+>sudo nano /etc/systemd/logind.conf
+HandleLidSwitch=ignore
+HandleLidSwitchExternalPower=ignore
+>sudo systemctl restart systemd-logind
+
+### install miniconda
+>apt update
+>apt install -y wget
+>wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
+>bash Miniconda3-latest-Linux-x86_64.sh
+
+### create the petals environment
+> conda create -n petals python=3.10
+> conda activate petals
+> pip install git+https://github.com/bigscience-workshop/petals
+
+### docker-compose.yml for running a petals server
+version: '3'
+services:
+  dbg-nvidia-smi:
+    image: ghcr.io/fnndsc/dbg-nvidia-smi:lite
+    runtime: nvidia
+    command: sh -c 'nvidia-smi-wrapper /tmp --cat'
+  petals: # https://github.com/bigscience-workshop/petals
+    image: learningathome/petals:main
+    restart: always
+    # earlier attempts, kept for reference:
+    #command: python -m petals.cli.run_server --port 31330 enoch/llama-65b --adapters timdettmers/guanaco-65b -- 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
+    #command: python -m petals.cli.run_server --use_auth_token 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 --port 31330 --adapters timdettmers/guanaco-65b meta-llama/Llama-2-70b-hf
+    # candidate models: meta-llama/Llama-2-70b-chat-hf?, meta-llama/Llama-2-70b-hf, Enoch/llama-65b-hf, julep-ai/mpt-30b-orca-mini?
+    # huggingface-cli login [--token TOKEN] [--add-to-git-credential]
+    command: |
+      /bin/bash -c "
+      git config --global credential.helper store &&
+      huggingface-cli login --token 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 --add-to-git-credential &&
+      python -m petals.cli.run_server Enoch/llama-65b-hf --port 31330 --adapters timdettmers/guanaco-65b
+      # alternative model: meta-llama/Llama-2-70b-hf
+      "
+    stdin_open: true
+    tty: true
+    environment:
+      - HUGGINGFACE_TOKEN=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
+    ports:
+      - 31330:31330
+    volumes:
+      - /mnt/storage/docker_slow/petals/:/cache
\ No newline at end of file
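
### example: filter the boot log
# a minimal sketch using standard journalctl flags: -k keeps only kernel
# messages and -p err shows only error priority and above, which narrows
# things down when checking the effect of acpi=off.
>journalctl -b -k
>journalctl -b -p err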
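
### example: verify the boot parameter took effect
# after rebooting, the running kernel's command line should contain acpi=off.
>cat /proc/cmdline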
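
### example: sanity-check the petals install
# a minimal check, assuming the petals package exposes __version__ (it does in
# recent releases); run it inside the conda env created above.
>conda activate petals
>python -c "import petals; print(petals.__version__)"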
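
### example: run the compose stack
# standard docker compose v2 commands: start the petals service in the
# background, then follow its logs to watch the model blocks load.
>docker compose up -d petals
>docker compose logs -f petals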
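
### example: GPU visibility check
# runs the dbg-nvidia-smi service defined above once in the foreground; this
# assumes the nvidia container runtime is installed and configured in the
# docker daemon.
>docker compose up dbg-nvidia-smi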