This commit is contained in:
Dobromir Popov
2024-08-21 10:40:40 +03:00
21 changed files with 850 additions and 33 deletions

3
_ideas/project ideas.md Normal file
View File

@ -0,0 +1,3 @@
AI:
petals private farm aider - self improving assistant jw documents vector database embedding

31
dev/sample mermaid.mmd Normal file
View File

@ -0,0 +1,31 @@
``` mermaid
graph TD
A[Common Configuration] --> B[master]
A[Common Configuration] --> C[stable]
A[Common Configuration] --> D[deployment]
A[Common Configuration] --> E[GAT-5073]
A[Common Configuration] --> F[GAT-5098]
B --> B1[Release_ProfessionalTestServer]
B --> B2[Release_TestServer]
B --> B3[Release_DemoServer]
B1 --> G[TESTPRO on master]
B2 --> H[TEST on master]
B3 --> I[DEMO on master]
C --> C1[Release_DemoServer]
C --> C2[Release_ProfessionalTestServer]
C1 --> J[DEMO on stable]
C2 --> K[TESTPRO on stable]
D --> D1[Release_ProductionServer]
D --> D2[Release_ProfessionalServer]
D1 --> L[PROD Staging on deployment]
D2 --> M[PRO on deployment]
E --> E1[Staging_TestServer]
E1 --> N[TEST-Staging on GAT-5073]
F --> F1[Staging_DemoServer]
F1 --> O[DEMO-Staging on GAT-5098]
```

View File

@ -0,0 +1,92 @@
#!/bin/bash
# Generate a Mermaid gitGraph diagram of the local branches of the git
# repository in the current directory and save it to branches_diagram.mmd.
#
# Best-effort by design: git errors are tolerated so a (possibly partial)
# diagram is still produced when run outside a repository.

output_file="branches_diagram.mmd"
main_branch="master"

# Replace every character Mermaid cannot use in an identifier with '_'.
escape_for_mermaid() {
  printf '%s\n' "$1" | sed 's/[^a-zA-Z0-9]/_/g'
}

# Branches for which a "branch" statement has already been emitted.
declared_branches=("$main_branch")

# Return 0 if branch $1 was already declared, 1 otherwise.
is_branch_declared() {
  local candidate=$1 known
  for known in "${declared_branches[@]}"; do
    if [[ "$known" == "$candidate" ]]; then
      return 0
    fi
  done
  return 1
}

# Emit a "branch" statement for $1 unless it was declared before.
declare_branch() {
  local branch=$1
  if ! is_branch_declared "$branch"; then
    echo "    branch $(escape_for_mermaid "$branch")"
    declared_branches+=("$branch")
  fi
}

# List local branches merged into $1, excluding $1 itself and the main
# branch.  Uses for-each-ref instead of porcelain "git branch" so the
# output carries no "* " marker or indentation that needs stripping.
list_merged_branches() {
  local branch=$1
  git for-each-ref --format='%(refname:short)' --merged "$branch" refs/heads/ 2>/dev/null \
    | grep -F -x -v -e "$branch" -e "$main_branch" || true
}

# Recursively emit checkout/merge statements for branches merged into $1.
# Already-declared branches are skipped, which prevents infinite recursion
# when two branches have been merged into each other.
process_merged_branches() {
  local branch=$1 merged_branch
  while IFS= read -r merged_branch; do
    [[ -n "$merged_branch" ]] || continue
    if is_branch_declared "$merged_branch"; then
      continue
    fi
    declare_branch "$merged_branch"
    echo "    checkout $(escape_for_mermaid "$branch")"
    echo "    merge $(escape_for_mermaid "$merged_branch")"
    process_merged_branches "$merged_branch"
  done < <(list_merged_branches "$branch")
}

{
  echo "%%{init: { 'theme': 'base' } }%%"
  echo "gitGraph"
  echo "    commit id: \"Initial commit\""
  echo "    branch $main_branch"
  echo "    checkout $main_branch"

  # One node per local branch, oldest commit date first.
  while IFS= read -r branch; do
    [[ -n "$branch" ]] || continue
    declare_branch "$branch"
    echo "    checkout $(escape_for_mermaid "$branch")"
    echo "    commit id: \"$(git log -1 --pretty=format:%s "$branch" 2>/dev/null)\""
    process_merged_branches "$branch"
  done < <(git for-each-ref --sort=committerdate --format='%(refname:short)' refs/heads/ 2>/dev/null)

  # Branches not yet merged into the main branch.  NOTE: the original
  # appended free text after the quoted commit id, which is not valid
  # gitGraph syntax; only the branch name is emitted now.
  not_merged=$(git for-each-ref --format='%(refname:short)' --no-merged "$main_branch" refs/heads/ 2>/dev/null)
  if [[ -n "$not_merged" ]]; then
    echo "    commit id: \"Branches not yet merged into $main_branch:\""
    while IFS= read -r branch; do
      [[ -n "$branch" ]] || continue
      echo "    commit id: \"$branch\""
    done <<<"$not_merged"
  fi
} > "$output_file"

echo "Mermaid diagram saved to $output_file"

View File

@ -0,0 +1,84 @@
<!-- Create a New Branch for Review: -->
git checkout -b review-branch
<!-- find latest hash's parent -->
oldest_commit_hash=$(git log --grep="GAT-4861" --reverse --format="%H" | head -1)
git checkout -b review-branch $oldest_commit_hash^
<!-- PS -->
$oldestCommitHash = git log --grep="GAT-4861" --reverse --format="%H" | Select-Object -First 1
git checkout -b review-branch $oldestCommitHash^
<!-- LINUX Cherry-Pick Commits Based on Log Search -->
git log --grep="GAT-4861" --format="%H" | xargs -L1 git cherry-pick
<!-- //windows -->
git log --grep="GAT-4861" --format="%H" | ForEach-Object { git cherry-pick $_ }
<!-- forced -->
git log --grep="GAT-4861" --format="%H" | ForEach-Object {
git cherry-pick $_ --strategy-option theirs
}
git log --reverse --grep="GAT-4861" --format="%H" | ForEach-Object {
git cherry-pick $_ --strategy-option theirs
}
<!-- forced new oldest first -->
git log master --reverse --grep="GAT-4861" --format="%H" | ForEach-Object {
git cherry-pick $_ --strategy-option theirs --no-commit
if ($LASTEXITCODE -ne 0) {
Write-Host "Conflict encountered. Skipping commit $_"
git reset --merge
}
}
<!-- Cleanup -->
git checkout master # Replace 'main' with your default branch name if different
git branch -D review-branch
<!-- did not work as we need to store commit hashes before we checkout oldest hash -->
git checkout master # Replace 'main' with your default branch name if different
git branch -D review-branch
$oldestCommitHash = git log --grep="GAT-4861" --reverse --format="%H" | Select-Object -First 1
git checkout -b review-branch $oldestCommitHash^
git log master --reverse --grep="GAT-4861" --format="%H" | ForEach-Object {
git cherry-pick $_ --strategy-option theirs --no-commit
if ($LASTEXITCODE -ne 0) {
Write-Host "Conflict encountered. Skipping commit $_"
git reset --merge
}
}
<!-- try using patch
Create a Diff Patch for the Commits
This command finds the first and last commits with "GAT-4861" and generates a diff patch between these points.
tail -n 1 and head -n 1 are used to get the earliest and latest commit hashes respectively.
The ^ after the first commit hash indicates the parent of that commit.
The diff is saved in a file named changes.patch. -->
git diff $(git log --grep="GAT-4861" --format="%H" | tail -n 1)^ $(git log --grep="GAT-4861" --format="%H" | head -n 1) > changes.patch
<!-- WIN/PS -->
$firstCommit = git log --grep="GAT-4861" --reverse --format="%H" | Select-Object -First 1
$lastCommit = git log --grep="GAT-4861" --format="%H" | Select-Object -First 1
git diff $firstCommit^ $lastCommit > changes.patch
<!-- Apply the Patch to a New Branch -->
# Checkout to the parent of the earliest "GAT-4861" commit
$earliestCommitHash = git log --grep="GAT-4861" --reverse --format="%H" | Select-Object -First 1
git checkout -b review-branch $earliestCommitHash^
# Apply the patch
git apply changes.patch
# search for 'App_Code' in commit msgs
git log --all --grep="App_Code"
# push to new remote branch (if not existing)
git push -u origin main

BIN
linux/.setup aider.md.swp Normal file

Binary file not shown.

View File

@ -15,3 +15,10 @@ docker save -o <path/to/save/image.tar> 2fauth
docker load -i <path/to/save/image.tar> docker load -i <path/to/save/image.tar>
docker run -d --name <new_container_name> <backup_image_name> docker run -d --name <new_container_name> <backup_image_name>
# cleanup
Remove all unused images/volumes:
docker image prune -a
docker volume prune
# containers, networks, images, and volumes
docker system prune -a --volumes

View File

@ -28,3 +28,7 @@ sudo docker build -t your-image-name .
# attach to container # attach to container
docker exec -it potainer /bin/sh docker exec -it potainer /bin/sh
# on windows - setup port forwarding
#netsh interface portproxy add v4tov4 listenaddress=127.0.0.1 listenport=9000 connectaddress=172.29.111.255 connectport=9000
netsh interface portproxy add v4tov4 listenport=9000 listenaddress=0.0.0.0 connectport=9000 connectaddress=172.29.104.23
netsh interface portproxy delete v4tov4 listenaddress=127.0.0.1 listenport=9000

View File

@ -0,0 +1,20 @@
https://www.mono-project.com/download/stable/#download-lin-ubuntu
sudo apt install ca-certificates gnupg
sudo gpg --homedir /tmp --no-default-keyring --keyring /usr/share/keyrings/mono-official-archive-keyring.gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
echo "deb [signed-by=/usr/share/keyrings/mono-official-archive-keyring.gpg] https://download.mono-project.com/repo/ubuntu stable-focal main" | sudo tee /etc/apt/sources.list.d/mono-official-stable.list
sudo apt update
sudo apt install mono-devel
dotnet restore /workspace/repos/bitbucket.org/gatewayserver/GatewayServer.sln
sudo apt-get update
sudo apt-get install nuget
nuget restore /workspace/repos/bitbucket.org/gatewayserver/GatewayServer.sln

View File

@ -1,27 +0,0 @@
# disk info
lsblk
sudo fdisk -l
sudo parted -l
gnome-disks
sudo umount -f /mnt
umount --lazy /mnt/data
## ntfs resize:
sudo ntfsfix /dev/sda2
# Shrink the NTFS Partition:
sudo ntfsresize --size 100G /dev/sda2 (-f)
mkdir
# folder size:
du -hs
# find big files
find / -type f -size +100M

View File

@ -11,6 +11,43 @@ fdisk p # existing partitions
# Press n to create a new partition. # Press n to create a new partition.
# press w to write the changes to the disk. # press w to write the changes to the disk.
sudo mkfs.ext4 /dev/sdXN sudo mkfs.ext4 /dev/sdXN
# mount # mount
docker run -d -p 8000:8000 -p 9000:9000 --name=portainer --restart=always --pull=always -v /var/run/docker.sock:/var/run/docker.sock -v /mnt/apps/docker_volumes/portainer_data:/data portainer/portainer-ce docker run -d -p 8000:8000 -p 9000:9000 --name=portainer --restart=always --pull=always -v /var/run/docker.sock:/var/run/docker.sock -v /mnt/apps/docker_volumes/portainer_data:/data portainer/portainer-ce
# ---------------------
# disk info
lsblk
sudo fdisk -l
sudo parted -l
gnome-disks
sudo umount -f /mnt
umount --lazy /mnt/data
## ntfs resize:
sudo ntfsfix /dev/sda2
# Shrink the NTFS Partition:
sudo ntfsresize --size 100G /dev/sda2 (-f)
mkdir
# folder size:
du -hs
# find big files
find / -type f -size +100M
# find big files - NCurses Disk Utility - https://dev.yorhel.nl/ncdu
sudo apt-get install ncdu
ncdu
Delete the File: Once you have highlighted the file you want to delete, press the d key. ncdu will prompt you to confirm the deletion.

View File

@ -2,6 +2,7 @@ in conda env:
pip install aider-chat pip install aider-chat
# latest # latest
python -m pip install git+https://github.com/paul-gauthier/aider.git python -m pip install git+https://github.com/paul-gauthier/aider.git
python -m pip install git+https://github.com/d-popov/aider.git
# install ctags # install ctags
# > sudo apt update && apt install universal-ctags # > sudo apt update && apt install universal-ctags
@ -15,11 +16,51 @@ brew install universal-ctags
/home/linuxbrew/.linuxbrew/bin/ctags /home/linuxbrew/.linuxbrew/bin/ctags
export PATH=$PATH:/home/linuxbrew/.linuxbrew/bin/ctags export PATH=$PATH:/home/linuxbrew/.linuxbrew/bin/ctags
# KEYS
export GROQ_API_KEY=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
export OPENAI_API_BASE=http://ollama.d-popov.com
export AIDER_4=false
export AIDER_35TURBO=false
# RUN # RUN
> aider > aider
# personal # personal
export OPENAI_API_KEY=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 export OPENAI_API_KEY=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
aider -3 --no-auto-commits aider -3 --no-auto-commits
# dev-bro GPT4 # !!!!! dev-bro GPT4
export OPENAI_API_KEY=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 export OPENAI_API_KEY=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
usage: aider [-h] [--file FILE] [--openai-api-key OPENAI_API_KEY] [--anthropic-api-key ANTHROPIC_API_KEY] [--model MODEL] [--opus] [--sonnet] [--4] [--4o] [--4-turbo] [--35turbo] [--models MODEL] [--openai-api-base OPENAI_API_BASE]
[--openai-api-type OPENAI_API_TYPE] [--openai-api-version OPENAI_API_VERSION] [--openai-api-deployment-id OPENAI_API_DEPLOYMENT_ID] [--openai-organization-id OPENAI_ORGANIZATION_ID]
[--model-settings-file MODEL_SETTINGS_FILE] [--model-metadata-file MODEL_METADATA_FILE] [--verify-ssl | --no-verify-ssl] [--edit-format EDIT_FORMAT] [--weak-model WEAK_MODEL]
[--show-model-warnings | --no-show-model-warnings] [--map-tokens MAP_TOKENS] [--max-chat-history-tokens MAX_CHAT_HISTORY_TOKENS] [--env-file ENV_FILE] [--input-history-file INPUT_HISTORY_FILE]
[--chat-history-file CHAT_HISTORY_FILE] [--restore-chat-history | --no-restore-chat-history] [--llm-history-file LLM_HISTORY_FILE] [--dark-mode] [--light-mode] [--pretty | --no-pretty] [--stream | --no-stream]
[--user-input-color USER_INPUT_COLOR] [--tool-output-color TOOL_OUTPUT_COLOR] [--tool-error-color TOOL_ERROR_COLOR] [--assistant-output-color ASSISTANT_OUTPUT_COLOR] [--code-theme CODE_THEME] [--show-diffs]
[--git | --no-git] [--gitignore | --no-gitignore] [--aiderignore AIDERIGNORE] [--auto-commits | --no-auto-commits] [--dirty-commits | --no-dirty-commits] [--attribute-author | --no-attribute-author]
[--attribute-committer | --no-attribute-committer] [--attribute-commit-message | --no-attribute-commit-message] [--dry-run | --no-dry-run] [--commit] [--lint] [--lint-cmd LINT_CMD] [--auto-lint | --no-auto-lint]
[--test-cmd TEST_CMD] [--auto-test | --no-auto-test] [--test] [--vim] [--voice-language VOICE_LANGUAGE] [--version] [--just-check-update] [--check-update | --no-check-update] [--apply FILE] [--yes] [-v] [--show-repo-map]
[--show-prompts] [--exit] [--message COMMAND] [--message-file MESSAGE_FILE] [--encoding ENCODING] [-c CONFIG_FILE] [--gui]
[FILE ...]
### OLLAMA || GROQ
# #################################################### #
aider --models groq/
export GROQ_API_KEY=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
aider --model groq/llama3-70b-8192 --no-auto-commits --show-repo-map
# OLLAMA?
aider --openai-api-base https://ollama.d-popov.com --models openai/
# models:
https://aider.chat/docs/leaderboards/
aider --model openrouter/meta-llama/llama-3.1-405b-instruct
# https://openrouter.ai/settings/keys
OPENROUTER_API_KEY=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

View File

@ -0,0 +1,42 @@
# https://docs.waydro.id/usage/install-on-desktops
apt update
apt install curl ca-certificates -y
curl https://repo.waydro.id | bash
apt install waydroid -y
ensure we have in yml:
devices:
- "/dev/binder:/dev/binder"
- "/dev/ashmem:/dev/ashmem"
privileged: true
#
waydroid init
systemctl start waydroid-container
systemctl enable waydroid-container
systemctl status waydroid-container
<!-- ## ERROR: [Errno 2] No such file or directory: 'modprobe'
apt update
apt install kmod
## modprobe: FATAL: Module binder_linux not found in directory /lib/modules/5.15.0-91-generic
apt install linux-modules-extra-$(uname -r) -->
#
# export WAYLAND_DISPLAY=wayland-0
# export XDG_RUNTIME_DIR=/run/user/$(id -u)
useradd -m -s /bin/bash user
passwd user
usermod -aG sudo user
su - user
export DISPLAY=:1
export XDG_RUNTIME_DIR=/run/user/$(id -u)
# Start Waydroid if it is not already running
$waydroid session start

View File

@ -1,6 +1,30 @@
#wget https://links.fortinet.com/forticlient/deb/vpnagent #wget https://links.fortinet.com/forticlient/deb/vpnagent
#wget https://filestore.fortinet.com/forticlient/forticlient_vpn_7.0.7.0246_amd64.deb #wget https://filestore.fortinet.com/forticlient/forticlient_vpn_7.0.7.0246_amd64.deb
apt update apt update
sudo su apt install -y openfortivpn ppp
apt install openfortivpn openfortivpn vpn.gateway.one:10443 -u 'dobromir.popov@gateway.one' --trusted-cert bd26362cc802a27102fcdbf7e7e9328f3dede58aa44c125ede4aadb9e39da8c8
openfortivpn vpn.gateway.one:10443 -u 'Dobromir Popov' --trusted-cert bd26362cc802a27102fcdbf7e7e9328f3dede58aa44c125ede4aadb9e39da8c8 U6n4A7^8^c7dX&p6s
or
nano /etc/openfortivpn/config
>
host = vpn.gateway.one
port = 10443
trusted-cert = bd26362cc802a27102fcdbf7e7e9328f3dede58aa44c125ede4aadb9e39da8c8
pppd-use-peerdns = 1
set-routes = 1
username = dobromir.popov@gateway.one
password = U6n4A7^8^c7dX&p6s
#ERROR: pppd: The kernel does not support PPP, for example, the PPP kernel driver is not included or cannot be loaded.
docker run --cap-add=NET_ADMIN your_image
privileged: true
cap_add:
- NET_ADMIN
# useradd popov
# passwd popov
# usermod -aG sudo popov
code --no-sandbox --user-data-dir /home/popov/.config/Code

0
linux/sysyem maintain.md Normal file
View File

View File

@ -0,0 +1,65 @@
# NOT USED. Check /hassio folder
panel_iframe:
portainer:
title: "Portainer"
url: "https://docker.d-popov.com//#!/1/docker/containers"
icon: mdi:docker
require_admin: true
# Loads default set of integrations. Do not remove.
default_config:
# Load frontend themes from the themes folder
frontend:
themes: !include_dir_merge_named themes
# Text to speech
tts:
- platform: google_translate
http:
use_x_forwarded_for: true
trusted_proxies:
- 192.168.0.10 # Add the IP address of the proxy server
#almond:
# type: local
# host: http://192.168.0.10:3001
automation: !include automations.yaml
script: !include scripts.yaml
scene: !include scenes.yaml
homeassistant:
external_url: "https://home.d-popov.com"
packages: !include_dir_named integrations/
sensor:
- platform: command_line
name: CPU Temp
command: "cat /sys/class/thermal/thermal_zone0/temp"
unit_of_measurement: "C"
value_template: "{{ value | multiply(0.001) | round(1) }}"
- platform: command_line
name: GPU Temp
command: "/opt/vc/bin/vcgencmd measure_temp"
unit_of_measurement: "C"
value_template: '{{ value | regex_findall_index("=([0-9]*\.[0-9]*)", 0) }}'
- platform: command_line
name: CPU Clock
command: "/opt/vc/bin/vcgencmd measure_clock arm"
unit_of_measurement: "MHz"
value_template: '{{ value | regex_findall_index("=([0-9]*)", 0) | multiply(0.000001) | round(0) }}'
mqtt:
sensor:
state_topic: 'Esp/bedroom/temperature'
name: 'Bedroom Temperature'
unit_of_measurement: 'C'
logger:
logs:
custom_components.extended_openai_conversation: info

View File

@ -0,0 +1,115 @@
version: '3.8'
services:
homeassistant:
container_name: homeassistant
image: homeassistant/home-assistant:stable
volumes:
- /mnt/apps/docker_volumes/homeassistant/config:/config
- /etc/localtime:/etc/localtime:ro
- /run/dbus:/run/dbus
- /dev/bus/usb:/dev/bus/usb
- /dev/hci0:/dev/hci0
privileged: true # Required for full access to host devices
# cap_add:
# - NET_ADMIN
# - SYS_ADMIN
# - SYS_RAWIO
# restart: unless-stopped
network_mode: host
#devices:
# - /dev/ttyACM0:/dev/ttyACM0
# - /dev/hci0:/dev/hci0
dind:
deploy:
replicas: 0
image: docker:dind
container_name: docker_in_docker
privileged: true
environment:
- DOCKER_TLS_CERTDIR=/certs
volumes:
- docker_in_docker_vol:/var/lib/docker
#- /mnt/apps/docker_volumes/dind:/var/lib/docker
- /mnt/apps/docker_volumes/haos:/mnt/haos
- /mnt/apps/DEV/docker-compose/home-infrastructure:/mnt/setup
- /mnt/apps/docker_volumes/dind/certs:/certs
- /mnt/apps/docker_volumes/dind/etc-docker:/etc/docker
#- /sys/fs/cgroup:/sys/fs/cgroup:ro
ports:
- "2376:2376"
- "8122:8123" # hassio HTTP
restart: unless-stopped
tty: true
stdin_open: true
homeassistant_old:
deploy:
replicas: 0
container_name: homeassistant
image: homeassistant/home-assistant:latest
volumes:
- /mnt/apps/docker_volumes/homeassistant/:/config
environment:
- TZ=YOUR_TIMEZONE
restart: unless-stopped
#network_mode: host
ports:
- "8123:8123"
esphome:
container_name: esphome
image: esphome/esphome:latest
volumes:
- /mnt/apps/docker_volumes/esphome/config:/config # Maps the configuration directory to a local folder
ports:
- "6052:6052" # Optional: for API communication
- "6123:6123" # Optional: for OTA updates
restart: unless-stopped
network_mode: host # Recommended for discovery to work properly
wyoming-piper:
image: rhasspy/wyoming-piper
command: --voice en_US-lessac-medium
volumes:
- /mnt/apps/docker_volumes/ha/piper:/data
ports:
- "10200:10200"
stdin_open: true
tty: true
wyoming-whisper:
image: rhasspy/wyoming-whisper # tiny-int8 base 1GB, small 2GB RAM {tiny,tiny-int8,base,base-int8,small,small-int8,medium,medium-int8}
command: --model small --language en
volumes:
- /path/to/local/data:/data
ports:
- "10300:10300"
stdin_open: true
tty: true
wyoming-whisper-bg:
image: rhasspy/wyoming-whisper # tiny-int8
command: --model small --language bg
volumes:
- /path/to/local/data:/data
ports:
- "10301:10300"
stdin_open: true
tty: true
openwakeword:
image: dalehumby/openwakeword-rhasspy
restart: always
ports:
- "12202:12202/udp"
volumes:
- /mnt/apps/docker_volumes/ha/openwakeword-rhasspy/config:/config
# openwakeword:
# container_name: openwakeword
# image: homeassistant/amd64-addon-openwakeword
# restart: unless-stopped
# volumes:
# - /mnt/apps/docker_volumes/ha/openwakeword/models:/data/models
# network_mode: host
# environment:
# - TZ=Your/Timezone
# - MODELS_DIR=/data/models
# - CUSTOM_MODEL_DIR=/data/models
# - THRESHOLD=0.5 # Example threshold value
# - TRIGGER_LEVEL=3 # Example trigger level
volumes:
docker_in_docker_vol:

View File

@ -0,0 +1,23 @@
use 'homeassistant/home-assistant:stable' container
# https://community.home-assistant.io/t/addons-for-docker-installation/436190/42
# resource: https://community.home-assistant.io/t/problems-installing-ha-supervisor-on-docker/526234/5
enter container shell:
docker exec -it homeassistant bash
wget -O - https://get.hacs.xyz | bash -
custom agent AI
https://community.home-assistant.io/t/custom-integration-ollama-conversation-local-ai-agent/636103/7
custom functions:
https://community.home-assistant.io/t/custom-component-extended-openai-conversation-lets-control-entities-via-chatgpt/636500

76
python/appveyor.mmd Normal file
View File

@ -0,0 +1,76 @@
graph TD
A[Common Configuration] --> B[master]
A[Common Configuration] --> C[stable]
A[Common Configuration] --> D[deployment]
A[Common Configuration] --> E[GAT-5073]
A[Common Configuration] --> F[GAT-5098]
B --> B1[Release_ProfessionalTestServer]
B --> B2[Release_TestServer]
B --> B3[Release_DemoServer]
B1 --> G[TESTPRO on master]
B2 --> H[TEST on master]
B3 --> I[DEMO on master]
C --> C1[Release_DemoServer]
C --> C2[Release_ProfessionalTestServer]
C1 --> J[DEMO on stable]
C2 --> K[TESTPRO on stable]
D --> D1[Release_ProductionServer]
D --> D2[Release_ProfessionalServer]
D1 --> L[PROD Staging on deployment]
D2 --> M[PRO on deployment]
E --> E1[Staging_TestServer]
E1 --> N[TEST-Staging on GAT-5073]
F --> F1[Staging_DemoServer]
F1 --> O[DEMO-Staging on GAT-5098]
style A fill:#f9f,stroke:#333,stroke-width:2px
style C1 fill:#bbf,stroke:#333,stroke-width:2px,stroke-dasharray: 5,5
style C2 fill:#bbf,stroke:#333,stroke-width:2px,stroke-dasharray: 5,5
%% graph TD
%% A[Branches] --> B[master]
%% A --> C[stable]
%% A --> D[deployment]
%% A --> E[GAT-5073]
%% A --> F[GAT-5098]
%% B --> G[Release_ProfessionalTestServer]
%% B --> H[Release_DemoServer]
%% C --> I[Release_DemoServer]
%% C --> J[Release_ProfessionalTestServer]
%% D --> K[Release_ProductionServer]
%% D --> L[Release_ProfessionalServer]
%% E --> M[Staging_TestServer]
%% F --> N[Staging_DemoServer]
%% G --> O[TESTPRO - production]
%% H --> P[DEMO - production]
%% I --> Q[DEMO - production]
%% J --> R[TESTPRO - production]
%% K --> S[PROD - staging]
%% L --> T[PRO - production]
%% M --> U[TEST - staging]
%% N --> V[DEMO - staging]
%% style A fill:#f9f,stroke:#333,stroke-width:2px
%% style B fill:#bbf,stroke:#333,stroke-width:2px
%% style C fill:#bbf,stroke:#333,stroke-width:2px
%% style D fill:#bbf,stroke:#333,stroke-width:2px
%% style E fill:#bbf,stroke:#333,stroke-width:2px
%% style F fill:#bbf,stroke:#333,stroke-width:2px

104
python/deployments_graph.py Normal file
View File

@ -0,0 +1,104 @@
import yaml
import re
# Load the YAML content from the file
with open('appveyor.yml', 'r') as file:
appveyor_yml = file.read()
# Parsing the YAML content
config = yaml.safe_load(appveyor_yml)
# Extract branches and configurations from the YAML
branches = {}
deployments = {}
inactive_branches = []
inactive_configs = {}
# Extract specific branch configurations
if 'for' in config:
for branch_config in config['for']:
branch_names = branch_config.get('branches', {}).get('only', [])
configurations = branch_config.get('configuration', [])
for branch in branch_names:
if branch not in branches:
branches[branch] = []
branches[branch].extend(configurations)
# Manually add common branches and configurations if not already included
if 'branches' in config:
common_branches = config['branches'].get('only', [])
for branch in common_branches:
if branch not in branches:
branches[branch] = config.get('configuration', [])
# Check for inactive branches and configurations in the comments
lines = appveyor_yml.splitlines()
for i, line in enumerate(lines):
# Check for commented branches
if re.match(r'#\s*-\s*(\w+)', line):
branch_name = re.findall(r'#\s*-\s*(\w+)', line)[0]
if 'branches' in lines[i - 1] and 'only:' in lines[i - 2]:
inactive_branches.append(branch_name.replace('-', '_'))
# Check for commented configurations
if re.match(r'#\s*-\s*(\w+)', line):
inactive_configs.setdefault(lines[i - 1].strip().split()[1], []).append(re.findall(r'#\s*-\s*(\w+)', line)[0].replace('-', '_'))
# Extract deployment configurations
if 'deploy' in config:
deployments['common'] = config['deploy']
if 'for' in config:
for branch_config in config['for']:
branch_names = branch_config.get('branches', {}).get('only', [])
if 'deploy' in branch_config:
for branch in branch_names:
if branch not in deployments:
deployments[branch] = []
deployments[branch].extend(branch_config['deploy'])
# Generate the Mermaid graph content
mermaid_graph = """
graph TD
A[Common Configuration]
"""
# Add branches to the graph
for branch in branches.keys():
branch_id = branch.replace('-', '_')
mermaid_graph += f" A --> {branch_id}\n"
# Add configurations to the branches
for branch, configs in branches.items():
branch_id = branch.replace('-', '_')
for idx, config in enumerate(configs):
config_id = f"{branch_id}{idx + 1}"
mermaid_graph += f" {branch_id} --> {config_id}[{config}]\n"
# Add deployments to the configurations
for branch, deploys in deployments.items():
branch_id = branch.replace('-', '_')
for deploy in deploys:
configuration = deploy.get('configuration')
if configuration:
config_id = f"{branch_id}{branches[branch].index(configuration) + 1}"
service_id = deploy.get('website', '').replace('-', '_')
mermaid_graph += f" {config_id} --> {service_id}[{deploy.get('provider', '')} on {branch}]\n"
# Highlight inactive branches and configurations with a dotted style
for branch in inactive_branches:
mermaid_graph += f" style {branch} fill:#f96,stroke:#333,stroke-width:2px,stroke-dasharray: 5, 5\n"
for branch, configs in inactive_configs.items():
branch_id = branch.replace('-', '_')
for config in configs:
config_id = f"{branch_id}_{config}"
mermaid_graph += f" style {config_id} fill:#bbf,stroke:#333,stroke-width:2px,stroke-dasharray: 5,5\n"
# Add styles to the graph
mermaid_graph += """
style A fill:#f9f,stroke:#333,stroke-width:2px
"""
# Output the graph
print(mermaid_graph)

View File

76
windows/wsl.cmd Normal file
View File

@ -0,0 +1,76 @@
@REM run image (alpine) as root user
wsl -d Alpine -u root
@REM wsl install docker cuda
# Update the package list and install dependencies
sudo apt update
sudo apt install -y ca-certificates curl gnupg lsb-release
# Add Docker's official GPG key
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
# Set up the Docker repository
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
# Update the package list
sudo apt update
# Install Docker Engine
sudo apt install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
# Start Docker
sudo service docker start
# Enable Docker to start at boot
sudo systemctl enable docker
# Allow Docker commands without sudo
sudo usermod -aG docker $USER
@REM Install WSL-Ubuntu Package for CUDA:
@REM Follow the instructions to install the CUDA toolkit specifically designed for WSL. Do not install the default CUDA toolkit that includes the drivers.
wget https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/cuda-wsl-ubuntu.pin
sudo mv cuda-wsl-ubuntu.pin /etc/apt/preferences.d/cuda-repository-pin-600
wget https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda-repo-wsl-ubuntu-12-2-local_12.2.0-1_amd64.deb
sudo dpkg -i cuda-repo-wsl-ubuntu-12-2-local_12.2.0-1_amd64.deb
sudo cp /var/cuda-repo-wsl-ubuntu-12-2-local/cuda-*-keyring.gpg /usr/share/keyrings/
sudo apt-get update
sudo apt-get install cuda
Install NVIDIA Container Toolkit:
@REM Set up the package repository and install the NVIDIA Container Toolkit:
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
sudo apt-get update
sudo apt-get install -y nvidia-docker2
# Create/update Docker daemon configuration
mkdir -p /etc/docker
echo '{
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
}
}' > /etc/docker/daemon.json
sudo systemctl restart docker
@REM Run a test container to ensure everything is set up correctly:
sudo docker run --rm --gpus all nvidia/cuda:11.0-base nvidia-smi