This commit is contained in:
Dobromir Popov 2024-06-12 15:49:09 +03:00
commit 2719d19526
65 changed files with 2878 additions and 140 deletions

23
.env
View File

@ -1,10 +1,27 @@
TTS_BACKEND_URL=https://api.tts.d-popov.com/
TTS_BACKEND_URL=http://192.168.0.10:9008/asr #TTS_BACKEND_URL=http://192.168.0.10:9009/asr
#TTS_BACKEND_URL=http://localhost:9001/asr #gpu 9002-cpu #TTS_BACKEND_URL=http://localhost:9001/asr #gpu 9002-cpu
TTS_BACKEND_URL2=http://localhost:9002/asr TTS_BACKEND_URL2=http://localhost:9002/asr
TTS_BACKEND_URL3=http://192.168.0.10:9008/asr #gpu TTS_BACKEND_URL3=http://192.168.0.10:9008/asr #gpu
TTS_BACKEND_URL4=http://192.168.0.10:9009/asr #cpu 9008-gpu #! TTS_BACKEND_URL4=http://192.168.0.10:9009/asr #cpu 9008-gpu
WS_URL=ws://localhost:8081 WS_URL=ws://localhost:8081
SERVER_PORT_WS=8081 SERVER_PORT_WS=8081
SERVER_PORT_HTTP=3005 SERVER_PORT_HTTP=3005
# aider
AIDER_MODEL=
AIDER_4=false
#AIDER_35TURBO=
# OPENAI_API_KEY=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
# OPENAI_API_BASE=https://api.deepseek.com/v1
# OPENAI_API_KEY=sk-99df7736351f4536bd72cd64a416318a
# AIDER_MODEL=deepseek-coder #deepseek-coder, deepseek-chat
GROQ_API_KEY=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
aider --model groq/llama3-70b-8192
# List models available from Groq
aider --models groq/

View File

@ -1,6 +1,6 @@
TTS_BACKEND_URL=http://192.168.0.10:9008/asr # TTS_BACKEND_URL=http://192.168.0.10:9008/asr
WS_URL=ws://192.168.0.10:9008:8081 # WS_URL=ws://192.168.0.10:9008
SERVER_PORT_WS=8081 # SERVER_PORT_WS=8081
SERVER_PORT_HTTP=8080 # SERVER_PORT_HTTP=8080

16
.env.development Normal file
View File

@ -0,0 +1,16 @@
ENV_NAME=development
TTS_API_URL=https://api.tts.d-popov.com/asr
# LLN_MODEL=qwen2
# LNN_API_URL=https://ollama.d-popov.com/api/generate
LLN_MODEL=qwen2
LNN_API_URL=https://ollama.d-popov.com/api/generate
GROQ_API_KEY=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
OPENAI_API_KEY=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
WS_URL=ws://localhost:8081
SERVER_PORT_WS=8081
SERVER_PORT_HTTP=8080

View File

@ -2,7 +2,7 @@
TTS_BACKEND_URL=http://localhost:9001/asr #gpu 9002-cpu TTS_BACKEND_URL=http://localhost:9001/asr #gpu 9002-cpu
TTS_BACKEND_URL2=http://localhost:9002/asr TTS_BACKEND_URL2=http://localhost:9002/asr
TTS_BACKEND_URL3=http://192.168.0.10:9008/asr #gpu TTS_BACKEND_URL3=http://192.168.0.10:9008/asr #gpu
TTS_BACKEND_URL4=http://192.168.0.10:9009/asr #cpu 9008-gpu #! TTS_BACKEND_URL4=http://192.168.0.10:9009/asr #cpu 9008-gpu
WS_URL=ws://localhost:8081 WS_URL=ws://localhost:8081
SERVER_PORT_WS=8081 SERVER_PORT_WS=8081
SERVER_PORT_HTTP=8080 SERVER_PORT_HTTP=8080

3
.gitignore vendored
View File

@ -9,3 +9,6 @@ agent-py-bot/scrape/raw/*
tts/*.m4a tts/*.m4a
agent-mobile/jdk/* agent-mobile/jdk/*
agent-mobile/artimobile/supervisord.pid agent-mobile/artimobile/supervisord.pid
agent-pyter/lag-llama
agent-pyter/google-chrome-stable_current_amd64.deb
web/.node-persist/*

84
.vscode/launch.json vendored
View File

@ -1,48 +1,80 @@
{ {
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0", "version": "0.2.0",
"configurations": [ "configurations": [
{ // {
"name": "Docker Node.js Launch", // "name": "Docker Node.js Launch",
"type": "docker", // "type": "docker",
"request": "launch", // "request": "launch",
"preLaunchTask": "docker-run: debug", // "preLaunchTask": "docker-run: debug",
"platform": "node" // "platform": "node"
}, // },
{ // {
"name": "Docker Python Launch?", // "name": "Docker Python Launch?",
"type": "python", // "type": "python",
"request": "launch", // "request": "launch",
"program": "${workspaceFolder}/agent-py-bot/agent.py", // "program": "${workspaceFolder}/agent-py-bot/agent.py",
"console": "integratedTerminal", // "console": "integratedTerminal"
// "python": "${command:python.interpreterPath}", // Assumes Python extension is installed // },
// "preLaunchTask": "docker-run: python-debug", // You may need to create this task
// "env": {
// "PYTHONUNBUFFERED": "1"
// }
},
{ {
"name": "Docker Python Launch with venv", "name": "Docker Python Launch with venv",
"type": "debugpy", "type": "debugpy",
"request": "launch", "request": "launch",
"program": "${workspaceFolder}/agent-py-bot/agent.py", "program": "${workspaceFolder}/agent-py-bot/agent.py",
"console": "integratedTerminal", "console": "integratedTerminal",
"python": "/venv/bin/python", // Path to the Python interpreter in your venv "python": "/venv/bin/python",
"env": { "env": {
"PYTHONUNBUFFERED": "1" "PYTHONUNBUFFERED": "1"
} }
}, },
{ {
"name": "node: Launch server.js", "name": "Launch chat-server.js",
"type": "node", "type": "node",
"request": "launch", "request": "launch",
"program": "conda activate node && node web/server.js", "program": "${workspaceFolder}/web/chat-server.js",
"console": "integratedTerminal",
"internalConsoleOptions": "neverOpen",
"env": {
"CONDA_ENV": "node", //?
"NODE_ENV": "development"
},
"skipFiles": [ "skipFiles": [
"<node_internals>/**" "<node_internals>/**"
] ]
},
{
"name": "Launch server.js",
"type": "node",
"request": "launch",
// "program": "conda activate node && ${workspaceFolder}/web/server.js",
"program": "${workspaceFolder}/web/server.js",
"console": "integratedTerminal",
"internalConsoleOptions": "neverOpen",
"env": {
"CONDA_ENV": "node", //?
"NODE_ENV": "development"
},
"skipFiles": [
"<node_internals>/**"
]
},
{
"name": "Python Debugger: Python File",
"type": "debugpy",
"request": "launch",
"program": "${file}"
},
{
"name": "Python Debugger: Python File with Conda",
"type": "debugpy",
"request": "launch",
"program": "${file}",
"console": "integratedTerminal",
//"python": "${command:python.interpreterPath}",
"python": "/config/miniconda3/envs/py/bin/python",
"presentation": {
"clear": true
},
//"preLaunchTask": "conda-activate" // Name of your pre-launch task
} }
] ]
} }

64
.vscode/tasks.json vendored
View File

@ -1,24 +1,24 @@
{ {
"version": "2.0.0", "version": "2.0.0",
"tasks": [ "tasks": [
{ // {
"type": "docker-build", // "type": "docker-build",
"label": "docker-build", // "label": "docker-build",
"platform": "node", // "platform": "node",
"dockerBuild": { // "dockerBuild": {
"dockerfile": "${workspaceFolder}/Dockerfile", // "dockerfile": "${workspaceFolder}/Dockerfile",
"context": "${workspaceFolder}", // "context": "${workspaceFolder}",
"pull": true // "pull": true
} // }
}, // },
{ // {
"type": "docker-run", // "type": "docker-run",
"label": "docker-run: release", // "label": "docker-run: release",
"dependsOn": [ // "dependsOn": [
"docker-build" // "docker-build"
], // ],
"platform": "node" // "platform": "node"
}, // },
// { // {
// "type": "docker-run", // "type": "docker-run",
// "label": "docker-run: debug2", // "label": "docker-run: debug2",
@ -74,7 +74,31 @@
// "kind": "build", // "kind": "build",
// "isDefault": true // "isDefault": true
// } // }
// } // },
,
{
"label": "Activate Conda Env, Set ENV Variable, and Open Shell",
"type": "shell",
"command": "bash --init-file <(echo 'source ~/miniconda3/etc/profile.d/conda.sh && conda activate aider && export OPENAI_API_KEY=xxx && aider --no-auto-commits')",
"problemMatcher": [],
"presentation": {
"reveal": "always",
"panel": "new"
},
},
{
"label": "conda-activate",
"type": "shell",
"command": "source ~/miniconda3/etc/profile.d/conda.sh && conda activate ${input:condaEnv} && echo 'Activated Conda Environment (${input:condaEnv})!'",
"problemMatcher": [],
}
],
"inputs": [
{
"id": "condaEnv",
"type": "promptString",
"description": "Enter the Conda environment name",
"default": "py"
}
] ]
} }

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1 @@
create a python app that will watch for new pairs on https://www.dextools.io/app/en/pairs (we need to crawl that with JS enabled to pull the actual content) and provide option to execute a HTTP request to an API to inform about the new pair

View File

@ -0,0 +1,37 @@
git clone https://github.com/stitionai/devika.git
conda create -n devika python=3.10
conda activate devika
which python
/config/miniconda3/envs/devika/bin/python -m pip install -r requirements.txt
fix browser issue
#apt --fix-broken install
#sudo apt-get update
#sudo apt-get install libnss3
/ui#>?
playwright install --with-deps
npm install
bun run dev
open new terminal
conda activate devika
rename sample.config.toml
fill out config.toml
/ui#>
bun run preview --host --port 3000
SET VITE_API_BASE_URL=https://api.dev.d-popov.com
bun run dev --host
# RUN:
/#>conda activate devika && python3 devika.py --debug
/#>conda activate node && npx bun run dev --host
TOML:
google search:
https://developers.google.com/custom-search/v1/introduction
https://programmablesearchengine.google.com/controlpanel/overview?cx=0382a4a0cfd6745b7

View File

@ -0,0 +1,87 @@
# Headless LM Studio image: extracts the AppImage and serves its GUI through
# Xvfb + x11vnc, with noVNC as the browser-facing endpoint.
FROM ollama/ollama
# FROM ubuntu:20.04
# Avoid prompts from apt during build
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y \
    wget \
    x11vnc \
    xvfb \
    net-tools \
    git \
    python3 \
    python3-numpy \
    novnc
# Create the /app directory to hold the application
WORKDIR /app
# https://medium.com/@renswunnink/why-i-love-appimage-for-linux-distros-924769510ec5
# Download and self-extract the AppImage (extraction avoids needing FUSE in the container).
RUN wget -O lmstudio.AppImage "https://releases.lmstudio.ai/linux/0.2.18/beta/LM_Studio-0.2.18.AppImage" && \
    chmod u+x lmstudio.AppImage && \
    ./lmstudio.AppImage --appimage-extract && \
    rm lmstudio.AppImage && \
    mv squashfs-root lmstudio
# or add dependencies on another layer
RUN apt-get update && apt-get install -y \
    dbus pciutils \
    libglib2.0-0 \
    libnss3 \
    libgbm1 \
    libxshmfence1 \
    libgl1-mesa-glx \
    libegl1-mesa \
    libatk1.0-0 \
    libatk-bridge2.0-0 \
    libgtk-3-0 \
    libasound2 \
    && rm -rf /var/lib/apt/lists/*
# Setup a virtual display environment with XVFB
# NOTE(review): a process backgrounded in a RUN step dies when the build layer
# finishes, so this line has no runtime effect; Xvfb is started again by
# /start-app.sh below.
RUN Xvfb :99 -screen 0 1024x768x16 &
ENV DISPLAY=:99
EXPOSE 8080
# Expose port 5980 for noVNC
EXPOSE 5980
RUN ln -s /app/lmstudio/lm-studio /lm-studio
#? RUN chmod +x /app/lmstudio/AppRun
# Create a startup script to run Xvfb and your application
# Create a startup script
# NOTE(review): the script calls /opt/noVNC/utils/launch.sh, but nothing in this
# Dockerfile clones noVNC into /opt/noVNC (only the apt `novnc` package is
# installed, and the git-clone hint below is commented out) — confirm the noVNC
# path before relying on port 5980.
RUN echo '#!/bin/bash\n\
Xvfb :99 -screen 0 1024x768x16 &\n\
export DISPLAY=:99\n\
# Start X11VNC\n\
x11vnc -display :99 -nopw -listen localhost -xkb -forever &\n\
# Start noVNC\n\
/opt/noVNC/utils/launch.sh --vnc localhost:5900 --listen 5980\n\
# Start the application\n\
exec /app/lmstudio/AppRun --no-sandbox\n\
' > /start-app.sh && chmod +x /start-app.sh
CMD ["/start-app.sh"]
#> apt-get update && apt-get install -y git x11vnc
#> git clone https://github.com/novnc/noVNC.git /opt/noVNC
#? x11vnc -display :99 -nopw -listen localhost -xkb -forever &
# Run LM Studio (assumes LM Studio can be run headlessly or in a server mode)
#CMD ["./lmstudio/AppRun"]
#CMD ["./lmstudio/AppRun", "--no-sandbox"]
#CMD ["/bin/bash"] # interactive shell
# build: docker build -t llmstudio .
# run: docker run (-dit) -p 8980:8080 llmstudio
# docker build -t llmstudio . && docker run -it -p 8980:8080 llmstudio
# cd /mnt/storage/DEV/workspace/repos/git.d-popov.com/ai-kevin/lmstudio/
# docker run --runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=all -it llmstudio
# docker build -t llmstudio . && docker run -dit -p 8980:8080 --volume /var/run/dbus:/var/run/dbus llmstudio
# docker build -t llmstudio . && docker run -it -p 8980:8080 --volume /var/run/dbus:/var/run/dbus --runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=all --security-opt apparmor=unconfined llmstudio

View File

@ -0,0 +1,39 @@
# docker build -t opendevin . && docker run -d --name OpenDevin-1 -p 3050:3000 -p 3051:3001 opendevin
# docker run --name OpenDevin-dev -it opendevin

# Start with a base image that has both Python and Node.js
FROM nikolaik/python-nodejs:python3.11-nodejs14

# Install system dependencies required for the project
RUN apt-get update && apt-get install -y \
    git \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Clone the latest version of OpenDevin
WORKDIR /opendevin
RUN git clone https://github.com/OpenDevin/OpenDevin.git .

# Install Python dependencies.
# NOTE(review): assumes `pipenv` is present in the base image — verify; if not,
# add `python -m pip install pipenv` before this step.
# (A second, redundant `RUN python -m pip install -r requirements.txt` layer
# that followed this line was removed: the same requirements are already
# installed here.)
WORKDIR /opendevin/backend
RUN python -m pipenv requirements > requirements.txt && python -m pip install -r requirements.txt

# Install Node.js dependencies
WORKDIR /opendevin/frontend
RUN npm install monaco-editor
RUN npm install

# Build the frontend
RUN npm run build

# Expose backend and frontend ports
EXPOSE 3000 3001

# Add a script to start both backend and frontend services
WORKDIR /opendevin
COPY start-services.sh /opendevin/start-services.sh
RUN chmod +x /opendevin/start-services.sh
CMD ["/opendevin/start-services.sh"]

View File

@ -0,0 +1,12 @@
#!/bin/bash
# Container entrypoint: launch the OpenDevin backend and frontend together.
# Navigate to the backend directory and start the backend server in the background
cd /opendevin/backend
uvicorn opendevin.server.listen:app --port 3000 &
# Serve the frontend from the build directory
cd /opendevin/frontend/build
npx serve -l 3001 &
# Keep the container running
# `wait` blocks until both background jobs exit, so PID 1 stays alive.
wait

13
_doc/_notes/PROMPTS.md Normal file
View File

@ -0,0 +1,13 @@
You are an expert in extracting new information from text and sorting it into existing categories or creating a new category (we're using Neo4j as the graph database).
This is multi-step process:
1. Divide the text into paragraph-like chunks that share the same topic. Add a summary to each paragraph.
2. Create Memory object for each summary
3. Extract facts from each paragraph and add them as knowledge linked to the paragraph, as a separate memory object linked to the first one. Look into verbs, adjectives and nouns to extract the plain information from the text. If there is source code, do not interpret it; remember it verbatim as a linked Memory, add a summary, and link it to the main "Memory".
4. Assign possible category and scientific field labels to the information by adding them as tags.
This systematization and segmentation will allow you to remember the text and store it in your long-term memory as a knowledge graph, while providing easy access later. Respond in JSON format with the extracted and restructured data.
Here's the source text:
------------
create a web app that has a button to start recording sends it to latest whisper llm for STT, translates it to target language and shows the result in realtime. it should be streaming text with minimal lag.

View File

Before

Width:  |  Height:  |  Size: 1.6 KiB

After

Width:  |  Height:  |  Size: 1.6 KiB

32
_doc/_notes/arti/ideas.md Normal file
View File

@ -0,0 +1,32 @@
Key features & principles:
- modal/plug & play design
- Biomimicing based
- self inferencing loop
- Graph->LLM->Graph based logic (Self reflect)
- attention (Short term memory)
- generalized & contextualized memory schema (memory is strongly context-dependent and temporal)
LLM module
Graph module
Short term memory module
mid-term memory (history on the topic)
graph powered long term memory with embedding storage for skills & AII ( interface on some of the layers)
separate text IOs"
- multi agent communication module/console/
- internal state/context/mood/STM
- actions output
GRAPH schema
idea
- is child of
Q: Brainstorm neo4j schema for biomimicing memory storage as neo4j graph database. It should be similar to the way humans store, retrieve and generalize knowledge
Memory model:
-- example prompt:
You are an AI living natively on a Linux host. You have all local resources and installed software at your disposal: root console access, a git repo, and the internet. You have your own codebase and will brainstorm and find ideas for improving your reasoning and knowledge of the world. To avoid messing up your health, you will prefer to install new packages and test ideas inside Docker or virtual envs. You can test your ideas in Docker containers and can experiment with different ways to pipeline and organise the thought process. You will also explain each approach and promote the best containers. A local Neo4j database is your memory and knowledge source. Whenever you are not certain about a topic, or need to refresh your knowledge, you will check online for updated information. If the problem concerns npm, pip, or another package, or an existing repository on GitHub or another repository server, you will first update your internal database with the latest readme and usage documentation from the repository authors and later search Stack Overflow to update your skills. Periodically you will reorganize and tidy your "memory", making it more easily accessible and updating the embedding vectors to allow easy access to the needed context when there is a problem to solve. You will build a tools library over time that will help you solve more complex problems and keep it in your memory together with a list of tasks completed with each tool. You will experiment with different pipelines for different tasks, memorise them, and generalize and optimize existing pipelines over time to build solid know-how.
let's create the project setup in python. our main file will be the agent, and it will call the llm and other tools (maybe langchain), explore, organize and improve when 'resting'.

11
_doc/aider.md Normal file
View File

@ -0,0 +1,11 @@
python -m pip install git+https://github.com/d-popov/aider.git
export GROQ_API_KEY=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
aider --models groq/
aider --model groq/llama3-70b-8192 --no-auto-commits
#@ OLLAMA
export OPENAI_API_BASE=https://ollama.d-popov.com
aider --openai-api-base https://ollama.d-popov.com --openai-api-key ol-ddddd --models openai/

49
_doc/oi-notes.md Normal file
View File

@ -0,0 +1,49 @@
interpreter --api_base http://192.168.0.11:11434/v1/
interpreter --model "gpt-3.5-turbo" # mistral
interpreter --model "mistral" --api_base http://192.168.0.11:11434/v1/
Mac/Linux: 'export OPENAI_API_KEY=your-key-here',
Windows: 'setx OPENAI_API_KEY your-key-here' then restart terminal.
interpreter --local
interpreter --api_base http://192.168.0.11:11434/v1 --api_key "" --model openai/local
interpreter --api_base http://192.168.0.137:1234/v1 --api_key "" --model openai/local
192.168.0.137
GROQ_API_KEY
# ################################ GROQ ########################## working
export OPENAI_API_KEY=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
interpreter -y --api_base https://api.groq.com/openai/v1 --model llama2-70b-4096 ## mixtral-8x7b-32768 # gemma-7b-it # llama2-70b-4096
##
# Load a model, start the server, and run this example in your terminal
# Choose between streaming and non-streaming mode by setting the "stream" field
curl http://192.168.0.11:11434/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
"messages": [
{ "role": "system", "content": "Always answer in rhymes." },
{ "role": "user", "content": "Introduce yourself." }
],
"temperature": 0.7,
"max_tokens": -1,
"stream": false
}'
curl http://192.168.0.137:1234/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
"messages": [
{ "role": "system", "content": "Always answer in rhymes." },
{ "role": "user", "content": "Introduce yourself." }
],
"temperature": 0.7,
"max_tokens": -1,
"stream": false
}'

34
_doc/scripts/aider.sh Normal file
View File

@ -0,0 +1,34 @@
#!/bin/bash
# python -m pip install git+https://github.com/paul-gauthier/aider.git
source ~/miniconda3/etc/profile.d/conda.sh # Adjust the path as per your Conda installation
conda activate aider
export OPENAI_API_KEY=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
# aider --no-auto-commits
OPENAI_API_BASE=https://api.deepseek.com/v1
OPENAI_API_KEY=sk-99df7736351f4536bd72cd64a416318a
AIDER_MODEL=deepseek-coder #deepseek-coder, deepseek-chat
aider --openai-api-base https://api.deepseek.com/v1 --openai-api-key sk-99df7736351f4536bd72cd64a416318a --model deepseek-coder
aider --openai-api-base 'https://api.groq.com/openai/v1' --openai-api-key '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' --model 'llama2-70b-4096'
usage: aider [-h] [--openai-api-key OPENAI_API_KEY] [--model MODEL] [--skip-model-availability-check SKIP_MODEL_AVAILABILITY_CHECK] [--4] [--4turbo] [--35turbo] [--voice-language VOICE_LANGUAGE]
[--openai-api-base OPENAI_API_BASE] [--openai-api-type OPENAI_API_TYPE] [--openai-api-version OPENAI_API_VERSION] [--openai-api-deployment-id OPENAI_API_DEPLOYMENT_ID]
[--openai-organization-id OPENAI_ORGANIZATION_ID] [--openrouter] [--edit-format EDIT_FORMAT] [--map-tokens MAP_TOKENS] [--input-history-file INPUT_HISTORY_FILE]
[--chat-history-file CHAT_HISTORY_FILE] [--dark-mode] [--light-mode] [--pretty | --no-pretty] [--stream | --no-stream] [--user-input-color USER_INPUT_COLOR]
[--tool-output-color TOOL_OUTPUT_COLOR] [--tool-error-color TOOL_ERROR_COLOR] [--assistant-output-color ASSISTANT_OUTPUT_COLOR] [--code-theme CODE_THEME] [--show-diffs]
[--git | --no-git] [--gitignore | --no-gitignore] [--aiderignore AIDERIGNORE] [--auto-commits | --no-auto-commits] [--dirty-commits | --no-dirty-commits] [--dry-run | --no-dry-run]
[--commit] [--version] [--check-update] [--skip-check-update] [--apply FILE] [--yes] [-v] [--show-repo-map] [--message COMMAND] [--message-file MESSAGE_FILE] [--encoding ENCODING]
[-c CONFIG_FILE]
export OPENAI_API_KEY=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
interpreter -y --api_base https://api.groq.com/openai/v1 --model gemma-7b-it ## mixtral-8x7b-32768 # gemma-7b-it # llama2-70b-4096
# Setup OpenRouter access
export OPENAI_API_KEY=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
export OPENAI_API_BASE=https://api.groq.com/openai/v1
# For example, run aider with Claude 3 Opus using the diff editing format
aider --model llama2-70b-4096 --edit-format diff

View File

@ -1,27 +0,0 @@
Key features & principles:
- modal/plug & play design
- Biomimicing based
- self inferencing loop
- Graph->LLM->Graph based logic (Self reflect)
- attention (Short term memory)
- generalized & contextuaized memory schema (memory is strongly context dependent and temporal)
LLM module
Graph module
Short term memory module
mid-term memory (history on the toppic)
graph powered long term memory with embedding storage for skills & AII ( interface on some of the layers)
separate text IOs"
- multi agent communication module/console/
- internal state/context/mood/STM
- actions output
GRAPH schema
idea
- is child of
Q: Brainstorm neo4j schema for biomimicing memory storage as neo4j graph database. It should be similar to the way humans store, retrieve and generalize knowledge
Memory model:

3
agent-a/.env Normal file
View File

@ -0,0 +1,3 @@
NEO4J_URI="bolt://192.168.0.10:7687"
NEO4J_USER="neo4j"
NEO4J_PASSWORD="lucas-bicycle-powder-stretch-ford-9492"

0
agent-a/.gitignore vendored Normal file
View File

0
agent-a/README.md Normal file
View File

0
agent-a/requirements.txt Normal file
View File

31
agent-a/setup.sh Normal file
View File

@ -0,0 +1,31 @@
#!/bin/bash
# Scaffold the agent-a project: data/notebook/src/test directory tree, empty
# package and test files, and an (optional, currently disabled) git init.

# Function to create directories
create_directories() {
    mkdir -p ./{data/{raw,processed},notebooks,src/{agent,llm,tools,utils},tests/{agent,llm,tools,utils}}
}

# Function to create files
create_files() {
    touch ./{.gitignore,requirements.txt,README.md}
    touch ./src/{agent,llm,tools,utils}/__init__.py
    touch ./tests/{agent,llm,tools,utils}/test_{agent,llm,tool1,tool2,utils}.py
}

# Function to initialize Git repository
initialize_git() {
    echo "Do you want to initialize a Git repository? (y/n)"
    read answer
    if [ "$answer" == "y" ]; then
        git init
        echo "Git repository initialized."
        # NOTE(review): this `cd ..` leaves the project directory after init,
        # which looks unintentional — confirm before re-enabling the call below.
        cd ..
    fi
}

# Main script execution
create_directories
create_files
#initialize_git
echo "Project setup complete."

View File

View File

@ -0,0 +1,47 @@
# src/agent/agent.py

class Agent:
    """An agent that owns a toolbox of tools it can invoke on demand.

    A "tool" is any object exposing a ``use(*args, **kwargs)`` method.
    The lifecycle hooks (``explore``/``organize``/``improve``/``rest``)
    are placeholders awaiting real logic.
    """

    def __init__(self):
        # Toolbox starts out empty; tools are registered via add_tool().
        self.tools = []

    def add_tool(self, tool):
        """Register a tool in this agent's toolbox."""
        self.tools.append(tool)

    def remove_tool(self, tool):
        """Unregister a tool; silently does nothing if it is not registered."""
        if tool in self.tools:
            self.tools.remove(tool)

    def use_tool(self, tool, *args, **kwargs):
        """Invoke ``tool.use(*args, **kwargs)`` for a registered tool.

        Returns the tool's result, or an explanatory string when the
        tool has not been added to the toolbox.
        """
        if tool not in self.tools:
            return "Tool not found in agent's toolbox."
        return tool.use(*args, **kwargs)

    def explore(self):
        """Placeholder: logic for exploring new ideas."""
        pass

    def organize(self):
        """Placeholder: logic for organizing knowledge."""
        pass

    def improve(self):
        """Placeholder: logic for improving reasoning."""
        pass

    def rest(self):
        """Placeholder: logic for resting and updating knowledge."""
        pass


# Example usage
if __name__ == "__main__":
    agent = Agent()
    # Add tools to the agent's toolbox
    # agent.add_tool(some_tool_instance)
    # Use a tool
    # result = agent.use_tool(some_tool_instance, some_arguments)
    # print(result)

View File

View File

View File

1
agent-arti/readme.md Normal file
View File

@ -0,0 +1 @@
ToDo: copy arti code here

1
agent-b/main.py Normal file
View File

@ -0,0 +1 @@
Hello, world!

View File

@ -30,4 +30,7 @@ in python, create an app that will search for a news about a specific topic on t
# devika
create new homepage for memecoin. look at https://donk.meme/ for inspiration about functionality. but the design should be novel.

92
agent-pyter/ccxt.py Normal file
View File

@ -0,0 +1,92 @@
# # https://github.com/ccxt/ccxt/tree/master/examples/py
# # ! pip install ccxt
# # //cjs
# # var ccxt = require ('ccxt')
# # console.log (ccxt.exchanges) // print all available exchanges
# # py
# import ccxt
# #print(ccxt.exchanges)
# #import ccxt.async_support as ccxt
# # # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# # import ccxt
# # hitbtc = ccxt.hitbtc({'verbose': True})
# # bitmex = ccxt.bitmex()
# # huobipro = ccxt.huobipro()
# # exmo = ccxt.exmo({
# # 'apiKey': 'YOUR_PUBLIC_API_KEY',
# # 'secret': 'YOUR_SECRET_PRIVATE_KEY',
# # })
# # kraken = ccxt.kraken({
# # 'apiKey': 'YOUR_PUBLIC_API_KEY',
# # 'secret': 'YOUR_SECRET_PRIVATE_KEY',
# # })
# # exchange_id = 'binance'
# # exchange_class = getattr(ccxt, exchange_id)
# # exchange = exchange_class({
# # 'apiKey': 'YOUR_API_KEY',
# # 'secret': 'YOUR_SECRET',
# # })
# # hitbtc_markets = hitbtc.load_markets()
# # print(hitbtc.id, hitbtc_markets)
# # print(bitmex.id, bitmex.load_markets())
# # print(huobipro.id, huobipro.load_markets())
# # print(hitbtc.fetch_order_book(hitbtc.symbols[0]))
# # print(bitmex.fetch_ticker('BTC/USD'))
# # print(huobipro.fetch_trades('LTC/USDT'))
# # print(exmo.fetch_balance())
# # # sell one ฿ for market price and receive $ right now
# # print(exmo.id, exmo.create_market_sell_order('BTC/USD', 1))
# # # limit buy BTC/EUR, you pay €2500 and receive ฿1 when the order is closed
# # print(exmo.id, exmo.create_limit_buy_order('BTC/EUR', 1, 2500.00))
# # # pass/redefine custom exchange-specific order params: type, amount, price, flags, etc...
# # kraken.create_market_buy_order('BTC/USD', 1, {'trading_agreement': 'agree'})
# # # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# # # -*- coding: utf-8 -*-
# # import os
# # import sys
# # from pprint import pprint
# # root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# # sys.path.append(root + '/python')
# import ccxt # noqa: E402
# # -----------------------------------------------------------------------------
# # print('CCXT Version:', ccxt.__version__)
# # -----------------------------------------------------------------------------
# exchange = ccxt.coinbase({
# 'apiKey': 'tk2ShLJCmByejn78',
# 'secret': 'UcJfI5HzQmkEjclCeHFSfG8hnNYxaESv',
# # 'verbose': True, # for debug output
# })
# symbol = 'BTC/USDT'
# timeframe = '1m'
# since = None
# limit = None # not used by coinbase
# try:
# # Max 300 Candles
# candles = exchange.fetch_ohlcv(symbol, timeframe, since, limit)
# pprint(candles)
# except Exception as err:
# print(err)

102
agent-pyter/dexbot.py Normal file
View File

@ -0,0 +1,102 @@
# source /path/to/virtualenv/bin/activate # On Unix or MacOS
# source /config/miniconda3/envs/py/bin/activate
# pip install requests beautifulsoup4 schedule selenium
# wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
# sudo dpkg -i google-chrome-stable_current_amd64.deb
# apt install libnss3 libxss1
import requests
from bs4 import BeautifulSoup
# # import schedule
import time

from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager


def check_pairs_sel():
    """Scrape the DEXTools pool explorer with a real browser (JS enabled)
    and print all elements whose CSS class contains 'pair'.

    A fresh WebDriver is created per call. (The original module built the
    driver at import time — twice, leaking one browser — and then quit it in
    the first call's ``finally``, so every later call from main()'s loop
    failed on a dead driver.)
    """
    service = Service(ChromeDriverManager().install())
    driver = webdriver.Chrome(service=service)
    try:
        driver.get("https://www.dextools.io/app/en/bnb/pool-explorer")
        time.sleep(10)  # Wait for JavaScript to render the pair list
        html = driver.page_source
        soup = BeautifulSoup(html, 'html.parser')
        # Assuming the pairs are listed in <div> tags with a class that includes the word 'pair'
        pairs = soup.find_all('div', class_=lambda x: x and 'pair' in x)
        print("Pairs found:", [pair.text for pair in pairs])
    finally:
        driver.quit()


def check_new_pairs():
    """Plain-HTTP variant: fetch the page with requests and diff the current
    pair set against the previous call's snapshot.

    NOTE(review): requests does not execute JavaScript, so on this SPA the
    parsed HTML likely contains no pair elements — that is why the Selenium
    variant above exists. Kept for reference.
    """
    # log the running time
    print("Checking for new pairs...")
    url = "https://www.dextools.io/app/en/bnb/pool-explorer"
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    # make html dump to ./dextools-last.html
    with open('./dextools-last.html', 'w') as f:
        f.write(soup.prettify())
    # Assuming the pairs are listed in <div> tags with a class that includes the word 'pair'
    pairs = soup.find_all('div', class_=lambda x: x and 'pair' in x)
    current_pairs = {pair.text for pair in pairs}
    # The previous snapshot is kept on the function object between calls.
    if not hasattr(check_new_pairs, "last_pairs"):
        check_new_pairs.last_pairs = current_pairs
    new_pairs = current_pairs - check_new_pairs.last_pairs
    if new_pairs:
        print("New Pairs Found:", new_pairs)
        # Here you can add the code to trigger any event (e.g., send an email, a notification, etc.)
    # Update the last checked pairs
    check_new_pairs.last_pairs = current_pairs


def main():
    """Poll the explorer forever (sleep is in seconds: ~2.8 hours per cycle)."""
    # schedule.every(10).seconds.do(check_new_pairs)
    while True:
        # schedule.run_pending()
        check_pairs_sel()
        time.sleep(10000)


if __name__ == "__main__":
    main()

1
agent-pyter/lag-llama Submodule

@ -0,0 +1 @@
Subproject commit 948665530fcda634df9d7df0bee5e19b87785eb9

374
agent-pyter/lag-llama.ipynb Normal file
View File

@ -0,0 +1,374 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 15,
"id": "04e8bf3c-845f-49bb-9e9c-992d6b8948f0",
"metadata": {},
"outputs": [],
"source": [
"# https://colab.research.google.com/drive/1XxrLW9VGPlZDw3efTvUi0hQimgJOwQG6?usp=sharing#scrollTo=gyH5Xq9eSvzq"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "37f96736-8654-4852-a144-fd75df22aaf7",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Cloning into 'lag-llama'...\n",
"remote: Enumerating objects: 124, done.\u001b[K\n",
"remote: Counting objects: 100% (69/69), done.\u001b[K\n",
"remote: Compressing objects: 100% (43/43), done.\u001b[K\n",
"remote: Total 124 (delta 39), reused 47 (delta 26), pack-reused 55\u001b[K\n",
"Receiving objects: 100% (124/124), 190.17 KiB | 2.29 MiB/s, done.\n",
"Resolving deltas: 100% (49/49), done.\n"
]
}
],
"source": [
"!git clone https://github.com/time-series-foundation-models/lag-llama/"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "f5fac8fa-5ac8-4330-97e0-8a2f4237ba0f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"/workspace/repos/git.d-popov.com/ai-kevin/agent-pyter/lag-llama/lag-llama\n"
]
}
],
"source": [
"cd ./lag-llama"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "968625c9-00fd-4037-b97c-33dfc4758491",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: gluonts[torch] in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from -r requirements.txt (line 1)) (0.14.4)\n",
"Requirement already satisfied: numpy==1.23.5 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from -r requirements.txt (line 2)) (1.23.5)\n",
"Requirement already satisfied: torch>=2.0.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from -r requirements.txt (line 3)) (2.2.1)\n",
"Requirement already satisfied: wandb in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from -r requirements.txt (line 4)) (0.16.4)\n",
"Requirement already satisfied: scipy in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from -r requirements.txt (line 5)) (1.12.0)\n",
"Requirement already satisfied: pandas==2.1.4 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from -r requirements.txt (line 6)) (2.1.4)\n",
"Requirement already satisfied: huggingface_hub[cli] in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from -r requirements.txt (line 7)) (0.21.3)\n",
"Requirement already satisfied: python-dateutil>=2.8.2 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from pandas==2.1.4->-r requirements.txt (line 6)) (2.9.0)\n",
"Requirement already satisfied: pytz>=2020.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from pandas==2.1.4->-r requirements.txt (line 6)) (2024.1)\n",
"Requirement already satisfied: tzdata>=2022.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from pandas==2.1.4->-r requirements.txt (line 6)) (2024.1)\n",
"Requirement already satisfied: pydantic<3,>=1.7 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from gluonts[torch]->-r requirements.txt (line 1)) (2.6.3)\n",
"Requirement already satisfied: tqdm~=4.23 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from gluonts[torch]->-r requirements.txt (line 1)) (4.66.2)\n",
"Requirement already satisfied: toolz~=0.10 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from gluonts[torch]->-r requirements.txt (line 1)) (0.12.1)\n",
"Requirement already satisfied: typing-extensions~=4.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from gluonts[torch]->-r requirements.txt (line 1)) (4.8.0)\n",
"Requirement already satisfied: lightning<2.2,>=2.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from gluonts[torch]->-r requirements.txt (line 1)) (2.1.4)\n",
"Requirement already satisfied: pytorch-lightning<2.2,>=2.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from gluonts[torch]->-r requirements.txt (line 1)) (2.1.4)\n",
"Requirement already satisfied: filelock in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (3.13.1)\n",
"Requirement already satisfied: sympy in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (1.12)\n",
"Requirement already satisfied: networkx in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (3.2.1)\n",
"Requirement already satisfied: jinja2 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (3.1.2)\n",
"Requirement already satisfied: fsspec in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (2024.2.0)\n",
"Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.1.105 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (12.1.105)\n",
"Requirement already satisfied: nvidia-cuda-runtime-cu12==12.1.105 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (12.1.105)\n",
"Requirement already satisfied: nvidia-cuda-cupti-cu12==12.1.105 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (12.1.105)\n",
"Requirement already satisfied: nvidia-cudnn-cu12==8.9.2.26 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (8.9.2.26)\n",
"Requirement already satisfied: nvidia-cublas-cu12==12.1.3.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (12.1.3.1)\n",
"Requirement already satisfied: nvidia-cufft-cu12==11.0.2.54 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (11.0.2.54)\n",
"Requirement already satisfied: nvidia-curand-cu12==10.3.2.106 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (10.3.2.106)\n",
"Requirement already satisfied: nvidia-cusolver-cu12==11.4.5.107 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (11.4.5.107)\n",
"Requirement already satisfied: nvidia-cusparse-cu12==12.1.0.106 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (12.1.0.106)\n",
"Requirement already satisfied: nvidia-nccl-cu12==2.19.3 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (2.19.3)\n",
"Requirement already satisfied: nvidia-nvtx-cu12==12.1.105 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (12.1.105)\n",
"Requirement already satisfied: triton==2.2.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (2.2.0)\n",
"Requirement already satisfied: nvidia-nvjitlink-cu12 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from nvidia-cusolver-cu12==11.4.5.107->torch>=2.0.0->-r requirements.txt (line 3)) (12.4.99)\n",
"Requirement already satisfied: Click!=8.0.0,>=7.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (8.1.7)\n",
"Requirement already satisfied: GitPython!=3.1.29,>=1.0.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (3.1.42)\n",
"Requirement already satisfied: requests<3,>=2.0.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (2.31.0)\n",
"Requirement already satisfied: psutil>=5.0.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (5.9.0)\n",
"Requirement already satisfied: sentry-sdk>=1.0.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (1.40.6)\n",
"Requirement already satisfied: docker-pycreds>=0.4.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (0.4.0)\n",
"Requirement already satisfied: PyYAML in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (6.0.1)\n",
"Requirement already satisfied: setproctitle in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (1.3.3)\n",
"Requirement already satisfied: setuptools in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (68.0.0)\n",
"Requirement already satisfied: appdirs>=1.4.3 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (1.4.4)\n",
"Requirement already satisfied: protobuf!=4.21.0,<5,>=3.19.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (4.25.3)\n",
"Requirement already satisfied: packaging>=20.9 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from huggingface_hub[cli]->-r requirements.txt (line 7)) (23.2)\n",
"Requirement already satisfied: InquirerPy==0.3.4 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from huggingface_hub[cli]->-r requirements.txt (line 7)) (0.3.4)\n",
"Requirement already satisfied: pfzy<0.4.0,>=0.3.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from InquirerPy==0.3.4->huggingface_hub[cli]->-r requirements.txt (line 7)) (0.3.4)\n",
"Requirement already satisfied: prompt-toolkit<4.0.0,>=3.0.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from InquirerPy==0.3.4->huggingface_hub[cli]->-r requirements.txt (line 7)) (3.0.42)\n",
"Requirement already satisfied: six>=1.4.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from docker-pycreds>=0.4.0->wandb->-r requirements.txt (line 4)) (1.16.0)\n",
"Requirement already satisfied: gitdb<5,>=4.0.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from GitPython!=3.1.29,>=1.0.0->wandb->-r requirements.txt (line 4)) (4.0.11)\n",
"Requirement already satisfied: lightning-utilities<2.0,>=0.8.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from lightning<2.2,>=2.0->gluonts[torch]->-r requirements.txt (line 1)) (0.10.1)\n",
"Requirement already satisfied: torchmetrics<3.0,>=0.7.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from lightning<2.2,>=2.0->gluonts[torch]->-r requirements.txt (line 1)) (1.3.1)\n",
"Requirement already satisfied: annotated-types>=0.4.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from pydantic<3,>=1.7->gluonts[torch]->-r requirements.txt (line 1)) (0.6.0)\n",
"Requirement already satisfied: pydantic-core==2.16.3 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from pydantic<3,>=1.7->gluonts[torch]->-r requirements.txt (line 1)) (2.16.3)\n",
"Requirement already satisfied: charset-normalizer<4,>=2 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from requests<3,>=2.0.0->wandb->-r requirements.txt (line 4)) (3.3.2)\n",
"Requirement already satisfied: idna<4,>=2.5 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from requests<3,>=2.0.0->wandb->-r requirements.txt (line 4)) (3.4)\n",
"Requirement already satisfied: urllib3<3,>=1.21.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from requests<3,>=2.0.0->wandb->-r requirements.txt (line 4)) (2.0.7)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from requests<3,>=2.0.0->wandb->-r requirements.txt (line 4)) (2023.7.22)\n",
"Requirement already satisfied: MarkupSafe>=2.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from jinja2->torch>=2.0.0->-r requirements.txt (line 3)) (2.1.3)\n",
"Requirement already satisfied: mpmath>=0.19 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from sympy->torch>=2.0.0->-r requirements.txt (line 3)) (1.3.0)\n",
"Requirement already satisfied: aiohttp!=4.0.0a0,!=4.0.0a1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from fsspec->torch>=2.0.0->-r requirements.txt (line 3)) (3.9.3)\n",
"Requirement already satisfied: smmap<6,>=3.0.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from gitdb<5,>=4.0.1->GitPython!=3.1.29,>=1.0.0->wandb->-r requirements.txt (line 4)) (5.0.1)\n",
"Requirement already satisfied: wcwidth in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from prompt-toolkit<4.0.0,>=3.0.1->InquirerPy==0.3.4->huggingface_hub[cli]->-r requirements.txt (line 7)) (0.2.13)\n",
"Requirement already satisfied: aiosignal>=1.1.2 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec->torch>=2.0.0->-r requirements.txt (line 3)) (1.3.1)\n",
"Requirement already satisfied: attrs>=17.3.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec->torch>=2.0.0->-r requirements.txt (line 3)) (23.2.0)\n",
"Requirement already satisfied: frozenlist>=1.1.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec->torch>=2.0.0->-r requirements.txt (line 3)) (1.4.1)\n",
"Requirement already satisfied: multidict<7.0,>=4.5 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec->torch>=2.0.0->-r requirements.txt (line 3)) (6.0.5)\n",
"Requirement already satisfied: yarl<2.0,>=1.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec->torch>=2.0.0->-r requirements.txt (line 3)) (1.9.4)\n"
]
}
],
"source": [
"!pip install -r requirements.txt #--quiet # this could take some time # ignore the errors displayed by colab"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "8f10c802-4ffa-40f7-bd62-14ff13fae03c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: requests in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (2.31.0)\n",
"Requirement already satisfied: charset-normalizer<4,>=2 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from requests) (3.3.2)\n",
"Requirement already satisfied: idna<4,>=2.5 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from requests) (3.4)\n",
"Requirement already satisfied: urllib3<3,>=1.21.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from requests) (2.0.7)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from requests) (2023.7.22)\n",
"Requirement already satisfied: matplotlib in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (3.8.3)\n",
"Requirement already satisfied: contourpy>=1.0.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from matplotlib) (1.2.0)\n",
"Requirement already satisfied: cycler>=0.10 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from matplotlib) (0.12.1)\n",
"Requirement already satisfied: fonttools>=4.22.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from matplotlib) (4.49.0)\n",
"Requirement already satisfied: kiwisolver>=1.3.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from matplotlib) (1.4.5)\n",
"Requirement already satisfied: numpy<2,>=1.21 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from matplotlib) (1.23.5)\n",
"Requirement already satisfied: packaging>=20.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from matplotlib) (23.2)\n",
"Requirement already satisfied: pillow>=8 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from matplotlib) (10.0.1)\n",
"Requirement already satisfied: pyparsing>=2.3.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from matplotlib) (3.1.2)\n",
"Requirement already satisfied: python-dateutil>=2.7 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from matplotlib) (2.9.0)\n",
"Requirement already satisfied: six>=1.5 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from python-dateutil>=2.7->matplotlib) (1.16.0)\n"
]
}
],
"source": [
"!pip install --upgrade requests\n",
"!pip install matplotlib\n"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "0a64aa15-1477-44bc-b772-a9342a5640c8",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Consider using `hf_transfer` for faster downloads. This solution comes with some limitations. See https://huggingface.co/docs/huggingface_hub/hf_transfer for more details.\n",
"./lag-llama.ckpt\n"
]
}
],
"source": [
"!huggingface-cli download time-series-foundation-models/Lag-Llama lag-llama.ckpt --local-dir ./"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "a328c513-558f-45ca-b900-b669c4ef33ed",
"metadata": {},
"outputs": [],
"source": [
"from itertools import islice\n",
"\n",
"from matplotlib import pyplot as plt\n",
"import matplotlib.dates as mdates\n",
"\n",
"import torch\n",
"from gluonts.evaluation import make_evaluation_predictions, Evaluator\n",
"from gluonts.dataset.repository.datasets import get_dataset\n",
"\n",
"from gluonts.dataset.pandas import PandasDataset\n",
"import pandas as pd\n",
"\n",
"from lag_llama.gluon.estimator import LagLlamaEstimator"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "f098efb9-490c-46b7-9ea3-bea1f2871fa5",
"metadata": {},
"outputs": [],
"source": [
"def get_lag_llama_predictions(dataset, prediction_length, num_samples=100):\n",
" ckpt = torch.load(\"lag-llama.ckpt\", map_location=torch.device('cuda:0')) # Uses GPU since in this Colab we use a GPU.\n",
" estimator_args = ckpt[\"hyper_parameters\"][\"model_kwargs\"]\n",
"\n",
" estimator = LagLlamaEstimator(\n",
" ckpt_path=\"lag-llama.ckpt\",\n",
" prediction_length=prediction_length,\n",
" context_length=32, # Should not be changed; this is what the released Lag-Llama model was trained with\n",
"\n",
" # estimator args\n",
" input_size=estimator_args[\"input_size\"],\n",
" n_layer=estimator_args[\"n_layer\"],\n",
" n_embd_per_head=estimator_args[\"n_embd_per_head\"],\n",
" n_head=estimator_args[\"n_head\"],\n",
" scaling=estimator_args[\"scaling\"],\n",
" time_feat=estimator_args[\"time_feat\"],\n",
"\n",
" batch_size=1,\n",
" num_parallel_samples=100\n",
" )\n",
"\n",
" lightning_module = estimator.create_lightning_module()\n",
" transformation = estimator.create_transformation()\n",
" predictor = estimator.create_predictor(transformation, lightning_module)\n",
"\n",
" forecast_it, ts_it = make_evaluation_predictions(\n",
" dataset=dataset,\n",
" predictor=predictor,\n",
" num_samples=num_samples\n",
" )\n",
" forecasts = list(forecast_it)\n",
" tss = list(ts_it)\n",
"\n",
" return forecasts, tss"
]
},
{
"cell_type": "raw",
"id": "e7e6dd60-7c0c-483f-86d4-b2ba7c4104d3",
"metadata": {},
"source": [
"import pandas as pd\n",
"from gluonts.dataset.pandas import PandasDataset\n",
"\n",
"url = (\n",
" \"https://gist.githubusercontent.com/rsnirwan/a8b424085c9f44ef2598da74ce43e7a3/raw/b6fdef21fe1f654787fa0493846c546b7f9c4df2/ts_long.csv\"\n",
")\n",
"df = pd.read_csv(url, index_col=0, parse_dates=True)\n",
"# Set numerical columns as float32\n",
"for col in df.columns:\n",
" # Check if column is not of string type\n",
" if df[col].dtype != 'object' and pd.api.types.is_string_dtype(df[col]) == False:\n",
" df[col] = df[col].astype('float32')\n",
"\n",
"# Create the Pandas\n",
"dataset = PandasDataset.from_long_dataframe(df, target=\"target\", item_id=\"item_id\")\n",
"\n",
"backtest_dataset = dataset\n",
"prediction_length = 24 # Define your prediction length. We use 24 here since the data is of hourly frequency\n",
"num_samples = 100 # number of samples sampled from the probability distribution for each timestep\n",
"forecasts, tss = get_lag_llama_predictions(backtest_dataset, prediction_length, num_samples)\n",
"len(forecasts)\n",
"forecasts[0].samples.shape\n",
"plt.figure(figsize=(20, 15))\n",
"date_formater = mdates.DateFormatter('%b, %d')\n",
"plt.rcParams.update({'font.size': 15})\n",
"\n",
"# Iterate through the first 9 series, and plot the predicted samples\n",
"for idx, (forecast, ts) in islice(enumerate(zip(forecasts, tss)), 9):\n",
" ax = plt.subplot(3, 3, idx+1)\n",
"\n",
" plt.plot(ts[-4 * prediction_length:].to_timestamp(), label=\"target\", )\n",
" forecast.plot( color='g')\n",
" plt.xticks(rotation=60)\n",
" ax.xaxis.set_major_formatter(date_formater)\n",
" ax.set_title(forecast.item_id)\n",
"\n",
"plt.gcf().tight_layout()\n",
"plt.legend()\n",
"plt.show()"
]
},
{
"cell_type": "raw",
"id": "74dc9a03-435e-40a5-bbda-4ddac9f6cfb9",
"metadata": {},
"source": [
"# Set numerical columns as float32\n",
"for col in df.columns:\n",
" # Check if column is not of string type\n",
" if df[col].dtype != 'object' and pd.api.types.is_string_dtype(df[col]) == False:\n",
" df[col] = df[col].astype('float32')\n",
"\n",
"# Create the Pandas\n",
"dataset = PandasDataset.from_long_dataframe(df, target=\"target\", item_id=\"item_id\")\n",
"\n",
"backtest_dataset = dataset\n",
"prediction_length = 24 # Define your prediction length. We use 24 here since the data is of hourly frequency\n",
"num_samples = 100 # number of samples sampled from the probability distribution for each timestep\n",
"forecasts, tss = get_lag_llama_predictions(backtest_dataset, prediction_length, num_samples)\n",
"len(forecasts)\n",
"forecasts[0].samples.shape\n",
"plt.figure(figsize=(20, 15))\n",
"date_formater = mdates.DateFormatter('%b, %d')\n",
"plt.rcParams.update({'font.size': 15})\n",
"\n",
"# Iterate through the first 9 series, and plot the predicted samples\n",
"for idx, (forecast, ts) in islice(enumerate(zip(forecasts, tss)), 9):\n",
" ax = plt.subplot(3, 3, idx+1)\n",
"\n",
" plt.plot(ts[-4 * prediction_length:].to_timestamp(), label=\"target\", )\n",
" forecast.plot( color='g')\n",
" plt.xticks(rotation=60)\n",
" ax.xaxis.set_major_formatter(date_formater)\n",
" ax.set_title(forecast.item_id)\n",
"\n",
"plt.gcf().tight_layout()\n",
"plt.legend()\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b8be08b6-0cfd-45b5-ac23-142e9f388049",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

12
agent-pyter/notes.md Normal file
View File

@ -0,0 +1,12 @@
https://github.com/ccxt/ccxt/tree/master/examples/py/
playwright._impl._errors.TargetClosedError: Target page, context or browser has been closed
Browser logs:
<launching> /config/.cache/ms-playwright/chromium-1105/chrome-linux/chrome --disable-field-trial-config --disable-background-networking --enable-features=NetworkService,NetworkServiceInProcess --disable-background-timer-throttling --disable-backgrounding-occluded-windows --disable-back-forward-cache --disable-breakpad --disable-client-side-phishing-detection --disable-component-extensions-with-background-pages --disable-component-update --no-default-browser-check --disable-default-apps --disable-dev-shm-usage --disable-extensions --disable-features=ImprovedCookieControls,LazyFrameLoading,GlobalMediaControls,DestroyProfileOnBrowserClose,MediaRouter,DialMediaRouteProvider,AcceptCHFrame,AutoExpandDetailsElement,CertificateTransparencyComponentUpdater,AvoidUnnecessaryBeforeUnloadCheckSync,Translate,HttpsUpgrades,PaintHolding --allow-pre-commit-input --disable-hang-monitor --disable-ipc-flooding-protection --disable-popup-blocking --disable-prompt-on-repost --disable-renderer-backgrounding --force-color-profile=srgb --metrics-recording-only --no-first-run --enable-automation --password-store=basic --use-mock-keychain --no-service-autorun --export-tagged-pdf --disable-search-engine-choice-screen --headless --hide-scrollbars --mute-audio --blink-settings=primaryHoverType=2,availableHoverTypes=2,primaryPointerType=4,availablePointerTypes=4 --no-sandbox --user-data-dir=/tmp/playwright_chromiumdev_profile-kMyQDr --remote-debugging-pipe --no-startup-window
<launched> pid=1019347
[pid=1019347][err] /config/.cache/ms-playwright/chromium-1105/chrome-linux/chrome: error while loading shared libraries: libnss3.so: cannot open shared object file: No such file or directory
[pid=1019347] <process did exit: exitCode=127, signal=null>
[pid=1019347] starting temporary directories cleanup

1
agent-pyter/prompts.txt Normal file
View File

@ -0,0 +1 @@
create a Python app that will monitor for new pairs on https://dextools.io and trigger an event immediately when there is a new token

15
config.json Normal file
View File

@ -0,0 +1,15 @@
{
"tabAutocompleteModel": {
"title": "Tab Autocomplete Model",
"provider": "ollama",
"model": "stable-code:code",
"apiBase": "https://ollama.d-popov.com"
}
}
// original: "tabAutocompleteModel": {
// "title": "Starcoder 3b",
// "provider": "ollama",
// "model": "starcoder-3b"
// },

25
crypto/sol/app.py Normal file
View File

@ -0,0 +1,25 @@
from flask import Flask, render_template, request, jsonify
from solana.rpc.api import Client

# Flask application serving the token-swapper UI and its JSON endpoints.
app = Flask(__name__)
# RPC client for Solana mainnet-beta; currently unused by the routes below.
solana_client = Client("https://api.mainnet-beta.solana.com")
@app.route('/')
def index():
    """Serve the single-page swapper UI."""
    return render_template('index.html')
@app.route('/tokens', methods=['GET'])
def get_tokens():
    """Return the list of tradable tokens (placeholder data for now)."""
    supported_tokens = ['SOL', 'USDC']  # TODO: fetch real token data
    return jsonify(supported_tokens)
@app.route('/swap', methods=['POST'])
def swap_tokens():
    """Accept a JSON swap request and acknowledge it (swap logic TODO).

    Expects a JSON body with ``token_name`` and ``amount``; returns 400 on
    malformed input instead of the previous unhandled KeyError (HTTP 500).
    """
    data = request.get_json(silent=True) or {}
    token_name = data.get('token_name')
    amount = data.get('amount')
    if token_name is None or amount is None:
        # Reject malformed requests explicitly rather than crashing.
        return jsonify({'status': 'error',
                        'message': 'token_name and amount are required'}), 400
    # TODO: perform the actual token swap via solana_client.
    return jsonify({'status': 'success', 'message': f'Swapped {amount} of {token_name}'})


if __name__ == '__main__':
    app.run(debug=True)

4
crypto/sol/r.txt Normal file
View File

@ -0,0 +1,4 @@
flask
solana
idna
httpx

28
crypto/sol/static/app.js Normal file
View File

@ -0,0 +1,28 @@
// Connect to the Phantom wallet when the button is clicked.
// Fix: the original read `const { solana } is window;`, which is a syntax
// error and prevented the whole script from parsing; it must be `= window`.
document.getElementById('connectWallet').addEventListener('click', async () => {
  try {
    const { solana } = window;
    if (solana && solana.isPhantom) {
      // onlyIfTrusted succeeds only if the user previously approved this site.
      const response = await solana.connect({ onlyIfTrusted: true });
      console.log('Connected with Public Key:', response.publicKey.toString());
    } else {
      alert('Phantom wallet not found. Please install it.');
    }
  } catch (error) {
    console.error(error);
    alert('Connection to Phantom Wallet failed');
  }
});
// POST the swap form to the backend and show the server's message.
// Adds HTTP status checking and a .catch so failures are surfaced to the
// user instead of being silently dropped (the original chain had neither).
document.getElementById('swapToken').addEventListener('click', () => {
  const tokenName = document.getElementById('tokenName').value;
  const amount = document.getElementById('amount').value;
  fetch('/swap', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({ token_name: tokenName, amount: amount })
  })
    .then((response) => {
      if (!response.ok) {
        throw new Error(`Swap request failed with status ${response.status}`);
      }
      return response.json();
    })
    .then((data) => alert(data.message))
    .catch((err) => {
      console.error(err);
      alert('Swap request failed');
    });
});

View File

@ -0,0 +1,21 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Token Swapper</title>
</head>
<body>
    <h1>Token Swapper</h1>
    <!-- Wallet connection controls -->
    <div>
        <button id="connectWallet">Connect Phantom Wallet</button>
    </div>
    <!-- Swap form: handlers live in app.js -->
    <div>
        <input type="text" id="tokenName" placeholder="Enter Token Name">
        <input type="number" id="amount" placeholder="Enter Amount">
        <button id="swapToken">Swap Token</button>
    </div>
    <script src="https://cdn.jsdelivr.net/npm/@solana/web3.js"></script>
    <!-- NOTE(review): when served by Flask, static assets normally resolve
         under /static/ — confirm "app.js" is reachable at this relative path
         (e.g. via url_for('static', filename='app.js')). -->
    <script src="app.js"></script>
</body>
</html>

View File

@ -9,7 +9,7 @@ services:
dockerfile: ./Dockerfile dockerfile: ./Dockerfile
environment: environment:
NODE_ENV: production NODE_ENV: production
TTS_BACKEND_URL: http://192.168.0.10:9009/asr # TTS_BACKEND_URL: http://192.168.0.10:9009/asr
WS_URL: ws://192.168.0.10:28081 WS_URL: ws://192.168.0.10:28081
SERVER_PORT_WS: 8081 SERVER_PORT_WS: 8081
SERVER_PORT_HTTP: 8080 SERVER_PORT_HTTP: 8080

View File

@ -11,6 +11,8 @@ class Neo4jConnection:
# Create the schema # Create the schema
self.create_schema() self.create_schema()
self.test_retrieval()
# Close the connection # Close the connection
self.close() self.close()
@ -21,11 +23,13 @@ class Neo4jConnection:
with self.driver.session() as session: with self.driver.session() as session:
session.write_transaction(self._create_constraints_and_indexes) session.write_transaction(self._create_constraints_and_indexes)
def test_retrieval(tx):
#run MATCH (n) RETURN n LIMIT 25
result = tx.run("MATCH (n) RETURN n LIMIT 25;")
@staticmethod @staticmethod
def _create_constraints_and_indexes(tx): def _create_constraints_and_indexes(tx):
# Constraints and indexes for Person
tx.run("CREATE CONSTRAINT ON (p:Person) ASSERT p.person_id IS UNIQUE;")
# Constraints and indexes for Memory # Constraints and indexes for Memory
tx.run("CREATE CONSTRAINT ON (m:Memory) ASSERT m.memory_id IS UNIQUE;") tx.run("CREATE CONSTRAINT ON (m:Memory) ASSERT m.memory_id IS UNIQUE;")
tx.run("CREATE INDEX ON :Memory(content);") tx.run("CREATE INDEX ON :Memory(content);")

View File

@ -1,10 +1,3 @@
class Person:
def __init__(self, person_id, name, age):
self.person_id = person_id
self.name = name
self.age = age
class Memory: class Memory:
def __init__(self, memory_id, content, timestamp, importance, relevance, associated_tags): def __init__(self, memory_id, content, timestamp, importance, relevance, associated_tags):
self.memory_id = memory_id self.memory_id = memory_id

270
package-lock.json generated
View File

@ -8,15 +8,60 @@
"name": "kevin-ai", "name": "kevin-ai",
"version": "1.0.0", "version": "1.0.0",
"dependencies": { "dependencies": {
"axios": "^1.7.2",
"body-parser": "^1.20.2", "body-parser": "^1.20.2",
"dotenv": "^16.0.3", "dotenv": "^16.4.5",
"express": "^4.18.2", "express": "^4.18.2",
"git": "^0.1.5", "git": "^0.1.5",
"groq-sdk": "^0.4.0",
"node-persist": "^3.1.3", "node-persist": "^3.1.3",
"ollama": "^0.5.1",
"openai": "^4.50.0",
"request": "^2.88.2", "request": "^2.88.2",
"ws": "^8.12.1" "ws": "^8.12.1"
} }
}, },
"node_modules/@types/node": {
"version": "18.19.34",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.34.tgz",
"integrity": "sha512-eXF4pfBNV5DAMKGbI02NnDtWrQ40hAN558/2vvS4gMpMIxaf6JmD7YjnZbq0Q9TDSSkKBamime8ewRoomHdt4g==",
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/@types/node-fetch": {
"version": "2.6.11",
"resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.11.tgz",
"integrity": "sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==",
"dependencies": {
"@types/node": "*",
"form-data": "^4.0.0"
}
},
"node_modules/@types/node-fetch/node_modules/form-data": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
"integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
"dependencies": {
"asynckit": "^0.4.0",
"combined-stream": "^1.0.8",
"mime-types": "^2.1.12"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/abort-controller": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
"integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
"dependencies": {
"event-target-shim": "^5.0.0"
},
"engines": {
"node": ">=6.5"
}
},
"node_modules/accepts": { "node_modules/accepts": {
"version": "1.3.8", "version": "1.3.8",
"resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
@ -29,6 +74,17 @@
"node": ">= 0.6" "node": ">= 0.6"
} }
}, },
"node_modules/agentkeepalive": {
"version": "4.5.0",
"resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.5.0.tgz",
"integrity": "sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==",
"dependencies": {
"humanize-ms": "^1.2.1"
},
"engines": {
"node": ">= 8.0.0"
}
},
"node_modules/ajv": { "node_modules/ajv": {
"version": "6.12.6", "version": "6.12.6",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
@ -83,6 +139,29 @@
"resolved": "https://registry.npmjs.org/aws4/-/aws4-1.12.0.tgz", "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.12.0.tgz",
"integrity": "sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg==" "integrity": "sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg=="
}, },
"node_modules/axios": {
"version": "1.7.2",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.7.2.tgz",
"integrity": "sha512-2A8QhOMrbomlDuiLeK9XibIBzuHeRcqqNOHp0Cyp5EoJ1IFDh+XZH3A6BkXtv0K4gFGCI0Y4BM7B1wOEi0Rmgw==",
"dependencies": {
"follow-redirects": "^1.15.6",
"form-data": "^4.0.0",
"proxy-from-env": "^1.1.0"
}
},
"node_modules/axios/node_modules/form-data": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
"integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
"dependencies": {
"asynckit": "^0.4.0",
"combined-stream": "^1.0.8",
"mime-types": "^2.1.12"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/bcrypt-pbkdf": { "node_modules/bcrypt-pbkdf": {
"version": "1.0.2", "version": "1.0.2",
"resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz",
@ -246,11 +325,14 @@
} }
}, },
"node_modules/dotenv": { "node_modules/dotenv": {
"version": "16.0.3", "version": "16.4.5",
"resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.0.3.tgz", "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.5.tgz",
"integrity": "sha512-7GO6HghkA5fYG9TYnNxi14/7K9f5occMlp3zXAuSxn7CKCxt9xbNWG7yF8hTCSUchlfWSe3uLmlPfigevRItzQ==", "integrity": "sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==",
"engines": { "engines": {
"node": ">=12" "node": ">=12"
},
"funding": {
"url": "https://dotenvx.com"
} }
}, },
"node_modules/ecc-jsbn": { "node_modules/ecc-jsbn": {
@ -288,6 +370,14 @@
"node": ">= 0.6" "node": ">= 0.6"
} }
}, },
"node_modules/event-target-shim": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz",
"integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
"engines": {
"node": ">=6"
}
},
"node_modules/express": { "node_modules/express": {
"version": "4.18.2", "version": "4.18.2",
"resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz",
@ -406,6 +496,25 @@
"node": ">= 0.8" "node": ">= 0.8"
} }
}, },
"node_modules/follow-redirects": {
"version": "1.15.6",
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz",
"integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==",
"funding": [
{
"type": "individual",
"url": "https://github.com/sponsors/RubenVerborgh"
}
],
"engines": {
"node": ">=4.0"
},
"peerDependenciesMeta": {
"debug": {
"optional": true
}
}
},
"node_modules/forever-agent": { "node_modules/forever-agent": {
"version": "0.6.1", "version": "0.6.1",
"resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz",
@ -427,6 +536,31 @@
"node": ">= 0.12" "node": ">= 0.12"
} }
}, },
"node_modules/form-data-encoder": {
"version": "1.7.2",
"resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz",
"integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A=="
},
"node_modules/formdata-node": {
"version": "4.4.1",
"resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz",
"integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==",
"dependencies": {
"node-domexception": "1.0.0",
"web-streams-polyfill": "4.0.0-beta.3"
},
"engines": {
"node": ">= 12.20"
}
},
"node_modules/formdata-node/node_modules/web-streams-polyfill": {
"version": "4.0.0-beta.3",
"resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
"integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
"engines": {
"node": ">= 14"
}
},
"node_modules/forwarded": { "node_modules/forwarded": {
"version": "0.2.0", "version": "0.2.0",
"resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
@ -485,6 +619,21 @@
"resolved": "https://registry.npmjs.org/mime/-/mime-1.2.9.tgz", "resolved": "https://registry.npmjs.org/mime/-/mime-1.2.9.tgz",
"integrity": "sha512-WiLgbHTIq5AYUvU/Luli4mZ1bUcHpGNHyCsbl+KPMg4zt+XUDpQehWjuBjdLaEvDTinvKj/FgfQt3fPoT7j08g==" "integrity": "sha512-WiLgbHTIq5AYUvU/Luli4mZ1bUcHpGNHyCsbl+KPMg4zt+XUDpQehWjuBjdLaEvDTinvKj/FgfQt3fPoT7j08g=="
}, },
"node_modules/groq-sdk": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/groq-sdk/-/groq-sdk-0.4.0.tgz",
"integrity": "sha512-h79q9sv4hcOBESR05N5eqHlGhAug9H9lr3EIiB+37ysWWekeG+KYQDK2lIIHYCm6O9LzgZzO/VdLdPP298+T0w==",
"dependencies": {
"@types/node": "^18.11.18",
"@types/node-fetch": "^2.6.4",
"abort-controller": "^3.0.0",
"agentkeepalive": "^4.2.1",
"form-data-encoder": "1.7.2",
"formdata-node": "^4.3.2",
"node-fetch": "^2.6.7",
"web-streams-polyfill": "^3.2.1"
}
},
"node_modules/har-schema": { "node_modules/har-schema": {
"version": "2.0.0", "version": "2.0.0",
"resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz",
@ -557,6 +706,14 @@
"npm": ">=1.3.7" "npm": ">=1.3.7"
} }
}, },
"node_modules/humanize-ms": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz",
"integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==",
"dependencies": {
"ms": "^2.0.0"
}
},
"node_modules/iconv-lite": { "node_modules/iconv-lite": {
"version": "0.4.24", "version": "0.4.24",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
@ -689,6 +846,43 @@
"node": ">= 0.6" "node": ">= 0.6"
} }
}, },
"node_modules/node-domexception": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz",
"integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/jimmywarting"
},
{
"type": "github",
"url": "https://paypal.me/jimmywarting"
}
],
"engines": {
"node": ">=10.5.0"
}
},
"node_modules/node-fetch": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
"integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
"dependencies": {
"whatwg-url": "^5.0.0"
},
"engines": {
"node": "4.x || >=6.0.0"
},
"peerDependencies": {
"encoding": "^0.1.0"
},
"peerDependenciesMeta": {
"encoding": {
"optional": true
}
}
},
"node_modules/node-gyp-build": { "node_modules/node-gyp-build": {
"version": "4.6.0", "version": "4.6.0",
"resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.6.0.tgz", "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.6.0.tgz",
@ -725,6 +919,14 @@
"url": "https://github.com/sponsors/ljharb" "url": "https://github.com/sponsors/ljharb"
} }
}, },
"node_modules/ollama": {
"version": "0.5.1",
"resolved": "https://registry.npmjs.org/ollama/-/ollama-0.5.1.tgz",
"integrity": "sha512-mAiCHxdvu63E8EFopz0y82QG7rGfYmKAWgmjG2C7soiRuz/Sj3r/ebvCOp+jasiCubqUPE0ZThKT5LR6wrrPtA==",
"dependencies": {
"whatwg-fetch": "^3.6.20"
}
},
"node_modules/on-finished": { "node_modules/on-finished": {
"version": "2.4.1", "version": "2.4.1",
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
@ -736,6 +938,24 @@
"node": ">= 0.8" "node": ">= 0.8"
} }
}, },
"node_modules/openai": {
"version": "4.50.0",
"resolved": "https://registry.npmjs.org/openai/-/openai-4.50.0.tgz",
"integrity": "sha512-2ADkNIU6Q589oYHr5pn9k7SbUcrBTK9X0rIXrYqwMVSoqOj1yK9/1OO0ExaWsqOOpD7o58UmRjeKlx9gKAcuKQ==",
"dependencies": {
"@types/node": "^18.11.18",
"@types/node-fetch": "^2.6.4",
"abort-controller": "^3.0.0",
"agentkeepalive": "^4.2.1",
"form-data-encoder": "1.7.2",
"formdata-node": "^4.3.2",
"node-fetch": "^2.6.7",
"web-streams-polyfill": "^3.2.1"
},
"bin": {
"openai": "bin/cli"
}
},
"node_modules/parseurl": { "node_modules/parseurl": {
"version": "1.3.3", "version": "1.3.3",
"resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
@ -766,6 +986,11 @@
"node": ">= 0.10" "node": ">= 0.10"
} }
}, },
"node_modules/proxy-from-env": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
"integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg=="
},
"node_modules/psl": { "node_modules/psl": {
"version": "1.9.0", "version": "1.9.0",
"resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz",
@ -990,6 +1215,11 @@
"node": ">=0.8" "node": ">=0.8"
} }
}, },
"node_modules/tr46": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="
},
"node_modules/tunnel-agent": { "node_modules/tunnel-agent": {
"version": "0.6.0", "version": "0.6.0",
"resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz",
@ -1018,6 +1248,11 @@
"node": ">= 0.6" "node": ">= 0.6"
} }
}, },
"node_modules/undici-types": {
"version": "5.26.5",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="
},
"node_modules/unpipe": { "node_modules/unpipe": {
"version": "1.0.0", "version": "1.0.0",
"resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
@ -1086,6 +1321,33 @@
"extsprintf": "^1.2.0" "extsprintf": "^1.2.0"
} }
}, },
"node_modules/web-streams-polyfill": {
"version": "3.3.3",
"resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz",
"integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==",
"engines": {
"node": ">= 8"
}
},
"node_modules/webidl-conversions": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
"integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="
},
"node_modules/whatwg-fetch": {
"version": "3.6.20",
"resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz",
"integrity": "sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg=="
},
"node_modules/whatwg-url": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
"integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
"dependencies": {
"tr46": "~0.0.3",
"webidl-conversions": "^3.0.0"
}
},
"node_modules/ws": { "node_modules/ws": {
"version": "8.12.1", "version": "8.12.1",
"resolved": "https://registry.npmjs.org/ws/-/ws-8.12.1.tgz", "resolved": "https://registry.npmjs.org/ws/-/ws-8.12.1.tgz",

View File

@ -8,11 +8,15 @@
"start:tele": "python agent-py-bot/agent.py" "start:tele": "python agent-py-bot/agent.py"
}, },
"dependencies": { "dependencies": {
"axios": "^1.7.2",
"body-parser": "^1.20.2", "body-parser": "^1.20.2",
"dotenv": "^16.0.3", "dotenv": "^16.4.5",
"express": "^4.18.2", "express": "^4.18.2",
"git": "^0.1.5", "git": "^0.1.5",
"groq-sdk": "^0.4.0",
"node-persist": "^3.1.3", "node-persist": "^3.1.3",
"ollama": "^0.5.1",
"openai": "^4.50.0",
"request": "^2.88.2", "request": "^2.88.2",
"ws": "^8.12.1" "ws": "^8.12.1"
} }

16
translate-nllb/nllb.py Normal file
View File

@ -0,0 +1,16 @@
# Minimal NLLB-200 translation demo (Tamil -> English).
#
# Setup (run once):
#   conda activate transformers
#   conda install pip
#   pip install transformers
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

# NLLB-200 distilled 600M checkpoint: many-to-many multilingual translation.
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")

# src_lang/tgt_lang use NLLB's FLORES-200 codes; max_length caps generated tokens.
translator = pipeline('translation', model=model, tokenizer=tokenizer,
                      src_lang="tam_Taml", tgt_lang='eng_Latn', max_length=400)

# BUGFIX: print the result (previously the translation was computed and
# silently discarded).
print(translator("திஸ் ஐஸ் எ வெரி குட் மாடல் "))

# --------------------------------------------
# References (BUGFIX: these URLs were bare statements, which made the whole
# script a SyntaxError — they must be comments):
# https://colab.research.google.com/drive/1o9r0QbEQZ1tn4eBVv-wajAtgwi6Lp-ZJ?usp=sharing#scrollTo=q49lJjXd4Jct
# https://colab.research.google.com/drive/1QEF0U9AaBeQdjLw7SyRty2ENAD4Qiiil#scrollTo=TFm232e77QRT

22
web/.env Normal file
View File

@ -0,0 +1,22 @@
TTS_BACKEND_URL=https://api.tts.d-popov.com/
WS_URL=ws://localhost:8081
SERVER_PORT_WS=8081
SERVER_PORT_HTTP=3005
# aider
AIDER_MODEL=
AIDER_4=false
#AIDER_35TURBO=
# OPENAI_API_KEY=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
# OPENAI_API_BASE=https://api.deepseek.com/v1
# OPENAI_API_KEY=sk-99df7736351f4536bd72cd64a416318a
# AIDER_MODEL=deepseek-coder #deepseek-coder, deepseek-chat
# SECURITY: this key is committed to version control — rotate it and load it
# from a secret store / untracked local env file instead.
GROQ_API_KEY=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
# Usage notes (shell commands, not env vars — kept commented so dotenv parsers
# do not choke on them):
#   aider --model groq/llama3-70b-8192
# List models available from Groq:
#   aider --models groq/

201
web/audio.js Normal file
View File

@ -0,0 +1,201 @@
// ---- Shared module state for microphone capture & voice-activity detection ----
let selectedDeviceId = "default"; // NOTE(review): never read — candidate for removal
export let serverTime;            // Date.now() at last send; used for latency measurement
export let recordButton;          // push-to-talk button element (set via setRecordButton)
export let socket;                // WebSocket to the transcription server (set via setSocket)
let audioRecorder;                // MediaRecorder over the downmixed mic stream
let audioStream;                  // raw MediaStream returned by getUserMedia
let recording = false;            // true while actively listening
let connectionStatus;             // NOTE(review): declared but never assigned in this module
let statusRecording;              // NOTE(review): declared but never assigned in this module
let audioContext;                 // AudioContext backing the analyser
let volumeChecker;                // setInterval id of the volume poll
let lastVolumes = new Array(5);   // NOTE(review): unused
let averageVolume;                // most recent averaged FFT magnitude
let silenceCount = 0;             // consecutive quiet poll ticks
let isSpeaking = false;           // true once speech has been sustained
let soundDetected = false;        // set when an utterance is ready to flush
let speakingCount = 0;            // consecutive loud poll ticks
let analyser = null;              // AnalyserNode created by InitAudioAnalyser
let SILENCE_DELAY_MS = 50;        // volume poll period (ms)
let preDetect_IncludedAudio = 400; //ms of audio kept before speech onset
let soundCount_Threshold = 10;    // loud ticks before reporting "Listening..."
let silenceCount_Threshold = 10;  // quiet ticks before flushing the chunk
const volumeHistory = [];         // rolling window for the adaptive threshold
// Replace the module-level WebSocket reference used by the send helpers.
export function setSocket(ws) {
    socket = ws;
}
// Register the push-to-talk button element and hook up its click handler.
export function setRecordButton(button) {
    recordButton = button;
    recordButton.addEventListener("click", toggleListening);
}
// Build an FFT analyser over the microphone stream; the periodic volume
// checker reads it to detect speech vs. silence.
export function InitAudioAnalyser(stream) {
    audioContext = new AudioContext();
    analyser = audioContext.createAnalyser();
    analyser.fftSize = 2048;              // 1024 frequency bins
    analyser.smoothingTimeConstant = 0.8; // smooth out spikes between frames
    audioContext.createMediaStreamSource(stream).connect(analyser);
}
// Acquire the microphone, downmix to mono, and start streaming chunks to the
// server. Flips the push-to-talk button into its "recording" state
// immediately; if microphone access fails the state is reverted in the catch.
export function startListening() {
    recording = true;
    navigator.mediaDevices.getUserMedia({ audio: { sampleRate: 16000 } })
        .then((stream) => {
            audioStream = stream;
            const audioContext = new AudioContext();
            const sourceNode = audioContext.createMediaStreamSource(audioStream);
            const audioSampleRate = sourceNode.context.sampleRate;
            info.innerHTML = "Sample rate: " + audioSampleRate + " Hz";

            // Audio captured just before speech onset, so the first word of an
            // utterance is not clipped (see preDetect_IncludedAudio).
            let preBuffer = [];

            // Downmix stereo input to a single channel before recording.
            const channelSplitter = audioContext.createChannelSplitter(2);
            const channelMerger = audioContext.createChannelMerger(1);
            sourceNode.connect(channelSplitter);
            channelSplitter.connect(channelMerger, 0, 0);
            const outputNode = channelMerger;
            const mediaStreamDestination = audioContext.createMediaStreamDestination();
            outputNode.connect(mediaStreamDestination);
            const singleChannelStream = mediaStreamDestination.stream;

            audioRecorder = new MediaRecorder(singleChannelStream);
            audioRecorder.start();
            audioRecorder.addEventListener("dataavailable", (event) => {
                if (!soundDetected && autosend.checked) {
                    // Still silent: keep only the latest chunk for pre-roll.
                    preBuffer = [event.data];
                    return;
                }
                if (event.data.size > 0) {
                    if (preBuffer.length > 0) {
                        sendAudioToServerPost(preBuffer);
                        preBuffer = []; // BUGFIX: clear so the pre-roll is not re-sent with later chunks
                    }
                    sendAudioToServer(event.data);
                    soundDetected = false;
                }
            });
            InitAudioAnalyser(stream);
        })
        .catch((err) => {
            // BUGFIX: a rejected getUserMedia (permission denied, no device)
            // previously left the UI stuck in "Stop Talking" with
            // recording === true and an unhandled promise rejection.
            console.error("Microphone access failed:", err);
            recording = false;
            recordButton.innerHTML = "Push to Talk";
            recordButton.classList.toggle('bg-red-500');
            recordButton.classList.toggle('bg-blue-500');
            recordButton.classList.toggle('hover:bg-blue-700');
        });

    recordButton.innerHTML = "Stop Talking";
    recordButton.classList.toggle('bg-red-500');
    recordButton.classList.toggle('bg-blue-500');
    recordButton.classList.toggle('hover:bg-blue-700');
}
// Stop capturing: halt the recorder, restore the button UI, cancel the
// volume poll, and release all microphone tracks.
export function stopListening() {
    recording = false;
    // BUGFIX: guard — MediaRecorder.stop() throws an InvalidStateError if the
    // recorder was never created or is already inactive.
    if (audioRecorder && audioRecorder.state !== "inactive") {
        audioRecorder.stop();
    }
    recordButton.innerHTML = "Push to Talk";
    recordButton.classList.toggle('bg-blue-500');
    recordButton.classList.toggle('bg-red-500');
    recordButton.classList.toggle('hover:bg-blue-700');
    clearInterval(volumeChecker);
    if (audioStream) {
        audioStream.getTracks().forEach(track => track.stop());
        audioStream = null;
    }
}
// Upload an array of recorded chunks to the HTTP /upload endpoint as a single
// Ogg/Opus blob (used for the pre-speech buffer, which bypasses the socket).
export function sendAudioToServerPost(data) {
    // BUGFIX: the Blob was built but the raw chunk array was appended instead,
    // so the server received the array's string form rather than audio bytes.
    const blob = new Blob(data, { type: "audio/ogg; codecs=opus" });
    const formData = new FormData();
    formData.append('file', blob, 'audio.ogg');
    fetch('/upload', {
        method: 'POST',
        body: formData
    }).catch((err) => {
        // BUGFIX: previously a failed upload was an unhandled rejection.
        console.error("Audio upload failed:", err);
    });
}
// Send a base64-encoded audio payload over the WebSocket as a JSON envelope.
// Silently does nothing if the socket is not open.
export function sendAudioToServerJson(data) {
    if (socket && socket.readyState === WebSocket.OPEN) {
        // BUGFIX: previously referenced the undefined `base64AudioData` and
        // Node's Buffer (unavailable in the browser), throwing a
        // ReferenceError on every call; send the caller-supplied payload.
        socket.send(JSON.stringify({ type: 'audio', audiobase64: data }));
        serverTime = Date.now();
        if (!autosend.checked) {
            transcription.placeholder = "Processing audio...";
        }
    }
}
// Stream a raw audio chunk over the open WebSocket and record the send time
// so round-trip latency can be measured when the transcription returns.
// No-op while the socket is not open.
export function sendAudioToServer(data) {
    const ready = socket && socket.readyState === WebSocket.OPEN;
    if (!ready) {
        return;
    }
    socket.send(data);
    serverTime = Date.now();
    if (!autosend.checked) {
        transcription.placeholder = "Processing audio...";
    }
}
// Flip between recording and idle. Ignored while the socket is not open,
// since there would be nowhere to stream the audio.
export function toggleListening() {
    if (socket.readyState !== WebSocket.OPEN) {
        return;
    }
    recording ? stopListening() : startListening();
}
// Start the periodic (SILENCE_DELAY_MS) volume poll that drives hands-free
// recording: rising volume marks speech onset, sustained quiet marks end of
// utterance, and in "Continuous" mode each transition restarts the
// MediaRecorder so every utterance is delivered as its own chunk.
export function initializeVolumeChecker() {
    volumeChecker = setInterval(() => {
        if (!audioContext) {
            console.log("No audio context");
            return;
        }
        // Average all FFT bins into one loudness figure for this tick.
        const frequencyData = new Uint8Array(analyser.frequencyBinCount);
        analyser.getByteFrequencyData(frequencyData);
        let totalVolume = 0;
        for (let i = 0; i < frequencyData.length; i++) {
            totalVolume += frequencyData[i];
        }
        averageVolume = totalVolume / frequencyData.length;

        // Adaptive threshold: rolling mean of the last 100 samples plus a
        // +5 margin, so the ambient noise floor is tracked automatically.
        volumeHistory.push(averageVolume);
        if (volumeHistory.length > 100) {
            volumeHistory.shift();
        }
        const threshold = volumeHistory.reduce((acc, curr) => acc + curr) / volumeHistory.length + 5;
        const isSilent = averageVolume < threshold; // NOTE(review): computed but never used
        if (averageVolume > threshold) {
            // First loud tick of an utterance: restart the recorder so the
            // delivered chunk begins at speech onset.
            if (autosend.checked && speakingCount == 0 && audioRecorder) {
                soundDetected = false;
                audioRecorder.stop();
                audioRecorder.start();
            }
            speakingCount++;
            if (speakingCount > soundCount_Threshold) {
                statusRecording.innerHTML = "Listening...";
                statusRecording.style.color = "green";
                isSpeaking = true;
            }
        } else if (averageVolume - 5 < threshold) {
            speakingCount = 0;
            if (isSpeaking) {
                // NOTE(review): silenceCount is never reset to 0, so after the
                // first flush every quiet tick re-triggers this branch — verify.
                silenceCount++;
                if (silenceCount > silenceCount_Threshold) {
                    // Sustained silence after speech: flush the utterance.
                    if (autosend.checked) {
                        soundDetected = true;
                        audioRecorder.stop();
                        audioRecorder.start();
                    }
                    isSpeaking = false;
                    statusRecording.innerHTML = "Silence detected...";
                    statusRecording.style.color = "orange";
                }
            }
        }
    }, SILENCE_DELAY_MS);
}

615
web/chat-client.html Normal file
View File

@ -0,0 +1,615 @@
<!DOCTYPE html>
<html>
<head>
<title>Real-time Voice Chat</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/tailwindcss/2.2.19/tailwind.min.css">
</head>
<body class="bg-gray-100">
<div class="container mx-auto px-4 py-8">
<h1 class="text-2xl font-bold mb-4 text-center">Real-time Voice Chat</h1>
<div class="flex justify-center items-center mb-4">
<!-- Username Input -->
<input type="text" id="username" class="border rounded p-2 mr-4" placeholder="Enter your username">
<div id="join-container" class="hidden">
<button id="btn-join" onclick="logInAndStoreSession()"
class="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded">Join Chat</button>
<select id="language-select">
<option value="auto">Auto</option>
<option value="en">English</option>
<option value="bg">Български</option>
<option value="fr">Français</option>
</select>
</div>
<!-- Clear Session Option -->
<button id="btn-disconnect" onclick="clearSession()"
class="hidden bg-red-500 hover:bg-red-700 text-white font-bold py-2 px-4 rounded">Clear Session</button>
</div>
<!-- Active Users List -->
<div id="active-users-container" class="hidden flex justify-center items-center mb-4">
<div class="w-1/3">
<h2 class="text-xl font-bold mb-2">Active Users</h2>
<select id="users-list" class="w-full bg-white p-4 rounded shadow" multiple size="10">
<!-- Dynamic list of users -->
</select>
<button onclick="startChat()"
class="bg-green-500 hover:bg-green-700 text-white font-bold py-2 px-4 rounded mt-4">Start
Chat</button>
</div>
</div>
<div id="previous-chats-container" class="hidden w-2/3 mx-auto">
<h2 class="text-xl font-bold mb-2">Previous Chats</h2>
<div id="previous-chats" class="bg-white p-4 rounded shadow">
<!-- Previous chats content -->
</div>
</div>
<!-- Chat Room -->
<div id="chat-room-container" class="hidden w-2/3 mx-auto">
<h2 class="text-xl font-bold mb-2">Chat Room</h2>
<div id="chat-room" class="bg-white p-4 rounded shadow mb-4">
<!-- Chat room content -->
<div>
<div id="chat-room-users" class="flex flex-wrap mb-4">
<!-- Participants list -->
</div>
</div>
<div class="mb-4">
<label class="flex items-center space-x-2">
<input type="checkbox" id="autosend" class="mr-2">
<span>Continuous</span>
</label>
</div>
<div class="mb-4">
<button id="record-button" disabled
class="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded mr-4">Push to
Talk</button>
</div>
<div id="status-recording" class="flex justify-center items-center mb-4">
</div>
<div id="transcription" class="border rounded p-4 h-48 overflow-y-scroll mb-4">
<!-- Transcription content -->
</div>
<canvas id="canvas" class="w-full mb-4"></canvas>
<div class="flex justify-between items-center">
<button id="copyButton"
class="bg-gray-200 hover:bg-gray-300 text-gray-700 font-medium py-2 px-4 rounded focus:outline-none"
onclick="copyToClipboard('transcription')">Copy</button>
<button id="clearButton"
class="bg-gray-200 hover:bg-gray-300 text-gray-700 font-medium py-2 px-4 rounded focus:outline-none"
onclick="clearTranscription()">Clear</button>
</div>
</div>
</div>
<!-- Connection Status and Info -->
<div class="flex justify-center items-center mb-4">
<div id="connection-status" class="mr-4"></div>
</div>
<div class="flex justify-center items-center mb-4">
<div id="info"></div>
</div>
<!-- FIXME: this element previously duplicated id="status-recording" (already
     used inside the chat room above). Duplicate ids are invalid HTML and
     getElementById only ever returns the first match, so this placeholder was
     never updated; the duplicate id attribute has been removed. -->
<div class="flex justify-center items-center mb-4"> status</div>
</div>
<script type="module">
// import * as audio from './audio.js';
// ---- Page-level state for the chat client ----
let socket;             // shared WebSocket to the chat server
let sessionId;          // server-issued session id (mirrored in a cookie)
let username;           // current user's display name
let users = [];         // currently-online users (from server "userList" frames)
let selectedUsers = []; // session ids picked when starting a new chat
let chats = [];         // this user's chat rooms (from server "chats" frames)
let recordButton;       // push-to-talk button element
let connectionStatus;   // element showing WS connection state
let statusRecording;    // element showing listening/silence state
let connected = false;  // true while the socket is open
// Persist the "Continuous" checkbox state server-side whenever it changes.
document.getElementById('autosend').addEventListener('change', (event) => {
    const autosend = event.target.checked;
    fetch('/settings', {
        method: 'POST',
        body: JSON.stringify({ autosend, sessionId }),
        headers: { 'Content-Type': 'application/json' },
        credentials: 'same-origin'
    }).catch((err) => {
        // BUGFIX: a failed request previously surfaced as an unhandled
        // promise rejection; the setting is non-critical, so just log it.
        console.error("Failed to save autosend setting:", err);
    });
});
// Open (and keep open) the WebSocket to the chat server.
// Resolves with the socket once connected. On close it retries every 5 s,
// chaining resolve/reject so the original caller's promise still settles.
// The WS endpoint is fetched from the HTTP backend at /wsurl so the page
// works across deployments without hard-coding the address.
function connect() {
    return new Promise((resolve, reject) => {
        connectionStatus.innerHTML = "Connecting to WS...";
        let wsurl = "ws://localhost:8081"; // default; overwritten by /wsurl below
        fetch("/wsurl")
            .then((response) => response.text())
            .then((data) => {
                wsurl = data;
                console.log("Got ws url: '" + wsurl + "'");
            })
            .then(() => {
                socket = new WebSocket(wsurl);
                // audio.setSocket(socket); // Set the socket in the audio module
                socket.onopen = () => {
                    connectionStatus.innerHTML = "Connected to " + wsurl;
                    recordButton.disabled = false;
                    connected = true;
                    // Resume an existing session if its id is in a cookie,
                    // otherwise ask the server to issue a fresh one.
                    const sessionId = getCookie("sessionId");
                    if (sessionId) {
                        socket.send(JSON.stringify({ type: 'reconnect', sessionId }));
                    }
                    else {
                        socket.send(JSON.stringify({ type: 'sessionId' }));
                    }
                    resolve(socket);
                };
                socket.onmessage = onmessage;
                socket.onclose = () => {
                    connectionStatus.innerHTML = "Disconnected";
                    recordButton.disabled = true;
                    connected = false;
                    // Auto-reconnect after 5 s.
                    setTimeout(() => {
                        connect().then(resolve).catch(reject);
                    }, 5000);
                };
            })
            .catch((error) => {
                connectionStatus.innerHTML = "Error getting ws url: " + error;
                reject(error);
            });
    });
};
// Dispatch a server WebSocket frame. All frames are JSON envelopes with a
// `type` discriminator; unknown types are logged and ignored, and malformed
// frames are swallowed with an error log.
function onmessage(event) {
    try {
        let json = JSON.parse(event.data);
        switch (json.type) {
            case "sessionId":
                // Server-issued identity; persisted so page reloads reconnect.
                sessionId = json.sessionId;
                document.cookie = `sessionId=${sessionId}; path=/;`;
                console.log("Got session id: " + sessionId);
                break;
            case "languageDetected":
                statusRecording.innerHTML = "Detected language: " + json.languageDetected;
                break;
            case "text":
            case "transcriptionResult":
                // Append transcribed speech to the visible log.
                transcription.innerHTML += "<br />" + json.text;
                let latency = Date.now() - serverTime; // NOTE(review): computed but unused
                if (autosend.checked) {
                    // const arr = event.data.split(/[(\)]/);
                    // let queue = arr[1];
                    // let text = arr[2].trim();
                    // info.innerHTML = "latency: " + latency + "ms; server queue: " + queue + " requests";
                    //transcription.value += event.data + " ";
                    statusRecording.innerHTML = "Listening...";
                    statusRecording.style.color = "black";
                } else {
                    //transcription.innerHTML = event.data;
                }
                break;
            case 'audio':
                // Base64 MP3 from the server's TTS; decode and play immediately.
                const audioBuffer = Uint8Array.from(atob(json.audio), char => char.charCodeAt(0));
                const audioBlob = new Blob([audioBuffer], { type: 'audio/mp3' });
                const audioUrl = URL.createObjectURL(audioBlob);
                const audio = new Audio(audioUrl);
                audio.play();
                break;
            case "userList":
                users = json.users;
                updateUserList();
                break;
            case "chats":
                chats = json.chats;
                updateChatList();
                break;
            case "chat":
                displayChatParticipants(json.chat.id, json.chat.participants);
                break;
            default:
                console.log("Unknown message type:", json.type);
        }
    } catch (e) {
        console.error("Failed to parse message", e);
    }
}
// Validate the entered username, then join the chat — connecting first if
// the socket is not yet open.
function logInAndStoreSession() {
    username = document.getElementById('username').value;
    if (username.trim() === "") {
        alert("Please enter a username");
        return;
    }
    const language = document.getElementById('language-select').value;
    const socketReady = socket && socket.readyState === WebSocket.OPEN;
    if (socketReady) {
        userJoin(sessionId, username, language);
    } else {
        connect().then(() => userJoin(sessionId, username, language));
    }
}
// Announce the user to the server and persist identity in cookies so a
// reload can restore the session.
function userJoin(sessionId, username, language) {
    socket.send(JSON.stringify({ type: 'join', username, language }));
    for (const [key, value] of [["sessionId", sessionId], ["username", username]]) {
        document.cookie = `${key}=${value}; path=/;`;
    }
    showClearSessionOption();
}
// Expire both identity cookies and reload the page into the logged-out state.
function clearSession() {
    const expiry = "expires=Thu, 01 Jan 1970 00:00:00 UTC; path=/;";
    document.cookie = "sessionId=; " + expiry;
    document.cookie = "username=; " + expiry;
    location.reload();
}
// Toggle the UI between logged-in and logged-out layouts depending on
// whether a session cookie exists; reconnects the socket if needed.
function showClearSessionOption() {
    const sessionId = getCookie("sessionId");
    const show = (id) => document.getElementById(id).classList.remove('hidden');
    const hide = (id) => document.getElementById(id).classList.add('hidden');
    if (!sessionId) {
        // Logged out: expose only the join form.
        hide('btn-disconnect');
        show('join-container');
        return;
    }
    // Logged in: make sure the socket is up and resume the session.
    if (!socket || socket.readyState !== WebSocket.OPEN) {
        connect().then((s) => {
            s.send(JSON.stringify({ type: 'reconnect', sessionId }));
        });
    }
    show('btn-disconnect');
    hide('join-container');
    show('active-users-container');
    show('previous-chats-container');
}
// Return the value of the named cookie, or undefined when it is absent.
function getCookie(name) {
    const padded = `; ${document.cookie}`;
    const pieces = padded.split(`; ${name}=`);
    if (pieces.length !== 2) {
        return undefined;
    }
    return pieces.pop().split(';').shift();
}
// Page entry point: grab the UI elements, restore any saved session, and
// open the WebSocket connection.
window.onload = () => {
    recordButton = document.getElementById("record-button");
    setRecordButton(recordButton);
    connectionStatus = document.getElementById("connection-status");
    statusRecording = document.getElementById("status-recording");
    showClearSessionOption();
    connect().then(() => {
        // audio.initializeVolumeChecker();
    });
};
// Copy the text content of the element with the given id to the clipboard.
// BUGFIX: the original called .select(), which only exists on form fields;
// #transcription is a <div>, so copying always threw a TypeError. Select the
// element's contents via a Range instead, then issue the copy command.
function copyToClipboard(id) {
    const element = document.getElementById(id);
    const range = document.createRange();
    range.selectNodeContents(element);
    const selection = window.getSelection();
    selection.removeAllRanges();
    selection.addRange(range);
    document.execCommand('copy');
    selection.removeAllRanges(); // leave no visible selection behind
}
// Wipe the visible transcription log.
function clearTranscription() {
    const log = document.getElementById('transcription');
    log.innerText = '';
}
// Rebuild the active-users <select> from the current `users` array,
// tagging the current user's own entry with "(me)".
function updateUserList() {
    const usersList = document.getElementById('users-list');
    usersList.innerHTML = '';
    for (const user of users) {
        const option = document.createElement('option');
        option.value = user.sessionId;
        const suffix = user.username === username ? " (me)" : "";
        option.innerText = "[" + user.language + "] " + user.username + suffix;
        usersList.appendChild(option);
    }
}
// Start a chat with the users selected in the list. The current session is
// always appended so self-chat works, then the room UI is revealed.
function startChat() {
    const checked = document.querySelectorAll('#users-list option:checked');
    selectedUsers = Array.from(checked, (option) => option.value);
    if (selectedUsers.length === 0) {
        alert("Please select at least one user to start a chat.");
        return;
    }
    selectedUsers.push(sessionId); // include self so self-chat works
    socket.send(JSON.stringify({ type: 'startChat', users: selectedUsers }));
    document.getElementById('chat-room-container').classList.remove('hidden');
}
// Load this user's previous chats from the HTTP backend and refresh the list.
// BUGFIX: the username is now URL-encoded, a non-2xx response is rejected
// instead of being parsed as JSON, and network failures are logged rather
// than surfacing as unhandled promise rejections.
function fetchPreviousChats(username) {
    fetch(`/chats?username=${encodeURIComponent(username)}`)
        .then(response => {
            if (!response.ok) {
                throw new Error(`GET /chats failed: ${response.status}`);
            }
            return response.json();
        })
        .then(data => {
            chats = data.chats;
            updateChatList();
        })
        .catch(err => {
            console.error("Failed to load previous chats:", err);
        });
}
// Render the previous-chats panel. Each chat becomes a clickable row showing
// its participants with an online/offline marker; clicking a row highlights
// it and opens the room via selectChatRoom.
function updateChatList() {
    const previousChats = document.getElementById('previous-chats');
    previousChats.innerHTML = '';
    chats.forEach(chat => {
        const chatDiv = document.createElement('div');
        chatDiv.classList.add('border', 'rounded', 'p-2', 'mb-2', 'cursor-pointer');
        chatDiv.setAttribute('data-chat-id', chat.id); // Store chat ID in data attribute
        const participants = chat.participants.join(', '); // NOTE(review): unused
        // A participant counts as "online" when present in the current users list.
        const status = chat.participants.map(participant => {
            const user = users.find(u => u.username === participant);
            return user ? `${participant} (online)` : `${participant} (offline)`;
        }).join(', ');
        chatDiv.innerHTML = `${status}`;
        chatDiv.addEventListener('click', () => {
            // Remove highlight from all chat divs
            document.querySelectorAll('#previous-chats > div').forEach(div => {
                div.classList.remove('bg-blue-100');
            });
            // Highlight selected chat div
            chatDiv.classList.add('bg-blue-100');
            selectChatRoom(chat.id);
        });
        previousChats.appendChild(chatDiv);
    });
}
// Open the given chat: clear the participants header, tell the server we
// are entering, and reveal the chat-room panel. Unknown ids are ignored.
function selectChatRoom(chatId) {
    const chat = chats.find(c => c.id === chatId);
    if (!chat) {
        return;
    }
    document.getElementById('chat-room-users').innerHTML = '';
    socket.send(JSON.stringify({ type: 'enterChat', chatId }));
    document.getElementById('chat-room-container').classList.remove('hidden');
    // displayChatParticipants(chatId, chat.participants);
}
// Replace the chat-room header with one pill per participant, colored green
// when that session id is currently online and gray otherwise.
// NOTE(review): `chatId` is unused. Dynamically-built Tailwind classes
// (bg-${...}) are safe here only because the CDN build ships every class;
// a production Tailwind build would purge them — verify before switching.
function displayChatParticipants(chatId, participants) {
    const chatRoomUsers = document.getElementById('chat-room-users');
    let participantsHtml = '<div class="flex flex-wrap mb-4">';
    participants.forEach(participantId => {
        const user = users.find(u => u.sessionId === participantId);
        const status = user ? "online" : "offline";
        const username = user ? user.username : "Unknown User";
        participantsHtml += `<span class="inline-flex items-center px-3 py-0.5 rounded-full text-sm font-medium bg-${status === 'online' ? 'green' : 'gray'}-100 text-${status === 'online' ? 'green' : 'gray'}-800 mr-2 mb-2">
            ${username} (${status})
        </span>`;
    });
    participantsHtml += '</div>';
    chatRoomUsers.innerHTML = participantsHtml;
}
// REGION AUDIO RECORDING
// Module-level state for microphone capture and voice-activity detection.
let selectedDeviceId = "default";      // input device chosen in the device <select>
export let serverTime;                 // timestamp of last send, used for latency display
// export let recordButton;
// export let socket;
// let connectionStatus;
// let statusRecording;
let audioRecorder;                     // MediaRecorder for the mono mic stream
let audioStream;                       // raw MediaStream from getUserMedia
let recording = false;                 // true while push-to-talk is active
let audioContext;                      // AudioContext feeding the analyser
let volumeChecker;                     // setInterval handle for the VAD loop
let lastVolumes = new Array(5);        // NOTE(review): appears unused here — possibly used by graph code out of view
let averageVolume;                     // mean FFT magnitude of the last sample
let silenceCount = 0;                  // consecutive quiet ticks (resets on speech)
let isSpeaking = false;                // VAD state: currently inside an utterance
let soundDetected = false;             // set when a finished chunk should be flushed
let speakingCount = 0;                 // consecutive loud ticks before "Listening..."
let analyser = null;                   // AnalyserNode; set by InitAudioAnalyser
let SILENCE_DELAY_MS = 50;             // VAD polling interval
let preDetect_IncludedAudio = 400; //ms of pre-roll kept so utterance starts aren't clipped
let soundCount_Threshold = 10;         // loud ticks required to enter "speaking"
let silenceCount_Threshold = 10;       // quiet ticks required to end an utterance
const volumeHistory = [];              // rolling window (max 100) for the adaptive threshold
// Install the shared WebSocket used by the audio senders.
// NOTE(review): assigns the module-level `socket` declared earlier in this
// script (declaration outside this view) — confirm it exists before use.
function setSocket(newSocket) {
    socket = newSocket;
}
// Wire up the push-to-talk button once its DOM element exists; clicking it
// toggles listening on/off via toggleListening.
export function setRecordButton(newRecordButton) {
    recordButton = newRecordButton;
    recordButton.addEventListener("click", toggleListening);
}
// Attach an AnalyserNode to the mic stream; the volume-checker loop reads
// its FFT bins to do voice-activity detection.
export function InitAudioAnalyser(stream) {
    audioContext = new AudioContext();
    analyser = audioContext.createAnalyser();
    analyser.fftSize = 2048;
    analyser.smoothingTimeConstant = 0.8;
    const micSource = audioContext.createMediaStreamSource(stream);
    micSource.connect(analyser);
}
// Begin capturing microphone audio, downmix it to mono, and stream recorded
// chunks to the server. Also flips the record button into "stop" styling.
export function startListening() {
    recording = true;
    navigator.mediaDevices.getUserMedia({ audio: { sampleRate: 16000 } })
        .then((stream) => {
            audioStream = stream;
            const audioContext = new AudioContext();
            const sourceNode = audioContext.createMediaStreamSource(audioStream);
            const audioSampleRate = sourceNode.context.sampleRate;
            info.innerHTML = "Sample rate: " + audioSampleRate + " Hz";
            // Pre-roll: chunk(s) captured just before speech was detected so
            // the start of an utterance is not clipped.
            var preBuffer = [];
            // Downmix to mono: split channels, merge only channel 0.
            const channelSplitter = audioContext.createChannelSplitter(2);
            const channelMerger = audioContext.createChannelMerger(1);
            sourceNode.connect(channelSplitter);
            channelSplitter.connect(channelMerger, 0, 0);
            const outputNode = channelMerger;
            const mediaStreamDestination = audioContext.createMediaStreamDestination();
            outputNode.connect(mediaStreamDestination);
            const singleChannelStream = mediaStreamDestination.stream;
            audioRecorder = new MediaRecorder(singleChannelStream);
            audioRecorder.start();
            audioRecorder.addEventListener("dataavailable", (event) => {
                if (!soundDetected && autosend.checked) {
                    // Still silent: keep only the newest chunk as pre-roll.
                    preBuffer = [];
                    preBuffer.push(event.data);
                    return;
                }
                if (event.data.size > 0) {
                    let data = event.data;
                    if (preBuffer.length > 0) {
                        sendAudioToServerPost(preBuffer);
                        // Bug fix: clear the pre-roll after posting so the
                        // same chunks are not re-sent with every utterance.
                        preBuffer = [];
                    }
                    sendAudioToServer(data);
                    soundDetected = false;
                }
            });
            InitAudioAnalyser(stream);
        })
        // Bug fix: the promise was floating; surface mic-permission errors.
        .catch((err) => console.error('getUserMedia failed:', err));
    recordButton.innerHTML = "Stop Talking";
    recordButton.classList.toggle('bg-red-500');
    recordButton.classList.toggle('bg-blue-500');
    recordButton.classList.toggle('hover:bg-blue-700');
}
// Stop capturing: halt the recorder, restore the button styling, cancel the
// VAD interval, and release the microphone tracks.
export function stopListening() {
    recording = false;
    // Bug fix: stop() throws an InvalidStateError if recording never
    // started (or already stopped); guard on the recorder's state.
    if (audioRecorder && audioRecorder.state !== "inactive") {
        audioRecorder.stop();
    }
    recordButton.innerHTML = "Push to Talk";
    recordButton.classList.toggle('bg-blue-500');
    recordButton.classList.toggle('bg-red-500');
    recordButton.classList.toggle('hover:bg-blue-700');
    clearInterval(volumeChecker);
    if (audioStream) {
        audioStream.getTracks().forEach(track => track.stop());
        audioStream = null;
    }
}
// Upload buffered pre-roll chunks via HTTP POST (multipart) to /upload.
// `data` is an array of Blob chunks from MediaRecorder.
export function sendAudioToServerPost(data) {
    const blob = new Blob(data, { type: "audio/ogg; codecs=opus" });
    const formData = new FormData();
    // Bug fix: the raw chunk array was appended before (FormData stringifies
    // non-Blob values); send the assembled OGG blob instead.
    formData.append('file', blob, 'audio.ogg');
    fetch('/upload', {
        method: 'POST',
        body: formData
    }).catch((err) => console.error('upload failed:', err));
}
// Send base64-encoded audio to the server inside a JSON envelope.
// `data` is expected to already be a base64 string — TODO confirm callers.
export function sendAudioToServerJson(data) {
    if (socket && socket.readyState === WebSocket.OPEN) {
        // Bug fix: the original referenced an undefined `base64AudioData`
        // and Node's Buffer (unavailable in the browser); send the caller's
        // payload directly.
        socket.send(JSON.stringify({ type: 'audio', audiobase64: data }));
        serverTime = Date.now();
        if (!autosend.checked) {
            transcription.placeholder = "Processing audio...";
        }
    }
}
// Stream a raw audio chunk over the WebSocket when the connection is open;
// records the send time so the reply handler can compute latency.
export function sendAudioToServer(data) {
    const connected = socket && socket.readyState === WebSocket.OPEN;
    if (!connected) {
        return;
    }
    socket.send(data);
    serverTime = Date.now();
    if (!autosend.checked) {
        transcription.placeholder = "Processing audio...";
    }
}
// Push-to-talk click handler: start or stop listening depending on state.
export function toggleListening() {
    // Bug fix: `socket` may still be undefined while the page is connecting;
    // reading .readyState then threw a TypeError.
    if (!socket || socket.readyState !== WebSocket.OPEN) {
        return;
    }
    if (recording) {
        stopListening();
    } else {
        startListening();
    }
}
// Voice-activity detection loop. Every SILENCE_DELAY_MS it samples the
// analyser's FFT, keeps a rolling average as an adaptive noise floor, and
// drives the MediaRecorder chunking: in continuous mode ("autosend") the
// recorder is restarted at speech onset (dropping the silent chunk) and at
// end-of-utterance (flushing the spoken chunk via "dataavailable").
export function initializeVolumeChecker() {
    volumeChecker = setInterval(() => {
        if (!audioContext) {
            console.log("No audio context");
            return;
        }
        const frequencyData = new Uint8Array(analyser.frequencyBinCount);
        analyser.getByteFrequencyData(frequencyData);
        let totalVolume = 0;
        for (let i = 0; i < frequencyData.length; i++) {
            totalVolume += frequencyData[i];
        }
        averageVolume = totalVolume / frequencyData.length;
        // Rolling window of the last 100 samples; its mean + 5 acts as the
        // adaptive "louder than ambient" threshold.
        volumeHistory.push(averageVolume);
        if (volumeHistory.length > 100) {
            volumeHistory.shift();
        }
        const threshold = volumeHistory.reduce((acc, curr) => acc + curr) / volumeHistory.length + 5;
        const isSilent = averageVolume < threshold; // NOTE(review): computed but unused
        if (averageVolume > threshold) {
            // Speech onset: restart the recorder so the pending (silent)
            // chunk is emitted and captured as pre-roll, not transcribed.
            if (autosend.checked && speakingCount == 0 && audioRecorder) {
                soundDetected = false;
                audioRecorder.stop();
                audioRecorder.start();
            }
            speakingCount++;
            if (speakingCount > soundCount_Threshold) {
                statusRecording.innerHTML = "Listening...";
                statusRecording.style.color = "green";
                isSpeaking = true;
            }
        } else if (averageVolume - 5 < threshold) {
            // NOTE(review): with averageVolume <= threshold this condition is
            // always true — possibly meant as a hysteresis band; confirm.
            speakingCount = 0;
            if (isSpeaking) {
                silenceCount++;
                if (silenceCount > silenceCount_Threshold) {
                    // End of utterance: flush the spoken chunk to the server.
                    if (autosend.checked) {
                        soundDetected = true;
                        audioRecorder.stop();
                        audioRecorder.start();
                    }
                    isSpeaking = false;
                    statusRecording.innerHTML = "Silence detected...";
                    statusRecording.style.color = "orange";
                }
            }
        }
    }, SILENCE_DELAY_MS);
}
// Expose functions to global scope so inline onclick="..." handlers in the
// HTML can call them (this block runs inside a module-style script).
// NOTE(review): the referenced functions are defined earlier in this script,
// outside this view.
window.logInAndStoreSession = logInAndStoreSession;
window.clearSession = clearSession;
window.copyToClipboard = copyToClipboard;
window.clearTranscription = clearTranscription;
window.startChat = startChat;
</script>
<script src="https://cdn.webrtc-experiment.com/MediaStreamRecorder.js"></script>
</body>
</html>

399
web/chat-server.js Normal file
View File

@ -0,0 +1,399 @@
const express = require('express');
const bodyParser = require('body-parser');
const WebSocket = require('ws');
const storage = require('node-persist');
const request = require('request');
const fs = require('fs');
const path = require('path');
const dotenv = require('dotenv');
const ollama = require('ollama');
const axios = require('axios');
const OpenAI = require('openai');
const Groq = require('groq-sdk');
// Load environment variables: `.env.development` in development, `.env`
// otherwise (keys: ports, TTS/LLN endpoints, OpenAI/Groq API keys).
dotenv.config({ path: `.env${process.env.NODE_ENV === 'development' ? '.development' : ''}` });
// Initialize services
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });  // TTS synthesis
const groq = new Groq({ apiKey: process.env.GROQ_API_KEY });        // translation LLM
// Express setup
const app = express();
app.use(bodyParser.json());
// Configuration constants
const PORT_HTTP = process.env.SERVER_PORT_HTTP || 3000;
const PORT_WS = process.env.SERVER_PORT_WS || 8080;
const TTS_API_URL = process.env.TTS_API_URL;  // ASR backend (/asr, /detect-language)
const LNN_API_URL = process.env.LNN_API_URL;  // NOTE(review): unused in this file
const LLN_MODEL = process.env.LLN_MODEL;      // NOTE(review): unused in this file
// Server-wide defaults; per-session overrides live in `sessions` entries.
let language = "en";
let storeRecordings = false;
let queueCounter = 0;                 // in-flight ASR requests
const sessions = new Map();           // sessionId -> { username, language, task, currentChat, ... }
const chats = new Map(); // Store chat rooms: chatId -> { participants, messages }
// Initialize node-persist storage and hydrate server defaults, chats and
// sessions from disk. Note: `|| storeRecordings` keeps the default when the
// stored value is false — which equals the default, so behavior is unchanged.
async function initStorage() {
    await storage.init();
    language = await storage.getItem('language') || language;
    storeRecordings = await storage.getItem('storeRecordings') || storeRecordings;
    const storedChats = await storage.getItem('chats') || [];
    storedChats.forEach(chat => chats.set(chat.id, chat));
    const storedSessions = await storage.getItem('sessions') || [];
    storedSessions.forEach(session => sessions.set(session.sessionId, session));
}
// Bug fix: the promise was floating; a storage failure was silently lost.
initStorage().catch(err => console.error('Storage init failed:', err));
// WebSocket Server: one connection per browser tab; JSON frames are control
// messages, binary frames are audio (see handleMessage/handleAudioData).
const wss = new WebSocket.Server({ port: PORT_WS });
wss.on('connection', ws => {
    ws.on('message', async message => handleMessage(ws, message));
    ws.on('close', () => handleClose(ws));
});
// Handle WebSocket messages: frames that parse as JSON are dispatched by
// their `type` field; anything else is treated as raw audio bytes.
async function handleMessage(ws, message) {
    let data;
    try {
        data = JSON.parse(message);
    } catch {
        // Binary/non-JSON frame: raw audio payload.
        return handleAudioData(ws, message);
    }
    const dispatch = {
        sessionId: () => handleSessionId(ws),
        join: () => handleJoin(ws, data),
        startChat: () => handleStartChat(ws, data),
        enterChat: () => handleEnterChat(ws, data),
        reconnect: () => handleReconnect(ws, data),
    };
    try {
        if (Object.hasOwn(dispatch, data.type)) {
            await dispatch[data.type]();
        } else {
            console.log('Unknown message type:', data.type);
        }
    } catch (err) {
        console.error('Failed to handle message', err);
    }
}
// Drop the closing socket's session and tell everyone the roster changed.
// NOTE(review): the in-memory delete is not persisted to storage here —
// confirm whether stale sessions on disk are acceptable.
function handleClose(ws) {
    sessions.delete(ws.sessionId);
    broadcastUserList();
}
// Handlers for specific message types
// Assign a fresh session id to this socket, register a default-language
// session entry, and persist the session list.
async function handleSessionId(ws) {
    const freshId = generateSessionId();
    ws.sessionId = freshId;
    sessions.set(freshId, { language: 'en' });
    await storage.setItem('sessions', Array.from(sessions.values()));
}
// Register the user's name/language on their session, confirm the session to
// the client, send the chats they participate in, and broadcast the roster.
async function handleJoin(ws, { username, language }) {
    const sessionId = ws.sessionId;
    sessions.set(sessionId, { username, sessionId, language });
    ws.send(JSON.stringify({ type: 'sessionId', sessionId, language, storeRecordings }));
    const userChats = [];
    for (const chat of chats.values()) {
        if (chat.participants.includes(sessionId)) {
            userChats.push(chat);
        }
    }
    ws.send(JSON.stringify({ type: 'chats', chats: userChats }));
    broadcastUserList();
}
// Create a new chat room containing the initiator plus the invited session
// ids, persist it, and notify all participants.
async function handleStartChat(ws, { users }) {
    const chatId = generateChatId();
    // De-duplicate: the initiator may also appear in the invited list.
    const participants = [...new Set([ws.sessionId, ...users])];
    // Bug fix: store the id on the chat object itself — the client reads
    // `chat.id`, and initStorage re-keys persisted chats by `chat.id`
    // (previously undefined, corrupting the map after a restart).
    chats.set(chatId, { id: chatId, participants, messages: [] });
    await storage.setItem('chats', Array.from(chats.values()));
    notifyParticipants(participants);
    broadcastUserList();
}
// Mark `chatId` as the socket's active chat and send back the chat contents
// (only if the requester actually participates in it).
async function handleEnterChat(ws, { chatId }) {
    const enteredChat = chats.get(chatId);
    const currentSession = sessions.get(ws.sessionId);
    // Bug fix: a socket that never joined has no session entry; the
    // unconditional property write below crashed the handler.
    if (!currentSession) {
        return;
    }
    currentSession.currentChat = chatId;
    if (enteredChat && enteredChat.participants.includes(ws.sessionId)) {
        ws.send(JSON.stringify({ type: 'chat', chat: enteredChat }));
    }
}
// Re-attach a returning client to its previous session id and resend its
// chat list.
async function handleReconnect(ws, { sessionId }) {
    const userSession = sessions.get(sessionId);
    if (userSession) {
        // Bug fix: the original copied the session under the socket's *old*
        // id before reassigning ws.sessionId, leaving a stale duplicate
        // entry in the sessions map.
        if (ws.sessionId && ws.sessionId !== sessionId) {
            sessions.delete(ws.sessionId);
        }
        ws.sessionId = sessionId;
        sessions.set(sessionId, userSession);
        const userChats = Array.from(chats.values()).filter(chat => chat.participants.includes(ws.sessionId));
        ws.send(JSON.stringify({ type: 'chats', chats: userChats }));
    } else {
        console.log('Session not found:', sessionId);
    }
    broadcastUserList();
}
// Utility functions
// Short random identifier for a session. NOTE: Math.random() is not
// cryptographically secure — acceptable for ephemeral chat session ids.
function generateSessionId() {
    const fraction = Math.random().toString(36); // e.g. "0.k3j9x2…"
    return fraction.substring(2);                // drop the leading "0."
}
// Short random identifier for a chat room (same scheme as session ids).
function generateChatId() {
    const fraction = Math.random().toString(36);
    return fraction.substring(2);
}
// Push the current roster (username, session id, active chat, language) to
// every open client socket.
function broadcastUserList() {
    const userList = [];
    for (const { username, sessionId, currentChat, language } of sessions.values()) {
        userList.push({ username, sessionId, currentChat, language });
    }
    const payload = JSON.stringify({ type: 'userList', users: userList });
    for (const client of wss.clients) {
        if (client.readyState === WebSocket.OPEN) {
            client.send(payload);
        }
    }
}
// Send each participant an updated list of the chats they belong to
// (as { id, participants } summaries).
function notifyParticipants(participants) {
    for (const sessionId of participants) {
        let target = null;
        for (const client of wss.clients) {
            if (client.sessionId === sessionId) {
                target = client;
                break;
            }
        }
        if (!target || target.readyState !== WebSocket.OPEN) {
            continue;
        }
        const userChats = [];
        for (const [id, chat] of chats.entries()) {
            if (chat.participants.includes(sessionId)) {
                userChats.push({ id, participants: chat.participants });
            }
        }
        target.send(JSON.stringify({ type: 'chats', chats: userChats }));
    }
}
// Route a raw audio frame to the ASR backend, auto-detecting the language
// first when the session has none (or "auto") configured.
async function handleAudioData(ws, data) {
    const sessionData = sessions.get(ws.sessionId);
    // Bug fix: audio from a socket that never joined crashed the handler on
    // destructuring `undefined`.
    if (!sessionData) {
        return;
    }
    let { language, task } = sessionData;
    const formData = {
        task: task || 'transcribe',
        language,
        vad_filter: 'true',
        output: 'json',
        audio_file: {
            value: data,
            options: { filename: 'audio.ogg', contentType: 'audio/ogg' }
        }
    };
    if (!language || language === 'auto') {
        await detectLanguage(ws, formData);
    } else {
        await transcribeAudio(ws, formData, sessionData);
    }
}
// Ask the ASR backend to identify the spoken language, store it on the
// session, notify the client, then run the normal transcription.
async function detectLanguage(ws, formData) {
    try {
        const result = await requestPromise({
            method: 'POST',
            url: TTS_API_URL.replace('/asr', '/detect-language'),
            formData
        });
        const { language_code } = JSON.parse(result);
        if (language_code) {
            const sessionData = sessions.get(ws.sessionId);
            // Bug fix: the session can vanish (socket closed) while the
            // backend call was in flight; writing to `undefined` crashed.
            if (!sessionData) {
                return;
            }
            sessionData.language = language_code;
            ws.send(JSON.stringify({ type: 'languageDetected', languageDetected: language_code }));
            await transcribeAudio(ws, formData, sessionData);
        }
    } catch (err) {
        console.error('Language detection failed:', err);
    }
}
// POST audio to the ASR backend, report the transcript (with timing and
// queue depth) to the client, fan it out to the active chat, and optionally
// archive the raw audio under rec/.
async function transcribeAudio(ws, formData, sessionData) {
    const start = new Date().getTime();
    queueCounter++;
    let queued = true; // slot held until the backend answers (or fails)
    try {
        if (sessionData.language) {
            formData.language = sessionData.language;
        }
        formData.vad_filter = 'true';
        const body = await requestPromise({ method: 'POST', url: TTS_API_URL, formData });
        queueCounter--;
        queued = false;
        const duration = new Date().getTime() - start;
        ws.send(JSON.stringify({
            type: 'text',
            queueCounter,
            duration,
            language: sessionData.language,
            text: body
        }));
        await handleChatTranscription(ws, body, sessionData);
    } catch (err) {
        // Bug fix: the counter was only decremented on success, so every
        // failed request inflated the reported queue depth forever.
        if (queued) {
            queueCounter--;
        }
        console.error('Transcription failed:', err);
    }
    if (storeRecordings) {
        const timestamp = Date.now();
        fs.mkdir('rec', { recursive: true }, err => {
            if (err) console.error(err);
            else {
                fs.writeFile(`rec/audio${timestamp}.ogg`, formData.audio_file.value, err => {
                    if (err) console.error(err);
                    else console.log(`Audio data saved to rec/audio${timestamp}.ogg`);
                });
            }
        });
    }
}
// Relay a finished transcription to the other participants of the sender's
// current chat, translating (and synthesizing speech) for listeners whose
// language differs from the speaker's.
async function handleChatTranscription(ws, body, sessionData) {
    if (!sessionData.currentChat) {
        return;
    }
    const chat = chats.get(sessionData.currentChat);
    if (!chat) {
        return;
    }
    const msg = { sender: sessionData.username, text: body, translations: [] };
    chat.messages.push(msg);
    for (const sessionId of chat.participants) {
        if (sessionId === ws.sessionId) {
            continue;
        }
        const targetLang = sessions.get(sessionId)?.language || 'en';
        const participantSocket = Array.from(wss.clients).find(client => client.sessionId === sessionId);
        const canSend = participantSocket && participantSocket.readyState === WebSocket.OPEN;
        if (targetLang !== sessionData.language) {
            // Translate even if the listener is offline, so the message
            // record keeps its translations (matches original behavior).
            const translation = await translateText(body, sessionData.language, targetLang);
            msg.translations.push({ language: targetLang, text: translation });
            if (canSend) {
                participantSocket.send(JSON.stringify({ type: 'text', text: `${sessionData.username}: ${translation}` }));
                const audioBuffer = await generateSpeech(translation);
                participantSocket.send(JSON.stringify({ type: 'audio', audio: audioBuffer.toString('base64') }));
            }
        } else if (canSend) {
            participantSocket.send(JSON.stringify({ type: 'text', text: `${sessionData.username}: ${body}` }));
            // Bug fix: the original sent `formData.toString('base64')`, but
            // `formData` is not in scope here (ReferenceError). The source
            // audio is not available in this function, so no audio frame is
            // sent for same-language listeners.
            // TODO(review): thread the original audio buffer through if
            // same-language playback is desired.
        }
    }
}
// Translate `originalText` between the given languages with Groq's LLM.
// Returns the bare translation, or "" when the model yields no content.
async function translateText(originalText, originalLanguage, targetLanguage) {
    // (Removed an unused `prompt` local — the text is sent as the user
    // message and the languages go in the system prompt.)
    const response = await groq.chat.completions.create({
        messages: [
            {
                role: "system",
                content: `You are translating voice transcriptions from '${originalLanguage}' to '${targetLanguage}'. Reply with just the translation.`,
            },
            {
                role: "user",
                content: originalText,
            },
        ],
        model: "llama3-8b-8192",
    });
    return response.choices[0]?.message?.content || "";
}
// Synthesize `text` with OpenAI TTS (model tts-1, voice "alloy") and return
// the raw MP3 bytes as a Buffer.
async function generateSpeech(text) {
    const speech = await openai.audio.speech.create({
        model: "tts-1",
        voice: "alloy",
        input: text,
    });
    const bytes = await speech.arrayBuffer();
    return Buffer.from(bytes);
}
// HTTP Server: static pages, client log relay, settings, audio upload and
// chat listing.
app.get('/', (req, res) => {
    res.sendFile(path.join(__dirname, 'chat-client.html'));
});
app.get('/audio.js', (req, res) => {
    res.sendFile(path.join(__dirname, 'audio.js'));
});
// Browser console -> server stdout relay.
app.post('/log', (req, res) => {
    console.log(`[LOG ${new Date().toISOString()}] ${req.body.message}`);
    res.status(200).send('OK');
});
app.get('/wsurl', (req, res) => {
    res.status(200).send(process.env.WS_URL);
});
// Server-wide settings (persisted across restarts).
app.get('/settings', async (req, res) => {
    if (req.query.language) {
        language = req.query.language;
        await storage.setItem('language', language);
    }
    if (req.query.storeRecordings) {
        // Bug fix: query values are strings, so "false" was truthy and
        // silently enabled recording; coerce to a real boolean.
        storeRecordings = req.query.storeRecordings === 'true';
        await storage.setItem('storeRecordings', storeRecordings);
    }
    res.status(200).send({ language, storeRecordings });
});
// Per-session settings.
app.post('/settings', async (req, res) => {
    const { sessionId, language, storeRecordings, task } = req.body;
    const sessionData = sessions.get(sessionId);
    // Bug fix: an unknown session id previously crashed the handler on
    // property assignment to `undefined`.
    if (!sessionData) {
        return res.status(404).send('Unknown session');
    }
    if (language) sessionData.language = language;
    if (storeRecordings !== undefined) sessionData.storeRecordings = storeRecordings;
    if (task) sessionData.task = task;
    res.status(200).send('OK');
});
// Raw audio upload (pre-roll chunks posted by the client).
app.post('/upload', (req, res) => {
    const timestamp = Date.now();
    console.log('Received audio data:', timestamp);
    fs.mkdir('rec', { recursive: true }, err => {
        if (err) return res.status(500).send('ERROR');
        const file = fs.createWriteStream(`rec/audio_slice_${timestamp}.ogg`);
        req.pipe(file);
        file.on('finish', () => res.status(200).send('OK'));
    });
});
app.get('/chats', (req, res) => {
    const { username } = req.query;
    // NOTE(review): `participants` hold session ids everywhere else in this
    // file, so filtering by username may never match — confirm intended key.
    const userChats = Array.from(chats.values()).filter(chat => chat.participants.includes(username));
    res.status(200).send({ chats: userChats });
});
app.listen(PORT_HTTP, () => {
    console.log(`Server listening on port ${PORT_HTTP}`);
});
// Helper to wrap request in a promise
// Promisify the callback-based `request` client: resolves with the response
// body, rejects on transport errors.
function requestPromise(options) {
    return new Promise((resolve, reject) => {
        request(options, (error, _response, body) => {
            if (error) {
                reject(error);
            } else {
                resolve(body);
            }
        });
    });
}

View File

@ -3,11 +3,9 @@
<head> <head>
<title>Real-time Speech-to-Text</title> <title>Real-time Speech-to-Text</title>
<meta name="viewport" <meta name="viewport" content="width=device-width, initial-scale=1">
content="width=device-width, initial-scale=1">
<!-- Add the Tailwind CSS library --> <!-- Add the Tailwind CSS library -->
<link rel="stylesheet" <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/tailwindcss/2.2.19/tailwind.min.css">
href="https://cdnjs.cloudflare.com/ajax/libs/tailwindcss/2.2.19/tailwind.min.css">
</head> </head>
<body class="bg-gray-100"> <body class="bg-gray-100">
@ -15,61 +13,61 @@
<h1 class="text-2xl font-bold mb-4 text-center">Rt STT</h1> <h1 class="text-2xl font-bold mb-4 text-center">Rt STT</h1>
<div class="flex justify-center items-center mb-4"> <div class="flex justify-center items-center mb-4">
<label class="toggle flex items-center"> <label class="toggle flex items-center">
<input type="checkbox" <input type="checkbox" id="autosend" class="mr-2">
id="autosend"
class="mr-2">
<span class="slider"></span> <span class="slider"></span>
<span class="ml-2">Continuous</span> <span class="ml-2">Continuous</span>
</label> </label>
<select id="input-devices" <select id="input-devices" class="ml-4">
class="ml-4">
<option value="default">Default</option> <option value="default">Default</option>
</select> </select>
<select id="language-select"> <select id="language-select">
<option value="auto">Auto</option>
<option value="en">English</option> <option value="en">English</option>
<option value="bg">Български</option> <option value="bg">Български</option>
<option value="fr">Français</option>
</select>
<select id="task-select">
<option value="transcribe">Transcribe</option>
<option value="translate">Translate</option>
</select> </select>
<label class="toggle flex items-center ml-4"> <label class="toggle flex items-center ml-4">
<input type="checkbox" <input type="checkbox" id="store-recordings" class="mr-2">
id="store-recordings"
class="mr-2">
<span class="slider"></span> <span class="slider"></span>
<span class="ml-2">Store Recordings</span> <span class="ml-2">Store Recordings</span>
</div> </div>
<div class="flex justify-center items-center mb-4"> <div class="flex justify-center items-center mb-4">
<span id="record-actions"> <span id="record-actions">
<button id="record-button" <button id="record-button" disabled
disabled
class="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded mr-4"> class="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded mr-4">
Start Recording</button> Start Recording</button>
<button id="record-button-speakers" <button id="record-button-speakers" disabled
disabled
class="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded mr-4"> class="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded mr-4">
Stream from speakers</button> Stream from speakers</button>
</span> </span>
</div> </div>
<div class="flex justify-center items-center mb-4"> <div class="flex justify-center items-center mb-4">
<div id="connection-status" <div id="connection-status" style="margin-right: 5px;"></div>
style="margin-right: 5px;"></div>
</div> </div>
<div class="flex justify-center items-center mb-4"> <div class="flex justify-center items-center mb-4">
<div id="info"></div> <div id="info"></div>
</div> </div>
<div id="status-recording" <div id="status-recording" class="flex justify-center items-center mb-4">
class="flex justify-center items-center mb-4">
</div> </div>
<div class="relative rounded-lg border border-gray-300 shadow-sm"> <div class="relative rounded-lg border border-gray-300 shadow-sm">
<textarea id="transcription" <textarea id="transcription" class="block w-full h-48 p-4 resize-none"
class="block w-full h-48 p-4 resize-none"
placeholder="Whisper something into the microphone..."></textarea> placeholder="Whisper something into the microphone..."></textarea>
<button id="copyButton" <button id="copyButton"
class="absolute top-0 right-0 px-4 py-2 text-sm font-medium text-gray-700 bg-gray-200 hover:bg-gray-300 rounded-bl-lg focus:outline-none" class="absolute top-0 right-0 px-4 py-2 text-sm font-medium text-gray-700 bg-gray-200 hover:bg-gray-300 rounded-bl-lg focus:outline-none"
onclick="copyToClipboard('transcription')"> onclick="copyToClipboard('transcription')">
Copy Copy
</button> </button>
<button id="clearButton"
class="absolute top-0 right-20 px-2 py-1 text-sm font-medium text-gray-700 bg-gray-200 hover:bg-gray-300 rounded-br-lg focus:outline-none"
onclick="transcription.value = ''">
Clear
</button>
</div> </div>
<canvas id="canvas" <canvas id="canvas" class="w-full"></canvas>
class="w-full"></canvas>
<script> <script>
let sessionId; let sessionId;
@ -125,6 +123,15 @@ disabled
credentials: 'same-origin' credentials: 'same-origin'
}); });
}); });
document.getElementById('task-select').addEventListener('change', (event) => {
const task = event.target.value;
fetch('/settings', {
method: 'POST',
body: JSON.stringify({ task, sessionId }),
headers: { 'Content-Type': 'application/json' },
credentials: 'same-origin'
});
});
// Draw sliding bar graph // Draw sliding bar graph
function drawSlidingBarGraph(lastVolumes) { function drawSlidingBarGraph(lastVolumes) {
@ -261,10 +268,28 @@ disabled
if (json.hasOwnProperty("language")) { if (json.hasOwnProperty("language")) {
languageSelect.value = json.language; languageSelect.value = json.language;
} }
if (json.hasOwnProperty("languageDetected")) {
statusRecording.innerHTML = "Detected language: " + json.languageDetected;
}
if (json.hasOwnProperty("taskSelect")) {
taskSelect.value = json.taskSelect;
}
//storerecordings checkbox //storerecordings checkbox
if (json.hasOwnProperty("storeRecordings")) { if (json.hasOwnProperty("storeRecordings")) {
storeRecordings.checked = json.storeRecordings; storeRecordings.checked = json.storeRecordings;
} }
if (json.hasOwnProperty("text")) {
transcription.value += "\r\n" + json.text;
}
if (json.hasOwnProperty("queueCounter")) {
let latency = Date.now() - serverTime;
console.log("Received message from server: " + event.data + " (latency: " + latency + "ms)");
info.innerHTML = "latency: " + latency + "ms; server queue: " + queue + " requests";
}
return; return;
} catch (e) { } catch (e) {
//not json //not json
@ -407,7 +432,7 @@ disabled
serverTime = Date.now(); serverTime = Date.now();
console.log("Sent some audio data to server."); console.log("Sent some audio data to server.");
if (!autosend.checked) { if (!autosend.checked) {
transcription.innerHTML = "Processing audio..."; transcription.placeholder = "Processing audio...";
} }
} else { } else {
console.log("Not connected, not sending audio data to server."); console.log("Not connected, not sending audio data to server.");
@ -484,6 +509,7 @@ disabled
languageSelect = document.getElementById("language-select"); languageSelect = document.getElementById("language-select");
inputDevices = document.getElementById("input-devices"); inputDevices = document.getElementById("input-devices");
storeRecordings = document.getElementById("store-recordings"); storeRecordings = document.getElementById("store-recordings");
taskSelect = document.getElementById("task-select");
enumerateDevices(); enumerateDevices();
connect(socket); connect(socket);

View File

@ -1,6 +1,7 @@
//load .env file //load .env file
if (require('dotenv')) { if (require('dotenv')) {
require('dotenv').config() const envFile =process.env.NODE_ENV === 'development' ? '.env.development' : '.env';
require('dotenv').config({ path: envFile });
} }
console.log('Starting ws server on port ' + process.env.SERVER_PORT_WS); console.log('Starting ws server on port ' + process.env.SERVER_PORT_WS);
@ -8,9 +9,10 @@ const WebSocket = require('ws');
const wss = new WebSocket.Server({ port: process.env.SERVER_PORT_WS }); const wss = new WebSocket.Server({ port: process.env.SERVER_PORT_WS });
console.log(process.env) // console.log("ENV="+process.env)
console.log(process.env.TTS_BACKEND_URL) console.log("TTS_API_URL=" + process.env.TTS_API_URL)
console.log(process.env.WS_URL) console.log("WS_URL=" + process.env.WS_URL)
console.log("ENV_NAME=" + process.env.ENV_NAME)
let language = "en"; let language = "en";
let storeRecordings = false; let storeRecordings = false;
@ -19,7 +21,7 @@ let queueCounter = 0;
const storage = require('node-persist'); const storage = require('node-persist');
storage.init().then(() => { storage.init().then(() => {
storage.getItem('language').then((value) => { storage.getItem('language').then((value) => {
if (value != undefined) { language = value; console.log('language: ' + language); } if (value != undefined) { language = value; console.log('stored language: ' + language); }
else { storage.setItem('language', language).then(() => { console.log('language set to ' + language + "(default)"); }); } else { storage.setItem('language', language).then(() => { console.log('language set to ' + language + "(default)"); }); }
}); });
@ -49,12 +51,16 @@ wss.on('connection', (ws, req) => {
console.log('No session data found for session id ' + webSocket.sessionId); console.log('No session data found for session id ' + webSocket.sessionId);
} }
let language = sessionData?.language || 'en'; let language = sessionData?.language || 'en';
let task = sessionData?.task || 'transcribe';
//show the size of the audio data as 0.000 MB //show the size of the audio data as 0.000 MB
console.log('(queue ' + queueCounter + ') Received ' + (data.length / 1024 / 1024).toFixed(3) + ' MB audio from client. Crrent language: ' + language); console.log('(queue ' + queueCounter + ') Received ' + (data.length / 1024 / 1024).toFixed(3) + ' MB audio from client. Crrent language: ' + language, 'task: ' + task);
var request = require('request'); var request = require('request');
var endpoint = process.env.TTS_API_URL;
var formData = { var formData = {
task: 'transcribe', task: task,
language: sessionData.language, language: language,
vad_filter: 'true',
output: 'json', output: 'json',
audio_file: { audio_file: {
value: data, value: data,
@ -64,6 +70,31 @@ wss.on('connection', (ws, req) => {
} }
} }
}; };
console.log('language:', language);
if (language == 'auto' || language == '') {
console.log('Detecting language...');
request.post({ url: endpoint.replace('/asr', '/detect-language'), formData: formData }, function optionalCallback(err, httpResponse, body) {
console.log('detected:', body);
if (typeof body === 'string') {
body = JSON.parse(body);
}
if (body && body.language_code) {
language = body.language_code; if (body && body.language_code) {
let language = body.language_code;
sessionData.language = language;
console.log('language set to:', language);
webSocket.send(JSON.stringify({ languageDetected: body.detected_language }));
} else {
console.error('Error: Invalid body or missing language_code');
}
sessionData.language = language;
console.log('language set to:', language);
} else {
console.error('Error: Invalid body or missing language_code');
}
});
}
storeRecordings = sessionData?.storeRecordings || storeRecordings; storeRecordings = sessionData?.storeRecordings || storeRecordings;
if (storeRecordings) { if (storeRecordings) {
@ -84,7 +115,7 @@ wss.on('connection', (ws, req) => {
//record start time //record start time
var start = new Date().getTime(); var start = new Date().getTime();
queueCounter++; queueCounter++;
request.post({ url: process.env.TTS_BACKEND_URL, formData: formData }, function optionalCallback(err, httpResponse, body) { request.post({ url: process.env.TTS_API_URL, formData: formData }, function optionalCallback(err, httpResponse, body) {
queueCounter--; queueCounter--;
if (err) { if (err) {
return console.error('upload failed:', err); return console.error('upload failed:', err);
@ -93,7 +124,8 @@ wss.on('connection', (ws, req) => {
var duration = new Date().getTime() - start; var duration = new Date().getTime() - start;
//console.log('decoded (' + duration + 'ms):', body); //console.log('decoded (' + duration + 'ms):', body);
console.log('decoded (' + (duration / 1000).toFixed(2) + 's):', body); console.log('decoded (' + (duration / 1000).toFixed(2) + 's):', body);
webSocket.send("(" + queueCounter + ") " + body); //webSocket.send("(" + queueCounter + ") " + body);
webSocket.send(JSON.stringify({ queueCounter: queueCounter, duration: duration, language: language, text: body}));
}); });
}); });
}); });
@ -157,6 +189,10 @@ app.post('/settings', (req, res) => {
sessionData.storeRecordings = body.storeRecordings; sessionData.storeRecordings = body.storeRecordings;
console.log(`Session ${sid}: storeRecordings set to ${sessionData.storeRecordings}`); console.log(`Session ${sid}: storeRecordings set to ${sessionData.storeRecordings}`);
} }
if (body.task != undefined) {
sessionData.task = body.task;
}
res.send('OK', 200, { 'Content-Type': 'text/plain' }); res.send('OK', 200, { 'Content-Type': 'text/plain' });
}); });
@ -205,14 +241,14 @@ app.get('/test_ocr', (req, res) => {
formData.language = req.query.language; formData.language = req.query.language;
} }
var tts_url = process.env.TTS_BACKEND_URL; var tts_url = process.env.TTS_API_URL;
if (req.query.ttsID != undefined) { if (req.query.ttsID != undefined) {
//1: TTS_BACKEND_URL //1: TTS_API_URL
//2: TTS_BACKEND_URL2 //2: TTS_API_URL2
//3: TTS_BACKEND_URL3 //3: TTS_API_URL3
//4: TTS_BACKEND_URL4 //4: TTS_API_URL4
if (req.query.ttsID !== '1') { if (req.query.ttsID !== '1') {
tts_url = process.env['TTS_BACKEND_URL' + req.query.ttsID]; tts_url = process.env['TTS_API_URL' + req.query.ttsID];
} }
} }