From a9ce278e9a603afc022a2c1c96bb90fd680d56ec Mon Sep 17 00:00:00 2001
From: Dobromir Popov
Date: Wed, 21 Feb 2024 07:24:42 +0000
Subject: [PATCH 1/5] prompts

---
 agent-android/prompts.txt | 7 +++++++
 1 file changed, 7 insertions(+)
 create mode 100644 agent-android/prompts.txt

diff --git a/agent-android/prompts.txt b/agent-android/prompts.txt
new file mode 100644
index 0000000..efd1f2b
--- /dev/null
+++ b/agent-android/prompts.txt
@@ -0,0 +1,7 @@
+you're in a shell console at the root folder of a new software project. we use vscode.
+let's create a mobile app (prioritize android, and plan to also support iOS) which will send the audio input to a tts llm
+plan for the following extension features in the future:
+ - ability to listen in the background for a wake word and send the following voice command
+ - ability to listen on hardware button press, O button hold, or another android fast-access shortcut intent
+
+ 
\ No newline at end of file

From eab2a3ea6b0ed704cbe3016e428232fb898c4ec1 Mon Sep 17 00:00:00 2001
From: Dobromir Popov
Date: Wed, 21 Feb 2024 18:20:25 +0200
Subject: [PATCH 2/5] ignore

---
 .gitignore                              | 1 +
 agent-mobile/artimobile/supervisord.pid | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index 8e77a12..ef81834 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,3 +8,4 @@ agent-py-bot/scrape/raw/*
 .aider*
 tts/*.m4a
 agent-mobile/jdk/*
+agent-mobile/artimobile/supervisord.pid
diff --git a/agent-mobile/artimobile/supervisord.pid b/agent-mobile/artimobile/supervisord.pid
index ec63514..7f8f011 100644
--- a/agent-mobile/artimobile/supervisord.pid
+++ b/agent-mobile/artimobile/supervisord.pid
@@ -1 +1 @@
-9
+7

From ec1efcba1650e353095ced7c94caff9cae578732 Mon Sep 17 00:00:00 2001
From: Dobromir Popov
Date: Mon, 26 Feb 2024 17:05:52 +0000
Subject: [PATCH 3/5] create storage

---
 store-py/notes.md | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 53 insertions(+)
 create mode 100644 store-py/notes.md

diff --git a/store-py/notes.md b/store-py/notes.md
new file mode 100644
index 0000000..fcf05ff
--- /dev/null
+++ b/store-py/notes.md
@@ -0,0 +1,53 @@
+# Environment Setup
+cd vector_knowledge_graph
+python -m venv venv
+source venv/bin/activate
+pip install fastapi uvicorn openai psycopg2-binary sqlalchemy
+
+
+# Create a Database: create a new PostgreSQL database with the pgvector extension.
+CREATE EXTENSION vector;
+CREATE TABLE knowledge (
+    id SERIAL PRIMARY KEY,
+    embedding vector(512) NOT NULL, -- assuming 512 dimensions for embeddings
+    metadata JSONB
+);
+CREATE INDEX ON knowledge USING ivfflat (embedding);
+
+# Application Code
+
+from fastapi import FastAPI
+
+app = FastAPI()
+
+@app.get("/")
+async def read_root():
+    return {"Hello": "World"}
+Database Client (app/vector_db/client.py): Implement a simple client for connecting to the database and inserting/fetching vectors.
+
+import psycopg2
+from psycopg2.extras import Json
+
+def insert_embedding(embedding, metadata):
+    conn = psycopg2.connect("dbname=your_db user=your_user")
+    cur = conn.cursor()
+    cur.execute("INSERT INTO knowledge (embedding, metadata) VALUES (%s, %s)", (embedding, Json(metadata)))
+    conn.commit()
+    cur.close()
+    conn.close()
+
+def search_embedding(embedding):
+    conn = psycopg2.connect("dbname=your_db user=your_user")
+    cur = conn.cursor()
+    cur.execute("SELECT id, metadata FROM knowledge ORDER BY embedding <-> %s LIMIT 5", (embedding,))
+    results = cur.fetchall()
+    cur.close()
+    conn.close()
+    return results
+5. LLM Integration
+At this stage, we'll need to implement the logic to interact with OpenAI's API to generate and process embeddings. Since this involves using OpenAI's services, ensure you have an API key and have agreed to their terms of use.
+
+6. Running the Application
+With the basic components in place, you can start the FastAPI application using uvicorn:
+
+uvicorn app.api.main:app --reload
\ No newline at end of file

From 97b0f9b64f911bf4ff5d466199eac5f2d47e9e69 Mon Sep 17 00:00:00 2001
From: Dobromir Popov
Date: Mon, 26 Feb 2024 21:04:19 +0000
Subject: [PATCH 4/5] initial prompt of store-py

---
 store-py/notes.md | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/store-py/notes.md b/store-py/notes.md
index fcf05ff..484bc1e 100644
--- a/store-py/notes.md
+++ b/store-py/notes.md
@@ -1,3 +1,6 @@
+
+<<
+using python, create a new project that will utilize a vector store database to create an interlinked vector-space knowledge graph as a "memory" function. It will be used by a realtime LLM to store and retrieve knowledge>>
 # Environment Setup
 cd vector_knowledge_graph
 python -m venv venv
@@ -9,7 +12,7 @@ pip install fastapi uvicorn openai psycopg2-binary sqlalchemy
 CREATE EXTENSION vector;
 CREATE TABLE knowledge (
     id SERIAL PRIMARY KEY,
-    embedding vector(512) NOT NULL, -- assuming 512 dimensions for embeddings
+    embedding vector(1536) NOT NULL, -- 1536 dimensions, matching OpenAI's text-embedding models
     metadata JSONB
 );
 CREATE INDEX ON knowledge USING ivfflat (embedding);

From de62e222200b004a1ae881890d78a58882400afb Mon Sep 17 00:00:00 2001
From: Dobromir Popov
Date: Tue, 27 Feb 2024 01:19:07 +0200
Subject: [PATCH 5/5] store wip

---
 store-py/store.py | 45 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)
 create mode 100644 store-py/store.py

diff --git a/store-py/store.py b/store-py/store.py
new file mode 100644
index 0000000..dca89bf
--- /dev/null
+++ b/store-py/store.py
@@ -0,0 +1,45 @@
+# requires: pip install faiss-cpu gensim numpy
+import faiss
+import numpy as np
+from gensim.models import Word2Vec
+
+# Define the knowledge graph schema
+entities = ['Alice', 'Bob', 'Charlie']
+relationships = [('Alice', 'friend', 'Bob'), ('Alice', 'friend', 'Charlie')]
+
+# Train a small Word2Vec model on tokenized sentences that describe the graph
+sentences = [['Alice', 'is', 'friends', 'with', 'Bob', 'and', 'Charlie']]
+model = Word2Vec(sentences=sentences, vector_size=100, min_count=1)
+
+def embed_text(text):
+    # Average the Word2Vec vectors of the tokens the model knows about
+    tokens = [t for t in text.split() if t in model.wv]
+    return np.mean([model.wv[t] for t in tokens], axis=0).astype('float32')
+
+# Use FAISS indexes as the vector stores ("tables") for entities and relationships
+dim = model.vector_size
+entity_index = faiss.IndexFlatL2(dim)
+relationship_index = faiss.IndexFlatL2(dim)
+
+# Store the knowledge graph in the indexes
+entity_vectors = np.stack([embed_text(e) for e in entities])
+relationship_vectors = np.stack([embed_text(' '.join(r)) for r in relationships])
+entity_index.add(entity_vectors)
+relationship_index.add(relationship_vectors)
+
+# Lookup helpers that an LLM integration layer can call to fetch stored vectors
+def get_entity_vector(entity):
+    return entity_vectors[entities.index(entity)]
+
+def get_relationship_vector(relationship):
+    return relationship_vectors[relationships.index(relationship)]
+
+def search_entities(text, k=2):
+    # Return the k entities whose vectors are closest to the query text
+    query = embed_text(text).reshape(1, -1)
+    distances, ids = entity_index.search(query, k)
+    return [(entities[i], float(d)) for i, d in zip(ids[0], distances[0])]
+
+# TODO: wire these helpers into the realtime LLM (the LLM side is still WIP)
+print(get_entity_vector('Alice')[:5])
+print(search_entities('Alice is friends with Bob and Charlie'))
\ No newline at end of file
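
The notes in store-py/notes.md describe the LLM-integration step (section 5) but stop short of showing the embedding call. Below is a minimal sketch of that step, assuming the openai>=1.0 Python client (reading OPENAI_API_KEY from the environment), the text-embedding-3-small model (1536-dimensional, matching vector(1536)), and the knowledge table from the notes; the remember/recall helper names and the placeholder DSN are illustrative assumptions.

import psycopg2
from psycopg2.extras import Json
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def embed(text):
    # text-embedding-3-small returns 1536-dimensional vectors, matching vector(1536)
    resp = client.embeddings.create(model="text-embedding-3-small", input=text)
    return resp.data[0].embedding

def to_vector_literal(embedding):
    # pgvector accepts a '[x1,x2,...]' text literal cast to the vector type
    return "[" + ",".join(str(x) for x in embedding) + "]"

def remember(text, metadata):
    # Embed the text and store it with its metadata in the knowledge table
    conn = psycopg2.connect("dbname=your_db user=your_user")
    cur = conn.cursor()
    cur.execute(
        "INSERT INTO knowledge (embedding, metadata) VALUES (%s::vector, %s)",
        (to_vector_literal(embed(text)), Json(metadata)),
    )
    conn.commit()
    cur.close()
    conn.close()

def recall(text, k=5):
    # Return the k stored items nearest to the query text (L2 distance)
    conn = psycopg2.connect("dbname=your_db user=your_user")
    cur = conn.cursor()
    cur.execute(
        "SELECT id, metadata FROM knowledge ORDER BY embedding <-> %s::vector LIMIT %s",
        (to_vector_literal(embed(text)), k),
    )
    rows = cur.fetchall()
    cur.close()
    conn.close()
    return rows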
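
Section 6 of the notes launches the app with uvicorn app.api.main:app --reload. Here is a sketch of how app/api/main.py could expose the store over HTTP, assuming the remember/recall helpers above live in app/vector_db/client.py (the module named in the notes); the route paths and the request model are assumptions.

from fastapi import FastAPI
from pydantic import BaseModel

# Assumes the remember/recall helpers sketched above live in app/vector_db/client.py
from app.vector_db.client import remember, recall

app = FastAPI()

class Item(BaseModel):
    text: str
    metadata: dict = {}

@app.post("/remember")
async def remember_item(item: Item):
    # Embed and store the posted text with its metadata
    remember(item.text, item.metadata)
    return {"status": "stored"}

@app.get("/recall")
async def recall_items(q: str, k: int = 5):
    # Return the k nearest stored items to the query string
    return {"results": recall(q, k)}

Started with uvicorn app.api.main:app --reload, this would give the realtime LLM simple HTTP endpoints for writing to and reading from its vector memory.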