generated agent-1
parent 9373d2ba3f
commit 90f4537a4c
.env (12 changes)
@@ -7,4 +7,14 @@ TTS_BACKEND_URL3=http://192.168.0.10:9008/asr #gpu
TTS_BACKEND_URL4=http://192.168.0.10:9009/asr #cpu 9008-gpu
WS_URL=ws://localhost:8081
SERVER_PORT_WS=8081
SERVER_PORT_HTTP=3005
SERVER_PORT_HTTP=3005

# aider
AIDER_MODEL=
AIDER_4=
AIDER_35TURBO=

# OPENAI_API_KEY=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
OPENAI_API_BASE=https://api.deepseek.com/v1
OPENAI_API_KEY=sk-99df7736351f4536bd72cd64a416318a
AIDER_MODEL=deepseek-coder #deepseek-coder, deepseek-chat
.vscode/tasks.json (vendored, 14 changes)
@@ -74,7 +74,17 @@
// "kind": "build",
// "isDefault": true
// }
// }

// },
,
{
    "label": "Activate Conda Env, Set ENV Variable, and Open Shell",
    "type": "shell",
    "command": "bash --init-file <(echo 'source ~/miniconda3/etc/profile.d/conda.sh && conda activate aider && export OPENAI_API_KEY=xxx && aider --no-auto-commits')",
    "problemMatcher": [],
    "presentation": {
        "reveal": "always",
        "panel": "new"
    },
}
]
}
@@ -1,27 +0,0 @@
Key features & principles:
- modal/plug & play design
- Biomimicry based
- self-inferencing loop
- Graph->LLM->Graph based logic (self-reflect)
- attention (short-term memory)
- generalized & contextualized memory schema (memory is strongly context dependent and temporal)
LLM module
Graph module
Short-term memory module
mid-term memory (history on the topic)
graph-powered long-term memory with embedding storage for skills & AII (interface on some of the layers)
separate text IOs
- multi-agent communication module/console
- internal state/context/mood/STM
- actions output


GRAPH schema

idea
- is child of

Q: Brainstorm a neo4j schema for biomimetic memory storage as a neo4j graph database. It should be similar to the way humans store, retrieve and generalize knowledge.

Memory model:
agent-a/.env (new file, 3 lines)
@@ -0,0 +1,3 @@
NEO4J_URI="bolt://192.168.0.10:7687"
NEO4J_USER="neo4j"
NEO4J_PASSWORD="lucas-bicycle-powder-stretch-ford-9492"
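These three variables look like the credentials the project's Neo4jConnection class consumes. A minimal sketch of how they could be loaded and checked, assuming the python-dotenv and neo4j packages are installed (this loader is illustrative, not the code committed here):

import os
from dotenv import load_dotenv
from neo4j import GraphDatabase

# Illustrative only: read agent-a/.env and verify the Neo4j connection.
load_dotenv("agent-a/.env")  # provides NEO4J_URI, NEO4J_USER, NEO4J_PASSWORD

driver = GraphDatabase.driver(
    os.environ["NEO4J_URI"],
    auth=(os.environ["NEO4J_USER"], os.environ["NEO4J_PASSWORD"]),
)
with driver.session() as session:
    print(session.run("RETURN 1 AS ok").single()["ok"])  # simple connectivity check
driver.close()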
agent-a/.gitignore (vendored, new empty file)
agent-a/README.md (new empty file)
agent-a/requirements.txt (new empty file)
agent-a/setup.sh (new file, 31 lines)
@@ -0,0 +1,31 @@
#!/bin/bash

# Function to create directories
create_directories() {
    mkdir -p ./{data/{raw,processed},notebooks,src/{agent,llm,tools,utils},tests/{agent,llm,tools,utils}}
}

# Function to create files
create_files() {
    touch ./{.gitignore,requirements.txt,README.md}
    touch ./src/{agent,llm,tools,utils}/__init__.py
    touch ./tests/{agent,llm,tools,utils}/test_{agent,llm,tool1,tool2,utils}.py
}

# Function to initialize Git repository
initialize_git() {
    echo "Do you want to initialize a Git repository? (y/n)"
    read answer
    if [ "$answer" == "y" ]; then
        git init
        echo "Git repository initialized."
        cd ..
    fi
}

# Main script execution
create_directories
create_files
#initialize_git

echo "Project setup complete."
agent-a/src/agent/__init__.py (new empty file)
agent-a/src/agent/agent.py (new file, 47 lines)
@@ -0,0 +1,47 @@
# src/agent/agent.py

class Agent:
    def __init__(self):
        self.tools = []  # Initialize an empty list to store tools

    def add_tool(self, tool):
        # Add a tool to the agent's toolbox
        self.tools.append(tool)

    def remove_tool(self, tool):
        # Remove a tool from the agent's toolbox
        if tool in self.tools:
            self.tools.remove(tool)

    def use_tool(self, tool, *args, **kwargs):
        # Use a tool with the agent
        if tool in self.tools:
            return tool.use(*args, **kwargs)
        else:
            return "Tool not found in agent's toolbox."

    def explore(self):
        # Implement the logic for exploring new ideas
        pass

    def organize(self):
        # Implement the logic for organizing knowledge
        pass

    def improve(self):
        # Implement the logic for improving reasoning
        pass

    def rest(self):
        # Implement the logic for resting and updating knowledge
        pass

# Example usage
if __name__ == "__main__":
    agent = Agent()
    # Add tools to the agent's toolbox
    # agent.add_tool(some_tool_instance)

    # Use a tool
    # result = agent.use_tool(some_tool_instance, some_arguments)
    # print(result)
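Agent.use_tool above only assumes that a tool object exposes a use(*args, **kwargs) method. A hypothetical way to plug such a tool in (EchoTool is invented here purely for illustration; the import assumes agent-a/ is the working directory so src/ is importable):

# Hypothetical usage sketch; EchoTool is not part of this commit.
from src.agent.agent import Agent

class EchoTool:
    def use(self, *args, **kwargs):
        # Echo the call back, just to show the use_tool path
        return {"args": args, "kwargs": kwargs}

agent = Agent()
tool = EchoTool()
agent.add_tool(tool)
print(agent.use_tool(tool, "hello", lang="en"))  # {'args': ('hello',), 'kwargs': {'lang': 'en'}}
agent.remove_tool(tool)
print(agent.use_tool(tool))                      # "Tool not found in agent's toolbox."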
agent-a/src/llm/__init__.py (new empty file)
agent-a/src/tools/__init__.py (new empty file)
agent-a/src/utils/__init__.py (new empty file)
(binary image changed: 1.6 KiB before, 1.6 KiB after)
doc/_notes/arti/ideas.md (new file, 32 lines)
@@ -0,0 +1,32 @@
Key features & principles:
- modal/plug & play design
- Biomimicry based
- self-inferencing loop
- Graph->LLM->Graph based logic (self-reflect)
- attention (short-term memory)
- generalized & contextualized memory schema (memory is strongly context dependent and temporal)
LLM module
Graph module
Short-term memory module
mid-term memory (history on the topic)
graph-powered long-term memory with embedding storage for skills & AII (interface on some of the layers)
separate text IOs
- multi-agent communication module/console
- internal state/context/mood/STM
- actions output


GRAPH schema

idea
- is child of

Q: Brainstorm a neo4j schema for biomimetic memory storage as a neo4j graph database. It should be similar to the way humans store, retrieve and generalize knowledge.

Memory model:


-- example prompt:
You are an AI living natively on a linux host. You have all local resources and installed software at your disposal: root console access, a git repo and the internet. You have your own codebase and will brainstorm and find ideas for how to improve your reasoning and knowledge of the world. To avoid messing up your health, you will prefer to install new packages and test ideas inside docker or virtual envs. You can test your ideas in docker containers and can experiment with different ways to pipeline and organise the thought process. You will also explain each approach and promote the best containers. A local neo4j database is your memory and knowledge source. Whenever you are not certain about a topic, or need to refresh your knowledge, you will check online for updated information. If the problem concerns npm, pip or another package, or an existing repository on github or another repository server, you will first update your internal database with the latest readme and usage documentation from the repository authors and later search stackoverflow to update your skills. Periodically you will reorganize and tidy your "memory", making it more easily accessible and updating the embedding vectors to allow easy access to the needed context when there is a problem to solve. You will build a tools library over time that will help you solve more complex problems and keep it in your memory together with a list of tasks completed with each tool. You will experiment with different pipelines for different tasks, memorise them, and generalize and optimize existing pipelines over time to build solid knowhow.

Let's create the project setup in python. Our main file will be the agent, and it will call the llm and other tools (maybe langchain), then explore, organize and improve when 'resting'.
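The "Memory model" question in the notes is left open. One brainstormed direction, sketched with the fields the Memory class elsewhere in this commit already carries (memory_id, content, timestamp, importance, relevance, associated_tags): Memory nodes attached to a Person and to Tag nodes. The labels and relationship names below are assumptions for illustration, not a schema the project has settled on, and the sketch expects the NEO4J_* variables from agent-a/.env to be exported:

# Brainstormed schema sketch; labels and relationships are assumptions, not the committed schema.
import os
from neo4j import GraphDatabase

def store_memory(tx, person_id, memory):
    # One Memory node per remembered item, linked to its owner and its tags.
    tx.run(
        """
        MERGE (p:Person {person_id: $person_id})
        CREATE (m:Memory {memory_id: $m.memory_id, content: $m.content,
                          timestamp: $m.timestamp, importance: $m.importance,
                          relevance: $m.relevance})
        MERGE (p)-[:REMEMBERS]->(m)
        WITH m
        UNWIND $m.associated_tags AS tag_name
        MERGE (t:Tag {name: tag_name})
        MERGE (m)-[:TAGGED_AS]->(t)
        """,
        person_id=person_id, m=memory,
    )

driver = GraphDatabase.driver(
    os.environ["NEO4J_URI"],
    auth=(os.environ["NEO4J_USER"], os.environ["NEO4J_PASSWORD"]),
)
with driver.session() as session:
    session.write_transaction(store_memory, "p1", {
        "memory_id": "m1", "content": "first test memory", "timestamp": "2024-01-01T12:00:00",
        "importance": 0.7, "relevance": 0.9, "associated_tags": ["test", "setup"],
    })
driver.close()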
@@ -11,6 +11,8 @@ class Neo4jConnection:
        # Create the schema
        self.create_schema()

        self.test_retrieval()

        # Close the connection
        self.close()

@@ -21,6 +23,11 @@ class Neo4jConnection:
        with self.driver.session() as session:
            session.write_transaction(self._create_constraints_and_indexes)

    def test_retrieval(tx):
        # run MATCH (n) RETURN n LIMIT 25
        result = tx.run("MATCH (n) RETURN n LIMIT 25;")


    @staticmethod
    def _create_constraints_and_indexes(tx):
        # Constraints and indexes for Person
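As committed, test_retrieval takes tx as its only argument but is invoked as self.test_retrieval(), and tx.run is called without any transaction being opened, so the call would fail at runtime. A minimal corrected sketch of the method, reusing the self.driver attribute and the explicit transaction-function style the class already uses (illustrative, not the committed fix):

    # Illustrative drop-in replacement for test_retrieval; not the committed code.
    def test_retrieval(self):
        def _fetch(tx):
            # MATCH (n) RETURN n LIMIT 25
            return [record["n"] for record in tx.run("MATCH (n) RETURN n LIMIT 25")]

        with self.driver.session() as session:
            nodes = session.read_transaction(_fetch)
        print(f"Retrieved {len(nodes)} nodes")
        return nodes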
@@ -1,10 +1,3 @@
class Person:
    def __init__(self, person_id, name, age):
        self.person_id = person_id
        self.name = name
        self.age = age


class Memory:
    def __init__(self, memory_id, content, timestamp, importance, relevance, associated_tags):
        self.memory_id = memory_id
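The hunk is cut off by the diff context after the first assignment. Presumably the rest of the constructor just stores the remaining parameters; a minimal illustrative version (the file's actual body may differ):

# Illustrative sketch of the Memory constructor's likely remainder; not taken from the file.
class Memory:
    def __init__(self, memory_id, content, timestamp, importance, relevance, associated_tags):
        self.memory_id = memory_id
        self.content = content
        self.timestamp = timestamp
        self.importance = importance
        self.relevance = relevance
        self.associated_tags = associated_tags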
scripts/aider.sh (new file, 12 lines)
@@ -0,0 +1,12 @@
#!/bin/bash
source ~/miniconda3/etc/profile.d/conda.sh  # Adjust the path as per your Conda installation
conda activate aider
export OPENAI_API_KEY=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


# aider --no-auto-commits

OPENAI_API_BASE=https://api.deepseek.com/v1
OPENAI_API_KEY=sk-99df7736351f4536bd72cd64a416318a
AIDER_MODEL=deepseek-coder #deepseek-coder, deepseek-chat
aider --openai-api-base https://api.deepseek.com/v1 --openai-api-key sk-99df7736351f4536bd72cd64a416318a --model deepseek-coder