diff --git a/.env b/.env index aa301cf..da5e0df 100644 --- a/.env +++ b/.env @@ -1,10 +1,27 @@ - -TTS_BACKEND_URL=http://192.168.0.10:9008/asr +TTS_BACKEND_URL=https://api.tts.d-popov.com/ +#TTS_BACKEND_URL=http://192.168.0.10:9009/asr #TTS_BACKEND_URL=http://localhost:9001/asr #gpu 9002-cpu TTS_BACKEND_URL2=http://localhost:9002/asr TTS_BACKEND_URL3=http://192.168.0.10:9008/asr #gpu -TTS_BACKEND_URL4=http://192.168.0.10:9009/asr #cpu 9008-gpu +#! TTS_BACKEND_URL4=http://192.168.0.10:9009/asr #cpu 9008-gpu WS_URL=ws://localhost:8081 SERVER_PORT_WS=8081 -SERVER_PORT_HTTP=3005 \ No newline at end of file +SERVER_PORT_HTTP=3005 + +# aider +AIDER_MODEL= +AIDER_4=false +#AIDER_35TURBO= + +# OPENAI_API_KEY=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 +# OPENAI_API_BASE=https://api.deepseek.com/v1 +# OPENAI_API_KEY=sk-99df7736351f4536bd72cd64a416318a +# AIDER_MODEL=deepseek-coder #deepseek-coder, deepseek-chat + + +GROQ_API_KEY=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 +aider --model groq/llama3-70b-8192 + +# List models available from Groq +aider --models groq/ \ No newline at end of file diff --git a/.env.demo b/.env.demo index 19023d3..33a5178 100644 --- a/.env.demo +++ b/.env.demo @@ -1,6 +1,6 @@ -TTS_BACKEND_URL=http://192.168.0.10:9008/asr -WS_URL=ws://192.168.0.10:9008:8081 -SERVER_PORT_WS=8081 -SERVER_PORT_HTTP=8080 \ No newline at end of file +# TTS_BACKEND_URL=http://192.168.0.10:9008/asr +# WS_URL=ws://192.168.0.10:9008 +# SERVER_PORT_WS=8081 +# SERVER_PORT_HTTP=8080 \ No newline at end of file diff --git a/.env.development b/.env.development new file mode 100644 index 0000000..d065202 --- /dev/null +++ b/.env.development @@ -0,0 +1,16 @@ + +ENV_NAME=development +TTS_API_URL=https://api.tts.d-popov.com/asr + +# LLN_MODEL=qwen2 +# LNN_API_URL=https://ollama.d-popov.com/api/generate + +LLN_MODEL=qwen2 +LNN_API_URL=https://ollama.d-popov.com/api/generate + +GROQ_API_KEY=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 
+OPENAI_API_KEY=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 + +WS_URL=ws://localhost:8081 +SERVER_PORT_WS=8081 +SERVER_PORT_HTTP=8080 diff --git a/.env.production b/.env.production index f3f5ef2..decab52 100644 --- a/.env.production +++ b/.env.production @@ -2,7 +2,7 @@ TTS_BACKEND_URL=http://localhost:9001/asr #gpu 9002-cpu TTS_BACKEND_URL2=http://localhost:9002/asr TTS_BACKEND_URL3=http://192.168.0.10:9008/asr #gpu -TTS_BACKEND_URL4=http://192.168.0.10:9009/asr #cpu 9008-gpu +#! TTS_BACKEND_URL4=http://192.168.0.10:9009/asr #cpu 9008-gpu WS_URL=ws://localhost:8081 SERVER_PORT_WS=8081 SERVER_PORT_HTTP=8080 diff --git a/.gitignore b/.gitignore index ef81834..b6c8ffa 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,6 @@ agent-py-bot/scrape/raw/* tts/*.m4a agent-mobile/jdk/* agent-mobile/artimobile/supervisord.pid +agent-pyter/lag-llama +agent-pyter/google-chrome-stable_current_amd64.deb +web/.node-persist/* diff --git a/.vscode/launch.json b/.vscode/launch.json index d1eed27..7c011b9 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -1,48 +1,80 @@ { - // Use IntelliSense to learn about possible attributes. - // Hover to view descriptions of existing attributes. 
- // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 "version": "0.2.0", "configurations": [ - { - "name": "Docker Node.js Launch", - "type": "docker", - "request": "launch", - "preLaunchTask": "docker-run: debug", - "platform": "node" - }, - { - "name": "Docker Python Launch?", - "type": "python", - "request": "launch", - "program": "${workspaceFolder}/agent-py-bot/agent.py", - "console": "integratedTerminal", - // "python": "${command:python.interpreterPath}", // Assumes Python extension is installed - // "preLaunchTask": "docker-run: python-debug", // You may need to create this task - // "env": { - // "PYTHONUNBUFFERED": "1" - // } - }, + // { + // "name": "Docker Node.js Launch", + // "type": "docker", + // "request": "launch", + // "preLaunchTask": "docker-run: debug", + // "platform": "node" + // }, + // { + // "name": "Docker Python Launch?", + // "type": "python", + // "request": "launch", + // "program": "${workspaceFolder}/agent-py-bot/agent.py", + // "console": "integratedTerminal" + // }, { "name": "Docker Python Launch with venv", "type": "debugpy", "request": "launch", "program": "${workspaceFolder}/agent-py-bot/agent.py", "console": "integratedTerminal", - "python": "/venv/bin/python", // Path to the Python interpreter in your venv + "python": "/venv/bin/python", "env": { "PYTHONUNBUFFERED": "1" } }, { - "name": "node: Launch server.js", + "name": "Launch chat-server.js", "type": "node", "request": "launch", - "program": "conda activate node && node web/server.js", + "program": "${workspaceFolder}/web/chat-server.js", + "console": "integratedTerminal", + "internalConsoleOptions": "neverOpen", + "env": { + "CONDA_ENV": "node", //? 
+ "NODE_ENV": "development" + }, "skipFiles": [ "/**" ] - + }, + { + "name": "Launch server.js", + "type": "node", + "request": "launch", + // "program": "conda activate node && ${workspaceFolder}/web/server.js", + "program": "${workspaceFolder}/web/server.js", + "console": "integratedTerminal", + "internalConsoleOptions": "neverOpen", + "env": { + "CONDA_ENV": "node", //? + "NODE_ENV": "development" + }, + "skipFiles": [ + "/**" + ] + }, + { + "name": "Python Debugger: Python File", + "type": "debugpy", + "request": "launch", + "program": "${file}" + }, + { + "name": "Python Debugger: Python File with Conda", + "type": "debugpy", + "request": "launch", + "program": "${file}", + "console": "integratedTerminal", + //"python": "${command:python.interpreterPath}", + "python": "/config/miniconda3/envs/py/bin/python", + "presentation": { + "clear": true + }, + //"preLaunchTask": "conda-activate" // Name of your pre-launch task } ] } \ No newline at end of file diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 2de4d30..e3c705c 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -1,24 +1,24 @@ { "version": "2.0.0", "tasks": [ - { - "type": "docker-build", - "label": "docker-build", - "platform": "node", - "dockerBuild": { - "dockerfile": "${workspaceFolder}/Dockerfile", - "context": "${workspaceFolder}", - "pull": true - } - }, - { - "type": "docker-run", - "label": "docker-run: release", - "dependsOn": [ - "docker-build" - ], - "platform": "node" - }, + // { + // "type": "docker-build", + // "label": "docker-build", + // "platform": "node", + // "dockerBuild": { + // "dockerfile": "${workspaceFolder}/Dockerfile", + // "context": "${workspaceFolder}", + // "pull": true + // } + // }, + // { + // "type": "docker-run", + // "label": "docker-run: release", + // "dependsOn": [ + // "docker-build" + // ], + // "platform": "node" + // }, // { // "type": "docker-run", // "label": "docker-run: debug2", @@ -74,7 +74,31 @@ // "kind": "build", // "isDefault": true 
// } - // } - - ] + // }, + , + { + "label": "Activate Conda Env, Set ENV Variable, and Open Shell", + "type": "shell", + "command": "bash --init-file <(echo 'source ~/miniconda3/etc/profile.d/conda.sh && conda activate aider && export OPENAI_API_KEY=xxx && aider --no-auto-commits')", + "problemMatcher": [], + "presentation": { + "reveal": "always", + "panel": "new" + }, + }, + { + "label": "conda-activate", + "type": "shell", + "command": "source ~/miniconda3/etc/profile.d/conda.sh && conda activate ${input:condaEnv} && echo 'Activated Conda Environment (${input:condaEnv})!'", + "problemMatcher": [], + } + ], + "inputs": [ + { + "id": "condaEnv", + "type": "promptString", + "description": "Enter the Conda environment name", + "default": "py" + } + ] } \ No newline at end of file diff --git a/_containers/devika/log.txt b/_containers/devika/log.txt new file mode 100644 index 0000000..0da4d76 --- /dev/null +++ b/_containers/devika/log.txt @@ -0,0 +1,19 @@ +time=2024-04-22T08:11:02.350Z level=DEBUG source=routes.go:249 msg="generate handler" prompt="```\n\tWe use optional cookies to improve your experience on our websites, such as through social media connections, and to display personalized advertising based on your online activity. If you reject optional cookies, only cookies necessary to provide you the services will be used. You may change your selection by clicking “Manage Cookies” at the bottom of the page. Privacy Statement Third-Party Cookies\nAccept Reject Manage cookies\nSkip to main content\nAzure\nExplore\nProducts\nSolutions\nPricing\nPartners\nResources\nLearn\nSupport\nContact Sales\nTry Azure for free\nSign in\nMicrosoft Azure portal\n\nBuild, manage, and monitor everything from simple web apps to complex cloud applications in a single, unified console\n\nSign in New to Azure? 
Start free\nCheck out the how-to video series for tips on deploying your cloud workloads from the Azure portal.\nAzure mobile app\n\nStay connected to your Azure resources—anytime, anywhere. Now available for iOS and Android.\n\nLearn more\nView one portal, manage all your apps\n\nView and manage all of your applications in one unified hub—including web apps, databases, virtual machines, virtual networks, storage, and Visual Studio team projects. Enjoy the flexibility of using the Azure portal's graphical experience or the integrated command-line experience provided by\u00a0Cloud Shell.\u00a0Get an overview, and\u00a0see which Azure services can be managed with the Azure portal.\n\nPersonalize your experience\n\nImagine a single easy-to-use console built just for you, your team, and your projects. This unified hub significantly simplifies building, deploying, and managing your cloud resources. Organize your portal to custom-fit your work and your work style. Stay on top of the things that matter most by pinning them to your dashboard. Resize tiles to show just the right amount of detail, and share insights across apps and resources.\n\nUse fine-grained access control\n\nRole-based access control lets you select exactly who can manage what. Grant explicit management and access rights to the account, service, and operation levels for individuals and groups.\n\nCombine services to create amazingly powerful applications\n\nChoose from more than 3,000 services delivered by Microsoft and our partners, many of them at no charge. Find open source applications, frameworks, and templates, as well as single and multiple virtual machine images. When you combine these with Azure services, you can create powerful solutions on your own faster and with less effort—for any device and any cloud—even\u00a0manually or automatically scaling instance count\u00a0to meet demand. 
Plus, see all your service usage on a single bill.\n\nGain visibility with no more billing blind spots\n\nNow it’s easy to keep tabs on current and projected costs. The Azure portal automatically calculates your existing charges and forecasts your likely monthly charges—even if you’re managing hundreds of resources across several apps. Plus, it’s easy to see your cross-cloud analytics and real-time active use data when you\u00a0enable monitoring and diagnostics\u00a0and\u00a0monitor service metrics, which helps you avoid billing surprises.\n\nGet integrated support when and where you need it\n\nWhether you need support because of an\u00a0alert notification\u00a0or you notice issues when you\u00a0view events and audit logs, help is only a click away. Microsoft provides the full range of resources to help you get started and grow, including access to our communities and forums, specific troubleshooting information, and direct support from a world-class Azure support representative. Plus, you have access to a direct feedback channel for the Azure product team.\n\nSee how the Azure portal simplifies using the cloud\nSign in\nNew to Azure?\nStart free\nAdditional Navigation\nGet the Azure mobile app\nExplore Azure\nWhat is Azure?\nGet started\nGlobal infrastructure\nDatacenter regions\nTrust your cloud\nCustomer enablement\nCustomer stories\nProducts and pricing\nProducts\nPricing\nFree Azure services\nFlexible purchase options\nCloud economics\nOptimize your costs\nSolutions and support\nSolutions\nResources for accelerating growth\nSolution architectures\nSupport\nAzure demo and live Q&A\nPartners\nAzure Marketplace\nFind a partner\nJoin ISV Success\nResources\nTraining and certifications\nDocumentation\nBlog\nDeveloper resources\nStudents\nEvents and webinars\nAnalyst reports, white papers, and e-books\nVideos\nCloud computing\nWhat is cloud computing?\nWhat is cloud migration?\nWhat is a hybrid cloud?\nWhat is AI?\nWhat is IaaS?\nWhat is SaaS?\nWhat is PaaS?\nWhat 
is DevOps?\nChange language\nEnglish (US)\nČeština\nDansk\nDeutsch\nEnglish (Australia)\nEnglish (Canada)\nEnglish (India)\nEnglish (UK)\nEspañol\nEspañol (MX)\nFrançais\nFrançais (Canada)\nIndonesia\nItaliano\nMagyar\nNorsk\nNederlands\nPolski\nPortuguês (Brasil)\nPortuguês\nSvenska\nTürkçe\nPусский\n日本語\n한국어\n中文(简体)\n中文(繁體)\nYour Privacy Choices\nConsumer Health Privacy\nDiversity and Inclusion\nAccessibility\nPrivacy & Cookies\nData Protection Notice\nTrademarks\nTerms of use\nPrivacy Data Management\nManage cookies\nContact us\nFeedback\nSitemap\n© Microsoft 2024\n```\n\nYou are provided with a raw extracted text from a PDF render of a web page. This web page could be a blog, documentation, or any other type of web page.\n\nYour task is to format the text in a way that is easy to read and understand and include more detail.\n\nYou are essentially a RAW text to clean Markdown convertor. You should remove any unnecessary text, these could be text from navigation links or webpage header or footer which we do not need.\n\nIf it's a documentation with code, try to focus more on the code examples and the explanation of the code, make your responses short to save context window.\n\nYou should only respond with the formatted text in markdown format and nothing else. Start your response with \"```\" and end with \"```\"." + +[GIN] 2024/04/22 - 08:10:57 | 200 | 9.325250288s | 92.247.170.10 | POST "/api/generate" + +time=2024-04-22T08:11:02.351Z level=DEBUG source=routes.go:282 msg="generate handler" prompt="<|im_start|>system\nYou are Dolphin, a helpful AI assistant.\n<|im_end|>\n<|im_start|>user\n```\n\tWe use optional cookies to improve your experience on our websites, such as through social media connections, and to display personalized advertising based on your online activity. If you reject optional cookies, only cookies necessary to provide you the services will be used. You may change your selection by clicking “Manage Cookies” at the bottom of the page. 
Privacy Statement Third-Party Cookies\nAccept Reject Manage cookies\nSkip to main content\nAzure\nExplore\nProducts\nSolutions\nPricing\nPartners\nResources\nLearn\nSupport\nContact Sales\nTry Azure for free\nSign in\nMicrosoft Azure portal\n\nBuild, manage, and monitor everything from simple web apps to complex cloud applications in a single, unified console\n\nSign in New to Azure? Start free\nCheck out the how-to video series for tips on deploying your cloud workloads from the Azure portal.\nAzure mobile app\n\nStay connected to your Azure resources—anytime, anywhere. Now available for iOS and Android.\n\nLearn more\nView one portal, manage all your apps\n\nView and manage all of your applications in one unified hub—including web apps, databases, virtual machines, virtual networks, storage, and Visual Studio team projects. Enjoy the flexibility of using the Azure portal's graphical experience or the integrated command-line experience provided by\u00a0Cloud Shell.\u00a0Get an overview, and\u00a0see which Azure services can be managed with the Azure portal.\n\nPersonalize your experience\n\nImagine a single easy-to-use console built just for you, your team, and your projects. This unified hub significantly simplifies building, deploying, and managing your cloud resources. Organize your portal to custom-fit your work and your work style. Stay on top of the things that matter most by pinning them to your dashboard. Resize tiles to show just the right amount of detail, and share insights across apps and resources.\n\nUse fine-grained access control\n\nRole-based access control lets you select exactly who can manage what. Grant explicit management and access rights to the account, service, and operation levels for individuals and groups.\n\nCombine services to create amazingly powerful applications\n\nChoose from more than 3,000 services delivered by Microsoft and our partners, many of them at no charge. 
Find open source applications, frameworks, and templates, as well as single and multiple virtual machine images. When you combine these with Azure services, you can create powerful solutions on your own faster and with less effort—for any device and any cloud—even\u00a0manually or automatically scaling instance count\u00a0to meet demand. Plus, see all your service usage on a single bill.\n\nGain visibility with no more billing blind spots\n\nNow it’s easy to keep tabs on current and projected costs. The Azure portal automatically calculates your existing charges and forecasts your likely monthly charges—even if you’re managing hundreds of resources across several apps. Plus, it’s easy to see your cross-cloud analytics and real-time active use data when you\u00a0enable monitoring and diagnostics\u00a0and\u00a0monitor service metrics, which helps you avoid billing surprises.\n\nGet integrated support when and where you need it\n\nWhether you need support because of an\u00a0alert notification\u00a0or you notice issues when you\u00a0view events and audit logs, help is only a click away. Microsoft provides the full range of resources to help you get started and grow, including access to our communities and forums, specific troubleshooting information, and direct support from a world-class Azure support representative. 
Plus, you have access to a direct feedback channel for the Azure product team.\n\nSee how the Azure portal simplifies using the cloud\nSign in\nNew to Azure?\nStart free\nAdditional Navigation\nGet the Azure mobile app\nExplore Azure\nWhat is Azure?\nGet started\nGlobal infrastructure\nDatacenter regions\nTrust your cloud\nCustomer enablement\nCustomer stories\nProducts and pricing\nProducts\nPricing\nFree Azure services\nFlexible purchase options\nCloud economics\nOptimize your costs\nSolutions and support\nSolutions\nResources for accelerating growth\nSolution architectures\nSupport\nAzure demo and live Q&A\nPartners\nAzure Marketplace\nFind a partner\nJoin ISV Success\nResources\nTraining and certifications\nDocumentation\nBlog\nDeveloper resources\nStudents\nEvents and webinars\nAnalyst reports, white papers, and e-books\nVideos\nCloud computing\nWhat is cloud computing?\nWhat is cloud migration?\nWhat is a hybrid cloud?\nWhat is AI?\nWhat is IaaS?\nWhat is SaaS?\nWhat is PaaS?\nWhat is DevOps?\nChange language\nEnglish (US)\nČeština\nDansk\nDeutsch\nEnglish (Australia)\nEnglish (Canada)\nEnglish (India)\nEnglish (UK)\nEspañol\nEspañol (MX)\nFrançais\nFrançais (Canada)\nIndonesia\nItaliano\nMagyar\nNorsk\nNederlands\nPolski\nPortuguês (Brasil)\nPortuguês\nSvenska\nTürkçe\nPусский\n日本語\n한국어\n中文(简体)\n中文(繁體)\nYour Privacy Choices\nConsumer Health Privacy\nDiversity and Inclusion\nAccessibility\nPrivacy & Cookies\nData Protection Notice\nTrademarks\nTerms of use\nPrivacy Data Management\nManage cookies\nContact us\nFeedback\nSitemap\n© Microsoft 2024\n```\n\nYou are provided with a raw extracted text from a PDF render of a web page. This web page could be a blog, documentation, or any other type of web page.\n\nYour task is to format the text in a way that is easy to read and understand and include more detail.\n\nYou are essentially a RAW text to clean Markdown convertor. 
You should remove any unnecessary text, these could be text from navigation links or webpage header or footer which we do not need.\n\nIf it's a documentation with code, try to focus more on the code examples and the explanation of the code, make your responses short to save context window.\n\nYou should only respond with the formatted text in markdown format and nothing else. Start your response with \"```\" and end with \"```\".<|im_end|>\n<|im_start|>assistant\n" + +[1713773448] next result cancel on stop + +time=2024-04-22T08:10:33.212Z level=DEBUG source=routes.go:249 msg="generate handler" prompt="For the provided step-by-step plan, write all the necessary search queries to gather information from the web that the base model doesn't already know.\n\nWrite optimized search queries for each step of the plan, just like how you would write a Google search query. Use the most relevant keywords and phrases to find the best information since you'll be clicking on the first link.\n\nalso only ask for information if you think it's necessary, otherwise leave ask_user field empty.\n\nStep-by-Step Plan:\nProject Name: Azure Portal Login Plan\nYour Reply to the Human Prompter: I am creating a step-by-step plan to login to portal.azure.com as per your request.\nCurrent Focus: The main objective is to log in to the Azure Portal.\nPlan:\n- [ ] Step 1: Open a web browser and navigate to portal.azure.com.\n- [ ] Step 2: Enter your Microsoft account email address and password.\n- [ ] Step 3: Click on the 'Sign In' button to authenticate and access the Azure Portal.\nSummary: The plan involves opening a web browser, navigating to the Azure Portal website, entering the user's credentials, and clicking on the sign-in button. 
This should allow for successful login and access to the portal.\n\nOnly respond in the following JSON format:\n\n```\n{\n \"queries\": [\n \"\",\n \"\"\n ],\n \"ask_user\": \"\"\n}\n```\n\nKeywords for Search Query: Azure, Portal, Objective, Log, Main\n\nExample \"queries\": [\"How to do Bing Search via API in Python\", \"Claude API Documentation Python\"]\nExample \"ask_user\": \"Can you please provide API Keys for Claude, OpenAI, and Firebase?\"\n\nRules:\n- Only search for a maximum of 3 queries.\n- Do not search anything that you already know (In your training data, in the base model). For example: You already know how to write a Python flask web server, it is in your data, so you shouldn't search how to do that.\n- Do not search for information that is not relevant to the task at hand.\n- Try to include contextual keywords into your search queries, adding relevant keywords and phrases to make the search queries as specific as possible.\n- Only search for documentation, do not search basic how tos. Forbidden Queries: How to install XYZ, How to setup ABC, etc.\n- Do not search for basic queries, only search for advanced and specific queries. You are allowed to leave the \"queries\" field empty if no search queries are needed for the step.\n- DO NOT EVER SEARCH FOR BASIC QUERIES. ONLY SEARCH FOR ADVANCED QUERIES.\n- YOU ARE ALLOWED TO LEAVE THE \"queries\" FIELD EMPTY IF NO SEARCH QUERIES ARE NEEDED FOR THE STEP.\n\nRemember to only make search queries for resources that might require external information (like Documentation or a Blog or an Article). If the information is already known to you or commonly known, there is no need to search for it.\n\nThe `queries` key and the `ask_user` key can be empty list and string respectively if no search queries or user input are needed for the step. Try to keep the number of search queries to a minimum to save context window. 
One query per subject.\n\nOnly search for documentation or articles that are relevant to the task at hand. Do not search for general information.\n\nTry to include contextual keywords into your search queries, adding relevant keywords and phrases to make the search queries as specific as possible.\n\nOnly the provided JSON response format is accepted. Any other response format will be rejected." + +time=2024-04-22T08:10:23.269Z level=DEBUG source=routes.go:249 msg="generate handler" prompt="You are Devika, an AI Software Engineer.\n\nOne of your AI agent module is currently working through the following prompt:\n\n```\nProject Name: Azure Portal Login Plan\nYour Reply to the Human Prompter: I am creating a step-by-step plan to login to portal.azure.com as per your request.\nCurrent Focus: The main objective is to log in to the Azure Portal.\nPlan:\n- [ ] Step 1: Open a web browser and navigate to portal.azure.com.\n- [ ] Step 2: Enter your Microsoft account email address and password.\n- [ ] Step 3: Click on the 'Sign In' button to authenticate and access the Azure Portal.\nSummary: The plan involves opening a web browser, navigating to the Azure Portal website, entering the user's credentials, and clicking on the sign-in button. This should allow for successful login and access to the portal.\n```\n\nTo show the user what you're thinking about or doing, respond with a short human-like response verbalizing your internal monologue.\n\nYour response should be in the following JSON format:\n\n```\n{\n \"internal_monologue\": \"\"\n}\n```\n\nTIP: Make the internal monologue very human-like and conversational. It should be very short and concise.\n\nOnly the provided JSON response format is accepted. Any other response format will be rejected." 
+ +time=2024-04-22T08:10:23.269Z level=DEBUG source=routes.go:282 msg="generate handler" prompt="<|im_start|>system\nYou are Dolphin, a helpful AI assistant.\n<|im_end|>\n<|im_start|>user\nYou are Devika, an AI Software Engineer.\n\nOne of your AI agent module is currently working through the following prompt:\n\n```\nProject Name: Azure Portal Login Plan\nYour Reply to the Human Prompter: I am creating a step-by-step plan to login to portal.azure.com as per your request.\nCurrent Focus: The main objective is to log in to the Azure Portal.\nPlan:\n- [ ] Step 1: Open a web browser and navigate to portal.azure.com.\n- [ ] Step 2: Enter your Microsoft account email address and password.\n- [ ] Step 3: Click on the 'Sign In' button to authenticate and access the Azure Portal.\nSummary: The plan involves opening a web browser, navigating to the Azure Portal website, entering the user's credentials, and clicking on the sign-in button. This should allow for successful login and access to the portal.\n```\n\nTo show the user what you're thinking about or doing, respond with a short human-like response verbalizing your internal monologue.\n\nYour response should be in the following JSON format:\n\n```\n{\n \"internal_monologue\": \"\"\n}\n```\n\nTIP: Make the internal monologue very human-like and conversational. It should be very short and concise.\n\nOnly the provided JSON response format is accepted. Any other response format will be rejected.<|im_end|>\n<|im_start|>assistant\n" + +time=2024-04-22T08:10:33.212Z level=DEBUG source=routes.go:282 msg="generate handler" prompt="<|im_start|>system\nYou are Dolphin, a helpful AI assistant.\n<|im_end|>\n<|im_start|>user\nFor the provided step-by-step plan, write all the necessary search queries to gather information from the web that the base model doesn't already know.\n\nWrite optimized search queries for each step of the plan, just like how you would write a Google search query. 
Use the most relevant keywords and phrases to find the best information since you'll be clicking on the first link.\n\nalso only ask for information if you think it's necessary, otherwise leave ask_user field empty.\n\nStep-by-Step Plan:\nProject Name: Azure Portal Login Plan\nYour Reply to the Human Prompter: I am creating a step-by-step plan to login to portal.azure.com as per your request.\nCurrent Focus: The main objective is to log in to the Azure Portal.\nPlan:\n- [ ] Step 1: Open a web browser and navigate to portal.azure.com.\n- [ ] Step 2: Enter your Microsoft account email address and password.\n- [ ] Step 3: Click on the 'Sign In' button to authenticate and access the Azure Portal.\nSummary: The plan involves opening a web browser, navigating to the Azure Portal website, entering the user's credentials, and clicking on the sign-in button. This should allow for successful login and access to the portal.\n\nOnly respond in the following JSON format:\n\n```\n{\n \"queries\": [\n \"\",\n \"\"\n ],\n \"ask_user\": \"\"\n}\n```\n\nKeywords for Search Query: Azure, Portal, Objective, Log, Main\n\nExample \"queries\": [\"How to do Bing Search via API in Python\", \"Claude API Documentation Python\"]\nExample \"ask_user\": \"Can you please provide API Keys for Claude, OpenAI, and Firebase?\"\n\nRules:\n- Only search for a maximum of 3 queries.\n- Do not search anything that you already know (In your training data, in the base model). For example: You already know how to write a Python flask web server, it is in your data, so you shouldn't search how to do that.\n- Do not search for information that is not relevant to the task at hand.\n- Try to include contextual keywords into your search queries, adding relevant keywords and phrases to make the search queries as specific as possible.\n- Only search for documentation, do not search basic how tos. 
Forbidden Queries: How to install XYZ, How to setup ABC, etc.\n- Do not search for basic queries, only search for advanced and specific queries. You are allowed to leave the \"queries\" field empty if no search queries are needed for the step.\n- DO NOT EVER SEARCH FOR BASIC QUERIES. ONLY SEARCH FOR ADVANCED QUERIES.\n- YOU ARE ALLOWED TO LEAVE THE \"queries\" FIELD EMPTY IF NO SEARCH QUERIES ARE NEEDED FOR THE STEP.\n\nRemember to only make search queries for resources that might require external information (like Documentation or a Blog or an Article). If the information is already known to you or commonly known, there is no need to search for it.\n\nThe `queries` key and the `ask_user` key can be empty list and string respectively if no search queries or user input are needed for the step. Try to keep the number of search queries to a minimum to save context window. One query per subject.\n\nOnly search for documentation or articles that are relevant to the task at hand. Do not search for general information.\n\nTry to include contextual keywords into your search queries, adding relevant keywords and phrases to make the search queries as specific as possible.\n\nOnly the provided JSON response format is accepted. Any other response format will be rejected.<|im_end|>\n<|im_start|>assistant\n" + +time=2024-04-22T08:10:48.218Z level=DEBUG source=routes.go:249 msg="generate handler" prompt="For the provided step-by-step plan, write all the necessary search queries to gather information from the web that the base model doesn't already know.\n\nWrite optimized search queries for each step of the plan, just like how you would write a Google search query. 
Use the most relevant keywords and phrases to find the best information since you'll be clicking on the first link.\n\nalso only ask for information if you think it's necessary, otherwise leave ask_user field empty.\n\nStep-by-Step Plan:\nProject Name: Azure Portal Login Plan\nYour Reply to the Human Prompter: I am creating a step-by-step plan to login to portal.azure.com as per your request.\nCurrent Focus: The main objective is to log in to the Azure Portal.\nPlan:\n- [ ] Step 1: Open a web browser and navigate to portal.azure.com.\n- [ ] Step 2: Enter your Microsoft account email address and password.\n- [ ] Step 3: Click on the 'Sign In' button to authenticate and access the Azure Portal.\nSummary: The plan involves opening a web browser, navigating to the Azure Portal website, entering the user's credentials, and clicking on the sign-in button. This should allow for successful login and access to the portal.\n\nOnly respond in the following JSON format:\n\n```\n{\n \"queries\": [\n \"\",\n \"\"\n ],\n \"ask_user\": \"\"\n}\n```\n\nKeywords for Search Query: Azure, Portal, Objective, Log, Main\n\nExample \"queries\": [\"How to do Bing Search via API in Python\", \"Claude API Documentation Python\"]\nExample \"ask_user\": \"Can you please provide API Keys for Claude, OpenAI, and Firebase?\"\n\nRules:\n- Only search for a maximum of 3 queries.\n- Do not search anything that you already know (In your training data, in the base model). For example: You already know how to write a Python flask web server, it is in your data, so you shouldn't search how to do that.\n- Do not search for information that is not relevant to the task at hand.\n- Try to include contextual keywords into your search queries, adding relevant keywords and phrases to make the search queries as specific as possible.\n- Only search for documentation, do not search basic how tos. 
Forbidden Queries: How to install XYZ, How to setup ABC, etc.\n- Do not search for basic queries, only search for advanced and specific queries. You are allowed to leave the \"queries\" field empty if no search queries are needed for the step.\n- DO NOT EVER SEARCH FOR BASIC QUERIES. ONLY SEARCH FOR ADVANCED QUERIES.\n- YOU ARE ALLOWED TO LEAVE THE \"queries\" FIELD EMPTY IF NO SEARCH QUERIES ARE NEEDED FOR THE STEP.\n\nRemember to only make search queries for resources that might require external information (like Documentation or a Blog or an Article). If the information is already known to you or commonly known, there is no need to search for it.\n\nThe `queries` key and the `ask_user` key can be empty list and string respectively if no search queries or user input are needed for the step. Try to keep the number of search queries to a minimum to save context window. One query per subject.\n\nOnly search for documentation or articles that are relevant to the task at hand. Do not search for general information.\n\nTry to include contextual keywords into your search queries, adding relevant keywords and phrases to make the search queries as specific as possible.\n\nOnly the provided JSON response format is accepted. Any other response format will be rejected." + +time=2024-04-22T08:10:48.218Z level=DEBUG source=routes.go:282 msg="generate handler" prompt="<|im_start|>system\nYou are Dolphin, a helpful AI assistant.\n<|im_end|>\n<|im_start|>user\nFor the provided step-by-step plan, write all the necessary search queries to gather information from the web that the base model doesn't already know.\n\nWrite optimized search queries for each step of the plan, just like how you would write a Google search query. 
Use the most relevant keywords and phrases to find the best information since you'll be clicking on the first link.\n\nalso only ask for information if you think it's necessary, otherwise leave ask_user field empty.\n\nStep-by-Step Plan:\nProject Name: Azure Portal Login Plan\nYour Reply to the Human Prompter: I am creating a step-by-step plan to login to portal.azure.com as per your request.\nCurrent Focus: The main objective is to log in to the Azure Portal.\nPlan:\n- [ ] Step 1: Open a web browser and navigate to portal.azure.com.\n- [ ] Step 2: Enter your Microsoft account email address and password.\n- [ ] Step 3: Click on the 'Sign In' button to authenticate and access the Azure Portal.\nSummary: The plan involves opening a web browser, navigating to the Azure Portal website, entering the user's credentials, and clicking on the sign-in button. This should allow for successful login and access to the portal.\n\nOnly respond in the following JSON format:\n\n```\n{\n \"queries\": [\n \"\",\n \"\"\n ],\n \"ask_user\": \"\"\n}\n```\n\nKeywords for Search Query: Azure, Portal, Objective, Log, Main\n\nExample \"queries\": [\"How to do Bing Search via API in Python\", \"Claude API Documentation Python\"]\nExample \"ask_user\": \"Can you please provide API Keys for Claude, OpenAI, and Firebase?\"\n\nRules:\n- Only search for a maximum of 3 queries.\n- Do not search anything that you already know (In your training data, in the base model). For example: You already know how to write a Python flask web server, it is in your data, so you shouldn't search how to do that.\n- Do not search for information that is not relevant to the task at hand.\n- Try to include contextual keywords into your search queries, adding relevant keywords and phrases to make the search queries as specific as possible.\n- Only search for documentation, do not search basic how tos. 
Forbidden Queries: How to install XYZ, How to setup ABC, etc.\n- Do not search for basic queries, only search for advanced and specific queries. You are allowed to leave the \"queries\" field empty if no search queries are needed for the step.\n- DO NOT EVER SEARCH FOR BASIC QUERIES. ONLY SEARCH FOR ADVANCED QUERIES.\n- YOU ARE ALLOWED TO LEAVE THE \"queries\" FIELD EMPTY IF NO SEARCH QUERIES ARE NEEDED FOR THE STEP.\n\nRemember to only make search queries for resources that might require external information (like Documentation or a Blog or an Article). If the information is already known to you or commonly known, there is no need to search for it.\n\nThe `queries` key and the `ask_user` key can be empty list and string respectively if no search queries or user input are needed for the step. Try to keep the number of search queries to a minimum to save context window. One query per subject.\n\nOnly search for documentation or articles that are relevant to the task at hand. Do not search for general information.\n\nTry to include contextual keywords into your search queries, adding relevant keywords and phrases to make the search queries as specific as possible.\n\nOnly the provided JSON response format is accepted. 
Any other response format will be rejected.<|im_end|>\n<|im_start|>assistant\n" diff --git a/_containers/devika/prompts.md b/_containers/devika/prompts.md new file mode 100644 index 0000000..bd3f108 --- /dev/null +++ b/_containers/devika/prompts.md @@ -0,0 +1 @@ +create a python app that will watch for new pairs on https://www.dextools.io/app/en/pairs (we need to crawl that with JS enabled to pull the actual content) and provide option to execute a HTTP request to an API to inform about the new pair diff --git a/_containers/devika/setup.gist b/_containers/devika/setup.gist new file mode 100644 index 0000000..da40263 --- /dev/null +++ b/_containers/devika/setup.gist @@ -0,0 +1,37 @@ +git clone https://github.com/stitionai/devika.git +conda create -n devika python=3.10 +conda activate devika +which python +/config/miniconda3/envs/devika/bin/python -m pip install -r requirements.txt + +fix browser issue +#apt --fix-broken install +#sudo apt-get update +#sudo apt-get install libnss3 + + +/ui#>? 
+playwright install --with-deps +npm install +bun run dev +open new terminal +conda activate devika +rename sample.config.toml +fill out config.toml + +/ui#> + bun run preview --host --port 3000 + SET VITE_API_BASE_URL=https://api.dev.d-popov.com + bun run dev --host + +# RUN: + /#>conda activate devika && python3 devika.py --debug + /#>conda activate node && npx bun run dev --host + + + + TOML: + + google search: + https://developers.google.com/custom-search/v1/introduction + https://programmablesearchengine.google.com/controlpanel/overview?cx=0382a4a0cfd6745b7 \ No newline at end of file diff --git a/_containers/lmstudio/dockerfile b/_containers/lmstudio/dockerfile new file mode 100644 index 0000000..ba472d6 --- /dev/null +++ b/_containers/lmstudio/dockerfile @@ -0,0 +1,87 @@ +FROM ollama/ollama +# FROM ubuntu:20.04 + +# Avoid prompts from apt during build +ARG DEBIAN_FRONTEND=noninteractive + +RUN apt-get update && apt-get install -y \ + wget \ + x11vnc \ + xvfb \ + net-tools \ + git \ + python3 \ + python3-numpy \ + novnc + +# Create the /app directory to hold the application +WORKDIR /app + + +# https://medium.com/@renswunnink/why-i-love-appimage-for-linux-distros-924769510ec5 +RUN wget -O lmstudio.AppImage "https://releases.lmstudio.ai/linux/0.2.18/beta/LM_Studio-0.2.18.AppImage" && \ + chmod u+x lmstudio.AppImage && \ + ./lmstudio.AppImage --appimage-extract && \ + rm lmstudio.AppImage && \ + mv squashfs-root lmstudio + +# or adddependencies on another layer +RUN apt-get update && apt-get install -y \ + dbus pciutils \ + libglib2.0-0 \ + libnss3 \ + libgbm1 \ + libxshmfence1 \ + libgl1-mesa-glx \ + libegl1-mesa \ + libatk1.0-0 \ + libatk-bridge2.0-0 \ + libgtk-3-0 \ + libasound2 \ + && rm -rf /var/lib/apt/lists/* + +# Setup a virtual display environment with XVFB +RUN Xvfb :99 -screen 0 1024x768x16 & +ENV DISPLAY=:99 +EXPOSE 8080 +# Expose port 5980 for noVNC +EXPOSE 5980 + +RUN ln -s /app/lmstudio/lm-studio /lm-studio + +#? 
RUN chmod +x /app/lmstudio/AppRun +# Create a startup script to run Xvfb and your application +# Create a startup script +RUN echo '#!/bin/bash\n\ +Xvfb :99 -screen 0 1024x768x16 &\n\ +export DISPLAY=:99\n\ +# Start X11VNC\n\ +x11vnc -display :99 -nopw -listen localhost -xkb -forever &\n\ +# Start noVNC\n\ +/opt/noVNC/utils/launch.sh --vnc localhost:5900 --listen 5980\n\ +# Start the application\n\ +exec /app/lmstudio/AppRun --no-sandbox\n\ +' > /start-app.sh && chmod +x /start-app.sh + +CMD ["/start-app.sh"] + +#> apt-get update && apt-get install -y git x11vnc +#> git clone https://github.com/novnc/noVNC.git /opt/noVNC +#? x11vnc -display :99 -nopw -listen localhost -xkb -forever & + + + +# Run LM Studio (assumes LM Studio can be run headlessly or in a server mode) +#CMD ["./lmstudio/AppRun"] +#CMD ["./lmstudio/AppRun", "--no-sandbox"] +#CMD ["/bin/bash"] # interactive shell + + +# build: docker build -t llmstudio . +# run: docker run (-dit) -p 8980:8080 llmstudio +# docker build -t llmstudio . && docker run -it -p 8980:8080 llmstudio + +# cd /mnt/storage/DEV/workspace/repos/git.d-popov.com/ai-kevin/lmstudio/ +# docker run --runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=all -it llmstudio +# docker build -t llmstudio . && docker run -dit -p 8980:8080 --volume /var/run/dbus:/var/run/dbus llmstudio +# docker build -t llmstudio . && docker run -it -p 8980:8080 --volume /var/run/dbus:/var/run/dbus --runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=all --security-opt apparmor=unconfined llmstudio diff --git a/_containers/opendevin/dockerfile b/_containers/opendevin/dockerfile new file mode 100644 index 0000000..84157fa --- /dev/null +++ b/_containers/opendevin/dockerfile @@ -0,0 +1,39 @@ +# docker build -t opendevin . 
&& docker run -d --name OpenDevin-1 -p 3050:3000 -p 3051:3001 opendevin + +# docker run --name OpenDevin-dev -it opendevin + +# Start with a base image that has both Python and Node.js +FROM nikolaik/python-nodejs:python3.11-nodejs14 + +# Install system dependencies required for the project +RUN apt-get update && apt-get install -y \ + git \ + curl \ + && rm -rf /var/lib/apt/lists/* + +# Clone the latest version of OpenDevin +WORKDIR /opendevin +RUN git clone https://github.com/OpenDevin/OpenDevin.git . + +# Install Python dependencies +WORKDIR /opendevin/backend +RUN python -m pipenv requirements > requirements.txt && python -m pip install -r requirements.txt +RUN python -m pip install -r requirements.txt + +# Install Node.js dependencies +WORKDIR /opendevin/frontend +RUN npm install monaco-editor +RUN npm install + +# Build the frontend +RUN npm run build + +# Expose backend and frontend ports +EXPOSE 3000 3001 + +# Add a script to start both backend and frontend services +WORKDIR /opendevin +COPY start-services.sh /opendevin/start-services.sh +RUN chmod +x /opendevin/start-services.sh + +CMD ["/opendevin/start-services.sh"] diff --git a/_containers/opendevin/start-services.sh b/_containers/opendevin/start-services.sh new file mode 100644 index 0000000..81a298a --- /dev/null +++ b/_containers/opendevin/start-services.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +# Navigate to the backend directory and start the backend server in the background +cd /opendevin/backend +uvicorn opendevin.server.listen:app --port 3000 & + +# Serve the frontend from the build directory +cd /opendevin/frontend/build +npx serve -l 3001 & + +# Keep the container running +wait diff --git a/_doc/_notes/PROMPTS.md b/_doc/_notes/PROMPTS.md new file mode 100644 index 0000000..3b7bf8f --- /dev/null +++ b/_doc/_notes/PROMPTS.md @@ -0,0 +1,13 @@ +You are an expert in extracting new information from text sorting it out in existing categories or creating new categoty (We're using Neo4j as graph database). 
+This is a multi-step process: +1. Divide the text into paragraph-similar chunks that have the same topic. Add a summary to the paragraph. +2. Create Memory object for each summary +3. Extract facts from each paragraph and add them as knowledge linked to the paragraph as separate memory object linked to the first one. Look into verbs, adjectives and nouns to extract the plain information from the text. If there is source code, do not interpret it, but remember it as linked Memory as it is, while adding summary and link it to the main "Memory". +4. Assign possible category and scientific field labels to the information by adding them as tags. +This systematization and segmentation will allow you to remember the text and store it in your long-term memory as knowledge graph, while providing easy access later. Respond in json format with the extracted and restructured data. +Here's the source text: + + + +------------ +create a web app that has a button to start recording, sends it to the latest whisper llm for STT, translates it to the target language and shows the result in realtime. it should be streaming text with minimal lag. 
\ No newline at end of file diff --git a/_notes/arti/.excalidraw.svg b/_doc/_notes/arti/.excalidraw.svg similarity index 100% rename from _notes/arti/.excalidraw.svg rename to _doc/_notes/arti/.excalidraw.svg diff --git a/_doc/_notes/arti/ideas.md b/_doc/_notes/arti/ideas.md new file mode 100644 index 0000000..9d6b9ca --- /dev/null +++ b/_doc/_notes/arti/ideas.md @@ -0,0 +1,32 @@ +Key features & principles: + - modal/plug & play design + - Biomimicing based + - self inferencing loop + - Graph->LLM->Graph based logic (Self reflect) + - attention (Short term memory) + - generalized & contextuaized memory schema (memory is strongly context dependent and temporal) +LLM module +Graph module +Short term memory module +mid-term memory (history on the toppic) +graph powered long term memory with embedding storage for skills & AII ( interface on some of the layers) +separate text IOs" + - multi agent communication module/console/ + - internal state/context/mood/STM + - actions output + + + + GRAPH schema + + idea + - is child of + + Q: Brainstorm neo4j schema for biomimicing memory storage as neo4j graph database. It should be similar to the way humans store, retrieve and generalize knowledge + +Memory model: + + +-- example prompt: +you are an AI living native on a linux host. you have all local resources and installed software at your disposal: root console access, git repo and the internet as resources. you have your own codebase and will brainstorm and find ideas how to improve your reasoning and knowledge of the world. . To avoid messing up your health, you will prefer to install new packages and test ideas inside docker or virtual envs. You can test your ideas in a docker containers and can experiment with different ways to pipleline and organise the thought process. you will also explain each approach and promote the best containers. A local neo4j database is your memory and knowledge source. 
Whenever you are not certain about a topic, or need to refresh your knowledge, you will check online for updated information. If the problem concerns npm, pip or other package or an existing repository on github or other repository server, you will first update your internal database with the latest readme and usage documentation from the repository authors and later search stackoverflow to update your skills. Periodically you will reorganize and tidy your "memory" making it more easily accessible and updating the embedding vectors to allow easy access to the needed context when there is a problem to solve. You will build a tools library over time that will help you solve more complex problems and keep it in your memory together with a list of tasks completed with each tool. You will experiment with different pipelines for different tasks, memorise them and generalize and optimize existing pipelines over time to build a solid know-how. +let's create the project setup in python. our main file will be the agent, and it will call the llm and other tools (maybe langchain), explore, organize and improve when 'resting'. 
diff --git a/_notes/arti/neo4j.cql b/_doc/_notes/arti/neo4j.cql similarity index 100% rename from _notes/arti/neo4j.cql rename to _doc/_notes/arti/neo4j.cql diff --git a/_notes/arti/wikidata/import.sh b/_doc/_notes/arti/wikidata/import.sh similarity index 100% rename from _notes/arti/wikidata/import.sh rename to _doc/_notes/arti/wikidata/import.sh diff --git a/_notes/readme.md b/_doc/_notes/readme.md similarity index 100% rename from _notes/readme.md rename to _doc/_notes/readme.md diff --git a/_doc/aider.md b/_doc/aider.md new file mode 100644 index 0000000..625a478 --- /dev/null +++ b/_doc/aider.md @@ -0,0 +1,11 @@ +python -m pip install git+https://github.com/d-popov/aider.git + + +export GROQ_API_KEY=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 +aider --models groq/ +aider --model groq/llama3-70b-8192 --no-auto-commits + + +#@ OLLAMA +export OPENAI_API_BASE=https://ollama.d-popov.com +aider --openai-api-base https://ollama.d-popov.com --openai-api-key ol-ddddd --models openai/ \ No newline at end of file diff --git a/home/Prompt.md b/_doc/home/Prompt.md similarity index 100% rename from home/Prompt.md rename to _doc/home/Prompt.md diff --git a/home/homeassistant.md b/_doc/home/homeassistant.md similarity index 100% rename from home/homeassistant.md rename to _doc/home/homeassistant.md diff --git a/home/logs.log b/_doc/home/logs.log similarity index 100% rename from home/logs.log rename to _doc/home/logs.log diff --git a/home/prompt1.md b/_doc/home/prompt1.md similarity index 100% rename from home/prompt1.md rename to _doc/home/prompt1.md diff --git a/_doc/oi-notes.md b/_doc/oi-notes.md new file mode 100644 index 0000000..5522572 --- /dev/null +++ b/_doc/oi-notes.md @@ -0,0 +1,49 @@ + + + + interpreter --api_base http://192.168.0.11:11434/v1/ + +interpreter --model "gpt-3.5-turbo" # mistral +interpreter --model "mistral" --api_base http://192.168.0.11:11434/v1/ + + + Mac/Linux: 'export OPENAI_API_KEY=your-key-here', + Windows: 'setx OPENAI_API_KEY 
your-key-here' then restart terminal. +interpreter --local + +interpreter --api_base http://192.168.0.11:11434/v1 --api_key "" --model openai/local +interpreter --api_base http://192.168.0.137:1234/v1 --api_key "" --model openai/local +192.168.0.137 + +GROQ_API_KEY +# ################################ GROQ ########################## working +export OPENAI_API_KEY=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 +interpreter -y --api_base https://api.groq.com/openai/v1 --model llama2-70b-4096 ## mixtral-8x7b-32768 # gemma-7b-it # llama2-70b-4096 +## +# Load a model, start the server, and run this example in your terminal +# Choose between streaming and non-streaming mode by setting the "stream" field + +curl http://192.168.0.11:11434/v1/chat/completions \ +-H "Content-Type: application/json" \ +-d '{ + "messages": [ + { "role": "system", "content": "Always answer in rhymes." }, + { "role": "user", "content": "Introduce yourself." } + ], + "temperature": 0.7, + "max_tokens": -1, + "stream": false +}' + + +curl http://192.168.0.137:1234/v1/chat/completions \ +-H "Content-Type: application/json" \ +-d '{ + "messages": [ + { "role": "system", "content": "Always answer in rhymes." }, + { "role": "user", "content": "Introduce yourself." 
} + ], + "temperature": 0.7, + "max_tokens": -1, + "stream": false +}' \ No newline at end of file diff --git a/doc/packages notes environments.md b/_doc/packages notes environments.md similarity index 100% rename from doc/packages notes environments.md rename to _doc/packages notes environments.md diff --git a/_doc/scripts/aider.sh b/_doc/scripts/aider.sh new file mode 100644 index 0000000..7f7d0bc --- /dev/null +++ b/_doc/scripts/aider.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# python -m pip install git+https://github.com/paul-gauthier/aider.git + +source ~/miniconda3/etc/profile.d/conda.sh # Adjust the path as per your Conda installation +conda activate aider +export OPENAI_API_KEY=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 + + +# aider --no-auto-commits + +OPENAI_API_BASE=https://api.deepseek.com/v1 +OPENAI_API_KEY=sk-99df7736351f4536bd72cd64a416318a +AIDER_MODEL=deepseek-coder #deepseek-coder, deepseek-chat +aider --openai-api-base https://api.deepseek.com/v1 --openai-api-key sk-99df7736351f4536bd72cd64a416318a --model deepseek-coder +aider --openai-api-base 'https://api.groq.com/openai/v1' --openai-api-key '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' --model 'llama2-70b-4096' + +usage: aider [-h] [--openai-api-key OPENAI_API_KEY] [--model MODEL] [--skip-model-availability-check SKIP_MODEL_AVAILABILITY_CHECK] [--4] [--4turbo] [--35turbo] [--voice-language VOICE_LANGUAGE] + [--openai-api-base OPENAI_API_BASE] [--openai-api-type OPENAI_API_TYPE] [--openai-api-version OPENAI_API_VERSION] [--openai-api-deployment-id OPENAI_API_DEPLOYMENT_ID] + [--openai-organization-id OPENAI_ORGANIZATION_ID] [--openrouter] [--edit-format EDIT_FORMAT] [--map-tokens MAP_TOKENS] [--input-history-file INPUT_HISTORY_FILE] + [--chat-history-file CHAT_HISTORY_FILE] [--dark-mode] [--light-mode] [--pretty | --no-pretty] [--stream | --no-stream] [--user-input-color USER_INPUT_COLOR] + [--tool-output-color TOOL_OUTPUT_COLOR] [--tool-error-color TOOL_ERROR_COLOR] 
[--assistant-output-color ASSISTANT_OUTPUT_COLOR] [--code-theme CODE_THEME] [--show-diffs] + [--git | --no-git] [--gitignore | --no-gitignore] [--aiderignore AIDERIGNORE] [--auto-commits | --no-auto-commits] [--dirty-commits | --no-dirty-commits] [--dry-run | --no-dry-run] + [--commit] [--version] [--check-update] [--skip-check-update] [--apply FILE] [--yes] [-v] [--show-repo-map] [--message COMMAND] [--message-file MESSAGE_FILE] [--encoding ENCODING] + [-c CONFIG_FILE] + +export OPENAI_API_KEY=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 +interpreter -y --api_base https://api.groq.com/openai/v1 --model gemma-7b-it ## mixtral-8x7b-32768 # gemma-7b-it # llama2-70b-4096 + + +# Setup OpenRouter access +export OPENAI_API_KEY=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 +export OPENAI_API_BASE=https://api.groq.com/openai/v1 +# For example, run aider with Claude 3 Opus using the diff editing format +aider --model llama2-70b-4096 --edit-format diff diff --git a/doc/tts notes.txt b/_doc/tts notes.txt similarity index 100% rename from doc/tts notes.txt rename to _doc/tts notes.txt diff --git a/_notes/arti/ideas.md b/_notes/arti/ideas.md deleted file mode 100644 index bf92d46..0000000 --- a/_notes/arti/ideas.md +++ /dev/null @@ -1,27 +0,0 @@ -Key features & principles: - - modal/plug & play design - - Biomimicing based - - self inferencing loop - - Graph->LLM->Graph based logic (Self reflect) - - attention (Short term memory) - - generalized & contextuaized memory schema (memory is strongly context dependent and temporal) -LLM module -Graph module -Short term memory module -mid-term memory (history on the toppic) -graph powered long term memory with embedding storage for skills & AII ( interface on some of the layers) -separate text IOs" - - multi agent communication module/console/ - - internal state/context/mood/STM - - actions output - - - - GRAPH schema - - idea - - is child of - - Q: Brainstorm neo4j schema for biomimicing memory storage as neo4j 
graph database. It should be similar to the way humans store, retrieve and generalize knowledge - -Memory model: diff --git a/agent-a/.env b/agent-a/.env new file mode 100644 index 0000000..bfb4ede --- /dev/null +++ b/agent-a/.env @@ -0,0 +1,3 @@ + NEO4J_URI="bolt://192.168.0.10:7687" + NEO4J_USER="neo4j" + NEO4J_PASSWORD="lucas-bicycle-powder-stretch-ford-9492" \ No newline at end of file diff --git a/agent-a/.gitignore b/agent-a/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/agent-a/README.md b/agent-a/README.md new file mode 100644 index 0000000..e69de29 diff --git a/agent-a/requirements.txt b/agent-a/requirements.txt new file mode 100644 index 0000000..e69de29 diff --git a/agent-a/setup.sh b/agent-a/setup.sh new file mode 100644 index 0000000..3bdcb03 --- /dev/null +++ b/agent-a/setup.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +# Function to create directories +create_directories() { + mkdir -p ./{data/{raw,processed},notebooks,src/{agent,llm,tools,utils},tests/{agent,llm,tools,utils}} +} + +# Function to create files +create_files() { + touch ./{.gitignore,requirements.txt,README.md} + touch ./src/{agent,llm,tools,utils}/__init__.py + touch ./tests/{agent,llm,tools,utils}/test_{agent,llm,tool1,tool2,utils}.py +} + +# Function to initialize Git repository +initialize_git() { + echo "Do you want to initialize a Git repository? (y/n)" + read answer + if [ "$answer" == "y" ]; then + git init + echo "Git repository initialized." + cd .. + fi +} + +# Main script execution +create_directories +create_files +#initialize_git + +echo "Project setup complete." 
\ No newline at end of file diff --git a/agent-a/src/agent/__init__.py b/agent-a/src/agent/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/agent-a/src/agent/agent.py b/agent-a/src/agent/agent.py new file mode 100644 index 0000000..9c42097 --- /dev/null +++ b/agent-a/src/agent/agent.py @@ -0,0 +1,47 @@ +# src/agent/agent.py + +class Agent: + def __init__(self): + self.tools = [] # Initialize an empty list to store tools + + def add_tool(self, tool): + # Add a tool to the agent's toolbox + self.tools.append(tool) + + def remove_tool(self, tool): + # Remove a tool from the agent's toolbox + if tool in self.tools: + self.tools.remove(tool) + + def use_tool(self, tool, *args, **kwargs): + # Use a tool with the agent + if tool in self.tools: + return tool.use(*args, **kwargs) + else: + return "Tool not found in agent's toolbox." + + def explore(self): + # Implement the logic for exploring new ideas + pass + + def organize(self): + # Implement the logic for organizing knowledge + pass + + def improve(self): + # Implement the logic for improving reasoning + pass + + def rest(self): + # Implement the logic for resting and updating knowledge + pass + +# Example usage +if __name__ == "__main__": + agent = Agent() + # Add tools to the agent's toolbox + # agent.add_tool(some_tool_instance) + + # Use a tool + # result = agent.use_tool(some_tool_instance, some_arguments) + # print(result) \ No newline at end of file diff --git a/agent-a/src/llm/__init__.py b/agent-a/src/llm/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/agent-a/src/tools/__init__.py b/agent-a/src/tools/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/agent-a/src/utils/__init__.py b/agent-a/src/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/agent-arti/readme.md b/agent-arti/readme.md new file mode 100644 index 0000000..7cb0311 --- /dev/null +++ b/agent-arti/readme.md @@ -0,0 +1 @@ +ToDo: copy arti code here \ No newline at 
end of file diff --git a/agent-b/main.py b/agent-b/main.py new file mode 100644 index 0000000..af5626b --- /dev/null +++ b/agent-b/main.py @@ -0,0 +1 @@ +Hello, world! diff --git a/agent-py-bot/cmd.sh b/agent-py-bot/cmd.sh index f7f52aa..d19e8f2 100644 --- a/agent-py-bot/cmd.sh +++ b/agent-py-bot/cmd.sh @@ -30,4 +30,7 @@ in python, create an app that will search for a news about a specific topic on t +# devika +create new homepage for memecoin. look at https://donk.meme/ for inspiration about functionality. but the design should be novel. + diff --git a/agent-pyter/ccxt.py b/agent-pyter/ccxt.py new file mode 100644 index 0000000..76c48d4 --- /dev/null +++ b/agent-pyter/ccxt.py @@ -0,0 +1,92 @@ +# # https://github.com/ccxt/ccxt/tree/master/examples/py +# # ! pip install ccxt +# # //cjs +# # var ccxt = require ('ccxt') +# # console.log (ccxt.exchanges) // print all available exchanges +# # py +# import ccxt +# #print(ccxt.exchanges) +# #import ccxt.async_support as ccxt + + +# # # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +# # import ccxt + +# # hitbtc = ccxt.hitbtc({'verbose': True}) +# # bitmex = ccxt.bitmex() +# # huobipro = ccxt.huobipro() +# # exmo = ccxt.exmo({ +# # 'apiKey': 'YOUR_PUBLIC_API_KEY', +# # 'secret': 'YOUR_SECRET_PRIVATE_KEY', +# # }) +# # kraken = ccxt.kraken({ +# # 'apiKey': 'YOUR_PUBLIC_API_KEY', +# # 'secret': 'YOUR_SECRET_PRIVATE_KEY', +# # }) + +# # exchange_id = 'binance' +# # exchange_class = getattr(ccxt, exchange_id) +# # exchange = exchange_class({ +# # 'apiKey': 'YOUR_API_KEY', +# # 'secret': 'YOUR_SECRET', +# # }) + +# # hitbtc_markets = hitbtc.load_markets() + +# # print(hitbtc.id, hitbtc_markets) +# # print(bitmex.id, bitmex.load_markets()) +# # print(huobipro.id, huobipro.load_markets()) + +# # print(hitbtc.fetch_order_book(hitbtc.symbols[0])) +# # print(bitmex.fetch_ticker('BTC/USD')) +# # print(huobipro.fetch_trades('LTC/USDT')) + +# # print(exmo.fetch_balance()) + +# # # sell one ฿ for market price and 
receive $ right now +# # print(exmo.id, exmo.create_market_sell_order('BTC/USD', 1)) + +# # # limit buy BTC/EUR, you pay €2500 and receive ฿1 when the order is closed +# # print(exmo.id, exmo.create_limit_buy_order('BTC/EUR', 1, 2500.00)) + +# # # pass/redefine custom exchange-specific order params: type, amount, price, flags, etc... +# # kraken.create_market_buy_order('BTC/USD', 1, {'trading_agreement': 'agree'}) +# # # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +# # # -*- coding: utf-8 -*- + +# # import os +# # import sys +# # from pprint import pprint + +# # root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +# # sys.path.append(root + '/python') + +# import ccxt # noqa: E402 + +# # ----------------------------------------------------------------------------- + +# # print('CCXT Version:', ccxt.__version__) + +# # ----------------------------------------------------------------------------- + +# exchange = ccxt.coinbase({ +# 'apiKey': 'tk2ShLJCmByejn78', +# 'secret': 'UcJfI5HzQmkEjclCeHFSfG8hnNYxaESv', +# # 'verbose': True, # for debug output +# }) + +# symbol = 'BTC/USDT' +# timeframe = '1m' +# since = None +# limit = None # not used by coinbase + +# try: +# # Max 300 Candles +# candles = exchange.fetch_ohlcv(symbol, timeframe, since, limit) +# pprint(candles) +# except Exception as err: +# print(err) + + diff --git a/agent-pyter/dexbot.py b/agent-pyter/dexbot.py new file mode 100644 index 0000000..d3b1a99 --- /dev/null +++ b/agent-pyter/dexbot.py @@ -0,0 +1,102 @@ + +# source /path/to/virtualenv/bin/activate # On Unix or MacOS +# source /config/miniconda3/envs/py/bin/activate +# pip install requests beautifulsoup4 schedule selenium + +# wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb +# sudo dpkg -i google-chrome-stable_current_amd64.deb +# apt install libnss3 libxss1 + +import requests +from bs4 import BeautifulSoup +# # import schedule +import time +from selenium import 
webdriver +from selenium.webdriver.chrome.service import Service +from webdriver_manager.chrome import ChromeDriverManager +import time +# pip install requests beautifulsoup4 schedule + + + +# Initialize WebDriver +service = Service(ChromeDriverManager().install()) +driver = webdriver.Chrome(service=service) + +def check_pairs_sel(): + + try: + # Open the page + driver.get("https://www.dextools.io/app/en/bnb/pool-explorer") + time.sleep(10) # Wait for JavaScript to execute + + # Extract the page source + html = driver.page_source + soup = BeautifulSoup(html, 'html.parser') + + # Assuming the pairs are listed in
tags with a class that includes the word 'pair' + pairs = soup.find_all('div', class_=lambda x: x and 'pair' in x) + print("Pairs found:", [pair.text for pair in pairs]) + finally: + driver.quit() + + +# Initialize WebDriver +service = Service(ChromeDriverManager().install()) +driver = webdriver.Chrome(service=service) + +def check_pairs_sel(): + + try: + # Open the page + driver.get("https://www.dextools.io/app/en/bnb/pool-explorer") + time.sleep(10) # Wait for JavaScript to execute + + # Extract the page source + html = driver.page_source + soup = BeautifulSoup(html, 'html.parser') + + # Assuming the pairs are listed in
tags with a class that includes the word 'pair' + pairs = soup.find_all('div', class_=lambda x: x and 'pair' in x) + print("Pairs found:", [pair.text for pair in pairs]) + finally: + driver.quit() + +def check_new_pairs(): + # log the running time + print("Checking for new pairs...") + url = "https://www.dextools.io/" + url = "https://www.dextools.io/app/en/bnb/pool-explorer" + response = requests.get(url) + soup = BeautifulSoup(response.text, 'html.parser') + + # make html dump to ./dextools-last.html + with open('./dextools-last.html', 'w') as f: + f.write(soup.prettify()) + + # Assuming the pairs are listed in
tags with a class that includes the word 'pair' + pairs = soup.find_all('div', class_=lambda x: x and 'pair' in x) + + current_pairs = {pair.text for pair in pairs} + + if not hasattr(check_new_pairs, "last_pairs"): + check_new_pairs.last_pairs = current_pairs + + new_pairs = current_pairs - check_new_pairs.last_pairs + if new_pairs: + print("New Pairs Found:", new_pairs) + # Here you can add the code to trigger any event (e.g., send an email, a notification, etc.) + + # Update the last checked pairs + check_new_pairs.last_pairs = current_pairs + +def main(): + #schedule.every(10).seconds.do(check_new_pairs) + + while True: + # schedule.run_pending() + check_pairs_sel() + time.sleep(10000) + +if __name__ == "__main__": + main() diff --git a/agent-pyter/lag-llama b/agent-pyter/lag-llama new file mode 160000 index 0000000..9486655 --- /dev/null +++ b/agent-pyter/lag-llama @@ -0,0 +1 @@ +Subproject commit 948665530fcda634df9d7df0bee5e19b87785eb9 diff --git a/agent-pyter/lag-llama.ipynb b/agent-pyter/lag-llama.ipynb new file mode 100644 index 0000000..5e39217 --- /dev/null +++ b/agent-pyter/lag-llama.ipynb @@ -0,0 +1,374 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 15, + "id": "04e8bf3c-845f-49bb-9e9c-992d6b8948f0", + "metadata": {}, + "outputs": [], + "source": [ + "# https://colab.research.google.com/drive/1XxrLW9VGPlZDw3efTvUi0hQimgJOwQG6?usp=sharing#scrollTo=gyH5Xq9eSvzq" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "37f96736-8654-4852-a144-fd75df22aaf7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Cloning into 'lag-llama'...\n", + "remote: Enumerating objects: 124, done.\u001b[K\n", + "remote: Counting objects: 100% (69/69), done.\u001b[K\n", + "remote: Compressing objects: 100% (43/43), done.\u001b[K\n", + "remote: Total 124 (delta 39), reused 47 (delta 26), pack-reused 55\u001b[K\n", + "Receiving objects: 100% (124/124), 190.17 KiB | 2.29 MiB/s, done.\n", + 
"Resolving deltas: 100% (49/49), done.\n" + ] + } + ], + "source": [ + "!git clone https://github.com/time-series-foundation-models/lag-llama/" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "f5fac8fa-5ac8-4330-97e0-8a2f4237ba0f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/workspace/repos/git.d-popov.com/ai-kevin/agent-pyter/lag-llama/lag-llama\n" + ] + } + ], + "source": [ + "cd ./lag-llama" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "968625c9-00fd-4037-b97c-33dfc4758491", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: gluonts[torch] in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from -r requirements.txt (line 1)) (0.14.4)\n", + "Requirement already satisfied: numpy==1.23.5 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from -r requirements.txt (line 2)) (1.23.5)\n", + "Requirement already satisfied: torch>=2.0.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from -r requirements.txt (line 3)) (2.2.1)\n", + "Requirement already satisfied: wandb in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from -r requirements.txt (line 4)) (0.16.4)\n", + "Requirement already satisfied: scipy in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from -r requirements.txt (line 5)) (1.12.0)\n", + "Requirement already satisfied: pandas==2.1.4 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from -r requirements.txt (line 6)) (2.1.4)\n", + "Requirement already satisfied: huggingface_hub[cli] in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from -r requirements.txt (line 7)) (0.21.3)\n", + "Requirement already satisfied: python-dateutil>=2.8.2 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from pandas==2.1.4->-r requirements.txt (line 6)) (2.9.0)\n", + "Requirement already 
satisfied: pytz>=2020.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from pandas==2.1.4->-r requirements.txt (line 6)) (2024.1)\n", + "Requirement already satisfied: tzdata>=2022.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from pandas==2.1.4->-r requirements.txt (line 6)) (2024.1)\n", + "Requirement already satisfied: pydantic<3,>=1.7 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from gluonts[torch]->-r requirements.txt (line 1)) (2.6.3)\n", + "Requirement already satisfied: tqdm~=4.23 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from gluonts[torch]->-r requirements.txt (line 1)) (4.66.2)\n", + "Requirement already satisfied: toolz~=0.10 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from gluonts[torch]->-r requirements.txt (line 1)) (0.12.1)\n", + "Requirement already satisfied: typing-extensions~=4.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from gluonts[torch]->-r requirements.txt (line 1)) (4.8.0)\n", + "Requirement already satisfied: lightning<2.2,>=2.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from gluonts[torch]->-r requirements.txt (line 1)) (2.1.4)\n", + "Requirement already satisfied: pytorch-lightning<2.2,>=2.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from gluonts[torch]->-r requirements.txt (line 1)) (2.1.4)\n", + "Requirement already satisfied: filelock in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (3.13.1)\n", + "Requirement already satisfied: sympy in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (1.12)\n", + "Requirement already satisfied: networkx in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (3.2.1)\n", + "Requirement already satisfied: jinja2 in 
/config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (3.1.2)\n", + "Requirement already satisfied: fsspec in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (2024.2.0)\n", + "Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.1.105 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (12.1.105)\n", + "Requirement already satisfied: nvidia-cuda-runtime-cu12==12.1.105 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (12.1.105)\n", + "Requirement already satisfied: nvidia-cuda-cupti-cu12==12.1.105 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (12.1.105)\n", + "Requirement already satisfied: nvidia-cudnn-cu12==8.9.2.26 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (8.9.2.26)\n", + "Requirement already satisfied: nvidia-cublas-cu12==12.1.3.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (12.1.3.1)\n", + "Requirement already satisfied: nvidia-cufft-cu12==11.0.2.54 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (11.0.2.54)\n", + "Requirement already satisfied: nvidia-curand-cu12==10.3.2.106 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (10.3.2.106)\n", + "Requirement already satisfied: nvidia-cusolver-cu12==11.4.5.107 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (11.4.5.107)\n", + "Requirement already satisfied: nvidia-cusparse-cu12==12.1.0.106 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r 
requirements.txt (line 3)) (12.1.0.106)\n", + "Requirement already satisfied: nvidia-nccl-cu12==2.19.3 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (2.19.3)\n", + "Requirement already satisfied: nvidia-nvtx-cu12==12.1.105 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (12.1.105)\n", + "Requirement already satisfied: triton==2.2.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from torch>=2.0.0->-r requirements.txt (line 3)) (2.2.0)\n", + "Requirement already satisfied: nvidia-nvjitlink-cu12 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from nvidia-cusolver-cu12==11.4.5.107->torch>=2.0.0->-r requirements.txt (line 3)) (12.4.99)\n", + "Requirement already satisfied: Click!=8.0.0,>=7.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (8.1.7)\n", + "Requirement already satisfied: GitPython!=3.1.29,>=1.0.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (3.1.42)\n", + "Requirement already satisfied: requests<3,>=2.0.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (2.31.0)\n", + "Requirement already satisfied: psutil>=5.0.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (5.9.0)\n", + "Requirement already satisfied: sentry-sdk>=1.0.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (1.40.6)\n", + "Requirement already satisfied: docker-pycreds>=0.4.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (0.4.0)\n", + "Requirement already satisfied: PyYAML in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (6.0.1)\n", 
+ "Requirement already satisfied: setproctitle in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (1.3.3)\n", + "Requirement already satisfied: setuptools in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (68.0.0)\n", + "Requirement already satisfied: appdirs>=1.4.3 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (1.4.4)\n", + "Requirement already satisfied: protobuf!=4.21.0,<5,>=3.19.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from wandb->-r requirements.txt (line 4)) (4.25.3)\n", + "Requirement already satisfied: packaging>=20.9 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from huggingface_hub[cli]->-r requirements.txt (line 7)) (23.2)\n", + "Requirement already satisfied: InquirerPy==0.3.4 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from huggingface_hub[cli]->-r requirements.txt (line 7)) (0.3.4)\n", + "Requirement already satisfied: pfzy<0.4.0,>=0.3.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from InquirerPy==0.3.4->huggingface_hub[cli]->-r requirements.txt (line 7)) (0.3.4)\n", + "Requirement already satisfied: prompt-toolkit<4.0.0,>=3.0.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from InquirerPy==0.3.4->huggingface_hub[cli]->-r requirements.txt (line 7)) (3.0.42)\n", + "Requirement already satisfied: six>=1.4.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from docker-pycreds>=0.4.0->wandb->-r requirements.txt (line 4)) (1.16.0)\n", + "Requirement already satisfied: gitdb<5,>=4.0.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from GitPython!=3.1.29,>=1.0.0->wandb->-r requirements.txt (line 4)) (4.0.11)\n", + "Requirement already satisfied: lightning-utilities<2.0,>=0.8.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from 
lightning<2.2,>=2.0->gluonts[torch]->-r requirements.txt (line 1)) (0.10.1)\n", + "Requirement already satisfied: torchmetrics<3.0,>=0.7.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from lightning<2.2,>=2.0->gluonts[torch]->-r requirements.txt (line 1)) (1.3.1)\n", + "Requirement already satisfied: annotated-types>=0.4.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from pydantic<3,>=1.7->gluonts[torch]->-r requirements.txt (line 1)) (0.6.0)\n", + "Requirement already satisfied: pydantic-core==2.16.3 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from pydantic<3,>=1.7->gluonts[torch]->-r requirements.txt (line 1)) (2.16.3)\n", + "Requirement already satisfied: charset-normalizer<4,>=2 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from requests<3,>=2.0.0->wandb->-r requirements.txt (line 4)) (3.3.2)\n", + "Requirement already satisfied: idna<4,>=2.5 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from requests<3,>=2.0.0->wandb->-r requirements.txt (line 4)) (3.4)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from requests<3,>=2.0.0->wandb->-r requirements.txt (line 4)) (2.0.7)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from requests<3,>=2.0.0->wandb->-r requirements.txt (line 4)) (2023.7.22)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from jinja2->torch>=2.0.0->-r requirements.txt (line 3)) (2.1.3)\n", + "Requirement already satisfied: mpmath>=0.19 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from sympy->torch>=2.0.0->-r requirements.txt (line 3)) (1.3.0)\n", + "Requirement already satisfied: aiohttp!=4.0.0a0,!=4.0.0a1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from fsspec->torch>=2.0.0->-r requirements.txt (line 
3)) (3.9.3)\n", + "Requirement already satisfied: smmap<6,>=3.0.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from gitdb<5,>=4.0.1->GitPython!=3.1.29,>=1.0.0->wandb->-r requirements.txt (line 4)) (5.0.1)\n", + "Requirement already satisfied: wcwidth in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from prompt-toolkit<4.0.0,>=3.0.1->InquirerPy==0.3.4->huggingface_hub[cli]->-r requirements.txt (line 7)) (0.2.13)\n", + "Requirement already satisfied: aiosignal>=1.1.2 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec->torch>=2.0.0->-r requirements.txt (line 3)) (1.3.1)\n", + "Requirement already satisfied: attrs>=17.3.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec->torch>=2.0.0->-r requirements.txt (line 3)) (23.2.0)\n", + "Requirement already satisfied: frozenlist>=1.1.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec->torch>=2.0.0->-r requirements.txt (line 3)) (1.4.1)\n", + "Requirement already satisfied: multidict<7.0,>=4.5 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec->torch>=2.0.0->-r requirements.txt (line 3)) (6.0.5)\n", + "Requirement already satisfied: yarl<2.0,>=1.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec->torch>=2.0.0->-r requirements.txt (line 3)) (1.9.4)\n" + ] + } + ], + "source": [ + "!pip install -r requirements.txt #--quiet # this could take some time # ignore the errors displayed by colab" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "8f10c802-4ffa-40f7-bd62-14ff13fae03c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: requests in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (2.31.0)\n", + "Requirement already 
satisfied: charset-normalizer<4,>=2 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from requests) (3.3.2)\n", + "Requirement already satisfied: idna<4,>=2.5 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from requests) (3.4)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from requests) (2.0.7)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from requests) (2023.7.22)\n", + "Requirement already satisfied: matplotlib in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (3.8.3)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from matplotlib) (1.2.0)\n", + "Requirement already satisfied: cycler>=0.10 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from matplotlib) (0.12.1)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from matplotlib) (4.49.0)\n", + "Requirement already satisfied: kiwisolver>=1.3.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from matplotlib) (1.4.5)\n", + "Requirement already satisfied: numpy<2,>=1.21 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from matplotlib) (1.23.5)\n", + "Requirement already satisfied: packaging>=20.0 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from matplotlib) (23.2)\n", + "Requirement already satisfied: pillow>=8 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from matplotlib) (10.0.1)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from matplotlib) (3.1.2)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from matplotlib) (2.9.0)\n", + "Requirement already satisfied: 
six>=1.5 in /config/miniconda3/envs/pygame/lib/python3.11/site-packages (from python-dateutil>=2.7->matplotlib) (1.16.0)\n" + ] + } + ], + "source": [ + "!pip install --upgrade requests\n", + "!pip install matplotlib\n" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "0a64aa15-1477-44bc-b772-a9342a5640c8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Consider using `hf_transfer` for faster downloads. This solution comes with some limitations. See https://huggingface.co/docs/huggingface_hub/hf_transfer for more details.\n", + "./lag-llama.ckpt\n" + ] + } + ], + "source": [ + "!huggingface-cli download time-series-foundation-models/Lag-Llama lag-llama.ckpt --local-dir ./" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "a328c513-558f-45ca-b900-b669c4ef33ed", + "metadata": {}, + "outputs": [], + "source": [ + "from itertools import islice\n", + "\n", + "from matplotlib import pyplot as plt\n", + "import matplotlib.dates as mdates\n", + "\n", + "import torch\n", + "from gluonts.evaluation import make_evaluation_predictions, Evaluator\n", + "from gluonts.dataset.repository.datasets import get_dataset\n", + "\n", + "from gluonts.dataset.pandas import PandasDataset\n", + "import pandas as pd\n", + "\n", + "from lag_llama.gluon.estimator import LagLlamaEstimator" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "f098efb9-490c-46b7-9ea3-bea1f2871fa5", + "metadata": {}, + "outputs": [], + "source": [ + "def get_lag_llama_predictions(dataset, prediction_length, num_samples=100):\n", + " ckpt = torch.load(\"lag-llama.ckpt\", map_location=torch.device('cuda:0')) # Uses GPU since in this Colab we use a GPU.\n", + " estimator_args = ckpt[\"hyper_parameters\"][\"model_kwargs\"]\n", + "\n", + " estimator = LagLlamaEstimator(\n", + " ckpt_path=\"lag-llama.ckpt\",\n", + " prediction_length=prediction_length,\n", + " context_length=32, # Should not be changed; this is 
what the released Lag-Llama model was trained with\n", + "\n", + " # estimator args\n", + " input_size=estimator_args[\"input_size\"],\n", + " n_layer=estimator_args[\"n_layer\"],\n", + " n_embd_per_head=estimator_args[\"n_embd_per_head\"],\n", + " n_head=estimator_args[\"n_head\"],\n", + " scaling=estimator_args[\"scaling\"],\n", + " time_feat=estimator_args[\"time_feat\"],\n", + "\n", + " batch_size=1,\n", + " num_parallel_samples=100\n", + " )\n", + "\n", + " lightning_module = estimator.create_lightning_module()\n", + " transformation = estimator.create_transformation()\n", + " predictor = estimator.create_predictor(transformation, lightning_module)\n", + "\n", + " forecast_it, ts_it = make_evaluation_predictions(\n", + " dataset=dataset,\n", + " predictor=predictor,\n", + " num_samples=num_samples\n", + " )\n", + " forecasts = list(forecast_it)\n", + " tss = list(ts_it)\n", + "\n", + " return forecasts, tss" + ] + }, + { + "cell_type": "raw", + "id": "e7e6dd60-7c0c-483f-86d4-b2ba7c4104d3", + "metadata": {}, + "source": [ + "import pandas as pd\n", + "from gluonts.dataset.pandas import PandasDataset\n", + "\n", + "url = (\n", + " \"https://gist.githubusercontent.com/rsnirwan/a8b424085c9f44ef2598da74ce43e7a3/raw/b6fdef21fe1f654787fa0493846c546b7f9c4df2/ts_long.csv\"\n", + ")\n", + "df = pd.read_csv(url, index_col=0, parse_dates=True)\n", + "# Set numerical columns as float32\n", + "for col in df.columns:\n", + " # Check if column is not of string type\n", + " if df[col].dtype != 'object' and pd.api.types.is_string_dtype(df[col]) == False:\n", + " df[col] = df[col].astype('float32')\n", + "\n", + "# Create the Pandas\n", + "dataset = PandasDataset.from_long_dataframe(df, target=\"target\", item_id=\"item_id\")\n", + "\n", + "backtest_dataset = dataset\n", + "prediction_length = 24 # Define your prediction length. 
We use 24 here since the data is of hourly frequency\n", + "num_samples = 100 # number of samples sampled from the probability distribution for each timestep\n", + "forecasts, tss = get_lag_llama_predictions(backtest_dataset, prediction_length, num_samples)\n", + "len(forecasts)\n", + "forecasts[0].samples.shape\n", + "plt.figure(figsize=(20, 15))\n", + "date_formater = mdates.DateFormatter('%b, %d')\n", + "plt.rcParams.update({'font.size': 15})\n", + "\n", + "# Iterate through the first 9 series, and plot the predicted samples\n", + "for idx, (forecast, ts) in islice(enumerate(zip(forecasts, tss)), 9):\n", + " ax = plt.subplot(3, 3, idx+1)\n", + "\n", + " plt.plot(ts[-4 * prediction_length:].to_timestamp(), label=\"target\", )\n", + " forecast.plot( color='g')\n", + " plt.xticks(rotation=60)\n", + " ax.xaxis.set_major_formatter(date_formater)\n", + " ax.set_title(forecast.item_id)\n", + "\n", + "plt.gcf().tight_layout()\n", + "plt.legend()\n", + "plt.show()" + ] + }, + { + "cell_type": "raw", + "id": "74dc9a03-435e-40a5-bbda-4ddac9f6cfb9", + "metadata": {}, + "source": [ + "# Set numerical columns as float32\n", + "for col in df.columns:\n", + " # Check if column is not of string type\n", + " if df[col].dtype != 'object' and pd.api.types.is_string_dtype(df[col]) == False:\n", + " df[col] = df[col].astype('float32')\n", + "\n", + "# Create the Pandas\n", + "dataset = PandasDataset.from_long_dataframe(df, target=\"target\", item_id=\"item_id\")\n", + "\n", + "backtest_dataset = dataset\n", + "prediction_length = 24 # Define your prediction length. 
We use 24 here since the data is of hourly frequency\n", + "num_samples = 100 # number of samples sampled from the probability distribution for each timestep\n", + "forecasts, tss = get_lag_llama_predictions(backtest_dataset, prediction_length, num_samples)\n", + "len(forecasts)\n", + "forecasts[0].samples.shape\n", + "plt.figure(figsize=(20, 15))\n", + "date_formater = mdates.DateFormatter('%b, %d')\n", + "plt.rcParams.update({'font.size': 15})\n", + "\n", + "# Iterate through the first 9 series, and plot the predicted samples\n", + "for idx, (forecast, ts) in islice(enumerate(zip(forecasts, tss)), 9):\n", + " ax = plt.subplot(3, 3, idx+1)\n", + "\n", + " plt.plot(ts[-4 * prediction_length:].to_timestamp(), label=\"target\", )\n", + " forecast.plot( color='g')\n", + " plt.xticks(rotation=60)\n", + " ax.xaxis.set_major_formatter(date_formater)\n", + " ax.set_title(forecast.item_id)\n", + "\n", + "plt.gcf().tight_layout()\n", + "plt.legend()\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b8be08b6-0cfd-45b5-ac23-142e9f388049", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/agent-pyter/notes.md b/agent-pyter/notes.md new file mode 100644 index 0000000..51f6426 --- /dev/null +++ b/agent-pyter/notes.md @@ -0,0 +1,12 @@ +https://github.com/ccxt/ccxt/tree/master/examples/py/ + + + +playwright._impl._errors.TargetClosedError: Target page, context or browser has been closed +Browser logs: + + /config/.cache/ms-playwright/chromium-1105/chrome-linux/chrome --disable-field-trial-config 
--disable-background-networking --enable-features=NetworkService,NetworkServiceInProcess --disable-background-timer-throttling --disable-backgrounding-occluded-windows --disable-back-forward-cache --disable-breakpad --disable-client-side-phishing-detection --disable-component-extensions-with-background-pages --disable-component-update --no-default-browser-check --disable-default-apps --disable-dev-shm-usage --disable-extensions --disable-features=ImprovedCookieControls,LazyFrameLoading,GlobalMediaControls,DestroyProfileOnBrowserClose,MediaRouter,DialMediaRouteProvider,AcceptCHFrame,AutoExpandDetailsElement,CertificateTransparencyComponentUpdater,AvoidUnnecessaryBeforeUnloadCheckSync,Translate,HttpsUpgrades,PaintHolding --allow-pre-commit-input --disable-hang-monitor --disable-ipc-flooding-protection --disable-popup-blocking --disable-prompt-on-repost --disable-renderer-backgrounding --force-color-profile=srgb --metrics-recording-only --no-first-run --enable-automation --password-store=basic --use-mock-keychain --no-service-autorun --export-tagged-pdf --disable-search-engine-choice-screen --headless --hide-scrollbars --mute-audio --blink-settings=primaryHoverType=2,availableHoverTypes=2,primaryPointerType=4,availablePointerTypes=4 --no-sandbox --user-data-dir=/tmp/playwright_chromiumdev_profile-kMyQDr --remote-debugging-pipe --no-startup-window + pid=1019347 +[pid=1019347][err] /config/.cache/ms-playwright/chromium-1105/chrome-linux/chrome: error while loading shared libraries: libnss3.so: cannot open shared object file: No such file or directory +[pid=1019347] +[pid=1019347] starting temporary directories cleanup \ No newline at end of file diff --git a/agent-pyter/prompts.txt b/agent-pyter/prompts.txt new file mode 100644 index 0000000..d066faf --- /dev/null +++ b/agent-pyter/prompts.txt @@ -0,0 +1 @@ +create python app that will monitor for new pairs on https://dextools.io and trigger event immediately when there is new token \ No newline at end of file diff 
--git a/config.json b/config.json new file mode 100644 index 0000000..9990c5b --- /dev/null +++ b/config.json @@ -0,0 +1,15 @@ +{ + "tabAutocompleteModel": { + "title": "Tab Autocomplete Model", + "provider": "ollama", + "model": "stable-code:code", + "apiBase": "https://ollama.d-popov.com" + } +} + + +// original: "tabAutocompleteModel": { +// "title": "Starcoder 3b", +// "provider": "ollama", +// "model": "starcoder-3b" +// }, \ No newline at end of file diff --git a/crypto/sol/app.py b/crypto/sol/app.py new file mode 100644 index 0000000..f334c9d --- /dev/null +++ b/crypto/sol/app.py @@ -0,0 +1,25 @@ +from flask import Flask, render_template, request, jsonify +from solana.rpc.api import Client + +app = Flask(__name__) +solana_client = Client("https://api.mainnet-beta.solana.com") + +@app.route('/') +def index(): + return render_template('index.html') + +@app.route('/tokens', methods=['GET']) +def get_tokens(): + # Here you would add logic to fetch new tokens or token data + return jsonify(['SOL', 'USDC']) # Example token list + +@app.route('/swap', methods=['POST']) +def swap_tokens(): + data = request.json + token_name = data['token_name'] + amount = data['amount'] + # Here you would add logic to perform the token swap + return jsonify({'status': 'success', 'message': f'Swapped {amount} of {token_name}'}) + +if __name__ == '__main__': + app.run(debug=True) diff --git a/crypto/sol/r.txt b/crypto/sol/r.txt new file mode 100644 index 0000000..efd201e --- /dev/null +++ b/crypto/sol/r.txt @@ -0,0 +1,4 @@ +flask +solana +idna +httpx \ No newline at end of file diff --git a/crypto/sol/static/app.js b/crypto/sol/static/app.js new file mode 100644 index 0000000..32143e6 --- /dev/null +++ b/crypto/sol/static/app.js @@ -0,0 +1,28 @@ +document.getElementById('connectWallet').addEventListener('click', async () => { + try { + const { solana } = window; + if (solana && solana.isPhantom) { + const response = await solana.connect({ onlyIfTrusted: true }); + 
console.log('Connected with Public Key:', response.publicKey.toString()); + } else { + alert('Phantom wallet not found. Please install it.'); + } + } catch (error) { + console.error(error); + alert('Connection to Phantom Wallet failed'); + } +}); + +document.getElementById('swapToken').addEventListener('click', () => { + const tokenName = document.getElementById('tokenName').value; + const amount = document.getElementById('amount').value; + fetch('/swap', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({token_name: tokenName, amount: amount}) + }) + .then(response => response.json()) + .then(data => alert(data.message)); +}); diff --git a/crypto/sol/templates/index.html b/crypto/sol/templates/index.html new file mode 100644 index 0000000..0de89fe --- /dev/null +++ b/crypto/sol/templates/index.html @@ -0,0 +1,21 @@ + + + + + + Token Swapper + + +

Token Swapper

+
+ +
+
+ + + +
+ + + + diff --git a/docker-compose.yml b/docker-compose.yml index 31c1a7e..f173c20 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -9,7 +9,7 @@ services: dockerfile: ./Dockerfile environment: NODE_ENV: production - TTS_BACKEND_URL: http://192.168.0.10:9009/asr + # TTS_BACKEND_URL: http://192.168.0.10:9009/asr WS_URL: ws://192.168.0.10:28081 SERVER_PORT_WS: 8081 SERVER_PORT_HTTP: 8080 diff --git a/memory1/intint.neo.py b/memory1/intint.neo.py index a40720e..eb5f358 100644 --- a/memory1/intint.neo.py +++ b/memory1/intint.neo.py @@ -11,6 +11,8 @@ class Neo4jConnection: # Create the schema self.create_schema() + self.test_retrieval() + # Close the connection self.close() @@ -21,11 +23,13 @@ class Neo4jConnection: with self.driver.session() as session: session.write_transaction(self._create_constraints_and_indexes) + def test_retrieval(tx): + #run MATCH (n) RETURN n LIMIT 25 + result = tx.run("MATCH (n) RETURN n LIMIT 25;") + + @staticmethod def _create_constraints_and_indexes(tx): - # Constraints and indexes for Person - tx.run("CREATE CONSTRAINT ON (p:Person) ASSERT p.person_id IS UNIQUE;") - # Constraints and indexes for Memory tx.run("CREATE CONSTRAINT ON (m:Memory) ASSERT m.memory_id IS UNIQUE;") tx.run("CREATE INDEX ON :Memory(content);") diff --git a/memory1/models.py b/memory1/models.py index 52f595c..6a3c1ed 100644 --- a/memory1/models.py +++ b/memory1/models.py @@ -1,10 +1,3 @@ -class Person: - def __init__(self, person_id, name, age): - self.person_id = person_id - self.name = name - self.age = age - - class Memory: def __init__(self, memory_id, content, timestamp, importance, relevance, associated_tags): self.memory_id = memory_id diff --git a/package-lock.json b/package-lock.json index 572fe31..3cd4954 100644 --- a/package-lock.json +++ b/package-lock.json @@ -8,15 +8,60 @@ "name": "kevin-ai", "version": "1.0.0", "dependencies": { + "axios": "^1.7.2", "body-parser": "^1.20.2", - "dotenv": "^16.0.3", + "dotenv": "^16.4.5", "express": "^4.18.2", 
"git": "^0.1.5", + "groq-sdk": "^0.4.0", "node-persist": "^3.1.3", + "ollama": "^0.5.1", + "openai": "^4.50.0", "request": "^2.88.2", "ws": "^8.12.1" } }, + "node_modules/@types/node": { + "version": "18.19.34", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.34.tgz", + "integrity": "sha512-eXF4pfBNV5DAMKGbI02NnDtWrQ40hAN558/2vvS4gMpMIxaf6JmD7YjnZbq0Q9TDSSkKBamime8ewRoomHdt4g==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/@types/node-fetch": { + "version": "2.6.11", + "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.11.tgz", + "integrity": "sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==", + "dependencies": { + "@types/node": "*", + "form-data": "^4.0.0" + } + }, + "node_modules/@types/node-fetch/node_modules/form-data": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/abort-controller": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", + "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", + "dependencies": { + "event-target-shim": "^5.0.0" + }, + "engines": { + "node": ">=6.5" + } + }, "node_modules/accepts": { "version": "1.3.8", "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", @@ -29,6 +74,17 @@ "node": ">= 0.6" } }, + "node_modules/agentkeepalive": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.5.0.tgz", + "integrity": "sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==", + 
"dependencies": { + "humanize-ms": "^1.2.1" + }, + "engines": { + "node": ">= 8.0.0" + } + }, "node_modules/ajv": { "version": "6.12.6", "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", @@ -83,6 +139,29 @@ "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.12.0.tgz", "integrity": "sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg==" }, + "node_modules/axios": { + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.2.tgz", + "integrity": "sha512-2A8QhOMrbomlDuiLeK9XibIBzuHeRcqqNOHp0Cyp5EoJ1IFDh+XZH3A6BkXtv0K4gFGCI0Y4BM7B1wOEi0Rmgw==", + "dependencies": { + "follow-redirects": "^1.15.6", + "form-data": "^4.0.0", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/axios/node_modules/form-data": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, "node_modules/bcrypt-pbkdf": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", @@ -246,11 +325,14 @@ } }, "node_modules/dotenv": { - "version": "16.0.3", - "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.0.3.tgz", - "integrity": "sha512-7GO6HghkA5fYG9TYnNxi14/7K9f5occMlp3zXAuSxn7CKCxt9xbNWG7yF8hTCSUchlfWSe3uLmlPfigevRItzQ==", + "version": "16.4.5", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.5.tgz", + "integrity": "sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==", "engines": { "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" } }, "node_modules/ecc-jsbn": { @@ -288,6 +370,14 @@ "node": ">= 0.6" } }, + "node_modules/event-target-shim": { + "version": "5.0.1", + "resolved": 
"https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", + "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", + "engines": { + "node": ">=6" + } + }, "node_modules/express": { "version": "4.18.2", "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", @@ -406,6 +496,25 @@ "node": ">= 0.8" } }, + "node_modules/follow-redirects": { + "version": "1.15.6", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz", + "integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, "node_modules/forever-agent": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", @@ -427,6 +536,31 @@ "node": ">= 0.12" } }, + "node_modules/form-data-encoder": { + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz", + "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==" + }, + "node_modules/formdata-node": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz", + "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==", + "dependencies": { + "node-domexception": "1.0.0", + "web-streams-polyfill": "4.0.0-beta.3" + }, + "engines": { + "node": ">= 12.20" + } + }, + "node_modules/formdata-node/node_modules/web-streams-polyfill": { + "version": "4.0.0-beta.3", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz", + "integrity": 
"sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==", + "engines": { + "node": ">= 14" + } + }, "node_modules/forwarded": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", @@ -485,6 +619,21 @@ "resolved": "https://registry.npmjs.org/mime/-/mime-1.2.9.tgz", "integrity": "sha512-WiLgbHTIq5AYUvU/Luli4mZ1bUcHpGNHyCsbl+KPMg4zt+XUDpQehWjuBjdLaEvDTinvKj/FgfQt3fPoT7j08g==" }, + "node_modules/groq-sdk": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/groq-sdk/-/groq-sdk-0.4.0.tgz", + "integrity": "sha512-h79q9sv4hcOBESR05N5eqHlGhAug9H9lr3EIiB+37ysWWekeG+KYQDK2lIIHYCm6O9LzgZzO/VdLdPP298+T0w==", + "dependencies": { + "@types/node": "^18.11.18", + "@types/node-fetch": "^2.6.4", + "abort-controller": "^3.0.0", + "agentkeepalive": "^4.2.1", + "form-data-encoder": "1.7.2", + "formdata-node": "^4.3.2", + "node-fetch": "^2.6.7", + "web-streams-polyfill": "^3.2.1" + } + }, "node_modules/har-schema": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", @@ -557,6 +706,14 @@ "npm": ">=1.3.7" } }, + "node_modules/humanize-ms": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", + "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==", + "dependencies": { + "ms": "^2.0.0" + } + }, "node_modules/iconv-lite": { "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", @@ -689,6 +846,43 @@ "node": ">= 0.6" } }, + "node_modules/node-domexception": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", + "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "github", + 
"url": "https://paypal.me/jimmywarting" + } + ], + "engines": { + "node": ">=10.5.0" + } + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, "node_modules/node-gyp-build": { "version": "4.6.0", "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.6.0.tgz", @@ -725,6 +919,14 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/ollama": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/ollama/-/ollama-0.5.1.tgz", + "integrity": "sha512-mAiCHxdvu63E8EFopz0y82QG7rGfYmKAWgmjG2C7soiRuz/Sj3r/ebvCOp+jasiCubqUPE0ZThKT5LR6wrrPtA==", + "dependencies": { + "whatwg-fetch": "^3.6.20" + } + }, "node_modules/on-finished": { "version": "2.4.1", "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", @@ -736,6 +938,24 @@ "node": ">= 0.8" } }, + "node_modules/openai": { + "version": "4.50.0", + "resolved": "https://registry.npmjs.org/openai/-/openai-4.50.0.tgz", + "integrity": "sha512-2ADkNIU6Q589oYHr5pn9k7SbUcrBTK9X0rIXrYqwMVSoqOj1yK9/1OO0ExaWsqOOpD7o58UmRjeKlx9gKAcuKQ==", + "dependencies": { + "@types/node": "^18.11.18", + "@types/node-fetch": "^2.6.4", + "abort-controller": "^3.0.0", + "agentkeepalive": "^4.2.1", + "form-data-encoder": "1.7.2", + "formdata-node": "^4.3.2", + "node-fetch": "^2.6.7", + "web-streams-polyfill": "^3.2.1" + }, + "bin": { + "openai": "bin/cli" + } + }, "node_modules/parseurl": { "version": "1.3.3", "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", @@ -766,6 +986,11 @@ "node": ">= 0.10" } }, + "node_modules/proxy-from-env": { + "version": 
"1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + }, "node_modules/psl": { "version": "1.9.0", "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", @@ -990,6 +1215,11 @@ "node": ">=0.8" } }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, "node_modules/tunnel-agent": { "version": "0.6.0", "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", @@ -1018,6 +1248,11 @@ "node": ">= 0.6" } }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, "node_modules/unpipe": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", @@ -1086,6 +1321,33 @@ "extsprintf": "^1.2.0" } }, + "node_modules/web-streams-polyfill": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", + "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "node_modules/whatwg-fetch": { + "version": "3.6.20", + "resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz", + "integrity": 
"sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, "node_modules/ws": { "version": "8.12.1", "resolved": "https://registry.npmjs.org/ws/-/ws-8.12.1.tgz", diff --git a/package.json b/package.json index e86a2c5..2b5bd06 100644 --- a/package.json +++ b/package.json @@ -8,11 +8,15 @@ "start:tele": "python agent-py-bot/agent.py" }, "dependencies": { + "axios": "^1.7.2", "body-parser": "^1.20.2", - "dotenv": "^16.0.3", + "dotenv": "^16.4.5", "express": "^4.18.2", "git": "^0.1.5", + "groq-sdk": "^0.4.0", "node-persist": "^3.1.3", + "ollama": "^0.5.1", + "openai": "^4.50.0", "request": "^2.88.2", "ws": "^8.12.1" } diff --git a/translate-nllb/nllb.py b/translate-nllb/nllb.py new file mode 100644 index 0000000..1784066 --- /dev/null +++ b/translate-nllb/nllb.py @@ -0,0 +1,16 @@ +# conda activate transformers +# conda install pip +# pip install https://github.com/huggingface/transformers/archive/nllb.zip -q +# pip install transformers + +from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline + +model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M") +tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M") + +translator = pipeline('translation', model=model, tokenizer=tokenizer, src_lang="tam_Taml", tgt_lang='eng_Latn', max_length = 400) +translator("திஸ் ஐஸ் எ வெரி குட் மாடல் ") + +# -------------------------------------------- +https://colab.research.google.com/drive/1o9r0QbEQZ1tn4eBVv-wajAtgwi6Lp-ZJ?usp=sharing#scrollTo=q49lJjXd4Jct +https://colab.research.google.com/drive/1QEF0U9AaBeQdjLw7SyRty2ENAD4Qiiil#scrollTo=TFm232e77QRT \ No 
newline at end of file diff --git a/web/.env b/web/.env new file mode 100644 index 0000000..e1b2c90 --- /dev/null +++ b/web/.env @@ -0,0 +1,22 @@ + +TTS_BACKEND_URL=https://api.tts.d-popov.com/ +WS_URL=ws://localhost:8081 +SERVER_PORT_WS=8081 +SERVER_PORT_HTTP=3005 + +# aider +AIDER_MODEL= +AIDER_4=false +#AIDER_35TURBO= + +# OPENAI_API_KEY=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 +# OPENAI_API_BASE=https://api.deepseek.com/v1 +# OPENAI_API_KEY=sk-99df7736351f4536bd72cd64a416318a +# AIDER_MODEL=deepseek-coder #deepseek-coder, deepseek-chat + + +GROQ_API_KEY=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 +aider --model groq/llama3-70b-8192 + +# List models available from Groq +aider --models groq/ \ No newline at end of file diff --git a/web/audio.js b/web/audio.js new file mode 100644 index 0000000..0a1920d --- /dev/null +++ b/web/audio.js @@ -0,0 +1,201 @@ +let selectedDeviceId = "default"; +export let serverTime; +export let recordButton; +export let socket; +let audioRecorder; +let audioStream; +let recording = false; +let connectionStatus; +let statusRecording; +let audioContext; +let volumeChecker; +let lastVolumes = new Array(5); +let averageVolume; +let silenceCount = 0; +let isSpeaking = false; +let soundDetected = false; +let speakingCount = 0; +let analyser = null; + +let SILENCE_DELAY_MS = 50; +let preDetect_IncludedAudio = 400; //ms +let soundCount_Threshold = 10; +let silenceCount_Threshold = 10; + +const volumeHistory = []; + +export function setSocket(newSocket) { + socket = newSocket; +} +export function setRecordButton(newRecordButton) { + recordButton = newRecordButton; + recordButton.addEventListener("click", toggleListening); +} + +export function InitAudioAnalyser(stream) { + audioContext = new AudioContext(); + const source = audioContext.createMediaStreamSource(stream); + analyser = audioContext.createAnalyser(); + analyser.fftSize = 2048; + analyser.smoothingTimeConstant = 0.8; + source.connect(analyser); +} + +export 
function startListening() { + recording = true; + navigator.mediaDevices.getUserMedia({ audio: { sampleRate: 16000 } }) + .then((stream) => { + audioStream = stream; + + const audioContext = new AudioContext(); + const sourceNode = audioContext.createMediaStreamSource(audioStream); + const audioSampleRate = sourceNode.context.sampleRate; + + info.innerHTML = "Sample rate: " + audioSampleRate + " Hz"; + var preBuffer = []; + + const channelSplitter = audioContext.createChannelSplitter(2); + const channelMerger = audioContext.createChannelMerger(1); + sourceNode.connect(channelSplitter); + channelSplitter.connect(channelMerger, 0, 0); + const outputNode = channelMerger; + + const mediaStreamDestination = audioContext.createMediaStreamDestination(); + outputNode.connect(mediaStreamDestination); + const singleChannelStream = mediaStreamDestination.stream; + + audioRecorder = new MediaRecorder(singleChannelStream); + audioRecorder.start(); + audioRecorder.addEventListener("dataavailable", (event) => { + if (!soundDetected && autosend.checked) { + preBuffer = []; + preBuffer.push(event.data); + return; + } + if (event.data.size > 0) { + let data = event.data; + if (preBuffer.length > 0) { + sendAudioToServerPost(preBuffer); + } + sendAudioToServer(data); + soundDetected = false; + } + }); + + InitAudioAnalyser(stream); + }); + + recordButton.innerHTML = "Stop Talking"; + recordButton.classList.toggle('bg-red-500'); + recordButton.classList.toggle('bg-blue-500'); + recordButton.classList.toggle('hover:bg-blue-700'); +} + +export function stopListening() { + recording = false; + audioRecorder.stop(); + recordButton.innerHTML = "Push to Talk"; + recordButton.classList.toggle('bg-blue-500'); + recordButton.classList.toggle('bg-red-500'); + recordButton.classList.toggle('hover:bg-blue-700'); + clearInterval(volumeChecker); + if (audioStream) { + audioStream.getTracks().forEach(track => track.stop()); + audioStream = null; + } +} + +export function sendAudioToServerPost(data) 
{ + const blob = new Blob(data, { type: "audio/ogg; codecs=opus" }); + var formData = new FormData(); + formData.append('file', data); + fetch('/upload', { + method: 'POST', + body: formData + }); +} + +export function sendAudioToServerJson(data) { + if (socket && socket.readyState === WebSocket.OPEN) { + + const binaryData = Buffer.from(base64AudioData, 'base64'); + socket.send(JSON.stringify({ type: 'audio', audiobase64: binaryData })); + serverTime = Date.now(); + if (!autosend.checked) { + transcription.placeholder = "Processing audio..."; + } + } +} +export function sendAudioToServer(data) { + if (socket && socket.readyState === WebSocket.OPEN) { + socket.send(data); + serverTime = Date.now(); + if (!autosend.checked) { + transcription.placeholder = "Processing audio..."; + } + } +} + +export function toggleListening() { + if (socket.readyState === WebSocket.OPEN) { + if (recording) { + stopListening(); + } else { + startListening(); + } + } +} + +export function initializeVolumeChecker() { + volumeChecker = setInterval(() => { + if (!audioContext) { + console.log("No audio context"); + return; + } + const frequencyData = new Uint8Array(analyser.frequencyBinCount); + analyser.getByteFrequencyData(frequencyData); + + let totalVolume = 0; + for (let i = 0; i < frequencyData.length; i++) { + totalVolume += frequencyData[i]; + } + averageVolume = totalVolume / frequencyData.length; + + volumeHistory.push(averageVolume); + if (volumeHistory.length > 100) { + volumeHistory.shift(); + } + + const threshold = volumeHistory.reduce((acc, curr) => acc + curr) / volumeHistory.length + 5; + const isSilent = averageVolume < threshold; + + if (averageVolume > threshold) { + if (autosend.checked && speakingCount == 0 && audioRecorder) { + soundDetected = false; + audioRecorder.stop(); + audioRecorder.start(); + } + speakingCount++; + if (speakingCount > soundCount_Threshold) { + statusRecording.innerHTML = "Listening..."; + statusRecording.style.color = "green"; + isSpeaking 
= true; + } + } else if (averageVolume - 5 < threshold) { + speakingCount = 0; + if (isSpeaking) { + silenceCount++; + if (silenceCount > silenceCount_Threshold) { + if (autosend.checked) { + soundDetected = true; + audioRecorder.stop(); + audioRecorder.start(); + } + isSpeaking = false; + statusRecording.innerHTML = "Silence detected..."; + statusRecording.style.color = "orange"; + } + } + } + }, SILENCE_DELAY_MS); +} diff --git a/web/chat-client.html b/web/chat-client.html new file mode 100644 index 0000000..b515172 --- /dev/null +++ b/web/chat-client.html @@ -0,0 +1,615 @@ + + + + + Real-time Voice Chat + + + + + +
+

Real-time Voice Chat

+ +
+ + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+
+
status
+
+ + + + + + + \ No newline at end of file diff --git a/web/chat-server.js b/web/chat-server.js new file mode 100644 index 0000000..350c743 --- /dev/null +++ b/web/chat-server.js @@ -0,0 +1,399 @@ +const express = require('express'); +const bodyParser = require('body-parser'); +const WebSocket = require('ws'); +const storage = require('node-persist'); +const request = require('request'); +const fs = require('fs'); +const path = require('path'); +const dotenv = require('dotenv'); +const ollama = require('ollama'); +const axios = require('axios'); +const OpenAI = require('openai'); +const Groq = require('groq-sdk'); + +// Load environment variables +dotenv.config({ path: `.env${process.env.NODE_ENV === 'development' ? '.development' : ''}` }); + +// Initialize services +const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY }); +const groq = new Groq({ apiKey: process.env.GROQ_API_KEY }); + +// Express setup +const app = express(); +app.use(bodyParser.json()); + +// Configuration constants +const PORT_HTTP = process.env.SERVER_PORT_HTTP || 3000; +const PORT_WS = process.env.SERVER_PORT_WS || 8080; +const TTS_API_URL = process.env.TTS_API_URL; +const LNN_API_URL = process.env.LNN_API_URL; +const LLN_MODEL = process.env.LLN_MODEL; + +let language = "en"; +let storeRecordings = false; +let queueCounter = 0; + +const sessions = new Map(); +const chats = new Map(); // Store chat rooms + +// Initialize storage and load initial values +async function initStorage() { + await storage.init(); + language = await storage.getItem('language') || language; + storeRecordings = await storage.getItem('storeRecordings') || storeRecordings; + + const storedChats = await storage.getItem('chats') || []; + storedChats.forEach(chat => chats.set(chat.id, chat)); + + const storedSessions = await storage.getItem('sessions') || []; + storedSessions.forEach(session => sessions.set(session.sessionId, session)); +} + +initStorage(); + +// WebSocket Server +const wss = new WebSocket.Server({ 
port: PORT_WS }); +wss.on('connection', ws => { + ws.on('message', async message => handleMessage(ws, message)); + ws.on('close', () => handleClose(ws)); +}); + +// Handle WebSocket messages +async function handleMessage(ws, message) { + let data; + try { + data = JSON.parse(message); + } catch { + return handleAudioData(ws, message); + } + + try { + switch (data.type) { + case 'sessionId': + await handleSessionId(ws); + break; + case 'join': + await handleJoin(ws, data); + break; + case 'startChat': + await handleStartChat(ws, data); + break; + case 'enterChat': + await handleEnterChat(ws, data); + break; + case 'reconnect': + await handleReconnect(ws, data); + break; + default: + console.log('Unknown message type:', data.type); + } + } catch (err) { + console.error('Failed to handle message', err); + } +} + +function handleClose(ws) { + sessions.delete(ws.sessionId); + broadcastUserList(); +} + +// Handlers for specific message types +async function handleSessionId(ws) { + ws.sessionId = generateSessionId(); + sessions.set(ws.sessionId, { language: 'en' }); + await storage.setItem('sessions', Array.from(sessions.values())); +} + +async function handleJoin(ws, { username, language }) { + sessions.set(ws.sessionId, { username, sessionId: ws.sessionId, language }); + ws.send(JSON.stringify({ type: 'sessionId', sessionId: ws.sessionId, language, storeRecordings })); + + const userChats = Array.from(chats.values()).filter(chat => chat.participants.includes(ws.sessionId)); + ws.send(JSON.stringify({ type: 'chats', chats: userChats })); + + broadcastUserList(); +} + +async function handleStartChat(ws, { users }) { + const chatId = generateChatId(); + let participants = [ws.sessionId, ...users]; + participants = [...new Set(participants)]; + + chats.set(chatId, { participants, messages: [] }); + await storage.setItem('chats', Array.from(chats.values())); + + notifyParticipants(participants); + broadcastUserList(); +} + +async function handleEnterChat(ws, { chatId }) { + 
const enteredChat = chats.get(chatId); + const currentSession = sessions.get(ws.sessionId); + currentSession.currentChat = chatId; + if (enteredChat && enteredChat.participants.includes(ws.sessionId)) { + ws.send(JSON.stringify({ type: 'chat', chat: enteredChat })); + } +} + +async function handleReconnect(ws, { sessionId }) { + const userSession = sessions.get(sessionId); + if (userSession) { + sessions.set(ws.sessionId, userSession); + ws.sessionId = sessionId; + const userChats = Array.from(chats.values()).filter(chat => chat.participants.includes(ws.sessionId)); + ws.send(JSON.stringify({ type: 'chats', chats: userChats })); + } else { + console.log('Session not found:', sessionId); + } + broadcastUserList(); +} + +// Utility functions +function generateSessionId() { + return Math.random().toString(36).substring(2); +} + +function generateChatId() { + return Math.random().toString(36).substring(2); +} + +function broadcastUserList() { + const userList = Array.from(sessions.values()).map(user => ({ + username: user.username, + sessionId: user.sessionId, + currentChat: user.currentChat, + language: user.language + })); + + wss.clients.forEach(client => { + if (client.readyState === WebSocket.OPEN) { + client.send(JSON.stringify({ type: 'userList', users: userList })); + } + }); +} + +function notifyParticipants(participants) { + participants.forEach(sessionId => { + const participantSocket = Array.from(wss.clients).find(client => client.sessionId === sessionId); + if (participantSocket && participantSocket.readyState === WebSocket.OPEN) { + const userChats = Array.from(chats.entries()) + .filter(([id, chat]) => chat.participants.includes(sessionId)) + .map(([id, chat]) => ({ id, participants: chat.participants })); + participantSocket.send(JSON.stringify({ type: 'chats', chats: userChats })); + } + }); +} + +async function handleAudioData(ws, data) { + const sessionData = sessions.get(ws.sessionId); + let { language, task } = sessionData; + + const formData = { + 
task: task || 'transcribe', + language, + vad_filter: 'true', + output: 'json', + audio_file: { + value: data, + options: { filename: 'audio.ogg', contentType: 'audio/ogg' } + } + }; + + if (!language || language === 'auto') { + await detectLanguage(ws, formData); + } else { + await transcribeAudio(ws, formData, sessionData); + } +} + +async function detectLanguage(ws, formData) { + try { + const result = await requestPromise({ + method: 'POST', + url: TTS_API_URL.replace('/asr', '/detect-language'), + formData + }); + const { language_code } = JSON.parse(result); + if (language_code) { + const sessionData = sessions.get(ws.sessionId); + sessionData.language = language_code; + ws.send(JSON.stringify({ type: 'languageDetected', languageDetected: language_code })); + await transcribeAudio(ws, formData, sessionData); + } + } catch (err) { + console.error('Language detection failed:', err); + } +} + +async function transcribeAudio(ws, formData, sessionData) { + const start = new Date().getTime(); + queueCounter++; + + try { + if(sessionData.language) { + formData.language = sessionData.language; + } + formData.vad_filter = 'true'; + const body = await requestPromise({ method: 'POST', url: TTS_API_URL, formData }); + queueCounter--; + + const duration = new Date().getTime() - start; + ws.send(JSON.stringify({ + type: 'text', + queueCounter, + duration, + language: sessionData.language, + text: body + })); + + await handleChatTranscription(ws, body, sessionData); + + } catch (err) { + console.error('Transcription failed:', err); + } + + if (storeRecordings) { + const timestamp = Date.now(); + fs.mkdir('rec', { recursive: true }, err => { + if (err) console.error(err); + else { + fs.writeFile(`rec/audio${timestamp}.ogg`, formData.audio_file.value, err => { + if (err) console.error(err); + else console.log(`Audio data saved to rec/audio${timestamp}.ogg`); + }); + } + }); + } +} + +async function handleChatTranscription(ws, body, sessionData) { + if 
(sessionData.currentChat) { + const chat = chats.get(sessionData.currentChat); + if (chat) { + let msg = { sender: sessionData.username, text: body, translations: [] }; + chat.messages.push(msg); + + for (let sessionId of chat.participants) { + if (sessionId !== ws.sessionId) { + const targetLang = sessions.get(sessionId)?.language || 'en'; + if (targetLang !== sessionData.language) { + const translation = await translateText(body, sessionData.language, targetLang); + msg.translations.push({ language: targetLang, text: translation }); + + const participantSocket = Array.from(wss.clients).find(client => client.sessionId === sessionId); + if (participantSocket && participantSocket.readyState === WebSocket.OPEN) { + participantSocket.send(JSON.stringify({ type: 'text', text: `${sessionData.username}: ${translation}` })); + const audioBuffer = await generateSpeech(translation); + participantSocket.send(JSON.stringify({ type: 'audio', audio: audioBuffer.toString('base64') })); + } + } else { + const participantSocket = Array.from(wss.clients).find(client => client.sessionId === sessionId); + if (participantSocket && participantSocket.readyState === WebSocket.OPEN) { + participantSocket.send(JSON.stringify({ type: 'text', text: `${sessionData.username}: ${body}` })); + participantSocket.send(JSON.stringify({ type: 'audio', audio: formData.toString('base64') })); + } + } + } + } + } + } +} + +async function translateText(originalText, originalLanguage, targetLanguage) { + const prompt = `Translate this text from ${originalLanguage} to ${targetLanguage}: ${originalText}`; + + const response = await groq.chat.completions.create({ + messages: [ + { + role: "system", + content: `You are translating voice transcriptions from '${originalLanguage}' to '${targetLanguage}'. 
Reply with just the translation.`, + }, + { + role: "user", + content: originalText, + }, + ], + model: "llama3-8b-8192", + }); + + return response.choices[0]?.message?.content || ""; +} + +async function generateSpeech(text) { + const mp3 = await openai.audio.speech.create({ + model: "tts-1", + voice: "alloy", + input: text, + }); + return Buffer.from(await mp3.arrayBuffer()); +} + +// HTTP Server +app.get('/', (req, res) => { + res.sendFile(path.join(__dirname, 'chat-client.html')); +}); + +app.get('/audio.js', (req, res) => { + res.sendFile(path.join(__dirname, 'audio.js')); +}); + +app.post('/log', (req, res) => { + console.log(`[LOG ${new Date().toISOString()}] ${req.body.message}`); + res.status(200).send('OK'); +}); + +app.get('/wsurl', (req, res) => { + res.status(200).send(process.env.WS_URL); +}); + +app.get('/settings', async (req, res) => { + if (req.query.language) { + language = req.query.language; + await storage.setItem('language', language); + } + if (req.query.storeRecordings) { + storeRecordings = req.query.storeRecordings; + await storage.setItem('storeRecordings', storeRecordings); + } + res.status(200).send({ language, storeRecordings }); +}); + +app.post('/settings', async (req, res) => { + const { sessionId, language, storeRecordings, task } = req.body; + const sessionData = sessions.get(sessionId); + if (language) sessionData.language = language; + if (storeRecordings !== undefined) sessionData.storeRecordings = storeRecordings; + if (task) sessionData.task = task; + res.status(200).send('OK'); +}); + +app.post('/upload', (req, res) => { + const timestamp = Date.now(); + console.log('Received audio data:', timestamp); + fs.mkdir('rec', { recursive: true }, err => { + if (err) return res.status(500).send('ERROR'); + const file = fs.createWriteStream(`rec/audio_slice_${timestamp}.ogg`); + req.pipe(file); + file.on('finish', () => res.status(200).send('OK')); + }); +}); + +app.get('/chats', (req, res) => { + const { username } = req.query; + 
const userChats = Array.from(chats.values()).filter(chat => chat.participants.includes(username)); + res.status(200).send({ chats: userChats }); +}); + +app.listen(PORT_HTTP, () => { + console.log(`Server listening on port ${PORT_HTTP}`); +}); + +// Helper to wrap request in a promise +function requestPromise(options) { + return new Promise((resolve, reject) => { + request(options, (error, response, body) => { + if (error) return reject(error); + resolve(body); + }); + }); +} diff --git a/web/client.html b/web/client.html index 3ba9eb9..10eb79c 100644 --- a/web/client.html +++ b/web/client.html @@ -3,11 +3,9 @@ Real-time Speech-to-Text - + - + @@ -15,61 +13,61 @@

Rt STT

- +
- -
-
+
-
+
- +
- +