agent scraper

Dobromir Popov
2023-12-23 00:41:19 +00:00
parent 73803096b0
commit 317a876ec4
7 changed files with 132 additions and 14 deletions


@@ -3,9 +3,7 @@ import asyncio, nest_asyncio
from telegram import Bot, Message, Update
from telegram.constants import ParseMode
from telegram.ext import Application, CommandHandler, MessageHandler, filters, ContextTypes, CallbackContext
# import "gopkg.in/telebot.v3/middleware"
import os
import requests
import json
import base64
@@ -33,10 +31,7 @@ DEVELOPER_CHAT_ID = "777826553"
# LLM API Endpoint
LLM_ENDPOINT = "http://192.168.0.11:11434/api/chat"
#! Selenium WebDriver setup for screenshots
#chrome_options = Options()
#chrome_options.add_argument("--headless")
#driver = webdriver.Chrome(options=chrome_options)
APPEND_RESULTS = os.getenv('APPEND_RESULTS', 'True') == 'True'
async def start(update: Update, context: CallbackContext):
    await context.bot.send_message(chat_id=update.effective_chat.id, text="Hi! I'm your AI bot. Ask me anything with /ask")
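
Note that the APPEND_RESULTS flag set a few lines above is truthy only when the environment variable is exactly the string 'True' (also the default when it is unset); any other spelling, including 'true' or '1', switches the bot into replace mode, where execution results later overwrite the code block instead of being appended. A quick illustration of that comparison, using nothing beyond the standard library:

import os

# Unset -> falls back to the default 'True' -> append mode
os.environ.pop('APPEND_RESULTS', None)
print(os.getenv('APPEND_RESULTS', 'True') == 'True')   # True

# Any other spelling disables appending; results then replace the code block
os.environ['APPEND_RESULTS'] = 'true'
print(os.getenv('APPEND_RESULTS', 'True') == 'True')   # False
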
@@ -78,7 +73,11 @@ async def ok(update: Update, context: CallbackContext):
    context.chat_data['messages'] = []
    await update.message.reply_text("Exiting ask mode.")
#https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-a-completion
# CODE RUNNER
import re
from agents.runner import execute_python_code
#https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-a-completion
async def query_llm(user_message):
"""Query the LLM with the user's message."""
data = {
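
The hunk cuts off before the body of data, but the Ollama chat endpoint referenced by LLM_ENDPOINT and the docs link above expects a model name, a messages list, and a stream flag. A minimal sketch of the kind of payload query_llm presumably builds, reusing the requests import and LLM_ENDPOINT defined earlier in the file; the model name is an assumption, not taken from this commit:

    # Hypothetical request body for the Ollama /api/chat endpoint; "mistral" is an assumed model name.
    data = {
        "model": "mistral",
        "messages": [
            {"role": "user", "content": user_message},
        ],
        "stream": False,  # ask for a single JSON reply instead of a token stream
    }
    response = requests.post(LLM_ENDPOINT, json=data)
    response_data = response.json()
    # Non-streaming replies arrive as {"message": {"role": "assistant", "content": "..."}}
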
@@ -95,7 +94,22 @@ async def query_llm(user_message):
logger.error(f"LLM Error: {error_message}")
# Return a user-friendly error message
return "Sorry, there was an error processing your request."
return response_data.get('message', {}).get('content', 'No response from AI')
# handle response
content = response_data.get('message', {}).get('content', 'No response')
# Find and execute all code blocks
code_blocks = re.findall(r"```(.*?)```", content, re.DOTALL)
for code in code_blocks:
execution_result = execute_python_code(code.strip())
if APPEND_RESULTS:
# Append the result after the code block
content = content.replace(f"```{code}```", f"```{code}```\n```{execution_result}```")
else:
# Replace the code block with its result
content = content.replace(f"```{code}```", f"```{execution_result}```")
return content
else:
logger.error(f"Error reaching LLM: {response.text}")
return "Error: Unable to reach the AI agent."
@@ -172,10 +186,44 @@ async def error_handler(update: object, context: ContextTypes.DEFAULT_TYPE) -> None:
async def bad_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Raise an error to trigger the error handler."""
    await context.bot.wrong_method_name()  # type: ignore[attr-defined]
if __name__ == '__main__':
#------------------------- webagent --------------------------#
import schedule
import time
from agents.webagent import run_web_agent

async def async_main():
    # Assuming this is your asynchronous main function with its full details
    loop = asyncio.get_event_loop()
    if loop.is_running():
        loop.create_task(main())
    else:
        asyncio.run(main())
    await main()

def sync_main():
    # Synchronous part for scheduling
    topic = "TSLA"
    interval = 1 # in minutes
    folder = "agent-py-bot/scrape/raw"

    schedule.every(interval).minutes.do(run_web_agent, topic=topic, folder=folder)

    # Run once at the start
    run_web_agent(topic=topic, folder=folder)

    while True:
        schedule.run_pending()
        time.sleep(1)

if __name__ == '__main__':
    loop = asyncio.get_event_loop()

    # Run the asynchronous part
    if loop.is_running():
        loop.create_task(async_main())
    else:
        loop.run_until_complete(async_main())

    # Run the synchronous part
    sync_main()
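
run_web_agent comes from agents/webagent.py, another file touched by this commit but not shown here. Judging from the scheduler call it accepts topic and folder keyword arguments and is expected to drop raw scrape output under agent-py-bot/scrape/raw. A minimal sketch of such a function, with the search source and file naming purely assumed:

# agents/webagent.py -- hypothetical sketch; the committed module is in another file of this commit
import json
import os
from datetime import datetime

import requests

def run_web_agent(topic, folder):
    """Fetch raw search results for `topic` and dump them into `folder`."""
    os.makedirs(folder, exist_ok=True)

    # Assumed source: DuckDuckGo's HTML endpoint; the real agent may scrape something else.
    response = requests.get("https://duckduckgo.com/html/", params={"q": topic}, timeout=30)

    timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
    path = os.path.join(folder, f"{topic}_{timestamp}.json")
    with open(path, "w", encoding="utf-8") as f:
        json.dump({"topic": topic, "fetched_at": timestamp, "html": response.text}, f)

    return path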