actually call OLLAMA and log the errors
@@ -41,50 +41,32 @@ async def echo(update: Update, context):
     await context.bot.send_message(chat_id=update.effective_chat.id, text=update.message.text)


-def query_llm(user_message):
+async def query_llm_simple(user_message):
     """Query the LLM with the user's message."""
     data = {
         "model": "llama2",
-        "messages": [{"role": "user", "content": user_message}]
+        "messages": [{"role": "user", "content": user_message}],
+        "stream": "false"
     }
     response = requests.post(LLM_ENDPOINT, json=data)
     if response.status_code == 200:
-        response_data = response.json()
-        return response_data.get('message', {}).get('content', 'No response')
-    else:
+        response_data = response.json()
+        if "error" in response_data:
+            error_message = response_data.get('error', 'Unknown error')
+            # Log the error
+            logger.error(f"LLM Error: {error_message}")
+            # Return a user-friendly error message
+            return "Sorry, there was an error processing your request."
+        return response_data.get('message', {}).get('content', 'No response')
+    else:
+        logger.error(f"Error reaching LLM: {response.text}")
         return "Error: Unable to reach LLM"

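A note on the new query_llm_simple: Ollama's /api/chat endpoint expects stream to be a JSON boolean, so the string "false" above is likely to be rejected (or treated as a streaming request) rather than disable streaming, and the blocking requests.post inside an async coroutine stalls the bot's event loop for the whole generation. A minimal corrected sketch, assuming Ollama on its default local port (the actual LLM_ENDPOINT value is defined elsewhere and not shown in this diff):

    import asyncio
    import requests

    LLM_ENDPOINT = "http://localhost:11434/api/chat"  # assumption, not taken from this diff

    def query_llm_once(user_message):
        """One non-streaming chat completion against Ollama."""
        data = {
            "model": "llama2",
            "messages": [{"role": "user", "content": user_message}],
            "stream": False,  # JSON boolean, not the string "false"
        }
        response = requests.post(LLM_ENDPOINT, json=data, timeout=120)
        response.raise_for_status()
        return response.json().get("message", {}).get("content", "No response")

    async def query_llm_simple(user_message):
        # Run the blocking HTTP call in a worker thread so the event loop stays free.
        return await asyncio.to_thread(query_llm_once, user_message)
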
-def ask(update, context):
+async def ask(update, context):
     user_message = ' '.join(context.args)
-    llm_response = query_llm(user_message)
-    update.message.reply_text(llm_response)
+    llm_response = await query_llm_simple(user_message)
+    await update.message.reply_text(llm_response)

 async def screenshot(update, context):
     """Take a screenshot of a webpage."""
     url = ' '.join(context.args)
     await update.message.reply_text('This will normally get a screenshot from: ' + url + ', but currently under development')


     # driver.get(url)
     # screenshot = driver.get_screenshot_as_png()
     # image_stream = BytesIO(screenshot)
     # image_stream.seek(0)
     # image = Image.open(image_stream)
     # image_stream.close()
     # image.save('screenshot.png')
     # update.message.reply_photo(photo=open('screenshot.png', 'rb'))

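For whenever the screenshot command comes back, here is a hedged sketch of the commented-out flow above, assuming selenium with headless Chrome (no driver setup appears anywhere in this diff). The PIL round-trip in the comments is unnecessary, since reply_photo accepts the raw PNG bytes:

    from io import BytesIO
    from selenium import webdriver
    from selenium.webdriver.chrome.options import Options

    async def screenshot(update, context):
        """Fetch a page in headless Chrome and reply with its screenshot."""
        url = ' '.join(context.args)
        options = Options()
        options.add_argument('--headless=new')
        driver = webdriver.Chrome(options=options)
        try:
            driver.get(url)
            png = driver.get_screenshot_as_png()
        finally:
            driver.quit()
        await update.message.reply_photo(photo=BytesIO(png))

As with the LLM call, the blocking selenium work would ideally run via asyncio.to_thread so it doesn't freeze the bot.
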
 async def error_handler(update: Update):  # context: CallbackContext
     """Handle errors that occur during the execution of commands."""
     # Log the error before we do anything else
     # logger.error(f"An error occurred: {context.error}")
     logger.error("An error occurred")

     # Send a message to the user
     # error_message = "Sorry, an unexpected error occurred. Please try again later."
     # if update.effective_message:
     #     await context.bot.send_message(chat_id=update.effective_chat.id, text=error_message)

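The active logger.error call above drops the one thing worth logging: the exception itself, which python-telegram-bot hands to the handler as context.error; that is presumably why the context parameter was meant to stay in the signature. A hedged sketch of the fuller handler the commented lines point at, written against the v20-style async API used elsewhere in this file (logger being the module-level logger the file already uses):

    from telegram import Update
    from telegram.ext import ContextTypes

    async def error_handler(update: object, context: ContextTypes.DEFAULT_TYPE):
        """Log the exception, then tell the user something went wrong."""
        logger.error("An error occurred", exc_info=context.error)
        if isinstance(update, Update) and update.effective_message:
            await update.effective_message.reply_text(
                "Sorry, an unexpected error occurred. Please try again later."
            )
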
 async def main():
     """Start the bot."""
@@ -94,21 +76,50 @@ async def main():
     # Add handlers to the application
     # Command handlers should be registered before the generic message handler
     application.add_handler(CommandHandler("start", start))
-    application.add_handler(CommandHandler("screenshot", screenshot)) # Ensure screenshot function is async
-    application.add_handler(CommandHandler("ai", ask))
+    # application.add_handler(CommandHandler("screenshot", screenshot)) # Ensure screenshot function is async
+    application.add_handler(CommandHandler("ai", query_llm_simple))
+    application.add_handler(CommandHandler("ask", ask))
+    application.add_handler(CommandHandler("llm", ask))

     # This handler should be last as it's the most generic
     application.add_handler(MessageHandler(filters.TEXT, echo))

     # Register the error handler
-    application.add_error_handler(error_handler)
+    # application.add_error_handler(error_handler)

     # Run the bot
     await application.run_polling()

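One of the new registrations looks like a wiring bug rather than a deliberate choice: CommandHandler("ai", query_llm_simple) hands python-telegram-bot a coroutine whose signature is (user_message), but handler callbacks are invoked as callback(update, context), so /ai will raise a TypeError the first time it is used. A hedged one-line fix, routing /ai through ask like the other two commands:

    # ask() already joins context.args and awaits query_llm_simple(),
    # so /ai, /ask and /llm can all share it.
    application.add_handler(CommandHandler("ai", ask))
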
+# old async def query_llm(user_message):
+    """Query the LLM with the user's message."""
+    data = {
+        "model": "llama2",
+        "messages": [{"role": "user", "content": user_message}]
+    }
+    response = requests.post(LLM_ENDPOINT, json=data)
+
+    if response.status_code == 200:
+        # Split the response into individual JSON objects
+        response_parts = response.text.split('\n')
+
+        # Aggregate the content from each part
+        full_response = ''
+        for part in response_parts:
+            try:
+                json_part = json.loads(part)
+                if 'message' in json_part and 'content' in json_part['message']:
+                    full_response += json_part['message']['content'] + ' '
+                if json_part.get('done', False):
+                    break
+            except json.JSONDecodeError:
+                # Handle possible JSON decode error
+                continue
+
+        return full_response.strip()
+    else:
+        return "Error: Unable to reach LLM"

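Two caveats on the retired streaming version kept above: only the def line is commented out, so as written the body would run at import time and fail with a NameError on user_message; and splitting response.text only parses the stream after the whole body has already downloaded. If the streaming path returns, here is a hedged sketch using requests' incremental iter_lines (reusing the module's LLM_ENDPOINT):

    import json
    import requests

    def query_llm_streaming(user_message):
        """Accumulate an Ollama streaming chat response chunk by chunk."""
        data = {
            "model": "llama2",
            "messages": [{"role": "user", "content": user_message}],
            "stream": True,
        }
        full_response = ''
        with requests.post(LLM_ENDPOINT, json=data, stream=True, timeout=120) as resp:
            resp.raise_for_status()
            for line in resp.iter_lines():
                if not line:
                    continue
                part = json.loads(line)
                full_response += part.get('message', {}).get('content', '')
                if part.get('done', False):
                    break
        return full_response.strip()
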
 if __name__ == '__main__':
     loop = asyncio.get_event_loop()
     if loop.is_running():
@@ -13,3 +13,10 @@ pip install ....
 cd agent-py-bot/

+# ToDo
+make the Telegram bot think (show a typing indicator) while getting the response
+
+/ask who are you
+/ai test
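On the "make the bot think" ToDo: Telegram chat actions are the usual way to show work in progress. A hedged sketch against the v20 API, sending the typing action before the (potentially slow) LLM call; Telegram clears the indicator after roughly five seconds, so a long generation may need it re-sent:

    from telegram.constants import ChatAction

    async def ask(update, context):
        user_message = ' '.join(context.args)
        # Show "typing..." in the chat while the model generates.
        await context.bot.send_chat_action(
            chat_id=update.effective_chat.id, action=ChatAction.TYPING
        )
        llm_response = await query_llm_simple(user_message)
        await update.message.reply_text(llm_response)
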