import logging
import asyncio
import nest_asyncio
from telegram import Bot, Update
from telegram.ext import Application, CommandHandler, MessageHandler, filters

# import "gopkg.in/telebot.v3/middleware"
|
|
|
|
import requests
import json
import base64
# selenium, BytesIO, and PIL support the (currently commented-out) screenshot feature
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from io import BytesIO
from PIL import Image

# Apply nest_asyncio so blocking calls like run_polling() can nest inside
# an already-running event loop (e.g. in a Jupyter notebook)
nest_asyncio.apply()

# Set up logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)

# Telegram Bot Token
# t.me/kevin_ai_robot
TOKEN = '6805059978:AAHNJKuOeazMSJHc3-BXRCsFfEVyFHeFnjw'
# t.me/artitherobot 6749075936:AAHUHiPTDEIu6JH7S2fQdibwsu6JVG3FNG0

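# Safer alternative (sketch): load the token from the environment instead of
# hard-coding it, e.g.
#   import os
#   TOKEN = os.environ['TELEGRAM_BOT_TOKEN']  # hypothetical variable name
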
# LLM API Endpoint (an Ollama-style chat API on the local network)
LLM_ENDPOINT = "http://192.168.0.11:11434/api/chat"

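# Request/response shape this bot assumes (a sketch, inferred from how
# query_llm_simple below builds and parses the payload):
#   request:  {"model": "llama2", "messages": [{"role": "user", "content": "hi"}], "stream": False}
#   response: {"message": {"role": "assistant", "content": "..."}, "done": true}
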
# Selenium WebDriver setup for screenshots (disabled for now)
#chrome_options = Options()
#chrome_options.add_argument("--headless")
#driver = webdriver.Chrome(options=chrome_options)

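# Hypothetical sketch of the screenshot command referenced in main() below;
# it assumes the Selenium driver above has been uncommented. Usage: /screenshot <url>
async def screenshot(update: Update, context):
    url = ' '.join(context.args)
    if not url:
        await update.message.reply_text("Usage: /screenshot <url>")
        return
    # Selenium calls block, so run them off the event loop
    def capture():
        driver.get(url)
        return driver.get_screenshot_as_png()
    png_bytes = await asyncio.to_thread(capture)
    await update.message.reply_photo(photo=BytesIO(png_bytes))
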
async def start(update: Update, context):
    await context.bot.send_message(chat_id=update.effective_chat.id, text="Hi! I'm your AI bot. Ask me anything with /ask")

async def echo(update: Update, context):
    await context.bot.send_message(chat_id=update.effective_chat.id, text=update.message.text)

async def query_llm_simple(user_message):
    """Query the LLM with the user's message."""
    data = {
        "model": "llama2",
        "messages": [{"role": "user", "content": user_message}],
        "stream": False  # must be a JSON boolean, not the string "false"
    }
    # requests is blocking, so run the call in a worker thread
    response = await asyncio.to_thread(requests.post, LLM_ENDPOINT, json=data)
    if response.status_code == 200:
        response_data = response.json()
        if "error" in response_data:
            error_message = response_data.get('error', 'Unknown error')
            # Log the error
            logger.error(f"LLM Error: {error_message}")
            # Return a user-friendly error message
            return "Sorry, there was an error processing your request."
        return response_data.get('message', {}).get('content', 'No response')
    else:
        logger.error(f"Error reaching LLM: {response.text}")
        return "Error: Unable to reach LLM"

async def ask(update, context):
    user_message = ' '.join(context.args)
    if not user_message:
        await update.message.reply_text("Usage: /ask <your question>")
        return
    llm_response = await query_llm_simple(user_message)
    await update.message.reply_text(llm_response)

def main():
    """Start the bot."""
    # Create an Application instance
    application = Application.builder().token(TOKEN).build()

    # Add handlers to the application
    # Command handlers should be registered before the generic message handler
    application.add_handler(CommandHandler("start", start))
    # application.add_handler(CommandHandler("screenshot", screenshot))  # enable once the Selenium driver above is set up
    # query_llm_simple is not a valid handler (it takes a message string, not
    # (update, context)), so /ai is routed to ask as well
    application.add_handler(CommandHandler("ai", ask))
    application.add_handler(CommandHandler("ask", ask))

    # This handler should be last as it's the most generic; exclude commands
    # so unknown /commands aren't echoed back
    application.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, echo))

    # Register the error handler
    # application.add_error_handler(error_handler)

    # Run the bot (run_polling is a blocking method that manages the event
    # loop itself, so it is not awaited)
    application.run_polling()

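# A minimal error-handler sketch for the commented-out registration above
# (assumes PTB's standard error-handler signature):
async def error_handler(update, context):
    # Log any exception raised while processing an update
    logger.error("Exception while handling an update:", exc_info=context.error)
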
# Older streaming variant of the query, kept for reference; not registered
# with any handler
async def query_llm(user_message):
    """Query the LLM with the user's message (streaming variant)."""
    data = {
        "model": "llama2",
        "messages": [{"role": "user", "content": user_message}]
    }
    # requests is blocking, so run the call in a worker thread
    response = await asyncio.to_thread(requests.post, LLM_ENDPOINT, json=data)

    if response.status_code == 200:
        # Without "stream": False the endpoint streams newline-delimited JSON;
        # split the response into individual JSON objects
        response_parts = response.text.split('\n')

        # Aggregate the content from each part
        full_response = ''
        for part in response_parts:
            try:
                json_part = json.loads(part)
                if 'message' in json_part and 'content' in json_part['message']:
                    full_response += json_part['message']['content'] + ' '
                if json_part.get('done', False):
                    break
            except json.JSONDecodeError:
                # Handle possible JSON decode error
                continue

        return full_response.strip()
    else:
        return "Error: Unable to reach LLM"

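# Example of the streamed reply format query_llm aggregates (a sketch,
# assuming an Ollama-compatible endpoint):
#   {"message": {"role": "assistant", "content": "Hel"}, "done": false}
#   {"message": {"role": "assistant", "content": "lo"}, "done": true}
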
if __name__ == '__main__':
    # run_polling() manages its own event loop; with nest_asyncio applied,
    # this also works when a loop is already running (e.g. in a notebook)
    main()