LLM proxy integration

This commit is contained in:
Dobromir Popov
2025-08-26 18:37:00 +03:00
parent 9a76624904
commit b404191ffa
5 changed files with 572 additions and 21 deletions

View File

@@ -6,6 +6,15 @@ system:
log_level: "INFO" # DEBUG, INFO, WARNING, ERROR
session_timeout: 3600 # Session timeout in seconds
# LLM Proxy Configuration
# NOTE(review): the diff hunk header (`@@ ... system:`) suggests this stanza
# nests under `system:`, but the diff view has stripped YAML indentation —
# confirm nesting/indent level against the actual config file.
llm_proxy:
base_url: "http://localhost:1234" # Base URL of the OpenAI-compatible LLM server (1234 is LM Studio's default port — TODO confirm which server is used)
model: "openai/gpt-oss-20b" # Model identifier sent with each request
temperature: 0.7 # Sampling temperature; 0.0 = deterministic, higher = more varied output (0.0-1.0)
max_tokens: -1 # Maximum response tokens; -1 means no limit
timeout: 30 # Per-request timeout in seconds
api_key: null # Optional API key; null when the server requires no authentication
# Cold Start Mode Configuration
cold_start:
enabled: true # Enable cold start mode logic