LLM proxy integration
@@ -6,6 +6,15 @@ system:
   log_level: "INFO"        # DEBUG, INFO, WARNING, ERROR
   session_timeout: 3600    # Session timeout in seconds
+
+# LLM Proxy Configuration
+llm_proxy:
+  base_url: "http://localhost:1234"  # LLM server base URL
+  model: "openai/gpt-oss-20b"        # Model name
+  temperature: 0.7                   # Response creativity (0.0-1.0)
+  max_tokens: -1                     # Max response tokens (-1 for unlimited)
+  timeout: 30                        # Request timeout in seconds
+  api_key: null                      # API key if required
 
 # Cold Start Mode Configuration
 cold_start:
   enabled: true            # Enable cold start mode logic
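For context, here is a minimal client sketch showing how an application might consume the new llm_proxy section. It assumes the server at base_url (e.g. LM Studio, which defaults to port 1234) exposes an OpenAI-compatible /v1/chat/completions endpoint; the config path config.yaml, the load_llm_config helper, and the module layout are illustrative assumptions, not part of this commit.

```python
# Illustrative sketch, not part of this commit. Assumes an OpenAI-compatible
# /v1/chat/completions endpoint at llm_proxy.base_url and a config file named
# config.yaml containing the llm_proxy section from the diff above.
import requests
import yaml


def load_llm_config(path: str = "config.yaml") -> dict:
    """Read the llm_proxy section from the application config."""
    with open(path, "r", encoding="utf-8") as f:
        return yaml.safe_load(f)["llm_proxy"]


def chat(prompt: str, cfg: dict) -> str:
    """Send a single-turn chat request to the configured LLM server."""
    headers = {"Content-Type": "application/json"}
    if cfg.get("api_key"):  # api_key is null by default; only set the header if given
        headers["Authorization"] = f"Bearer {cfg['api_key']}"

    payload = {
        "model": cfg["model"],
        "messages": [{"role": "user", "content": prompt}],
        "temperature": cfg["temperature"],
    }
    if cfg["max_tokens"] != -1:  # -1 means "unlimited" per the config comment
        payload["max_tokens"] = cfg["max_tokens"]

    resp = requests.post(
        f"{cfg['base_url']}/v1/chat/completions",
        json=payload,
        headers=headers,
        timeout=cfg["timeout"],
    )
    resp.raise_for_status()
    return resp.json()["choices"][0]["message"]["content"]


if __name__ == "__main__":
    config = load_llm_config()
    print(chat("Say hello in one sentence.", config))
```

Treating max_tokens: -1 as "omit the field" follows the comment in the diff: OpenAI-compatible servers generally interpret a missing max_tokens as no explicit response cap.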