# docker-compose overlay: AI trading services (attaches to the external
# model-runner-network created by the main compose file)
# NOTE: the top-level `version` key is informational only under Compose v2
# (it is ignored with a warning); kept for backward compatibility.
version: '3.8'

services:
# Your existing trading dashboard
|
|
trading-dashboard:
|
|
image: python:3.11-slim
|
|
container_name: trading-dashboard
|
|
ports:
|
|
- "8050:8050" # Dash/Streamlit port
|
|
volumes:
|
|
- ./config:/config
|
|
- ./models:/models
|
|
environment:
|
|
- MODEL_RUNNER_URL=http://docker-model-runner:11434
|
|
- LLAMA_CPP_URL=http://llama-cpp-server:8000
|
|
- DASHBOARD_PORT=8050
|
|
depends_on:
|
|
- docker-model-runner
|
|
command: >
|
|
sh -c "
|
|
pip install dash requests &&
|
|
python -c '
|
|
import dash
|
|
from dash import html, dcc
|
|
import requests
|
|
|
|
app = dash.Dash(__name__)
|
|
|
|
def get_models():
|
|
try:
|
|
response = requests.get(\"http://docker-model-runner:11434/api/tags\")
|
|
return response.json()
|
|
except:
|
|
return {\"models\": []}
|
|
|
|
app.layout = html.Div([
|
|
html.H1(\"Trading Dashboard with AI Models\"),
|
|
html.Div([
|
|
html.H3(\"Available Models:\"),
|
|
html.Pre(str(get_models()))
|
|
]),
|
|
dcc.Input(id=\"prompt\", type=\"text\", placeholder=\"Enter your prompt...\"),
|
|
html.Button(\"Generate\", id=\"generate-btn\"),
|
|
html.Div(id=\"output\")
|
|
])
|
|
|
|
@app.callback(
|
|
dash.dependencies.Output(\"output\", \"children\"),
|
|
[dash.dependencies.Input(\"generate-btn\", \"n_clicks\")],
|
|
[dash.dependencies.State(\"prompt\", \"value\")]
|
|
)
|
|
def generate_text(n_clicks, prompt):
|
|
if n_clicks and prompt:
|
|
try:
|
|
response = requests.post(
|
|
\"http://docker-model-runner:11434/api/generate\",
|
|
json={\"model\": \"ai/smollm2:135M-Q4_K_M\", \"prompt\": prompt}
|
|
)
|
|
return response.json().get(\"response\", \"No response\")
|
|
except Exception as e:
|
|
return f\"Error: {str(e)}\"
|
|
return \"Enter a prompt and click Generate\"
|
|
|
|
if __name__ == \"__main__\":
|
|
app.run_server(host=\"0.0.0.0\", port=8050, debug=True)
|
|
'
|
|
"
|
|
networks:
|
|
- model-runner-network
|
|
|
|
# AI-powered trading analysis service
|
|
trading-analysis:
|
|
image: python:3.11-slim
|
|
container_name: trading-analysis
|
|
volumes:
|
|
- ./config:/config
|
|
- ./models:/models
|
|
- ./data:/data
|
|
environment:
|
|
- MODEL_RUNNER_URL=http://docker-model-runner:11434
|
|
- ANALYSIS_INTERVAL=300 # 5 minutes
|
|
depends_on:
|
|
- docker-model-runner
|
|
command: >
|
|
sh -c "
|
|
pip install requests pandas numpy &&
|
|
python -c '
|
|
import time
|
|
import requests
|
|
import json
|
|
|
|
def analyze_market():
|
|
prompt = \"Analyze current market conditions and provide trading insights\"
|
|
try:
|
|
response = requests.post(
|
|
\"http://docker-model-runner:11434/api/generate\",
|
|
json={\"model\": \"ai/smollm2:135M-Q4_K_M\", \"prompt\": prompt}
|
|
)
|
|
analysis = response.json().get(\"response\", \"Analysis unavailable\")
|
|
print(f\"[{time.strftime(\"%Y-%m-%d %H:%M:%S\")}] Market Analysis: {analysis[:200]}...\")
|
|
except Exception as e:
|
|
print(f\"[{time.strftime(\"%Y-%m-%d %H:%M:%S\")}] Error: {str(e)}\")
|
|
|
|
print(\"Trading Analysis Service Started\")
|
|
while True:
|
|
analyze_market()
|
|
time.sleep(300) # 5 minutes
|
|
'
|
|
"
|
|
networks:
|
|
- model-runner-network
|
|
|
|
# Model performance monitor
|
|
model-monitor:
|
|
image: python:3.11-slim
|
|
container_name: model-monitor
|
|
ports:
|
|
- "9091:9091" # Monitoring dashboard
|
|
environment:
|
|
- MODEL_RUNNER_URL=http://docker-model-runner:11434
|
|
- MONITOR_PORT=9091
|
|
depends_on:
|
|
- docker-model-runner
|
|
command: >
|
|
sh -c "
|
|
pip install flask requests psutil &&
|
|
python -c '
|
|
from flask import Flask, jsonify
|
|
import requests
|
|
import time
|
|
import psutil
|
|
|
|
app = Flask(__name__)
|
|
start_time = time.time()
|
|
|
|
@app.route(\"/health\")
|
|
def health():
|
|
return jsonify({
|
|
\"status\": \"healthy\",
|
|
\"uptime\": time.time() - start_time,
|
|
\"cpu_percent\": psutil.cpu_percent(),
|
|
\"memory\": psutil.virtual_memory()._asdict()
|
|
})
|
|
|
|
@app.route(\"/models\")
|
|
def models():
|
|
try:
|
|
response = requests.get(\"http://docker-model-runner:11434/api/tags\")
|
|
return jsonify(response.json())
|
|
except Exception as e:
|
|
return jsonify({\"error\": str(e)})
|
|
|
|
@app.route(\"/performance\")
|
|
def performance():
|
|
try:
|
|
# Test model response time
|
|
start = time.time()
|
|
response = requests.post(
|
|
\"http://docker-model-runner:11434/api/generate\",
|
|
json={\"model\": \"ai/smollm2:135M-Q4_K_M\", \"prompt\": \"test\"}
|
|
)
|
|
response_time = time.time() - start
|
|
|
|
return jsonify({
|
|
\"response_time\": response_time,
|
|
\"status\": \"ok\" if response.status_code == 200 else \"error\"
|
|
})
|
|
except Exception as e:
|
|
return jsonify({\"error\": str(e)})
|
|
|
|
print(\"Model Monitor Service Started on port 9091\")
|
|
app.run(host=\"0.0.0.0\", port=9091)
|
|
'
|
|
"
|
|
networks:
|
|
- model-runner-network
|
|
|
|
# Shared network definition: every service above joins this pre-existing
# network so it can resolve docker-model-runner by container name.
networks:
  model-runner-network:
    external: true # Use the network created by the main compose file