using LLM for sentiment analysis
This commit is contained in:
39
verify_docker_model_runner.sh
Normal file
39
verify_docker_model_runner.sh
Normal file
@@ -0,0 +1,39 @@
|
||||
#!/bin/bash
#
# Quick verification script for Docker Model Runner.
# Checks four things: the container is running, the API endpoint answers,
# GPU support is enabled, and basic model operations work.
# Exits 1 only when the container is not running; the remaining checks
# are informational and do not abort the script.

set -u

# Container name used for logs/exec below. The running check deliberately
# matches any container whose name contains "docker-model-runner".
readonly GPU_CONTAINER="docker-model-runner-gpu"
readonly API_URL="http://localhost:11434/api/tags"

echo "=== Docker Model Runner Verification ==="

# Check if container is running.
# --filter/--format restricts matching to container names; a bare
# `docker ps | grep` can false-positive on image names or port strings.
if [ -n "$(docker ps --filter "name=docker-model-runner" --format '{{.Names}}')" ]; then
  echo "✅ Docker Model Runner container is running"
else
  echo "❌ Docker Model Runner container is not running"
  echo "Run: ./docker_model_runner_gpu_setup.sh"
  exit 1
fi

# Check API endpoint.
echo ""
echo "Testing API endpoint..."
# --max-time keeps the script from hanging if the port is open but the
# service never responds.
if curl -s --max-time 10 "$API_URL" | grep -q "models"; then
  echo "✅ API is responding"
else
  echo "❌ API is not responding"
fi

# Check GPU support.
echo ""
echo "Checking GPU support..."
if docker logs "$GPU_CONTAINER" 2>/dev/null | grep -q "gpuSupport=true"; then
  echo "✅ GPU support is enabled"
else
  echo "⚠️ GPU support may not be enabled (check logs)"
fi

# Test basic model operations (best-effort; errors are suppressed because
# the container may not expose the CLI in all configurations).
echo ""
echo "Testing model operations..."
docker exec "$GPU_CONTAINER" /app/model-runner list 2>/dev/null | head -5

echo ""
echo "=== Verification Complete ==="
Reference in New Issue
Block a user