Using LLM for sentiment analysis
This commit is contained in:
44
setup_amd_model.sh
Normal file
44
setup_amd_model.sh
Normal file
@@ -0,0 +1,44 @@
|
||||
#!/bin/bash
#
# Setup AMD GPU Model Runner with a default model.
#
# Creates the models/, data/ and config/ working directories and writes a
# placeholder file at models/current_model.gguf. The placeholder MUST be
# replaced with a real GGUF model before starting the container.

set -euo pipefail

echo "=== AMD GPU Model Runner Setup ==="
echo ""

# Create working directories (idempotent: -p is a no-op if they exist).
mkdir -p models data config

# Download a small test model (SmolLM) that works well with AMD GPUs.
# NOTE(review): MODEL_URL points at a .safetensors file, not a GGUF — it is
# kept as a reference only and is never downloaded by this script; confirm
# the intended GGUF URL before wiring up an actual download step.
MODEL_URL="https://huggingface.co/HuggingFaceTB/SmolLM-135M/resolve/main/model.safetensors"
MODEL_FILE="models/current_model.gguf"

echo "Setting up test model..."
echo "Note: For production, replace with your preferred GGUF model"
echo ""

# Create a placeholder model file (you'll need to replace this with a real
# GGUF model). Quoted heredoc delimiter ('EOF') prevents any expansion, so
# the content is written literally.
cat > "$MODEL_FILE" << 'EOF'
# Placeholder for GGUF model
# Replace this file with a real GGUF model from:
# - Hugging Face (search for GGUF models)
# - TheBloke models: https://huggingface.co/TheBloke
# - SmolLM: https://huggingface.co/HuggingFaceTB/SmolLM-135M
#
# Example download command:
# wget -O models/current_model.gguf "https://huggingface.co/TheBloke/SmolLM-135M-GGUF/resolve/main/smollm-135m.Q4_K_M.gguf"
#
# This is just a placeholder - the container will fail to start without a real model
EOF

echo "✅ Model directory setup complete"
echo "⚠️  IMPORTANT: You need to replace models/current_model.gguf with a real GGUF model"
echo ""
echo "Download a real model with:"
echo "wget -O models/current_model.gguf 'YOUR_GGUF_MODEL_URL'"
echo ""
echo "Recommended models for AMD GPUs:"
echo "- SmolLM-135M: https://huggingface.co/TheBloke/SmolLM-135M-GGUF"
echo "- TinyLlama: https://huggingface.co/TheBloke/TinyLlama-1.1B-GGUF"
echo "- Phi-2: https://huggingface.co/TheBloke/phi-2-GGUF"
echo ""
echo "Once you have a real model, run:"
echo "docker-compose up -d amd-model-runner"
Reference in New Issue
Block a user