# ############################### OLLAMA (192.168.0.11:11434) ###############################

interpreter --api_base http://192.168.0.11:11434/v1/

interpreter --model "gpt-3.5-turbo"                                    # or: --model "mistral"

interpreter --model "mistral" --api_base http://192.168.0.11:11434/v1/
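
# Before pointing interpreter at port 11434 (Ollama's default), the model has
# to exist on the Ollama host. A minimal sketch, assuming Ollama is installed
# on 192.168.0.11:
ollama pull mistral        # download the model once
ollama serve               # start the server (if it isn't already running)
ollama list                # confirm "mistral" shows up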

Without an api_base, interpreter talks to OpenAI and needs a key:
Mac/Linux: export OPENAI_API_KEY=your-key-here
Windows:   setx OPENAI_API_KEY your-key-here   (then restart the terminal)

interpreter --local

# ######################## LOCAL OPENAI-COMPATIBLE SERVERS ########################

interpreter --api_base http://192.168.0.11:11434/v1 --api_key "" --model openai/local

interpreter --api_base http://192.168.0.137:1234/v1 --api_key "" --model openai/local

# 192.168.0.137 = LM Studio (port 1234 is LM Studio's default)
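
# Quick reachability check for either server; both Ollama and LM Studio expose
# the OpenAI-style model listing (assumes the servers above are running):
curl http://192.168.0.11:11434/v1/models
curl http://192.168.0.137:1234/v1/models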
# ################################ GROQ ########################## working

export OPENAI_API_KEY=gsk_your-key-here

interpreter -y --api_base https://api.groq.com/openai/v1 --model llama2-70b-4096   # also: mixtral-8x7b-32768, gemma-7b-it
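
# To sanity-check the key and endpoint outside interpreter, call Groq's
# OpenAI-compatible chat endpoint directly (a sketch; model name from above):
curl https://api.groq.com/openai/v1/chat/completions \
  -H "Authorization: Bearer $OPENAI_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "model": "llama2-70b-4096",
    "messages": [ { "role": "user", "content": "Say hello in one sentence." } ]
  }'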

# Load a model, start the server, and run this example in your terminal.
# Choose between streaming and non-streaming mode by setting the "stream" field.
# Ollama's OpenAI endpoint needs an explicit "model" (the "mistral" pulled above):
curl http://192.168.0.11:11434/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "mistral",
    "messages": [
      { "role": "system", "content": "Always answer in rhymes." },
      { "role": "user", "content": "Introduce yourself." }
    ],
    "temperature": 0.7,
    "max_tokens": -1,
    "stream": false
  }'
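
# To keep just the reply text, pipe the response through jq (assumes jq is installed):
curl -s http://192.168.0.11:11434/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{ "model": "mistral", "messages": [ { "role": "user", "content": "Introduce yourself." } ] }' \
  | jq -r '.choices[0].message.content'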

# Same request against LM Studio (192.168.0.137:1234); with "model" omitted,
# LM Studio answers with whatever model is currently loaded:
curl http://192.168.0.137:1234/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "messages": [
      { "role": "system", "content": "Always answer in rhymes." },
      { "role": "user", "content": "Introduce yourself." }
    ],
    "temperature": 0.7,
    "max_tokens": -1,
    "stream": false
  }'
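
# With "stream": true the server sends Server-Sent Events instead of one JSON
# object: "data: {...}" chunks that each carry a delta of the reply, ending with
# "data: [DONE]". Same request, streaming (-N turns off curl's output buffering):
curl -N http://192.168.0.137:1234/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "messages": [ { "role": "user", "content": "Introduce yourself." } ],
    "temperature": 0.7,
    "stream": true
  }'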