diff --git a/_doc/oi-notes.md b/_doc/oi-notes.md new file mode 100644 index 0000000..5f72b29 --- /dev/null +++ b/_doc/oi-notes.md @@ -0,0 +1,49 @@ + + + + interpreter --api_base http://192.168.0.11:11434/v1/ + +interpreter --model "gpt-3.5-turbo" # mistral +interpreter --model "mistral" --api_base http://192.168.0.11:11434/v1/ + + + Mac/Linux: 'export OPENAI_API_KEY=your-key-here', + Windows: 'setx OPENAI_API_KEY your-key-here' then restart terminal. +interpreter --local + +interpreter --api_base http://192.168.0.11:11434/v1 --api_key "" --model openai/local +interpreter --api_base http://192.168.0.137:1234/v1 --api_key "" --model openai/local +192.168.0.137 + + +################################# GROQ +export OPENAI_API_KEY=<REDACTED_GROQ_API_KEY>  # SECURITY: a real gsk_ key was committed here — revoke/rotate it in the Groq console +interpreter -y --api_base https://api.groq.com/openai/v1 --model gemma-7b-it ## mixtral-8x7b-32768 # gemma-7b-it # llama2-70b-4096 +## +# Load a model, start the server, and run this example in your terminal +# Choose between streaming and non-streaming mode by setting the "stream" field + +curl http://192.168.0.11:11434/v1/chat/completions \ +-H "Content-Type: application/json" \ +-d '{ + "messages": [ + { "role": "system", "content": "Always answer in rhymes." }, + { "role": "user", "content": "Introduce yourself." } + ], + "temperature": 0.7, + "max_tokens": -1, + "stream": false +}' + + +curl http://192.168.0.137:1234/v1/chat/completions \ +-H "Content-Type: application/json" \ +-d '{ + "messages": [ + { "role": "system", "content": "Always answer in rhymes." }, + { "role": "user", "content": "Introduce yourself." } + ], + "temperature": 0.7, + "max_tokens": -1, + "stream": false +}' \ No newline at end of file