diff --git a/agent-mobile/artimobile/VoiceHandler.js b/agent-mobile/artimobile/VoiceHandler.js
index f4223ac..fb278a1 100644
--- a/agent-mobile/artimobile/VoiceHandler.js
+++ b/agent-mobile/artimobile/VoiceHandler.js
@@ -5,7 +5,13 @@ import Voice from '@react-native-voice/voice';
// import Config from 'react-native-config';
// process.env.TTS_BACKEND_URL = Config.TTS_BACKEND_URL;
-process.env.TTS_BACKEND_URL = "http://192.168.0.10:9008/asr"
+// process.env.TTS_BACKEND_URL = "http://192.168.0.10:9008/asr"
+process.env.TTS_BACKEND_URL = "https://tts.d-popov.com/asr";
+
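+// Chat endpoint for the LLM backend; the commented-out URL below targets a local Ollama-style server (11434 is Ollama's default port).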
+const LLM_ENDPOINT = "https://ws.ai.d-popov.com/api/chat";
+// const LLM_ENDPOINT = "http://192.168.0.11:11434/api/chat";
+
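+// Presumably a WebSocket sink for forwarding debug logs to a dev machine; not referenced elsewhere in this diff.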
+const LOG_ENDPOINT = "ws://192.168.0.11:9999";
class VoiceHandler extends Component {
constructor(props) {
@@ -31,19 +37,23 @@ class VoiceHandler extends Component {
onSpeechStart(e) {
this.setState({
started: '√',
+ status: "Listening..."
});
}
onSpeechRecognized(e) {
this.setState({
- recognized: '√',
+ status: "Recognized"
});
+ console.log("onSpeechRecognized()");
}
onSpeechResults(e) {
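+ // e.value is the recognizer's array of transcription candidates; append them to the running transcript.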
this.setState({
- results: e.value,
+ recognized: [...this.state.recognized, ...e.value],
+ results: e.value,
+ status: this.state.status + "\nonSpeechResults(): " + e.value
});
+ console.log("onSpeechResults():" + e.value);
}
async _startRecognizing(e) {
@@ -51,6 +61,7 @@ class VoiceHandler extends Component {
recognized: '',
started: '',
results: [],
+ status: "Starting...",
isRecording: true,
});
@@ -69,28 +80,29 @@ class VoiceHandler extends Component {
this.setState({
isRecording: false,
isProcessing:true,
- recognized: this.state.results.join(' ')
+ status: this.state.status + "\nstopRecognizing(): " + this.state.recognized
});
// Assuming you have the audio data, send it to your backend
- this._sendTranscribedTextToLLM(this.state.results.join(' '));
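+ // NOTE: hardcoded test question for now; the commented-out call below sends the real transcription.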
+ this._sendTranscribedTextToLLM("who is the president of thr USA");
+ //this._sendTranscribedTextToLLM(this.state.recognized);
} catch (e) {
console.error(e);
}
}
async _sendTranscribedTextToLLM(transcribedText) {
- const LLM_ENDPOINT = "http://192.168.0.11:11434/api/chat";
const model = "openhermes:latest";
+ const prompt = "I have a question. Answer briefly and precise as an expert: \n" + transcribedText ;
const data = {
model: model,
- messages: [{ role: "user", content: "I have a request: " + transcribedText }],
+ messages: [{ role: "user", content: prompt }],
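+ // stream: false requests a single JSON response instead of a chunked stream.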
stream: false,
};
this.setState({
- status: ["sending to LLM:" + transcribedText]
+ status: this.state.status + "\nsending to LLM:\n" + prompt
})
+
try {
-
console.log('sending text to LLM at ', LLM_ENDPOINT, ": '", transcribedText, "'");
const response = await fetch(LLM_ENDPOINT, {
method: 'POST',
@@ -165,7 +177,12 @@ class VoiceHandler extends Component {
Status: {this.state.status}
Recognized: {this.state.recognized}
Started: {this.state.started}
- Results: {this.state.results.join(' ')}
+ Results:
+
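+ {/* render each result on its own line; assumes Text is imported from react-native */}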
+ {this.state.results.map((r, index) => (
+ <Text key={index}>{r}</Text>
+ ))}
+
);
}