diff --git a/agent-mobile/artimobile/VoiceHandler.js b/agent-mobile/artimobile/VoiceHandler.js
index c477f7f..2d38305 100644
--- a/agent-mobile/artimobile/VoiceHandler.js
+++ b/agent-mobile/artimobile/VoiceHandler.js
@@ -2,7 +2,9 @@
 import React, { Component } from 'react';
 import { View, Text, Button } from 'react-native';
 import Voice from '@react-native-voice/voice';
+import Config from 'react-native-config';
 
+process.env.TTS_BACKEND_URL = Config.TTS_BACKEND_URL;
 
 class VoiceHandler extends Component {
   constructor(props) {
@@ -12,6 +14,7 @@ class VoiceHandler extends Component {
       started: '',
       results: [],
       isRecording: false,
+      isProcessing: false,
     };
 
     Voice.onSpeechStart = this.onSpeechStart.bind(this);
@@ -54,7 +57,7 @@ class VoiceHandler extends Component {
     } catch (error) {
       console.error('There was an error starting voice recognition:', error);
       this.setState({
-        isRecording: false, 
+        isRecording: false,
      });
     }
   }
@@ -63,18 +66,68 @@ class VoiceHandler extends Component {
       await Voice.stop();
       this.setState({
         isRecording: false,
+        isProcessing: true,
       });
       // Assuming you have the audio data, send it to your backend
-      this._sendAudioToBackend(this.state.results);
+      this._sendTranscribedTextToLLM(this.state.results.join(' '));
     } catch (e) {
       console.error(e);
     }
   }
 
+  async _sendTranscribedTextToLLM(transcribedText) {
+    const LLM_ENDPOINT = "http://192.168.0.11:11434/api/chat";
+    const model = "openhermes:latest";
+    const data = {
+      model: model,
+      messages: [{ role: "user", content: "I have a request: " + transcribedText }],
+      stream: false,
+    };
+    try {
+      console.log('sending text to LLM at ', LLM_ENDPOINT, ": '", transcribedText, "'");
+      const response = await fetch(LLM_ENDPOINT, {
+        method: 'POST',
+        headers: {
+          'Content-Type': 'application/json',
+        },
+        body: JSON.stringify(data),
+      });
+
+      if (response.ok) {
+        const responseJson = await response.json();
+        if (responseJson.error) {
+          // Handle the error appropriately in your app
+          console.error("LLM Error:", responseJson.error);
+        } else {
+          // Handle the successful response: append the LLM reply to the
+          // existing results so it is reflected in the UI
+          console.log('LLM Response:', responseJson.message);
+          this.setState(prevState => ({
+            results: [...prevState.results, responseJson.message.content],
+          }));
+        }
+      } else {
+        // Handle HTTP errors
+        console.error("HTTP Error:", response.status);
+      }
+    } catch (error) {
+      // Handle request errors
+      console.error('Request failed:', error);
+    } finally {
+      this.setState({
+        isProcessing: false,
+      });
+    }
+  }
+
   _sendAudioToBackend(results) {
     // Placeholder: Convert `results` or actual audio data to a format acceptable by your backend
     const formData = new FormData();
-    // formData.append('audio', {uri: 'path_to_audio_file', type: 'audio/x-m4a', name: 'audio.m4a'});
-
+    //formData.append('audio', {uri: 'path_to_audio_file', type: 'audio/x-m4a', name: 'audio.m4a'});
+
     fetch(process.env.TTS_BACKEND_URL, {
       method: 'POST',
       body: formData,
@@ -82,13 +135,16 @@ class VoiceHandler extends Component {
         'Content-Type': 'multipart/form-data',
       },
     })
-    .then(response => response.text())
-    .then(body => {
-      console.log('Audio sent to backend, response:', body);
-    })
-    .catch(error => {
-      console.error('Failed to send audio:', error);
-    });
+      .then(response => response.text())
+      .then(body => {
+        console.log('Audio sent to backend, response:', body);
+        // Append the backend response to the existing results
+        this.setState(prevState => ({
+          results: [...prevState.results, body],
+        }));
+      })
+      .catch(error => {
+        console.error('Failed to send audio:', error);
+      });
   }
 
   render() {
     return (
@@ -97,6 +153,7 @@ class VoiceHandler extends Component {
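
Review note, outside the patch: `_sendTranscribedTextToLLM` hard-codes a LAN IP and model name, while `TTS_BACKEND_URL` already flows through `react-native-config`. Below is a minimal sketch of routing the LLM settings through the same `.env` mechanism; the `LLM_ENDPOINT` and `LLM_MODEL` keys are hypothetical and would need to be added to the project's `.env`, not something the patch defines.

```js
// llmClient.js -- sketch only; assumes the project's .env gains two
// hypothetical entries:
//   LLM_ENDPOINT=http://192.168.0.11:11434/api/chat
//   LLM_MODEL=openhermes:latest
import Config from 'react-native-config';

export async function sendToLLM(transcribedText) {
  // Same non-streaming Ollama /api/chat payload the patch builds inline.
  const response = await fetch(Config.LLM_ENDPOINT, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: Config.LLM_MODEL,
      messages: [{ role: 'user', content: 'I have a request: ' + transcribedText }],
      stream: false,
    }),
  });

  if (!response.ok) {
    throw new Error('HTTP ' + response.status);
  }
  const json = await response.json();
  if (json.error) {
    throw new Error(json.error);
  }
  // Ollama's non-streaming chat response carries the reply in message.content.
  return json.message.content;
}
```

With a helper like this, `_sendTranscribedTextToLLM` would reduce to a try/catch/finally around `sendToLLM` plus the `isProcessing` state updates, and the device IP would no longer be baked into the bundle.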