Notes; hook transcribed speech up to the LLM backend.

This commit is contained in:
Dobromir Popov
2024-02-15 01:25:33 +02:00
parent 627b5badb2
commit 947956a37b
3 changed files with 83 additions and 19 deletions

View File

@ -2,7 +2,9 @@
import React, { Component } from 'react';
import { View, Text, Button } from 'react-native';
import Voice from '@react-native-voice/voice';
import Config from 'react-native-config';
process.env.TTS_BACKEND_URL = Config.TTS_BACKEND_URL;
class VoiceHandler extends Component {
constructor(props) {
@ -12,6 +14,7 @@ class VoiceHandler extends Component {
started: '',
results: [],
isRecording: false,
isProcessing: false,
};
Voice.onSpeechStart = this.onSpeechStart.bind(this);
@ -54,7 +57,7 @@ class VoiceHandler extends Component {
} catch (error) {
console.error('There was an error starting voice recognition:', error);
this.setState({
isRecording: false,
isRecording: false,
});
}
}
@ -63,18 +66,68 @@ class VoiceHandler extends Component {
await Voice.stop();
this.setState({
isRecording: false,
isProcessing:true
});
// Assuming you have the audio data, send it to your backend
this._sendAudioToBackend(this.state.results);
this._sendTranscribedTextToLLM(this.state.results.join(' '));
} catch (e) {
console.error(e);
}
}
async _sendTranscribedTextToLLM(transcribedText) {
const LLM_ENDPOINT = "http://192.168.0.11:11434/api/chat";
const model = "openhermes:latest";
const data = {
model: model,
messages: [{ role: "user", content: "I have a request: " + transcribedText }],
stream: false,
};
try {
console.log('sending text to LLM at ', LLM_ENDPOINT, ": '", transcribedText, "'");
const response = await fetch(LLM_ENDPOINT, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(data),
});
if (response.ok) {
const responseJson = await response.json();
if (responseJson.error) {
console.error("LLM Error:", responseJson.error);
// Handle error appropriately in your app
} else {
// Handle successful response
console.log('LLM Response:', responseJson.message);
// Update your app state or UI based on LLM response
this.setState(prevState => ({
results: [...prevState.results, responseJson.message.content], // Append the response to the existing results
}));
}
} else {
// Handle HTTP errors
console.error("HTTP Error:", response.status);
}
} catch (error) {
console.error('Request failed:', error);
// Handle request error
}
finally{
this.setState({
isProcessing:false
});
}
}
_sendAudioToBackend(results) {
// Placeholder: Convert `results` or actual audio data to a format acceptable by your backend
const formData = new FormData();
// formData.append('audio', {uri: 'path_to_audio_file', type: 'audio/x-m4a', name: 'audio.m4a'});
//formData.append('audio', {uri: 'path_to_audio_file', type: 'audio/x-m4a', name: 'audio.m4a'});
fetch(process.env.TTS_BACKEND_URL, {
method: 'POST',
body: formData,
@ -82,13 +135,16 @@ class VoiceHandler extends Component {
'Content-Type': 'multipart/form-data',
},
})
.then(response => response.text())
.then(body => {
console.log('Audio sent to backend, response:', body);
})
.catch(error => {
console.error('Failed to send audio:', error);
});
.then(response => response.text())
.then(body => {
console.log('Audio sent to backend, response:', body);
this.setState(prevState => ({
results: [...prevState.results, body], // Append the response to the existing results
}));
})
.catch(error => {
console.error('Failed to send audio:', error);
});
}
render() {
return (
@ -97,6 +153,7 @@ class VoiceHandler extends Component {
<Button
onPress={() => this.state.isRecording ? this._stopRecognizing() : this._startRecognizing()}
title={this.state.isRecording ? "Stop Recognizing" : "Start Recognizing"}
color={this.state.isRecording ? "red" : this.state.isProcessing ? "orange" : "blue"} // Change color based on state
/>
<Text>Recognized: {this.state.recognized}</Text>
<Text>Started: {this.state.started}</Text>

View File

@ -1 +1 @@
7
9