adding chat

This commit is contained in:
Dobromir Popov 2024-06-10 14:35:49 +03:00
parent c98afa201e
commit 407affa979
3 changed files with 587 additions and 6 deletions

19
.vscode/launch.json vendored
View File

@ -27,28 +27,35 @@
} }
}, },
{ {
"name": "node: Launch server.js", "name": "Launch chat-server.js",
"type": "node", "type": "node",
"request": "launch", "request": "launch",
"program": "conda activate node && ${workspaceFolder}/web/server.js", "program": "${workspaceFolder}/web/chat-server.js",
"console": "integratedTerminal", "console": "integratedTerminal",
"internalConsoleOptions": "neverOpen",
"env": {
"CONDA_ENV": "node", //?
"NODE_ENV": "development"
},
"skipFiles": [ "skipFiles": [
"<node_internals>/**" "<node_internals>/**"
] ]
}, },
{ {
"name": "conda task: Launch server.js", "name": "Launch server.js",
"type": "node", "type": "node",
"request": "launch", "request": "launch",
// "program": "conda activate node && ${workspaceFolder}/web/server.js",
"program": "${workspaceFolder}/web/server.js", "program": "${workspaceFolder}/web/server.js",
// "preLaunchTask": "conda-activate",
"console": "integratedTerminal", "console": "integratedTerminal",
"internalConsoleOptions": "neverOpen", "internalConsoleOptions": "neverOpen",
"env": { "env": {
"CONDA_ENV": "node", //? "CONDA_ENV": "node", //?
//set env to dev
"NODE_ENV": "development" "NODE_ENV": "development"
} },
"skipFiles": [
"<node_internals>/**"
]
}, },
{ {
"name": "Python Debugger: Python File", "name": "Python Debugger: Python File",

382
web/chat-client.html Normal file
View File

@ -0,0 +1,382 @@
<!DOCTYPE html>
<html>
<head>
<title>Real-time Voice Chat</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/tailwindcss/2.2.19/tailwind.min.css">
</head>
<body class="bg-gray-100">
<div class="container mx-auto px-4 py-8">
<h1 class="text-2xl font-bold mb-4 text-center">Real-time Voice Chat</h1>
<!-- Username Input -->
<div class="flex justify-center items-center mb-4">
<input type="text" id="username" class="border rounded p-2 mr-4" placeholder="Enter your username">
<button onclick="joinChat()" class="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded">Join Chat</button>
</div>
<!-- Active Users List -->
<div class="flex justify-center items-center mb-4">
<div class="w-1/3">
<h2 class="text-xl font-bold mb-2">Active Users</h2>
<ul id="users-list" class="list-disc list-inside bg-white p-4 rounded shadow">
<!-- Dynamic list of users -->
</ul>
</div>
<div class="w-2/3 ml-4">
<h2 class="text-xl font-bold mb-2">Chat Room</h2>
<div id="chat-room" class="bg-white p-4 rounded shadow">
<!-- Chat room content -->
<div class="mb-4">
<label class="flex items-center space-x-2">
<input type="checkbox" id="autosend" class="mr-2">
<span>Continuous</span>
</label>
</div>
<div class="mb-4">
<button id="record-button" disabled
class="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded mr-4">Push to Talk</button>
</div>
<div id="transcription" class="border rounded p-4 h-48 overflow-y-scroll mb-4">
<!-- Transcription content -->
</div>
<canvas id="canvas" class="w-full mb-4"></canvas>
<div class="flex justify-between items-center">
<button id="copyButton" class="bg-gray-200 hover:bg-gray-300 text-gray-700 font-medium py-2 px-4 rounded focus:outline-none" onclick="copyToClipboard('transcription')">Copy</button>
<button id="clearButton" class="bg-gray-200 hover:bg-gray-300 text-gray-700 font-medium py-2 px-4 rounded focus:outline-none" onclick="clearTranscription()">Clear</button>
</div>
</div>
</div>
</div>
<!-- Connection Status and Info -->
<div class="flex justify-center items-center mb-4">
<div id="connection-status" class="mr-4"></div>
</div>
<div class="flex justify-center items-center mb-4">
<div id="info"></div>
</div>
<div id="status-recording" class="flex justify-center items-center mb-4"></div>
</div>
<script>
// --- Client-wide state shared by all functions below ---------------------
let sessionId; // assigned by the server on WebSocket connect
let selectedDeviceId = "default";
let socket; // WebSocket; undefined until connect() succeeds
let audioRecorder; // MediaRecorder over the mono capture stream
let audioStream; // raw getUserMedia stream (released in stopListening)
let recording = false;
let recordButton;
let connected = false;
let connectionStatus;
let statusRecording;
let audioContext; // set by InitAudioAnalyser(); gates the volume watchdog
let serverTime; // timestamp of last audio send, used for latency display
let users = []; // active-user list pushed by the server
let volumeChecker; // interval id of the voice-activity watchdog
let lastVolumes = new Array(5);
let averageVolume;
let silenceCount = 0;
let isSpeaking = false;
let soundDetected = false; // true once speech was confirmed in continuous mode
let speakingCount = 0;
let SILENCE_DELAY_MS = 50; // watchdog sampling period (ms)
let preDetect_IncludedAudio = 400; //ms
let soundCount_Threshold = 10; // loud ticks needed before "Listening..."
let silenceCount_Threshold = 10; // quiet ticks needed before a chunk flush
const volumeHistory = []; // rolling window feeding the adaptive threshold
// Canvas used for the sliding volume bar graph.
let canvas = document.getElementById("canvas");
let canvasCtx = canvas.getContext("2d");
let barWidth = 10;
let barSpacing = 5;
// Persist the "Continuous" toggle to the server for this session.
// NOTE(review): the server's POST /settings reads language/storeRecordings/task
// and ignores `autosend` — confirm the server is meant to track it.
document.getElementById('autosend').addEventListener('change', (event) => {
const autosend = event.target.checked;
fetch('/settings', {
method: 'POST',
body: JSON.stringify({ autosend, sessionId }),
headers: { 'Content-Type': 'application/json' },
credentials: 'same-origin'
});
});
// Render the recent volume samples as vertical bars across the canvas,
// scaling each 0..255 sample to the canvas height.
function drawSlidingBarGraph(lastVolumes) {
    canvasCtx.clearRect(0, 0, canvas.width, canvas.height);
    lastVolumes.forEach((sample, index) => {
        const barHeight = (sample / 255) * canvas.height;
        const x = index * (barWidth + barSpacing);
        canvasCtx.fillRect(x, canvas.height - barHeight, barWidth, barHeight);
    });
}
// Voice-activity watchdog: every SILENCE_DELAY_MS, sample the analyser,
// maintain an adaptive loudness threshold, and in "Continuous" mode cut the
// MediaRecorder into chunks at speech onset and after sustained silence.
// NOTE(review): `analyser` is an implicit global set by InitAudioAnalyser();
// `autosend` resolves to the checkbox element via its DOM id.
volumeChecker = setInterval(() => {
if (!audioContext) {
console.log("No audio context");
return;
}
const frequencyData = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(frequencyData);
let totalVolume = 0;
for (let i = 0; i < frequencyData.length; i++) {
totalVolume += frequencyData[i];
}
averageVolume = totalVolume / frequencyData.length;
// Adaptive threshold: rolling mean of the last 100 samples plus a margin of 5.
volumeHistory.push(averageVolume);
if (volumeHistory.length > 100) {
volumeHistory.shift();
}
const threshold = volumeHistory.reduce((acc, curr) => acc + curr) / volumeHistory.length + 5;
// NOTE(review): isSilent is computed but never read.
const isSilent = averageVolume < threshold;
if (averageVolume > threshold) {
// First loud tick in continuous mode: restart the recorder so the chunk
// boundary lands near the speech onset (discarding the pre-onset audio).
if (autosend.checked && speakingCount == 0 && audioRecorder) {
soundDetected = false;
audioRecorder.stop();
audioRecorder.start();
}
speakingCount++;
if (speakingCount > soundCount_Threshold) {
statusRecording.innerHTML = "Listening...";
statusRecording.style.color = "green";
isSpeaking = true;
}
} else if (averageVolume - 5 < threshold) {
// Quiet tick: count consecutive silence; after silenceCount_Threshold of
// them, flush the spoken chunk (stop triggers dataavailable -> upload).
// NOTE(review): silenceCount is never reset to 0 after a flush — verify.
speakingCount = 0;
if (isSpeaking) {
silenceCount++;
if (silenceCount > silenceCount_Threshold) {
if (autosend.checked) {
soundDetected = true;
audioRecorder.stop();
audioRecorder.start();
}
isSpeaking = false;
statusRecording.innerHTML = "Silence detected...";
statusRecording.style.color = "orange";
}
}
}
}, SILENCE_DELAY_MS);
// Create an AnalyserNode fed by the given capture stream; the volume watchdog
// polls its frequency data every tick.
// NOTE(review): `analyser` is assigned without declaration and becomes an
// implicit global (read by the setInterval watchdog) — consider declaring it
// alongside the other globals.
function InitAudioAnalyser(stream) {
audioContext = new AudioContext(); // shared global — enables the watchdog
const source = audioContext.createMediaStreamSource(stream);
analyser = audioContext.createAnalyser();
analyser.fftSize = 2048;
analyser.smoothingTimeConstant = 0.8;
source.connect(analyser);
}
// Resolve the WebSocket URL from the server, open the socket, and keep the
// connection alive with a 5s reconnect loop.
// BUGFIX: previously a failed /wsurl fetch only displayed the error and never
// opened a socket or retried, leaving the app dead — now it falls back to the
// default local endpoint and still connects.
function connect() {
    connectionStatus.innerHTML = "Connecting to WS...";
    const DEFAULT_WSURL = "ws://localhost:8081";
    fetch("/wsurl")
        .then((response) => response.text())
        .then((data) => {
            console.log("Got ws url: '" + data + "'");
            openSocket(data || DEFAULT_WSURL);
        })
        .catch((error) => {
            connectionStatus.innerHTML = "Error getting ws url: " + error;
            openSocket(DEFAULT_WSURL);
        });
    // Open the socket and wire status + auto-reconnect handlers.
    function openSocket(wsurl) {
        socket = new WebSocket(wsurl);
        socket.onopen = () => {
            connectionStatus.innerHTML = "Connected to " + wsurl;
            recordButton.disabled = false;
            connected = true;
        };
        socket.onmessage = onmessage;
        socket.onclose = () => {
            connectionStatus.innerHTML = "Disconnected";
            recordButton.disabled = true;
            connected = false;
            setTimeout(() => {
                connect();
            }, 5000);
        };
    }
}
// Handle one WebSocket message. JSON control frames carry the session id,
// detected language, transcription text, or the user list; any frame that
// fails to parse falls through to the legacy "(queue) text" plain format.
function onmessage(event) {
    try {
        let json = JSON.parse(event.data);
        if (json.hasOwnProperty("sessionId")) {
            sessionId = json.sessionId;
            console.log("Got session id: " + sessionId);
        }
        if (json.hasOwnProperty("languageDetected")) {
            statusRecording.innerHTML = "Detected language: " + json.languageDetected;
        }
        if (json.hasOwnProperty("text")) {
            transcription.innerHTML += "\r\n" + json.text;
        }
        if (json.hasOwnProperty("users")) {
            users = json.users;
            updateUserList();
        }
        return;
    } catch (e) {
        // Not JSON — fall through to the plain-text protocol below.
    }
    let latency = Date.now() - serverTime;
    if (autosend.checked) {
        // Expected shape: "(<queue>) <text>" — guard against malformed frames
        // instead of throwing on arr[2].trim().
        const arr = event.data.split(/[(\)]/);
        if (arr.length < 3) return;
        let queue = arr[1];
        let text = (arr[2] || "").trim();
        info.innerHTML = "latency: " + latency + "ms; server queue: " + queue + " requests";
        // BUGFIX: #transcription is a <div>; writing .value was silently lost.
        transcription.innerHTML += text + " ";
        statusRecording.innerHTML = "Listening...";
        statusRecording.style.color = "black";
    } else {
        transcription.innerHTML = event.data;
    }
}
// Start capturing microphone audio: builds a mono recording graph
// (splitter -> merger -> MediaStreamDestination), wires the MediaRecorder
// chunk handler, and flips the button into its "Stop Talking" state.
function startListening() {
canvasCtx.fillStyle = "green";
recording = true;
navigator.mediaDevices.getUserMedia({ audio: { sampleRate: 16000 } })
.then((stream) => {
audioStream = stream;
// NOTE(review): this local const shadows the global audioContext; the
// analyser runs on a separate context created in InitAudioAnalyser() —
// confirm the two-context setup is intended.
const audioContext = new AudioContext();
const sourceNode = audioContext.createMediaStreamSource(audioStream);
const audioSampleRate = sourceNode.context.sampleRate;
info.innerHTML = "Sample rate: " + audioSampleRate + " Hz";
var preBuffer = [];
// Downmix to a single channel: take channel 0 only.
const channelSplitter = audioContext.createChannelSplitter(2);
const channelMerger = audioContext.createChannelMerger(1);
sourceNode.connect(channelSplitter);
channelSplitter.connect(channelMerger, 0, 0);
const outputNode = channelMerger;
const mediaStreamDestination = audioContext.createMediaStreamDestination();
outputNode.connect(mediaStreamDestination);
const singleChannelStream = mediaStreamDestination.stream;
audioRecorder = new MediaRecorder(singleChannelStream);
audioRecorder.start();
audioRecorder.addEventListener("dataavailable", (event) => {
// Continuous mode before speech is confirmed: keep only the latest
// chunk as pre-roll; do not upload yet.
if (!soundDetected && autosend.checked) {
preBuffer = [];
preBuffer.push(event.data);
return;
}
if (event.data.size > 0) {
let data = event.data;
// Flush buffered pre-roll over HTTP before streaming the live chunk.
if (preBuffer.length > 0) {
sendAudioToServerPost(preBuffer);
}
sendAudioToServer(data);
soundDetected = false;
}
});
// Feed this stream into the analyser polled by the volume watchdog.
InitAudioAnalyser(stream);
});
recordButton.innerHTML = "Stop Talking";
recordButton.classList.toggle('bg-red-500');
recordButton.classList.toggle('bg-blue-500');
recordButton.classList.toggle('hover:bg-blue-700');
}
// Stop capture: halt the recorder, restore the button, cancel the volume
// watchdog, and release the microphone tracks.
function stopListening() {
    recording = false;
    // BUGFIX: MediaRecorder.stop() throws InvalidStateError when the recorder
    // is already inactive (e.g. the watchdog just cycled it) — guard first.
    if (audioRecorder && audioRecorder.state !== "inactive") {
        audioRecorder.stop();
    }
    recordButton.innerHTML = "Push to Talk";
    recordButton.classList.toggle('bg-blue-500');
    recordButton.classList.toggle('bg-red-500');
    recordButton.classList.toggle('hover:bg-blue-700');
    // NOTE(review): this permanently cancels the watchdog; startListening()
    // never re-arms it — confirm intended.
    clearInterval(volumeChecker);
    if (audioStream) {
        audioStream.getTracks().forEach(track => track.stop());
        audioStream = null;
    }
}
// Upload buffered pre-roll audio chunks over HTTP to /upload.
// BUGFIX: the Blob was constructed but the raw chunk array was appended to
// the FormData instead, so the server never received a playable file; the
// Blob (with a filename) is now sent, and upload failures are logged rather
// than silently dropped.
function sendAudioToServerPost(data) {
    const blob = new Blob(data, { type: "audio/ogg; codecs=opus" });
    const formData = new FormData();
    formData.append('file', blob, 'audio_slice.ogg');
    fetch('/upload', {
        method: 'POST',
        body: formData
    }).catch((err) => console.error('Pre-roll upload failed:', err));
}
// Stream one recorded audio chunk over the WebSocket and stamp the send time
// for the latency readout; in push-to-talk mode show a progress message.
function sendAudioToServer(data) {
    if (!connected) {
        return;
    }
    socket.send(data);
    serverTime = Date.now();
    if (!autosend.checked) {
        transcription.innerHTML = "Processing audio...";
    }
}
// Push-to-talk button handler: flip between listening and stopped.
// BUGFIX: `socket` is undefined until connect() succeeds, so reading
// socket.readyState threw a TypeError on an early click — guard first.
function toggleListening() {
    if (!socket || socket.readyState !== WebSocket.OPEN) {
        return;
    }
    if (recording) {
        stopListening();
    } else {
        startListening();
    }
}
// Join the chat room under the entered username.
// BUGFIX: sending while the socket was undefined or not yet open threw —
// validate the connection and tell the user instead.
function joinChat() {
    const username = document.getElementById('username').value;
    if (username.trim() === "") {
        alert("Please enter a username");
        return;
    }
    if (!socket || socket.readyState !== WebSocket.OPEN) {
        alert("Not connected yet — please wait for the connection");
        return;
    }
    socket.send(JSON.stringify({ type: 'join', username }));
}
// Rebuild the "Active Users" list from the server-pushed `users` array.
function updateUserList() {
    const listElement = document.getElementById('users-list');
    listElement.innerHTML = '';
    for (const user of users) {
        const item = document.createElement('li');
        item.innerText = user.username;
        listElement.appendChild(item);
    }
}
// Resolve DOM references, wire the push-to-talk button, then connect.
window.onload = () => {
    recordButton = document.getElementById("record-button");
    connectionStatus = document.getElementById("connection-status");
    statusRecording = document.getElementById("status-recording");
    recordButton.addEventListener("click", toggleListening);
    connect(); // connect() takes no parameters; the old `socket` arg was ignored
};
// Copy the text content of the element with the given id to the clipboard.
// BUGFIX: #transcription is a <div>, which has no .select() method, so the
// old code threw a TypeError — select the node's contents via a Range instead,
// keeping the execCommand('copy') path so no clipboard permission is needed.
function copyToClipboard(id) {
    const el = document.getElementById(id);
    const range = document.createRange();
    range.selectNodeContents(el);
    const selection = window.getSelection();
    selection.removeAllRanges();
    selection.addRange(range);
    document.execCommand('copy');
    selection.removeAllRanges();
}
// Wipe the transcription panel.
function clearTranscription() {
    const panel = document.getElementById('transcription');
    panel.innerText = '';
}
</script>
<script src="https://cdn.webrtc-experiment.com/MediaStreamRecorder.js"></script>
</body>
</html>

192
web/chat-server.js Normal file
View File

@ -0,0 +1,192 @@
// server.js
if (require('dotenv')) {
const envFile = process.env.NODE_ENV === 'development' ? '.env.development' : '.env';
require('dotenv').config({ path: envFile });
}
const express = require('express');
const bodyParser = require('body-parser');
const WebSocket = require('ws');
const storage = require('node-persist');
const request = require('request');
const fs = require('fs');
const path = require('path');
const app = express();
app.use(bodyParser.json());
const PORT_HTTP = process.env.SERVER_PORT_HTTP || 3000;
const PORT_WS = process.env.SERVER_PORT_WS || 8080;
const TTS_API_URL = process.env.TTS_API_URL;
let language = "en";
let storeRecordings = false;
let queueCounter = 0;
const sessions = new Map();
const users = new Map(); // Store users with their usernames and session IDs
storage.init().then(() => {
storage.getItem('language').then((value) => {
if (value !== undefined) language = value;
else storage.setItem('language', language);
});
storage.getItem('storeRecordings').then((value) => {
if (value !== undefined) storeRecordings = value;
else storage.setItem('storeRecordings', storeRecordings);
});
});
// WebSocket Server: one session per connection; JSON control frames for
// join/audio messages, raw binary frames for streamed audio chunks.
const wss = new WebSocket.Server({ port: PORT_WS });
wss.on('connection', (ws) => {
    // Random session id; the client echoes it back in /settings calls.
    ws.sessionId = Math.random().toString(36).substring(2);
    sessions.set(ws.sessionId, { language: 'en' });
    ws.send(JSON.stringify({ sessionId: ws.sessionId, language, storeRecordings }));
    ws.on('message', (message) => {
        try {
            const data = JSON.parse(message);
            if (data.type === 'join') {
                const { username } = data;
                users.set(ws.sessionId, { username, sessionId: ws.sessionId });
                broadcastUserList();
            } else if (data.type === 'audio') {
                handleAudioData(ws, data.audio);
            }
        } catch (err) {
            // BUGFIX: the browser client streams recorded audio chunks as raw
            // (non-JSON) frames via socket.send(blob); previously these were
            // logged as parse failures and dropped. Route them to the
            // transcription pipeline instead.
            handleAudioData(ws, message);
        }
    });
    ws.on('close', () => {
        users.delete(ws.sessionId);
        sessions.delete(ws.sessionId);
        broadcastUserList();
    });
});
// Build the multipart payload for the speech-to-text API from one audio chunk
// and route it: language detection when the session language is auto/empty,
// otherwise straight to transcription.
function handleAudioData(ws, data) {
    // Guard: the session may already be gone (race with the 'close' handler).
    const sessionData = sessions.get(ws.sessionId);
    if (!sessionData) {
        console.warn('Audio received for unknown session', ws.sessionId);
        return;
    }
    let language = sessionData.language || 'en';
    let task = sessionData.task || 'transcribe';
    const formData = {
        task,
        language,
        vad_filter: 'true',
        output: 'json',
        audio_file: {
            value: data,
            options: { filename: 'audio.ogg', contentType: 'audio/ogg' }
        }
    };
    if (language === 'auto' || language === '') {
        detectLanguage(ws, formData);
    } else {
        transcribeAudio(ws, formData, sessionData);
    }
}
// Ask the API to detect the spoken language, cache it on the session, notify
// the client, then transcribe using the detected language.
function detectLanguage(ws, formData) {
    request.post({ url: TTS_API_URL.replace('/asr', '/detect-language'), formData }, (err, httpResponse, body) => {
        if (err) return console.error('Language detection failed:', err);
        // BUGFIX: JSON.parse threw (crashing the process) when the API
        // returned a non-JSON error body — parse defensively.
        let result;
        try {
            result = JSON.parse(body);
        } catch (parseErr) {
            return console.error('Language detection returned non-JSON:', body);
        }
        if (result && result.language_code) {
            const language = result.language_code;
            const sessionData = sessions.get(ws.sessionId);
            if (!sessionData) return; // connection closed meanwhile
            sessionData.language = language;
            ws.send(JSON.stringify({ languageDetected: result.detected_language }));
            transcribeAudio(ws, formData, sessionData);
        }
    });
}
// Send one audio chunk to the transcription API and forward the result to the
// client with queue depth and round-trip duration; optionally archive the raw
// audio under rec/.
function transcribeAudio(ws, formData, sessionData) {
    const start = new Date().getTime();
    queueCounter++;
    request.post({ url: TTS_API_URL, formData }, (err, httpResponse, body) => {
        queueCounter--;
        if (err) return console.error('Transcription failed:', err);
        const duration = new Date().getTime() - start;
        ws.send(JSON.stringify({
            queueCounter,
            duration,
            language: sessionData.language,
            text: body
        }));
    });
    if (storeRecordings) {
        const timestamp = Date.now();
        // BUGFIX: the old code kicked off fs.mkdir and wrote the file before
        // the directory was guaranteed to exist, and `throw err` inside the
        // async callback would crash the whole process. Write inside the
        // mkdir callback and log failures instead.
        fs.mkdir('rec', { recursive: true }, (err) => {
            if (err) return console.error('Could not create rec/:', err);
            fs.writeFile(`rec/audio${timestamp}.ogg`, formData.audio_file.value, (err) => {
                if (err) console.log(err);
                else console.log('Audio data saved to rec/audio' + timestamp + '.ogg');
            });
        });
    }
}
// Push the current user roster to every open client socket.
function broadcastUserList() {
    const userList = [];
    for (const { username, sessionId } of users.values()) {
        userList.push({ username, sessionId });
    }
    const payload = JSON.stringify({ type: 'userList', users: userList });
    for (const client of wss.clients) {
        if (client.readyState === WebSocket.OPEN) {
            client.send(payload);
        }
    }
}
// HTTP Server — serves the client page plus log/config endpoints.
app.get('/', (req, res) => {
    res.sendFile(path.join(__dirname, 'chat-client.html'));
});
// Relay for client-side log messages.
app.post('/log', (req, res) => {
    console.log(`[LOG ${new Date().toISOString()}] ${req.body.message}`);
    res.status(200).send('OK');
});
// The client asks here for the WebSocket endpoint before connecting.
app.get('/wsurl', (req, res) => {
    res.status(200).send(process.env.WS_URL);
});
// Read/update the persisted global defaults via query parameters.
app.get('/settings', (req, res) => {
    if (req.query.language) {
        language = req.query.language;
        storage.setItem('language', language);
    }
    if (req.query.storeRecordings) {
        storeRecordings = req.query.storeRecordings;
        storage.setItem('storeRecordings', storeRecordings);
    }
    res.status(200).send({ language, storeRecordings });
});
// Per-session overrides posted by the client.
// BUGFIX: an unknown or expired sessionId made sessions.get() return
// undefined, and the subsequent property writes threw — answer 404 instead.
app.post('/settings', (req, res) => {
    const { sessionId, language, storeRecordings, task } = req.body;
    const sessionData = sessions.get(sessionId);
    if (!sessionData) {
        return res.status(404).send('Unknown session');
    }
    if (language) sessionData.language = language;
    if (storeRecordings) sessionData.storeRecordings = storeRecordings;
    if (task) sessionData.task = task;
    res.status(200).send('OK');
});
// Receive a pre-roll audio slice from the browser and archive it under rec/.
// NOTE(review): the client posts multipart FormData, but the raw request body
// (including multipart boundaries/headers) is piped to disk unparsed — the
// saved file is not a clean .ogg. Verify against the client; a proper fix
// needs a multipart parser (e.g. multer).
app.post('/upload', (req, res) => {
const timestamp = Date.now();
fs.mkdir('rec', { recursive: true }, (err) => {
if (err) return res.status(500).send('ERROR');
const file = fs.createWriteStream(`rec/audio_slice_${timestamp}.ogg`);
req.pipe(file);
file.on('finish', () => res.status(200).send('OK'));
});
});
// Start the HTTP listener (the WebSocket server listens separately on PORT_WS).
app.listen(PORT_HTTP, () => {
console.log(`Server listening on port ${PORT_HTTP}`);
});