<!DOCTYPE html>
|
|
<html lang="en">
|
|
|
|
<head>
|
|
<title>Real-time Voice Chat</title>
|
|
<meta name="viewport" content="width=device-width, initial-scale=1">
|
|
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/tailwindcss/2.2.19/tailwind.min.css">
|
|
</head>
|
|
|
|
<body class="bg-gray-100">
|
|
<div class="container mx-auto px-4 py-8">
|
|
<h1 class="text-2xl font-bold mb-4 text-center">Real-time Voice Chat</h1>
|
|
|
|
<!-- Username Input -->
|
|
<div class="flex justify-center items-center mb-4">
|
|
<input type="text" id="username" class="border rounded p-2 mr-4" placeholder="Enter your username">
|
|
<button onclick="joinChat()" class="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded">Join Chat</button>
|
|
</div>
|
|
|
|
<!-- Active Users List -->
|
|
<div class="flex justify-center items-center mb-4">
|
|
<div class="w-1/3">
|
|
<h2 class="text-xl font-bold mb-2">Active Users</h2>
|
|
<ul id="users-list" class="list-disc list-inside bg-white p-4 rounded shadow">
|
|
<!-- Dynamic list of users -->
|
|
</ul>
|
|
</div>
|
|
<div class="w-2/3 ml-4">
|
|
<h2 class="text-xl font-bold mb-2">Chat Room</h2>
|
|
<div id="chat-room" class="bg-white p-4 rounded shadow">
|
|
<!-- Chat room content -->
|
|
<div class="mb-4">
|
|
<label class="flex items-center space-x-2">
|
|
<input type="checkbox" id="autosend" class="mr-2">
|
|
<span>Continuous</span>
|
|
</label>
|
|
</div>
|
|
<div class="mb-4">
|
|
<button id="record-button" disabled
|
|
class="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded mr-4">Push to Talk</button>
|
|
</div>
|
|
<div id="transcription" class="border rounded p-4 h-48 overflow-y-scroll mb-4">
|
|
<!-- Transcription content -->
|
|
</div>
|
|
<canvas id="canvas" class="w-full mb-4"></canvas>
|
|
<div class="flex justify-between items-center">
|
|
<button id="copyButton" class="bg-gray-200 hover:bg-gray-300 text-gray-700 font-medium py-2 px-4 rounded focus:outline-none" onclick="copyToClipboard('transcription')">Copy</button>
|
|
<button id="clearButton" class="bg-gray-200 hover:bg-gray-300 text-gray-700 font-medium py-2 px-4 rounded focus:outline-none" onclick="clearTranscription()">Clear</button>
|
|
</div>
|
|
</div>
|
|
</div>
|
|
</div>
|
|
|
|
<!-- Connection Status and Info -->
|
|
<div class="flex justify-center items-center mb-4">
|
|
<div id="connection-status" class="mr-4"></div>
|
|
</div>
|
|
<div class="flex justify-center items-center mb-4">
|
|
<div id="info"></div>
|
|
</div>
|
|
<div id="status-recording" class="flex justify-center items-center mb-4"></div>
|
|
</div>
|
|
|
|
<script>
|
|
// --- Session / connection state ---
let sessionId;
let selectedDeviceId = "default";
let socket;
let audioRecorder;
let audioStream;
let recording = false;
let recordButton;
let connected = false;
let connectionStatus;
let statusRecording;
let audioContext;
let analyser;   // FIX: was assigned in InitAudioAnalyser() without any declaration (implicit global)
let serverTime;
let users = [];

// --- Voice-activity-detection state ---
let volumeChecker;
let lastVolumes = new Array(5);
let averageVolume;
let silenceCount = 0;
let isSpeaking = false;
let soundDetected = false;
let speakingCount = 0;

// --- Tunables (never reassigned, so const) ---
const SILENCE_DELAY_MS = 50;          // interval between volume checks
const preDetect_IncludedAudio = 400;  // ms of pre-speech audio to keep
const soundCount_Threshold = 10;      // consecutive loud samples before "speaking"
const silenceCount_Threshold = 10;    // consecutive quiet samples before "silence"

const volumeHistory = [];

// --- Canvas used for the volume bar graph ---
const canvas = document.getElementById("canvas");
const canvasCtx = canvas.getContext("2d");
const barWidth = 10;
const barSpacing = 5;
|
|
|
|
// Persist the "Continuous" (auto-send) preference to the server whenever it changes.
document.getElementById('autosend').addEventListener('change', (event) => {
    const autosend = event.target.checked;
    fetch('/settings', {
        method: 'POST',
        body: JSON.stringify({ autosend, sessionId }),
        headers: { 'Content-Type': 'application/json' },
        credentials: 'same-origin'
    }).catch((error) => {
        // FIX: the promise was previously floating, so a network failure
        // produced an unhandled rejection. Log it instead.
        console.error('Failed to save settings:', error);
    });
});
|
|
|
|
// Render the recent volume samples as a simple bar graph on the shared canvas.
// Each sample (0-255) becomes one bar scaled to the canvas height.
function drawSlidingBarGraph(lastVolumes) {
    canvasCtx.clearRect(0, 0, canvas.width, canvas.height);
    for (const [index, volume] of lastVolumes.entries()) {
        const barHeight = (volume / 255) * canvas.height;
        const x = index * (barWidth + barSpacing);
        canvasCtx.fillRect(x, canvas.height - barHeight, barWidth, barHeight);
    }
}
|
|
|
|
// Periodic voice-activity detector (fires every SILENCE_DELAY_MS).
// Samples the analyser's frequency spectrum, keeps an adaptive loudness
// threshold from recent history, and — in "Continuous" mode — restarts the
// MediaRecorder at speech onset and at detected silence so each utterance is
// flushed to the server as its own chunk.
// NOTE(review): relies on the global `analyser` assigned by InitAudioAnalyser();
// the `!audioContext` guard keeps this from running before that happens.
volumeChecker = setInterval(() => {
    if (!audioContext) {
        console.log("No audio context");
        return;
    }
    // Current spectrum snapshot (one byte per frequency bin, 0-255).
    const frequencyData = new Uint8Array(analyser.frequencyBinCount);
    analyser.getByteFrequencyData(frequencyData);

    let totalVolume = 0;
    for (let i = 0; i < frequencyData.length; i++) {
        totalVolume += frequencyData[i];
    }
    averageVolume = totalVolume / frequencyData.length;

    // Rolling window of the last 100 samples, used for the adaptive threshold.
    volumeHistory.push(averageVolume);
    if (volumeHistory.length > 100) {
        volumeHistory.shift();
    }

    // Threshold = recent mean volume + 5: "loud" means noticeably above ambient.
    const threshold = volumeHistory.reduce((acc, curr) => acc + curr) / volumeHistory.length + 5;
    const isSilent = averageVolume < threshold; // NOTE(review): computed but never used
    if (averageVolume > threshold) {
        // Speech onset: restart the recorder so the chunk boundary sits just
        // before the detected speech. With soundDetected = false the
        // dataavailable handler keeps the flushed chunk as pre-roll instead
        // of sending it (see startListening).
        if (autosend.checked && speakingCount == 0 && audioRecorder) {
            soundDetected = false;
            audioRecorder.stop();
            audioRecorder.start();
        }
        speakingCount++;
        if (speakingCount > soundCount_Threshold) {
            statusRecording.innerHTML = "Listening...";
            statusRecording.style.color = "green";
            isSpeaking = true;
        }
    } else if (averageVolume - 5 < threshold) {
        // Quiet sample (condition is effectively "not loud" given the branch above).
        speakingCount = 0;
        if (isSpeaking) {
            silenceCount++;
            // NOTE(review): silenceCount is never reset to 0, so after the first
            // trigger this fires on the very next quiet sample — TODO confirm intended.
            if (silenceCount > silenceCount_Threshold) {
                // Sustained silence after speech: flush the utterance.
                // soundDetected = true makes the dataavailable handler send it.
                if (autosend.checked) {
                    soundDetected = true;
                    audioRecorder.stop();
                    audioRecorder.start();
                }
                isSpeaking = false;
                statusRecording.innerHTML = "Silence detected...";
                statusRecording.style.color = "orange";
            }
        }
    }
}, SILENCE_DELAY_MS);
|
|
|
|
// Create an AnalyserNode fed by the given microphone stream and publish it
// through the shared `audioContext` / `analyser` globals that the periodic
// volume checker reads from.
function InitAudioAnalyser(stream) {
    audioContext = new AudioContext();
    analyser = audioContext.createAnalyser();
    analyser.fftSize = 2048;
    analyser.smoothingTimeConstant = 0.8;
    audioContext.createMediaStreamSource(stream).connect(analyser);
}
|
|
|
|
// Fetch the websocket URL from the server, open the socket, and wire up the
// handlers. Reconnects automatically 5 s after a close or a failed URL fetch.
function connect() {
    connectionStatus.innerHTML = "Connecting to WS...";
    let wsurl = "ws://localhost:8081"; // default, overwritten by /wsurl
    fetch("/wsurl")
        .then((response) => {
            // FIX: previously an HTTP error page's body would silently be
            // used as the websocket URL; surface it as an error instead.
            if (!response.ok) {
                throw new Error("HTTP " + response.status);
            }
            return response.text();
        })
        .then((data) => {
            wsurl = data;
            console.log("Got ws url: '" + wsurl + "'");
            socket = new WebSocket(wsurl);
            socket.onopen = () => {
                connectionStatus.innerHTML = "Connected to " + wsurl;
                recordButton.disabled = false;
                connected = true;
            };
            socket.onmessage = onmessage;
            socket.onclose = () => {
                connectionStatus.innerHTML = "Disconnected";
                recordButton.disabled = true;
                connected = false;
                setTimeout(connect, 5000); // auto-reconnect
            };
        })
        .catch((error) => {
            connectionStatus.innerHTML = "Error getting ws url: " + error;
            // FIX: a failed /wsurl fetch used to end the reconnect loop for
            // good; retry like the onclose path does.
            setTimeout(connect, 5000);
        });
}
|
|
|
|
// Handle an incoming websocket message. JSON messages carry session info,
// detected language, transcription text, or the active-user list; anything
// that fails to parse is treated as a plain-text transcription payload.
function onmessage(event) {
    try {
        const json = JSON.parse(event.data);
        if (json.hasOwnProperty("sessionId")) {
            sessionId = json.sessionId;
            console.log("Got session id: " + sessionId);
        }
        if (json.hasOwnProperty("languageDetected")) {
            statusRecording.innerHTML = "Detected language: " + json.languageDetected;
        }
        if (json.hasOwnProperty("text")) {
            transcription.innerHTML += "\r\n" + json.text;
        }
        if (json.hasOwnProperty("users")) {
            users = json.users;
            updateUserList();
        }
        return;
    } catch (e) {
        // Not JSON — fall through and treat the payload as plain text.
    }

    const latency = Date.now() - serverTime;
    if (autosend.checked) {
        // Expected payload shape: "...(queue) text"
        const arr = event.data.split(/[(\)]/);
        const queue = arr[1];
        const text = (arr[2] || "").trim(); // FIX: guard against malformed payloads
        info.innerHTML = "latency: " + latency + "ms; server queue: " + queue + " requests";
        // FIX: #transcription is a <div>, so assigning `.value` had no
        // visible effect; append to innerHTML like the JSON path does.
        transcription.innerHTML += text + " ";
        statusRecording.innerHTML = "Listening...";
        statusRecording.style.color = "black";
    } else {
        transcription.innerHTML = event.data;
    }
}
|
|
|
|
// Ask for microphone access, downmix the input to a single channel, and start
// a MediaRecorder whose chunks are shipped to the server. Also boots the
// analyser used by the periodic volume checker.
function startListening() {
    canvasCtx.fillStyle = "green";
    recording = true;
    navigator.mediaDevices.getUserMedia({ audio: { sampleRate: 16000 } })
        .then((stream) => {
            audioStream = stream;

            // Local context used only for the mono downmix below; shadows the
            // global of the same name on purpose (InitAudioAnalyser creates
            // its own context for analysis).
            const audioContext = new AudioContext();
            const sourceNode = audioContext.createMediaStreamSource(audioStream);
            const audioSampleRate = sourceNode.context.sampleRate;

            info.innerHTML = "Sample rate: " + audioSampleRate + " Hz";
            // Holds the most recent pre-speech chunk so the start of an
            // utterance is not lost (see preDetect_IncludedAudio).
            var preBuffer = [];

            // Downmix: keep only input channel 0, merged into a mono stream.
            const channelSplitter = audioContext.createChannelSplitter(2);
            const channelMerger = audioContext.createChannelMerger(1);
            sourceNode.connect(channelSplitter);
            channelSplitter.connect(channelMerger, 0, 0);
            const outputNode = channelMerger;

            const mediaStreamDestination = audioContext.createMediaStreamDestination();
            outputNode.connect(mediaStreamDestination);
            const singleChannelStream = mediaStreamDestination.stream;

            audioRecorder = new MediaRecorder(singleChannelStream);
            audioRecorder.start();
            audioRecorder.addEventListener("dataavailable", (event) => {
                // Continuous mode, before speech is confirmed: keep only the
                // latest chunk as pre-roll and don't send anything yet.
                if (!soundDetected && autosend.checked) {
                    preBuffer = [];
                    preBuffer.push(event.data);
                    return;
                }
                if (event.data.size > 0) {
                    let data = event.data;
                    // Ship the buffered pre-roll first (HTTP POST) so the
                    // utterance start is included, then the chunk itself
                    // over the websocket.
                    if (preBuffer.length > 0) {
                        sendAudioToServerPost(preBuffer);
                    }
                    sendAudioToServer(data);
                    soundDetected = false;
                }
            });

            InitAudioAnalyser(stream);
        });

    // Flip the button into its "recording" look immediately, before the
    // getUserMedia promise resolves.
    recordButton.innerHTML = "Stop Talking";
    recordButton.classList.toggle('bg-red-500');
    recordButton.classList.toggle('bg-blue-500');
    recordButton.classList.toggle('hover:bg-blue-700');
}
|
|
|
|
// Stop recording: halt the recorder, restore the button styling, and release
// the microphone tracks.
function stopListening() {
    recording = false;
    // FIX: the recorder may not exist yet (getUserMedia still pending or
    // denied) or may already be inactive; calling stop() then threw an
    // InvalidStateError / TypeError.
    if (audioRecorder && audioRecorder.state !== "inactive") {
        audioRecorder.stop();
    }
    recordButton.innerHTML = "Push to Talk";
    recordButton.classList.toggle('bg-blue-500');
    recordButton.classList.toggle('bg-red-500');
    recordButton.classList.toggle('hover:bg-blue-700');
    // NOTE(review): this cancels the interval created once at script load and
    // nothing ever re-creates it, so voice-activity detection stays dead after
    // the first stop — TODO confirm whether startListening should re-arm it.
    clearInterval(volumeChecker);
    if (audioStream) {
        audioStream.getTracks().forEach(track => track.stop());
        audioStream = null;
    }
}
|
|
|
|
// Upload buffered pre-speech audio chunks to the server over HTTP.
// `data` is an array of Blob chunks produced by MediaRecorder.
function sendAudioToServerPost(data) {
    const blob = new Blob(data, { type: "audio/ogg; codecs=opus" });
    const formData = new FormData();
    // FIX: the Blob built above was discarded and the raw array was appended
    // instead, which FormData stringifies to "[object Blob]" — the server
    // received text, not audio.
    formData.append('file', blob);
    fetch('/upload', {
        method: 'POST',
        body: formData
    }).catch((error) => console.error('Audio upload failed:', error));
}
|
|
|
|
// Stream one recorded chunk over the websocket and start the latency timer.
// In push-to-talk mode, show a progress message while the server transcribes.
function sendAudioToServer(data) {
    if (!connected) {
        return;
    }
    socket.send(data);
    serverTime = Date.now();
    if (!autosend.checked) {
        transcription.innerHTML = "Processing audio...";
    }
}
|
|
|
|
// Push-to-talk button handler: flip between recording and idle, but only
// while the websocket is open.
function toggleListening() {
    if (socket.readyState !== WebSocket.OPEN) {
        return;
    }
    if (recording) {
        stopListening();
    } else {
        startListening();
    }
}
|
|
|
|
// Announce this user to the chat room over the websocket.
function joinChat() {
    const username = document.getElementById('username').value;
    if (username.trim() === "") {
        alert("Please enter a username");
        return;
    }
    // FIX: clicking "Join Chat" before the socket existed (or while it was
    // still connecting) previously threw instead of telling the user.
    if (!socket || socket.readyState !== WebSocket.OPEN) {
        alert("Not connected to the server yet — please try again in a moment.");
        return;
    }
    socket.send(JSON.stringify({ type: 'join', username }));
}
|
|
|
|
// Rebuild the "Active Users" <ul> from the shared `users` array.
function updateUserList() {
    const usersList = document.getElementById('users-list');
    usersList.innerHTML = '';
    for (const user of users) {
        const item = document.createElement('li');
        item.innerText = user.username;
        usersList.appendChild(item);
    }
}
|
|
|
|
// Wire up DOM references and kick off the websocket connection on page load.
window.onload = () => {
    recordButton = document.getElementById("record-button");
    recordButton.addEventListener("click", toggleListening);
    connectionStatus = document.getElementById("connection-status");
    statusRecording = document.getElementById("status-recording");

    // FIX: connect() takes no parameters; the old call passed the
    // still-undefined `socket`, which was silently ignored and misleading.
    connect();
};
|
|
|
|
// Copy the text content of the element with the given id to the clipboard.
function copyToClipboard(id) {
    const element = document.getElementById(id);
    // FIX: #transcription is a <div>, and .select() only exists on form
    // controls, so the old textarea-style code threw a TypeError. Select the
    // node's contents via a Range instead, then copy.
    const range = document.createRange();
    range.selectNodeContents(element);
    const selection = window.getSelection();
    selection.removeAllRanges();
    selection.addRange(range);
    document.execCommand('copy');
    selection.removeAllRanges();
}
|
|
|
|
// Wipe all text from the transcription panel.
function clearTranscription() {
    const panel = document.getElementById('transcription');
    panel.innerText = '';
}
|
|
</script>
|
|
<script src="https://cdn.webrtc-experiment.com/MediaStreamRecorder.js"></script>
|
|
</body>
|
|
|
|
</html>
|