Switch listener streaming to MP3-only
README.md (17 lines changed)
@@ -9,7 +9,7 @@ It supports:
 - Local library playback (files in `music/`)
 - Downloading audio from URLs (via `yt-dlp` when available, with fallback)
 - Live streaming from the DJ browser to listeners using Socket.IO
-- **Compatibility fallback**: if a listener browser can’t play the WebM/Opus stream, it can fall back to an **MP3 stream** (`/stream.mp3`) generated server-side with **ffmpeg**.
+- Live listening via an **MP3 stream** (`/stream.mp3`) generated server-side with **ffmpeg**.
 
 ---
@@ -161,22 +161,13 @@ If listeners can’t connect, this is often the cause.
 ---
 
-## Streaming formats & fallback
+## Streaming
 
-### Default stream
-
-- DJ browser encodes live audio using `MediaRecorder` (usually WebM/Opus)
-- Listeners receive chunks over Socket.IO and play them via MediaSource
-
-### MP3 fallback (compatibility)
-
-Some browsers/environments don’t support WebM/Opus + MediaSource well.
-In that case TechDJ can fall back to:
+TechDJ serves the listener audio as an **MP3 HTTP stream**:
 
 - MP3 stream endpoint: `http://<DJ_MACHINE_IP>:5001/stream.mp3`
 
-This requires:
-- `ffmpeg` installed on the DJ/server machine
+This requires `ffmpeg` installed on the DJ/server machine.
 
 ### Debug endpoint
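As background for what the new README section describes, below is a minimal, hypothetical sketch of a server-side MP3 stream: one ffmpeg process transcodes whatever the DJ's MediaRecorder produces into MP3, and an HTTP route fans the MP3 bytes out to listeners at `/stream.mp3` on port 5001. Only the endpoint path, the port, and the ffmpeg requirement come from this commit; the Flask app, the queue-based fan-out, and the function names below are illustrative assumptions, not TechDJ's actual `server.py`.

```python
# Hypothetical sketch, NOT TechDJ's server.py: illustrates "MP3 stream
# generated server-side with ffmpeg" from the README. Assumed names:
# start_transcoder(), _fan_out_mp3(), _listener_queues.
import queue
import subprocess
import threading

from flask import Flask, Response

app = Flask(__name__)

_listener_queues = set()          # one bounded queue of MP3 bytes per listener
_queues_lock = threading.Lock()


def _fan_out_mp3(ffmpeg_stdout):
    """Copy MP3 bytes from ffmpeg's stdout to every connected listener queue."""
    while True:
        chunk = ffmpeg_stdout.read(4096)
        if not chunk:
            break  # ffmpeg exited
        with _queues_lock:
            for q in list(_listener_queues):
                try:
                    q.put_nowait(chunk)
                except queue.Full:
                    pass  # drop data for a stalled listener instead of blocking ffmpeg


def start_transcoder():
    """Spawn ffmpeg: DJ audio in on stdin (container auto-detected), MP3 out on stdout."""
    proc = subprocess.Popen(
        ["ffmpeg", "-hide_banner", "-loglevel", "error",
         "-i", "pipe:0",                 # WebM/Opus or MP4/AAC from MediaRecorder
         "-vn", "-acodec", "libmp3lame", "-b:a", "192k",
         "-f", "mp3", "pipe:1"],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
    )
    threading.Thread(target=_fan_out_mp3, args=(proc.stdout,), daemon=True).start()
    return proc  # the DJ's raw chunks get written to proc.stdin elsewhere


@app.route("/stream.mp3")
def stream_mp3():
    q = queue.Queue(maxsize=256)
    with _queues_lock:
        _listener_queues.add(q)

    def generate():
        try:
            while True:
                yield q.get()           # blocks until the transcoder produces data
        finally:
            with _queues_lock:
                _listener_queues.discard(q)

    return Response(generate(), mimetype="audio/mpeg")


if __name__ == "__main__":
    transcoder = start_transcoder()     # chunks from the DJ would feed transcoder.stdin
    app.run(host="0.0.0.0", port=5001)
```

In this sketch, feeding the DJ's raw chunks into `proc.stdin` is left out on purpose; that side is touched by the `server.py` hunk further down.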
script.js (241 lines changed)
@@ -1848,11 +1848,12 @@ function startBroadcast() {
   console.log(`🎚️ Starting broadcast at ${qualitySelect.value}kbps`);
 
   const preferredTypes = [
-    'audio/webm;codecs=opus',
-    'audio/webm',
-    'audio/ogg;codecs=opus',
+    // Prefer MP4/AAC when available (broad device support)
+    'audio/mp4;codecs=mp4a.40.2',
+    'audio/mp4',
     // Fallbacks
+    'audio/webm',
     'audio/ogg',
   ];
   const chosenType = preferredTypes.find((t) => {
     try {
@@ -1993,9 +1994,9 @@ function startBroadcast() {
   document.getElementById('broadcast-status').textContent = '🔴 LIVE';
   document.getElementById('broadcast-status').classList.add('live');
 
-  // Notify server (include codec/container so listeners can configure SourceBuffer)
+  // Notify server that broadcast is active (listeners use MP3 stream)
   if (!socket) initSocket();
-  socket.emit('start_broadcast', { mimeType: currentStreamMimeType });
+  socket.emit('start_broadcast');
   socket.emit('get_listener_count');
 
   console.log('✅ Broadcasting started successfully!');
@@ -2153,7 +2154,7 @@ function toggleAutoStream(enabled) {
 // ========== LISTENER MODE ==========
 
 function initListenerMode() {
-  console.log('🎧 Initializing listener mode (MediaSource Pipeline)...');
+  console.log('🎧 Initializing listener mode (MP3 stream)...');
 
   // UI Feedback for listener
   const appContainer = document.querySelector('.app-container');
@@ -2224,8 +2225,8 @@ function initListenerMode() {
   }
 
   // Create a new hidden media element.
-  // Note: MSE (MediaSource) support is often more reliable on <video> than <audio>.
-  audio = document.createElement('video');
+  // For MP3 we can use a plain <audio> element.
+  audio = document.createElement('audio');
   audio.autoplay = false; // Don't autoplay - we use the Enable Audio button
   audio.muted = false;
   audio.controls = false;
@@ -2233,73 +2234,12 @@ function initListenerMode() {
   audio.setAttribute('playsinline', '');
   audio.style.display = 'none';
   document.body.appendChild(audio);
-  console.log('🆕 Created fresh media element (video) for listener');
+  console.log('🆕 Created fresh media element (audio) for listener');
 
-  // Initialize MediaSource for streaming binary chunks
-  const mediaSource = new MediaSource();
-  audio.src = URL.createObjectURL(mediaSource);
-
-  // CRITICAL: Call load() to initialize the MediaSource
-  // Without this, the audio element won't load the MediaSource until play() is called,
-  // which will fail with "no supported sources" if no data is buffered yet
+  // MP3 stream (server-side) — requires ffmpeg on the server.
+  audio.src = getMp3FallbackUrl();
   audio.load();
-  console.log('🎬 Audio element loading MediaSource...');
-
-  let sourceBuffer = null;
-  let audioQueue = [];
-  let chunksReceived = 0;
-  let lastStatusUpdate = 0;
-
-  mediaSource.addEventListener('sourceopen', () => {
-    console.log('📦 MediaSource opened');
-    const mimeType = window.currentStreamMimeType || currentStreamMimeType || 'audio/webm;codecs=opus';
-
-    if (!MediaSource.isTypeSupported(mimeType)) {
-      console.error(`❌ Browser does not support ${mimeType}`);
-      const statusEl = document.getElementById('connection-status');
-      if (statusEl) statusEl.textContent = '⚠️ WebM/Opus not supported - using MP3 fallback stream';
-
-      // Fallback to MP3 stream served by the backend (requires ffmpeg on server host)
-      const fallbackUrl = getMp3FallbackUrl();
-      console.log(`🎧 Switching to MP3 fallback: ${fallbackUrl}`);
-      audio.src = fallbackUrl;
-      audio.load();
-      return;
-    }
-
-    try {
-      sourceBuffer = mediaSource.addSourceBuffer(mimeType);
-      sourceBuffer.mode = 'sequence';
-
-      // Kick off first append if data is already in queue
-      if (audioQueue.length > 0 && !sourceBuffer.updating && mediaSource.readyState === 'open') {
-        sourceBuffer.appendBuffer(audioQueue.shift());
-      }
-
-      sourceBuffer.addEventListener('updateend', () => {
-        // Process next chunk in queue
-        if (audioQueue.length > 0 && !sourceBuffer.updating) {
-          sourceBuffer.appendBuffer(audioQueue.shift());
-        }
-
-        // Periodic cleanup of old buffer data to prevent memory bloat
-        // Keep the last 60 seconds of audio data
-        if (audio.buffered.length > 0 && !sourceBuffer.updating && mediaSource.readyState === 'open') {
-          const end = audio.buffered.end(audio.buffered.length - 1);
-          const start = audio.buffered.start(0);
-          if (end - start > 120) { // If buffer is > 2 mins
-            try {
-              sourceBuffer.remove(0, end - 60);
-            } catch (e) {
-              console.warn('Buffer cleanup skipped:', e.message);
-            }
-          }
-        }
-      });
-    } catch (e) {
-      console.error('❌ Failed to add SourceBuffer:', e);
-    }
-  });
+  console.log(`🎧 Listener source set to MP3 stream: ${audio.src}`);
 
   // Show enable audio button instead of attempting autoplay
   const enableAudioBtn = document.getElementById('enable-audio-btn');
@@ -2309,75 +2249,19 @@ function initListenerMode() {
     enableAudioBtn.style.display = 'flex';
   }
   if (statusEl) {
-    statusEl.textContent = '🔵 Click "Enable Audio" to start listening';
+    statusEl.textContent = '🔵 Click "Enable Audio" to start listening (MP3)';
   }
 
   // Store audio element and context for later activation
   window.listenerAudio = audio;
-  window.listenerMediaSource = mediaSource;
+  window.listenerMediaSource = null;
   window.listenerAudioEnabled = false; // Track if user has enabled audio
 
   // Initialize socket and join
   initSocket();
   socket.emit('join_listener');
 
-  socket.on('stream_mime', (data) => {
-    const mt = data && data.mimeType ? String(data.mimeType) : null;
-    if (mt && mt !== window.currentStreamMimeType) {
-      console.log(`📡 Stream mimeType announced: ${mt}`);
-      window.currentStreamMimeType = mt;
-    }
-  });
-
-  let hasHeader = false;
-
-  socket.on('audio_data', (data) => {
-    // We MUST have the header before we can do anything with broadcast chunks
-    const isHeaderDirect = data instanceof ArrayBuffer && data.byteLength > 1000; // Heuristic
-
-    hasHeader = true; // No header request needed for WebM relay
-
-    chunksReceived++;
-    listenerChunksReceived = chunksReceived;
-    audioQueue.push(data);
-
-    // JITTER BUFFER: Reduced to 1 segments (buffered) for WebM/Opus
-    const isHeader = false;
-
-    if (sourceBuffer && !sourceBuffer.updating && mediaSource.readyState === 'open') {
-      if (audioQueue.length >= 1) {
-        try {
-          const next = audioQueue.shift();
-          sourceBuffer.appendBuffer(next);
-
-          // Reset error counter on success
-          if (window.sourceBufferErrorCount) window.sourceBufferErrorCount = 0;
-        } catch (e) {
-          console.error('Buffer append error:', e);
-          window.sourceBufferErrorCount = (window.sourceBufferErrorCount || 0) + 1;
-
-          if (window.sourceBufferErrorCount >= 5) {
-            console.error('❌ Too many SourceBuffer errors - attempting recovery...');
-            const statusEl = document.getElementById('connection-status');
-            if (statusEl) statusEl.textContent = '⚠️ Stream error - reconnecting...';
-            audioQueue = [];
-            chunksReceived = 0;
-            window.sourceBufferErrorCount = 0;
-          }
-        }
-      }
-    }
-
-    // UI Update (only if audio is already enabled, don't overwrite the enable prompt)
-    const now = Date.now();
-    if (now - lastStatusUpdate > 1000 && window.listenerAudioEnabled) {
-      const statusEl = document.getElementById('connection-status');
-      if (statusEl) {
-        statusEl.textContent = `🟢 Connected - ${chunksReceived} chunks (${audioQueue.length} buffered)`;
-      }
-      lastStatusUpdate = now;
-    }
-  });
+  // No socket audio chunks needed in MP3-only mode.
 
   socket.on('broadcast_started', () => {
     const nowPlayingEl = document.getElementById('listener-now-playing');
@@ -2395,8 +2279,6 @@ function initListenerMode() {
   socket.on('broadcast_stopped', () => {
     const nowPlayingEl = document.getElementById('listener-now-playing');
     if (nowPlayingEl) nowPlayingEl.textContent = 'Stream ended';
-    chunksReceived = 0;
-    audioQueue = [];
   });
 
   socket.on('connect', () => {
@@ -2491,66 +2373,18 @@ async function enableListenerAudio() {
   const volValue = volEl ? parseInt(volEl.value, 10) : 80;
   setListenerVolume(Number.isFinite(volValue) ? volValue : 80);
 
   // Check if we have buffered data
   const hasBufferedData = () => {
     return window.listenerAudio.buffered && window.listenerAudio.buffered.length > 0;
   };
 
-  // CRITICAL: Wait for buffered data before calling play()
-  // This prevents NotSupportedError when buffer is empty
-  if (!hasBufferedData()) {
-    console.log('⏳ Waiting for audio data to buffer before playback...');
-    if (audioText) audioText.textContent = 'BUFFERING...';
-
-    // Wait for data with timeout (max 5 seconds)
-    const waitForData = new Promise((resolve, reject) => {
-      const timeout = setTimeout(() => {
-        clearInterval(checkInterval);
-        reject(new Error('Timeout waiting for audio data'));
-      }, 5000);
-
-      const checkInterval = setInterval(() => {
-        if (hasBufferedData()) {
-          clearInterval(checkInterval);
-          clearTimeout(timeout);
-          console.log('✅ Audio data buffered, ready to play');
-          resolve();
-        }
-      }, 100);
-    });
-
-    try {
-      await waitForData;
-    } catch (e) {
-      console.warn('⚠️ Timeout waiting for buffer data:', e.message);
-    }
-  } else {
-    console.log('✅ Audio already has buffered data');
-  }
-
+  // MP3 stream: call play() immediately to capture the user gesture.
   if (audioText) audioText.textContent = 'STARTING...';
   console.log('▶️ Attempting to play audio...');
   const playPromise = window.listenerAudio.play();
 
-  // If no buffered data yet, show status but don't block playback
-  if (!hasBufferedData()) {
-    console.log('⏳ Waiting for audio data to buffer...');
-    const chunkCount = Number.isFinite(listenerChunksReceived) ? listenerChunksReceived : 0;
-    if (audioText) {
-      audioText.textContent = chunkCount > 0 ? 'BUFFERING...' : 'WAITING FOR STREAM...';
-    }
-
-    // Start a background checker to update UI
-    const checkInterval = setInterval(() => {
-      if (hasBufferedData()) {
-        clearInterval(checkInterval);
-        console.log('✅ Audio data buffered');
-        const chunkCount = Number.isFinite(listenerChunksReceived) ? listenerChunksReceived : 0;
-      } else if (audioText && chunkCount > 0 && audioText.textContent === 'WAITING FOR STREAM...') {
-        audioText.textContent = 'BUFFERING...';
-      }
-    }, 500);
-  } else {
-    console.log('✅ Audio already has buffered data');
+  // If not buffered yet, show buffering but don't block.
+  if (!hasBufferedData() && audioText) {
+    audioText.textContent = 'BUFFERING...';
   }
 
   await playPromise;
@@ -2586,42 +2420,13 @@ async function enableListenerAudio() {
     if (error.name === 'NotAllowedError') {
       errorMsg = 'Browser blocked audio (NotAllowedError). Check permissions.';
     } else if (error.name === 'NotSupportedError') {
-      errorMsg = 'Format not supported or buffer empty (NotSupportedError).';
+      errorMsg = 'MP3 stream not supported or unavailable (NotSupportedError).';
     }
 
     stashedStatus.textContent = '⚠️ ' + errorMsg;
 
     if (error.name === 'NotSupportedError') {
-      // Two common causes:
-      // 1) WebM/Opus MSE isn't supported by this browser
-      // 2) The element cannot play yet (empty buffer / transient)
-      // Prefer a compatibility fallback to MP3 if available.
-      try {
-        const fallbackUrl = getMp3FallbackUrl();
-        console.log(`🎧 NotSupportedError -> switching to MP3 fallback: ${fallbackUrl}`);
-        window.listenerAudio.src = fallbackUrl;
-        window.listenerAudio.load();
-
-        // Retry immediately (still within the click gesture)
-        window.listenerAudio.play().then(() => {
-          stashedStatus.textContent = '🟢 Playing via MP3 fallback';
-          window.listenerAudioEnabled = true;
-        }).catch((e) => {
-          console.error('MP3 fallback play failed:', e);
-          stashedStatus.textContent = '⚠️ MP3 fallback failed. Is ffmpeg installed on the server?';
-        });
-      } catch (e) {
-        console.error('Failed to switch to MP3 fallback:', e);
-      }
-
-      // Also keep a background retry in case it was just a buffer timing issue.
-      console.log('🔄 Retrying playback in background once data arrives...');
-      const retryInterval = setInterval(() => {
-        if (window.listenerAudio.buffered && window.listenerAudio.buffered.length > 0) {
-          clearInterval(retryInterval);
-          window.listenerAudio.play().catch((e) => console.error('Background retry failed:', e));
-        }
-      }, 1000);
+      stashedStatus.textContent = '⚠️ MP3 stream failed. Is ffmpeg installed on the server?';
     }
   }
 }
server.py (14 lines changed)
@@ -17,7 +17,6 @@ import downloader
 # Relay State
 broadcast_state = {
     'active': False,
-    'mimeType': None,
 }
 listener_sids = set()
 dj_sids = set()
@@ -339,12 +338,7 @@ def stop_broadcast_after_timeout():
 @dj_socketio.on('start_broadcast')
 def dj_start(data=None):
-    mime_type = None
-    if isinstance(data, dict):
-        mime_type = data.get('mimeType') or None
-
     broadcast_state['active'] = True
-    broadcast_state['mimeType'] = mime_type
     session['is_dj'] = True
     print("🎙️ Broadcast -> ACTIVE")
@@ -352,13 +346,10 @@ def dj_start(data=None):
     listener_socketio.emit('broadcast_started', namespace='/')
     listener_socketio.emit('stream_status', {'active': True}, namespace='/')
-    if mime_type:
-        listener_socketio.emit('stream_mime', {'mimeType': mime_type}, namespace='/')
 
 
 @dj_socketio.on('stop_broadcast')
 def dj_stop():
     broadcast_state['active'] = False
-    broadcast_state['mimeType'] = None
     session['is_dj'] = False
     print("🛑 DJ stopped broadcasting")
@@ -369,9 +360,8 @@ def dj_stop():
 @dj_socketio.on('audio_chunk')
 def dj_audio(data):
-    # Relay audio chunk to all listeners immediately
+    # MP3-only mode: do not relay raw chunks to listeners; feed transcoder only.
     if broadcast_state['active']:
-        listener_socketio.emit('audio_data', data, namespace='/')
         # Ensure MP3 fallback transcoder is running (if ffmpeg is installed)
         if _ffmpeg_proc is None or _ffmpeg_proc.poll() is not None:
             _start_transcoder_if_needed()
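The hunk above only shows the handler making sure the transcoder process is alive; how the chunk itself reaches ffmpeg is outside this excerpt. As a rough, assumed sketch (the names `_ffmpeg_proc` and `_start_transcoder_if_needed` come from the diff, but the helper below does not exist in the repo), the handler would then push the raw MediaRecorder bytes into ffmpeg's stdin:

```python
# Hypothetical helper, not part of this commit: push one DJ chunk into the
# ffmpeg process that produces /stream.mp3. Assumes the process was started
# with stdin=subprocess.PIPE, as _start_transcoder_if_needed() presumably does.
import subprocess


def feed_transcoder(proc: subprocess.Popen, chunk: bytes) -> bool:
    """Write one MediaRecorder chunk to ffmpeg's stdin; False means ffmpeg is gone."""
    if proc is None or proc.poll() is not None:
        return False
    try:
        proc.stdin.write(chunk)
        proc.stdin.flush()
        return True
    except (BrokenPipeError, ValueError):
        # ffmpeg exited or stdin was closed; the caller can restart the transcoder
        return False
```

If the write fails, the `_ffmpeg_proc.poll()` check shown in the hunk would notice the dead process on the next `audio_chunk` and call `_start_transcoder_if_needed()` again.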
@@ -417,8 +407,6 @@ def listener_join():
     dj_socketio.emit('listener_count', {'count': count}, namespace='/')
 
     emit('stream_status', {'active': broadcast_state['active']})
-    if broadcast_state.get('mimeType'):
-        emit('stream_mime', {'mimeType': broadcast_state['mimeType']})
 
 
 @listener_socketio.on('get_listener_count')
 def listener_get_count():