Mirror of https://github.com/libsdl-org/SDL.git (synced 2025-09-22 03:08:22 +00:00)
audio: Refer to audio devices as "playback" and "recording".
Fixes #9619.
@@ -41,11 +41,11 @@ static int EMSCRIPTENAUDIO_PlayDevice(SDL_AudioDevice *device, const Uint8 *buff
     const int framelen = SDL_AUDIO_FRAMESIZE(device->spec);
     MAIN_THREAD_EM_ASM({
         var SDL3 = Module['SDL3'];
-        var numChannels = SDL3.audio.currentOutputBuffer['numberOfChannels'];
+        var numChannels = SDL3.audio_playback.currentPlaybackBuffer['numberOfChannels'];
         for (var c = 0; c < numChannels; ++c) {
-            var channelData = SDL3.audio.currentOutputBuffer['getChannelData'](c);
+            var channelData = SDL3.audio_playback.currentPlaybackBuffer['getChannelData'](c);
             if (channelData.length != $1) {
-                throw 'Web Audio output buffer length mismatch! Destination size: ' + channelData.length + ' samples vs expected ' + $1 + ' samples!';
+                throw 'Web Audio playback buffer length mismatch! Destination size: ' + channelData.length + ' samples vs expected ' + $1 + ' samples!';
             }

             for (var j = 0; j < $1; ++j) {
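For reference, Web Audio buffers are planar (one Float32Array per channel) while SDL's mix buffer is interleaved, which is why the loop this hunk touches copies one channel at a time. A minimal standalone sketch of that conversion, with illustrative names that are not part of SDL:

    // Copy interleaved samples (SDL's layout) into a planar Web Audio
    // AudioBuffer; interleaved.length must equal frames * channels.
    function interleavedToPlanar(interleaved, audioBuffer) {
        var numChannels = audioBuffer.numberOfChannels;
        for (var c = 0; c < numChannels; ++c) {
            var channelData = audioBuffer.getChannelData(c); // planar view of one channel
            for (var j = 0; j < audioBuffer.length; ++j) {
                channelData[j] = interleaved[j * numChannels + c];
            }
        }
    }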
@@ -57,20 +57,20 @@ static int EMSCRIPTENAUDIO_PlayDevice(SDL_AudioDevice *device, const Uint8 *buff
 }


-static void EMSCRIPTENAUDIO_FlushCapture(SDL_AudioDevice *device)
+static void EMSCRIPTENAUDIO_FlushRecording(SDL_AudioDevice *device)
 {
     // Do nothing, the new data will just be dropped.
 }

-static int EMSCRIPTENAUDIO_CaptureFromDevice(SDL_AudioDevice *device, void *buffer, int buflen)
+static int EMSCRIPTENAUDIO_RecordDevice(SDL_AudioDevice *device, void *buffer, int buflen)
 {
     MAIN_THREAD_EM_ASM({
         var SDL3 = Module['SDL3'];
-        var numChannels = SDL3.capture.currentCaptureBuffer.numberOfChannels;
+        var numChannels = SDL3.audio_recording.currentRecordingBuffer.numberOfChannels;
         for (var c = 0; c < numChannels; ++c) {
-            var channelData = SDL3.capture.currentCaptureBuffer.getChannelData(c);
+            var channelData = SDL3.audio_recording.currentRecordingBuffer.getChannelData(c);
             if (channelData.length != $1) {
-                throw 'Web Audio capture buffer length mismatch! Destination size: ' + channelData.length + ' samples vs expected ' + $1 + ' samples!';
+                throw 'Web Audio recording buffer length mismatch! Destination size: ' + channelData.length + ' samples vs expected ' + $1 + ' samples!';
             }

             if (numChannels == 1) {  // fastpath this a little for the common (mono) case.
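The "fastpath" noted above is the recording-side mirror of the same layout mismatch: planar input must be interleaved for SDL, except that mono data is already in the target layout and can be copied wholesale. A hedged sketch, again with illustrative names:

    // Planar Web Audio input -> interleaved Float32Array for the app.
    function planarToInterleaved(inputBuffer, interleaved) {
        var numChannels = inputBuffer.numberOfChannels;
        if (numChannels == 1) { // mono: already in the target layout, bulk-copy it
            interleaved.set(inputBuffer.getChannelData(0));
        } else {
            for (var c = 0; c < numChannels; ++c) {
                var channelData = inputBuffer.getChannelData(c);
                for (var j = 0; j < inputBuffer.length; ++j) {
                    interleaved[j * numChannels + c] = channelData[j];
                }
            }
        }
    }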
@@ -97,37 +97,37 @@ static void EMSCRIPTENAUDIO_CloseDevice(SDL_AudioDevice *device)
     MAIN_THREAD_EM_ASM({
         var SDL3 = Module['SDL3'];
         if ($0) {
-            if (SDL3.capture.silenceTimer !== undefined) {
-                clearInterval(SDL3.capture.silenceTimer);
+            if (SDL3.audio_recording.silenceTimer !== undefined) {
+                clearInterval(SDL3.audio_recording.silenceTimer);
             }
-            if (SDL3.capture.stream !== undefined) {
-                var tracks = SDL3.capture.stream.getAudioTracks();
+            if (SDL3.audio_recording.stream !== undefined) {
+                var tracks = SDL3.audio_recording.stream.getAudioTracks();
                 for (var i = 0; i < tracks.length; i++) {
-                    SDL3.capture.stream.removeTrack(tracks[i]);
+                    SDL3.audio_recording.stream.removeTrack(tracks[i]);
                 }
             }
-            if (SDL3.capture.scriptProcessorNode !== undefined) {
-                SDL3.capture.scriptProcessorNode.onaudioprocess = function(audioProcessingEvent) {};
-                SDL3.capture.scriptProcessorNode.disconnect();
+            if (SDL3.audio_recording.scriptProcessorNode !== undefined) {
+                SDL3.audio_recording.scriptProcessorNode.onaudioprocess = function(audioProcessingEvent) {};
+                SDL3.audio_recording.scriptProcessorNode.disconnect();
             }
-            if (SDL3.capture.mediaStreamNode !== undefined) {
-                SDL3.capture.mediaStreamNode.disconnect();
+            if (SDL3.audio_recording.mediaStreamNode !== undefined) {
+                SDL3.audio_recording.mediaStreamNode.disconnect();
             }
-            SDL3.capture = undefined;
+            SDL3.audio_recording = undefined;
         } else {
-            if (SDL3.audio.scriptProcessorNode != undefined) {
-                SDL3.audio.scriptProcessorNode.disconnect();
+            if (SDL3.audio_playback.scriptProcessorNode != undefined) {
+                SDL3.audio_playback.scriptProcessorNode.disconnect();
             }
-            if (SDL3.audio.silenceTimer !== undefined) {
-                clearInterval(SDL3.audio.silenceTimer);
+            if (SDL3.audio_playback.silenceTimer !== undefined) {
+                clearInterval(SDL3.audio_playback.silenceTimer);
             }
-            SDL3.audio = undefined;
+            SDL3.audio_playback = undefined;
         }
-        if ((SDL3.audioContext !== undefined) && (SDL3.audio === undefined) && (SDL3.capture === undefined)) {
+        if ((SDL3.audioContext !== undefined) && (SDL3.audio_playback === undefined) && (SDL3.audio_recording === undefined)) {
             SDL3.audioContext.close();
             SDL3.audioContext = undefined;
         }
-    }, device->iscapture);
+    }, device->recording);

     SDL_free(device->hidden->mixbuf);
     SDL_free(device->hidden);
@@ -149,9 +149,9 @@ static int EMSCRIPTENAUDIO_OpenDevice(SDL_AudioDevice *device)
         }
         var SDL3 = Module['SDL3'];
         if (!$0) {
-            SDL3.audio = {};
+            SDL3.audio_playback = {};
         } else {
-            SDL3.capture = {};
+            SDL3.audio_recording = {};
         }

         if (!SDL3.audioContext) {
@@ -167,7 +167,7 @@ static int EMSCRIPTENAUDIO_OpenDevice(SDL_AudioDevice *device)
             }
         }
         return SDL3.audioContext === undefined ? -1 : 0;
-    }, device->iscapture);
+    }, device->recording);

     if (result < 0) {
         return SDL_SetError("Web Audio API is not available!");
@@ -186,7 +186,7 @@ static int EMSCRIPTENAUDIO_OpenDevice(SDL_AudioDevice *device)

     SDL_UpdatedAudioDeviceFormat(device);

-    if (!device->iscapture) {
+    if (!device->recording) {
         device->hidden->mixbuf = (Uint8 *)SDL_malloc(device->buffer_size);
         if (!device->hidden->mixbuf) {
             return -1;
@@ -194,18 +194,18 @@ static int EMSCRIPTENAUDIO_OpenDevice(SDL_AudioDevice *device)
         SDL_memset(device->hidden->mixbuf, device->silence_value, device->buffer_size);
     }

-    if (device->iscapture) {
-        /* The idea is to take the capture media stream, hook it up to an
+    if (device->recording) {
+        /* The idea is to take the recording media stream, hook it up to an
            audio graph where we can pass it through a ScriptProcessorNode
            to access the raw PCM samples and push them to the SDL app's
            callback. From there, we "process" the audio data into silence
            and forget about it.

-           This should, strictly speaking, use MediaRecorder for capture, but
+           This should, strictly speaking, use MediaRecorder for recording, but
            this API is cleaner to use and better supported, and fires a
            callback whenever there's enough data to fire down into the app.
            The downside is that we are spending CPU time silencing a buffer
-           that the audiocontext uselessly mixes into any output. On the
+           that the audiocontext uselessly mixes into any playback. On the
            upside, both of those things are not only run in native code in
            the browser, they're probably SIMD code, too. MediaRecorder
            feels like it's a pretty inefficient tapdance in similar ways,
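Stripped of the SDL and Emscripten plumbing, the graph that comment describes (and that the next hunk builds) is roughly the following; onRecordedFrames stands in for SDL's recording callback and is not a real SDL symbol:

    // Microphone -> MediaStreamSource -> ScriptProcessorNode -> destination.
    // The node's output is silenced; only its inputBuffer is of interest.
    function startRecording(audioContext, frames, channels, onRecordedFrames) {
        navigator.mediaDevices.getUserMedia({ audio: true, video: false }).then(function(stream) {
            var source = audioContext.createMediaStreamSource(stream);
            var node = audioContext.createScriptProcessor(frames, channels, 1);
            node.onaudioprocess = function(e) {
                e.outputBuffer.getChannelData(0).fill(0.0); // "process" the audio into silence
                onRecordedFrames(e.inputBuffer);            // hand the raw PCM to the app
            };
            source.connect(node);
            node.connect(audioContext.destination); // node must reach the destination to keep firing
        });
    }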
@@ -214,67 +214,67 @@ static int EMSCRIPTENAUDIO_OpenDevice(SDL_AudioDevice *device)
         MAIN_THREAD_EM_ASM({
             var SDL3 = Module['SDL3'];
             var have_microphone = function(stream) {
-                //console.log('SDL audio capture: we have a microphone! Replacing silence callback.');
-                if (SDL3.capture.silenceTimer !== undefined) {
-                    clearInterval(SDL3.capture.silenceTimer);
-                    SDL3.capture.silenceTimer = undefined;
-                    SDL3.capture.silenceBuffer = undefined
+                //console.log('SDL audio recording: we have a microphone! Replacing silence callback.');
+                if (SDL3.audio_recording.silenceTimer !== undefined) {
+                    clearInterval(SDL3.audio_recording.silenceTimer);
+                    SDL3.audio_recording.silenceTimer = undefined;
+                    SDL3.audio_recording.silenceBuffer = undefined
                 }
-                SDL3.capture.mediaStreamNode = SDL3.audioContext.createMediaStreamSource(stream);
-                SDL3.capture.scriptProcessorNode = SDL3.audioContext.createScriptProcessor($1, $0, 1);
-                SDL3.capture.scriptProcessorNode.onaudioprocess = function(audioProcessingEvent) {
-                    if ((SDL3 === undefined) || (SDL3.capture === undefined)) { return; }
+                SDL3.audio_recording.mediaStreamNode = SDL3.audioContext.createMediaStreamSource(stream);
+                SDL3.audio_recording.scriptProcessorNode = SDL3.audioContext.createScriptProcessor($1, $0, 1);
+                SDL3.audio_recording.scriptProcessorNode.onaudioprocess = function(audioProcessingEvent) {
+                    if ((SDL3 === undefined) || (SDL3.audio_recording === undefined)) { return; }
                     audioProcessingEvent.outputBuffer.getChannelData(0).fill(0.0);
-                    SDL3.capture.currentCaptureBuffer = audioProcessingEvent.inputBuffer;
+                    SDL3.audio_recording.currentRecordingBuffer = audioProcessingEvent.inputBuffer;
                     dynCall('vi', $2, [$3]);
                 };
-                SDL3.capture.mediaStreamNode.connect(SDL3.capture.scriptProcessorNode);
-                SDL3.capture.scriptProcessorNode.connect(SDL3.audioContext.destination);
-                SDL3.capture.stream = stream;
+                SDL3.audio_recording.mediaStreamNode.connect(SDL3.audio_recording.scriptProcessorNode);
+                SDL3.audio_recording.scriptProcessorNode.connect(SDL3.audioContext.destination);
+                SDL3.audio_recording.stream = stream;
             };

             var no_microphone = function(error) {
-                //console.log('SDL audio capture: we DO NOT have a microphone! (' + error.name + ')...leaving silence callback running.');
+                //console.log('SDL audio recording: we DO NOT have a microphone! (' + error.name + ')...leaving silence callback running.');
             };

             // we write silence to the audio callback until the microphone is available (user approves use, etc).
-            SDL3.capture.silenceBuffer = SDL3.audioContext.createBuffer($0, $1, SDL3.audioContext.sampleRate);
-            SDL3.capture.silenceBuffer.getChannelData(0).fill(0.0);
+            SDL3.audio_recording.silenceBuffer = SDL3.audioContext.createBuffer($0, $1, SDL3.audioContext.sampleRate);
+            SDL3.audio_recording.silenceBuffer.getChannelData(0).fill(0.0);
             var silence_callback = function() {
-                SDL3.capture.currentCaptureBuffer = SDL3.capture.silenceBuffer;
+                SDL3.audio_recording.currentRecordingBuffer = SDL3.audio_recording.silenceBuffer;
                 dynCall('vi', $2, [$3]);
             };

-            SDL3.capture.silenceTimer = setInterval(silence_callback, ($1 / SDL3.audioContext.sampleRate) * 1000);
+            SDL3.audio_recording.silenceTimer = setInterval(silence_callback, ($1 / SDL3.audioContext.sampleRate) * 1000);

             if ((navigator.mediaDevices !== undefined) && (navigator.mediaDevices.getUserMedia !== undefined)) {
                 navigator.mediaDevices.getUserMedia({ audio: true, video: false }).then(have_microphone).catch(no_microphone);
             } else if (navigator.webkitGetUserMedia !== undefined) {
                 navigator.webkitGetUserMedia({ audio: true, video: false }, have_microphone, no_microphone);
             }
-        }, device->spec.channels, device->sample_frames, SDL_CaptureAudioThreadIterate, device);
+        }, device->spec.channels, device->sample_frames, SDL_RecordingAudioThreadIterate, device);
     } else {
         // setup a ScriptProcessorNode
         MAIN_THREAD_EM_ASM({
             var SDL3 = Module['SDL3'];
-            SDL3.audio.scriptProcessorNode = SDL3.audioContext['createScriptProcessor']($1, 0, $0);
-            SDL3.audio.scriptProcessorNode['onaudioprocess'] = function (e) {
-                if ((SDL3 === undefined) || (SDL3.audio === undefined)) { return; }
+            SDL3.audio_playback.scriptProcessorNode = SDL3.audioContext['createScriptProcessor']($1, 0, $0);
+            SDL3.audio_playback.scriptProcessorNode['onaudioprocess'] = function (e) {
+                if ((SDL3 === undefined) || (SDL3.audio_playback === undefined)) { return; }
                 // if we're actually running the node, we don't need the fake callback anymore, so kill it.
-                if (SDL3.audio.silenceTimer !== undefined) {
-                    clearInterval(SDL3.audio.silenceTimer);
-                    SDL3.audio.silenceTimer = undefined;
-                    SDL3.audio.silenceBuffer = undefined;
+                if (SDL3.audio_playback.silenceTimer !== undefined) {
+                    clearInterval(SDL3.audio_playback.silenceTimer);
+                    SDL3.audio_playback.silenceTimer = undefined;
+                    SDL3.audio_playback.silenceBuffer = undefined;
                 }
-                SDL3.audio.currentOutputBuffer = e['outputBuffer'];
+                SDL3.audio_playback.currentPlaybackBuffer = e['outputBuffer'];
                 dynCall('vi', $2, [$3]);
             };

-            SDL3.audio.scriptProcessorNode['connect'](SDL3.audioContext['destination']);
+            SDL3.audio_playback.scriptProcessorNode['connect'](SDL3.audioContext['destination']);

             if (SDL3.audioContext.state === 'suspended') {  // uhoh, autoplay is blocked.
-                SDL3.audio.silenceBuffer = SDL3.audioContext.createBuffer($0, $1, SDL3.audioContext.sampleRate);
-                SDL3.audio.silenceBuffer.getChannelData(0).fill(0.0);
+                SDL3.audio_playback.silenceBuffer = SDL3.audioContext.createBuffer($0, $1, SDL3.audioContext.sampleRate);
+                SDL3.audio_playback.silenceBuffer.getChannelData(0).fill(0.0);
                 var silence_callback = function() {
                     if ((typeof navigator.userActivation) !== 'undefined') {  // Almost everything modern except Firefox (as of August 2023)
                         if (navigator.userActivation.hasBeenActive) {
@@ -284,14 +284,14 @@ static int EMSCRIPTENAUDIO_OpenDevice(SDL_AudioDevice *device)

                     // the buffer that gets filled here just gets ignored, so the app can make progress
                     // and/or avoid flooding audio queues until we can actually play audio.
-                    SDL3.audio.currentOutputBuffer = SDL3.audio.silenceBuffer;
+                    SDL3.audio_playback.currentPlaybackBuffer = SDL3.audio_playback.silenceBuffer;
                     dynCall('vi', $2, [$3]);
-                    SDL3.audio.currentOutputBuffer = undefined;
+                    SDL3.audio_playback.currentPlaybackBuffer = undefined;
                 };

-                SDL3.audio.silenceTimer = setInterval(silence_callback, ($1 / SDL3.audioContext.sampleRate) * 1000);
+                SDL3.audio_playback.silenceTimer = setInterval(silence_callback, ($1 / SDL3.audioContext.sampleRate) * 1000);
             }
-        }, device->spec.channels, device->sample_frames, SDL_OutputAudioThreadIterate, device);
+        }, device->spec.channels, device->sample_frames, SDL_PlaybackAudioThreadIterate, device);
     }

     return 0;
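Both the recording and playback paths above share one fallback idea: until real audio can flow (microphone not yet granted, or the AudioContext suspended because autoplay is blocked), a setInterval timer hands the app a silence buffer at the real buffer cadence, and the timer is cleared the moment the genuine onaudioprocess handler runs. A minimal sketch of the playback side, assuming a callback-based app like this driver; the names are illustrative:

    // Keep the app's audio callback ticking with silence while the
    // context is suspended; resume once the user has interacted.
    function installSilenceFallback(audioContext, frames, onAudioFrames) {
        var silence = audioContext.createBuffer(1, frames, audioContext.sampleRate);
        return setInterval(function() {
            if (((typeof navigator.userActivation) !== 'undefined') && navigator.userActivation.hasBeenActive) {
                audioContext.resume(); // user gesture seen, real playback may start now
            }
            onAudioFrames(silence);    // ignored data, but the app keeps making progress
        }, (frames / audioContext.sampleRate) * 1000); // one tick per buffer's worth of samples
    }

Clearing the returned interval id once genuine callbacks arrive mirrors what the renamed silenceTimer fields do here.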
@@ -299,16 +299,16 @@ static int EMSCRIPTENAUDIO_OpenDevice(SDL_AudioDevice *device)

 static SDL_bool EMSCRIPTENAUDIO_Init(SDL_AudioDriverImpl *impl)
 {
-    SDL_bool available, capture_available;
+    SDL_bool available, recording_available;

     impl->OpenDevice = EMSCRIPTENAUDIO_OpenDevice;
     impl->CloseDevice = EMSCRIPTENAUDIO_CloseDevice;
     impl->GetDeviceBuf = EMSCRIPTENAUDIO_GetDeviceBuf;
     impl->PlayDevice = EMSCRIPTENAUDIO_PlayDevice;
-    impl->FlushCapture = EMSCRIPTENAUDIO_FlushCapture;
-    impl->CaptureFromDevice = EMSCRIPTENAUDIO_CaptureFromDevice;
+    impl->FlushRecording = EMSCRIPTENAUDIO_FlushRecording;
+    impl->RecordDevice = EMSCRIPTENAUDIO_RecordDevice;

-    impl->OnlyHasDefaultOutputDevice = SDL_TRUE;
+    impl->OnlyHasDefaultPlaybackDevice = SDL_TRUE;

     // technically, this is just runs in idle time in the main thread, but it's close enough to a "thread" for our purposes.
     impl->ProvidesOwnCallbackThread = SDL_TRUE;
@@ -327,7 +327,7 @@ static SDL_bool EMSCRIPTENAUDIO_Init(SDL_AudioDriverImpl *impl)
         SDL_SetError("No audio context available");
     }

-    capture_available = available && MAIN_THREAD_EM_ASM_INT({
+    recording_available = available && MAIN_THREAD_EM_ASM_INT({
         if ((typeof(navigator.mediaDevices) !== 'undefined') && (typeof(navigator.mediaDevices.getUserMedia) !== 'undefined')) {
             return true;
         } else if (typeof(navigator.webkitGetUserMedia) !== 'undefined') {
@@ -336,8 +336,8 @@ static SDL_bool EMSCRIPTENAUDIO_Init(SDL_AudioDriverImpl *impl)
         return false;
     });

-    impl->HasCaptureSupport = capture_available;
-    impl->OnlyHasDefaultCaptureDevice = capture_available;
+    impl->HasRecordingSupport = recording_available;
+    impl->OnlyHasDefaultRecordingDevice = recording_available;

     return available;
 }
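The availability probe in the last two hunks is ordinary feature detection; extracted from the EM_ASM block, it amounts to this standalone check (function name is illustrative):

    // Recording is available if getUserMedia exists, modern or webkit-prefixed.
    function hasRecordingSupport() {
        if ((navigator.mediaDevices !== undefined) && (navigator.mediaDevices.getUserMedia !== undefined)) {
            return true;
        }
        return navigator.webkitGetUserMedia !== undefined;
    }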