@@ -1414,6 +1414,47 @@ extern SDL_DECLSPEC bool SDLCALL SDL_SetAudioStreamOutputChannelMap(SDL_AudioStr
  */
 extern SDL_DECLSPEC bool SDLCALL SDL_PutAudioStreamData(SDL_AudioStream *stream, const void *buf, int len);

+/**
+ * Add data to the stream with each channel in a separate array.
+ *
+ * This data must match the format/channels/samplerate specified in the latest
+ * call to SDL_SetAudioStreamFormat, or the format specified when creating the
+ * stream if it hasn't been changed.
+ *
+ * The data will be interleaved and queued. Note that SDL_AudioStream only
+ * operates on interleaved data, so this is simply a convenience function for
+ * easily queueing data from sources that provide separate arrays. There is no
+ * equivalent function to retrieve planar data.
+ *
+ * The arrays in `channel_buffers` are ordered as they are to be interleaved;
+ * the first array will be the first sample in the interleaved data. Any
+ * individual array may be NULL; in this case, silence will be interleaved for
+ * that channel.
+ *
+ * Note that `num_samples` is the number of _samples per array_. This can also
+ * be thought of as the number of _sample frames_ to be queued. A value of 1
+ * with stereo arrays will queue two samples to the stream. This is different
+ * than SDL_PutAudioStreamData, which wants the size of a single array in bytes.
+ *
+ * \param stream the stream the audio data is being added to.
+ * \param channel_buffers a pointer to an array of arrays, one array per channel.
+ * \param num_samples the number of _samples_ per array to write to the stream.
+ * \returns true on success or false on failure; call SDL_GetError() for more
+ *          information.
+ *
+ * \threadsafety It is safe to call this function from any thread, but if the
+ *               stream has a callback set, the caller might need to manage
+ *               extra locking.
+ *
+ * \since This function is available since SDL 3.4.0.
+ *
+ * \sa SDL_ClearAudioStream
+ * \sa SDL_FlushAudioStream
+ * \sa SDL_GetAudioStreamData
+ * \sa SDL_GetAudioStreamQueued
+ */
+extern SDL_DECLSPEC bool SDLCALL SDL_PutAudioStreamPlanarData(SDL_AudioStream *stream, const void * const *channel_buffers, int num_samples);
+
 /**
  * Get converted/resampled data from the stream.
  *
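For orientation, here is a minimal usage sketch of the new entry point declared in the hunk above. It is not part of the patch: the stream setup, buffer sizes, and sample data are illustrative assumptions; only SDL_PutAudioStreamPlanarData itself comes from this change.

// Usage sketch (illustrative, not from the patch): queue planar float data
// into a stereo stream. Assumes SDL was initialized with SDL_INIT_AUDIO.
#include <SDL3/SDL.h>

static bool QueuePlanarExample(void)
{
    const SDL_AudioSpec spec = { SDL_AUDIO_F32, 2, 48000 };  // format, channels, freq
    SDL_AudioStream *stream = SDL_CreateAudioStream(&spec, &spec);
    if (!stream) {
        return false;
    }

    float left[256] = { 0.0f };   // real code would fill these with one
    float right[256] = { 0.0f };  // channel of audio each.

    // num_samples is per-array (sample frames): 256 here queues 512
    // interleaved float samples on a stereo stream.
    const void *channels[2] = { left, right };
    bool ok = SDL_PutAudioStreamPlanarData(stream, channels, 256);

    // A NULL array queues silence for that channel.
    const void *left_only[2] = { left, NULL };
    ok = ok && SDL_PutAudioStreamPlanarData(stream, left_only, 256);

    SDL_DestroyAudioStream(stream);
    return ok;
}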
@@ -768,15 +768,47 @@ bool SDL_SetAudioStreamGain(SDL_AudioStream *stream, float gain)

 static bool CheckAudioStreamIsFullySetup(SDL_AudioStream *stream)
 {
-    if (stream->src_spec.format == 0) {
+    if (stream->src_spec.format == SDL_AUDIO_UNKNOWN) {
         return SDL_SetError("Stream has no source format");
-    } else if (stream->dst_spec.format == 0) {
+    } else if (stream->dst_spec.format == SDL_AUDIO_UNKNOWN) {
         return SDL_SetError("Stream has no destination format");
     }

     return true;
 }

+// you MUST hold `stream->lock` when calling this, and validate your parameters!
+static bool PutAudioStreamBufferInternal(SDL_AudioStream *stream, const SDL_AudioSpec *spec, const int *chmap, const void *buf, int len, SDL_ReleaseAudioBufferCallback callback, void* userdata)
+{
+    SDL_AudioTrack* track = NULL;
+
+    if (callback) {
+        track = SDL_CreateAudioTrack(stream->queue, spec, chmap, (Uint8 *)buf, len, len, callback, userdata);
+        if (!track) {
+            return false;
+        }
+    }
+
+    const int prev_available = stream->put_callback ? SDL_GetAudioStreamAvailable(stream) : 0;
+
+    bool retval = true;
+
+    if (track) {
+        SDL_AddTrackToAudioQueue(stream->queue, track);
+    } else {
+        retval = SDL_WriteToAudioQueue(stream->queue, spec, chmap, (const Uint8 *)buf, len);
+    }
+
+    if (retval) {
+        if (stream->put_callback) {
+            const int newavail = SDL_GetAudioStreamAvailable(stream) - prev_available;
+            stream->put_callback(stream->put_callback_userdata, stream, newavail, newavail);
+        }
+    }
+
+    return retval;
+}
+
 static bool PutAudioStreamBuffer(SDL_AudioStream *stream, const void *buf, int len, SDL_ReleaseAudioBufferCallback callback, void* userdata)
 {
 #if DEBUG_AUDIOSTREAM
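Side note on the put_callback bookkeeping in PutAudioStreamBufferInternal above: the newly available byte count it computes is what a callback installed with SDL_SetAudioStreamPutCallback observes. A rough sketch follows; the stream setup and buffer size are illustrative assumptions, not part of the patch.

// Sketch (illustrative): watch how much newly converted data each put makes
// available. SDL_SetAudioStreamPutCallback and SDL_AudioStreamCallback are
// existing SDL3 API; the buffer size here is made up.
#include <SDL3/SDL.h>

static void SDLCALL LogPut(void *userdata, SDL_AudioStream *stream,
                           int additional_amount, int total_amount)
{
    (void) userdata;
    (void) stream;
    // For a direct put, the internal helper reports the growth of
    // SDL_GetAudioStreamAvailable() as both arguments.
    SDL_Log("put callback: +%d bytes (%d reported)", additional_amount, total_amount);
}

static bool PutWithCallback(SDL_AudioStream *stream)
{
    float frames[128 * 2] = { 0.0f };  // 128 interleaved stereo float frames
    if (!SDL_SetAudioStreamPutCallback(stream, LogPut, NULL)) {
        return false;
    }
    return SDL_PutAudioStreamData(stream, frames, (int) sizeof (frames));
}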
@@ -795,37 +827,11 @@ static bool PutAudioStreamBuffer(SDL_AudioStream *stream, const void *buf, int l
         return SDL_SetError("Can't add partial sample frames");
     }

-    SDL_AudioTrack* track = NULL;
-
-    if (callback) {
-        track = SDL_CreateAudioTrack(stream->queue, &stream->src_spec, stream->src_chmap, (Uint8 *)buf, len, len, callback, userdata);
-
-        if (!track) {
-            SDL_UnlockMutex(stream->lock);
-            return false;
-        }
-    }
-
-    const int prev_available = stream->put_callback ? SDL_GetAudioStreamAvailable(stream) : 0;
-
-    bool result = true;
-
-    if (track) {
-        SDL_AddTrackToAudioQueue(stream->queue, track);
-    } else {
-        result = SDL_WriteToAudioQueue(stream->queue, &stream->src_spec, stream->src_chmap, (const Uint8 *)buf, len);
-    }
-
-    if (result) {
-        if (stream->put_callback) {
-            const int newavail = SDL_GetAudioStreamAvailable(stream) - prev_available;
-            stream->put_callback(stream->put_callback_userdata, stream, newavail, newavail);
-        }
-    }
+    const bool retval = PutAudioStreamBufferInternal(stream, &stream->src_spec, stream->src_chmap, buf, len, callback, userdata);

     SDL_UnlockMutex(stream->lock);

-    return result;
+    return retval;
 }

 static void SDLCALL FreeAllocatedAudioBuffer(void *userdata, const void *buf, int len)
@@ -857,9 +863,8 @@ bool SDL_PutAudioStreamData(SDL_AudioStream *stream, const void *buf, int len)
     }

     SDL_memcpy(data, buf, len);
-    buf = data;

-    bool ret = PutAudioStreamBuffer(stream, buf, len, FreeAllocatedAudioBuffer, NULL);
+    bool ret = PutAudioStreamBuffer(stream, data, len, FreeAllocatedAudioBuffer, NULL);
     if (!ret) {
         SDL_free(data);
     }
@@ -869,6 +874,144 @@ bool SDL_PutAudioStreamData(SDL_AudioStream *stream, const void *buf, int len)
     return PutAudioStreamBuffer(stream, buf, len, NULL, NULL);
 }

+
+#define GENERIC_INTERLEAVE_FUNCTION(bits) \
+    static void InterleaveAudioChannelsGeneric##bits(void *output, const void * const *channel_buffers, const int channels, int num_samples) { \
+        Uint##bits *dst = (Uint##bits *) output; \
+        const Uint##bits * const *srcs = (const Uint##bits * const *) channel_buffers; \
+        for (int frame = 0; frame < num_samples; frame++) { \
+            for (int channel = 0; channel < channels; channel++) { \
+                *(dst++) = srcs[channel][frame]; \
+            } \
+        } \
+    }
+
+GENERIC_INTERLEAVE_FUNCTION(8)
+GENERIC_INTERLEAVE_FUNCTION(16)
+GENERIC_INTERLEAVE_FUNCTION(32)
+//GENERIC_INTERLEAVE_FUNCTION(64)  (we don't have any 64-bit audio data types at the moment.)
+#undef GENERIC_INTERLEAVE_FUNCTION
+
+#define GENERIC_INTERLEAVE_WITH_NULLS_FUNCTION(bits) \
+    static void InterleaveAudioChannelsWithNullsGeneric##bits(void *output, const void * const *channel_buffers, const int channels, int num_samples, const int isilence) { \
+        const Uint##bits silence = (Uint##bits) isilence; \
+        Uint##bits *dst = (Uint##bits *) output; \
+        const Uint##bits * const *srcs = (const Uint##bits * const *) channel_buffers; \
+        for (int frame = 0; frame < num_samples; frame++) { \
+            for (int channel = 0; channel < channels; channel++) { \
+                *(dst++) = srcs[channel] ? srcs[channel][frame] : silence; \
+            } \
+        } \
+    }
+
+GENERIC_INTERLEAVE_WITH_NULLS_FUNCTION(8)
+GENERIC_INTERLEAVE_WITH_NULLS_FUNCTION(16)
+GENERIC_INTERLEAVE_WITH_NULLS_FUNCTION(32)
+//GENERIC_INTERLEAVE_WITH_NULLS_FUNCTION(64)  (we don't have any 64-bit audio data types at the moment.)
+#undef GENERIC_INTERLEAVE_WITH_NULLS_FUNCTION
+
+static void InterleaveAudioChannels(void *output, const void * const *channel_buffers, int num_samples, const SDL_AudioSpec *spec)
+{
+    const int channels = spec->channels;
+
+    bool have_null_channel = false;
+    for (int i = 0; i < channels; i++) {
+        if (channel_buffers[i] == NULL) {
+            have_null_channel = true;
+            break;
+        }
+    }
+
+    if (have_null_channel) {
+        const int silence = SDL_GetSilenceValueForFormat(spec->format);
+        switch (SDL_AUDIO_BITSIZE(spec->format)) {
+            case 8: InterleaveAudioChannelsWithNullsGeneric8(output, channel_buffers, channels, num_samples, silence); break;
+            case 16: InterleaveAudioChannelsWithNullsGeneric16(output, channel_buffers, channels, num_samples, silence); break;
+            case 32: InterleaveAudioChannelsWithNullsGeneric32(output, channel_buffers, channels, num_samples, silence); break;
+            //case 64: InterleaveAudioChannelsGeneric64(output, channel_buffers, channels, num_samples); break;  (we don't have any 64-bit audio data types at the moment.)
+            default: SDL_assert(!"Missing needed generic audio interleave function!"); SDL_memset(output, 0, SDL_AUDIO_FRAMESIZE(*spec) * num_samples); break;
+        }
+    } else {
+        // !!! FIXME: it would be possible to do this really well in SIMD for stereo data, using unpack (intel) or zip (arm) instructions, etc.
+        switch (SDL_AUDIO_BITSIZE(spec->format)) {
+            case 8: InterleaveAudioChannelsGeneric8(output, channel_buffers, channels, num_samples); break;
+            case 16: InterleaveAudioChannelsGeneric16(output, channel_buffers, channels, num_samples); break;
+            case 32: InterleaveAudioChannelsGeneric32(output, channel_buffers, channels, num_samples); break;
+            //case 64: InterleaveAudioChannelsGeneric64(output, channel_buffers, channels, num_samples); break;  (we don't have any 64-bit audio data types at the moment.)
+            default: SDL_assert(!"Missing needed generic audio interleave function!"); SDL_memset(output, 0, SDL_AUDIO_FRAMESIZE(*spec) * num_samples); break;
+        }
+    }
+}
+
+bool SDL_PutAudioStreamPlanarData(SDL_AudioStream *stream, const void * const *channel_buffers, int num_samples)
+{
+    if (!stream) {
+        return SDL_InvalidParamError("stream");
+    } else if (!channel_buffers) {
+        return SDL_InvalidParamError("channel_buffers");
+    } else if (num_samples < 0) {
+        return SDL_InvalidParamError("num_samples");
+    } else if (num_samples == 0) {
+        return true;  // nothing to do.
+    }
+
+    // we do the interleaving up front without the lock held, so the audio device doesn't starve while we work.
+    // but we _do_ need to know the current input spec.
+    SDL_AudioSpec spec;
+    int chmap_copy[SDL_MAX_CHANNELMAP_CHANNELS];
+    int *chmap = NULL;
+    SDL_LockMutex(stream->lock);
+    if (!CheckAudioStreamIsFullySetup(stream)) {
+        SDL_UnlockMutex(stream->lock);
+        return false;
+    }
+    SDL_copyp(&spec, &stream->src_spec);
+    if (stream->src_chmap) {
+        chmap = chmap_copy;
+        SDL_memcpy(chmap, stream->src_chmap, sizeof (*chmap) * spec.channels);
+    }
+    SDL_UnlockMutex(stream->lock);
+
+    if (spec.channels == 1) {  // nothing to interleave, just use the usual function.
+        return SDL_PutAudioStreamData(stream, channel_buffers[0], SDL_AUDIO_FRAMESIZE(spec) * num_samples);
+    }
+
+    bool retval = false;
+
+    const int len = SDL_AUDIO_FRAMESIZE(spec) * num_samples;
+#if DEBUG_AUDIOSTREAM
+    SDL_Log("AUDIOSTREAM: wants to put %d bytes of separated data", len);
+#endif
+
+    // Is the data small enough to just interleave it on the stack and put it through the normal interface?
+    #define INTERLEAVE_STACK_SIZE 1024
+    Uint8 stackbuf[INTERLEAVE_STACK_SIZE];
+    void *data = stackbuf;
+    SDL_ReleaseAudioBufferCallback callback = NULL;
+
+    if (len > INTERLEAVE_STACK_SIZE) {
+        // too big for the stack? Just SDL_malloc a block and interleave into that. To avoid the extra copy, we'll just set it as a
+        // new track in the queue (the distinction is specifying a callback to PutAudioStreamBufferInternal, to release the buffer).
+        data = SDL_malloc(len);
+        if (!data) {
+            return false;
+        }
+        callback = FreeAllocatedAudioBuffer;
+    }
+
+    InterleaveAudioChannels(data, channel_buffers, num_samples, &spec);
+
+    // it's okay if the stream format changed on another thread while we didn't hold the lock; PutAudioStreamBufferInternal will notice
+    // and set up a new track with the right format, and the next SDL_PutAudioStreamData will notice that stream->src_spec doesn't
+    // match the new track and set up a new one again. It's a bad idea to change the format on another thread while putting here,
+    // but everything _will_ work out with the format that was (presumably) expected.
+    SDL_LockMutex(stream->lock);
+    retval = PutAudioStreamBufferInternal(stream, &spec, chmap, data, len, callback, NULL);
+    SDL_UnlockMutex(stream->lock);
+
+    return retval;
+}
+
 bool SDL_FlushAudioStream(SDL_AudioStream *stream)
 {
     if (!stream) {
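To make the interleaving and sizing above concrete, here is a standalone illustration (plain C, not SDL code; the sample values are made up). It mirrors the frame-major loop of InterleaveAudioChannelsGeneric16 and the byte math SDL_PutAudioStreamPlanarData uses.

// Standalone illustration (values made up): the same frame-major interleave
// that InterleaveAudioChannelsGeneric16 performs, plus the byte math used to
// size the output buffer.
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const int16_t left[4]  = { 1, 2, 3, 4 };
    const int16_t right[4] = { 10, 20, 30, 40 };
    const int16_t *channels[2] = { left, right };
    int16_t out[4 * 2];

    for (int frame = 0; frame < 4; frame++) {
        for (int channel = 0; channel < 2; channel++) {
            out[(frame * 2) + channel] = channels[channel][frame];
        }
    }

    // Prints "1 10 2 20 3 30 4 40": left/right pairs, one frame at a time.
    for (int i = 0; i < 8; i++) {
        printf("%d ", out[i]);
    }
    printf("\n");

    // Sizing: S16 stereo has a 4-byte frame (2 channels * 2 bytes), so
    // len = 4 * num_samples. Up to 256 frames (1024 bytes) fits the stack
    // buffer; anything larger takes the SDL_malloc + release-callback path.
    return 0;
}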
@@ -1250,6 +1250,7 @@ SDL3_0.0.0 {
     SDL_GetRenderTextureAddressMode;
     SDL_GetGPUDeviceProperties;
     SDL_CreateGPURenderer;
+    SDL_PutAudioStreamPlanarData;
     # extra symbols go here (don't modify this line)
   local: *;
 };
@@ -1275,3 +1275,4 @@
 #define SDL_GetRenderTextureAddressMode SDL_GetRenderTextureAddressMode_REAL
 #define SDL_GetGPUDeviceProperties SDL_GetGPUDeviceProperties_REAL
 #define SDL_CreateGPURenderer SDL_CreateGPURenderer_REAL
+#define SDL_PutAudioStreamPlanarData SDL_PutAudioStreamPlanarData_REAL
@@ -1283,3 +1283,4 @@ SDL_DYNAPI_PROC(bool,SDL_SetRenderTextureAddressMode,(SDL_Renderer *a,SDL_Textur
 SDL_DYNAPI_PROC(bool,SDL_GetRenderTextureAddressMode,(SDL_Renderer *a,SDL_TextureAddressMode *b,SDL_TextureAddressMode *c),(a,b,c),return)
 SDL_DYNAPI_PROC(SDL_PropertiesID,SDL_GetGPUDeviceProperties,(SDL_GPUDevice *a),(a),return)
 SDL_DYNAPI_PROC(SDL_Renderer*,SDL_CreateGPURenderer,(SDL_Window *a,SDL_GPUShaderFormat b,SDL_GPUDevice **c),(a,b,c),return)
+SDL_DYNAPI_PROC(bool,SDL_PutAudioStreamPlanarData,(SDL_AudioStream *a,const void * const*b,int c),(a,b,c),return)