diff --git a/src/camera/SDL_camera.c b/src/camera/SDL_camera.c
index 533943c5c5..8009136497 100644
--- a/src/camera/SDL_camera.c
+++ b/src/camera/SDL_camera.c
@@ -852,6 +852,8 @@ SDL_bool SDL_CameraThreadIterate(SDL_CameraDevice *device)
#if DEBUG_CAMERA
SDL_Log("CAMERA: Frame is going through without conversion!");
#endif
+ output_surface->w = acquired->w;
+ output_surface->h = acquired->h;
output_surface->pixels = acquired->pixels;
output_surface->pitch = acquired->pitch;
} else { // convert/scale into a different surface.
diff --git a/src/camera/coremedia/SDL_camera_coremedia.m b/src/camera/coremedia/SDL_camera_coremedia.m
index 93ab90f1ba..b946153d3c 100644
--- a/src/camera/coremedia/SDL_camera_coremedia.m
+++ b/src/camera/coremedia/SDL_camera_coremedia.m
@@ -41,7 +41,7 @@
* com.apple.security.device.camera
*/
-static Uint32 CoreMediaFormatToSDL(FourCharCode fmt)
+static SDL_PixelFormatEnum CoreMediaFormatToSDL(FourCharCode fmt)
{
switch (fmt) {
#define CASE(x, y) case x: return y
@@ -55,6 +55,10 @@ static Uint32 CoreMediaFormatToSDL(FourCharCode fmt)
CASE(kCMPixelFormat_32BGRA, SDL_PIXELFORMAT_BGRA32);
CASE(kCMPixelFormat_422YpCbCr8, SDL_PIXELFORMAT_YUY2);
CASE(kCMPixelFormat_422YpCbCr8_yuvs, SDL_PIXELFORMAT_UYVY);
+ CASE(kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, SDL_PIXELFORMAT_NV12);
+ CASE(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange, SDL_PIXELFORMAT_NV12);
+ CASE(kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange, SDL_PIXELFORMAT_P010);
+ CASE(kCVPixelFormatType_420YpCbCr10BiPlanarFullRange, SDL_PIXELFORMAT_P010);
#undef CASE
default:
#if DEBUG_CAMERA
@@ -172,6 +176,9 @@ static int COREMEDIA_AcquireFrame(SDL_CameraDevice *device, SDL_Surface *frame,
// !!! FIXME: this currently copies the data to the surface (see FIXME about non-contiguous planar surfaces, but in theory we could just keep this locked until ReleaseFrame...
CVPixelBufferLockBaseAddress(image, 0);
+ frame->w = (int)CVPixelBufferGetWidth(image);
+ frame->h = (int)CVPixelBufferGetHeight(image);
+
if ((planar == 0) && (numPlanes == 0)) {
const int pitch = (int) CVPixelBufferGetBytesPerRow(image);
const size_t buflen = pitch * frame->h;
@@ -185,22 +192,26 @@ static int COREMEDIA_AcquireFrame(SDL_CameraDevice *device, SDL_Surface *frame,
} else {
// !!! FIXME: we have an open issue in SDL3 to allow SDL_Surface to support non-contiguous planar data, but we don't have it yet.
size_t buflen = 0;
- for (int i = 0; (i < numPlanes) && (i < 3); i++) {
- buflen += CVPixelBufferGetBytesPerRowOfPlane(image, i);
+ for (int i = 0; i < numPlanes; i++) {
+ size_t plane_height = CVPixelBufferGetHeightOfPlane(image, i);
+ size_t plane_pitch = CVPixelBufferGetBytesPerRowOfPlane(image, i);
+ size_t plane_size = (plane_pitch * plane_height);
+ buflen += plane_size;
}
- buflen *= frame->h;
+ frame->pitch = (int)CVPixelBufferGetBytesPerRowOfPlane(image, 0); // this is what SDL3 currently expects
frame->pixels = SDL_aligned_alloc(SDL_GetSIMDAlignment(), buflen);
if (frame->pixels == NULL) {
retval = -1;
} else {
Uint8 *dst = frame->pixels;
- frame->pitch = (int) CVPixelBufferGetBytesPerRowOfPlane(image, 0); // this is what SDL3 currently expects, probably incorrectly.
- for (int i = 0; (i < numPlanes) && (i < 3); i++) {
+ for (int i = 0; i < numPlanes; i++) {
const void *src = CVPixelBufferGetBaseAddressOfPlane(image, i);
- const size_t pitch = CVPixelBufferGetBytesPerRowOfPlane(image, i);
- SDL_memcpy(dst, src, pitch * frame->h);
- dst += pitch * frame->h;
+ size_t plane_height = CVPixelBufferGetHeightOfPlane(image, i);
+ size_t plane_pitch = CVPixelBufferGetBytesPerRowOfPlane(image, i);
+ size_t plane_size = (plane_pitch * plane_height);
+ SDL_memcpy(dst, src, plane_size);
+ dst += plane_size;
}
}
}
@@ -241,7 +252,7 @@ static int COREMEDIA_OpenDevice(SDL_CameraDevice *device, const SDL_CameraSpec *
AVCaptureDevice *avdevice = (__bridge AVCaptureDevice *) device->handle;
// Pick format that matches the spec
- const Uint32 sdlfmt = spec->format;
+ const SDL_PixelFormatEnum sdlfmt = spec->format;
const int w = spec->width;
const int h = spec->height;
const int rate = spec->interval_denominator;
@@ -356,7 +367,7 @@ static void GatherCameraSpecs(AVCaptureDevice *device, CameraFormatAddData *add_
continue;
}
- const Uint32 sdlfmt = CoreMediaFormatToSDL(CMFormatDescriptionGetMediaSubType(fmt.formatDescription));
+ const SDL_PixelFormatEnum sdlfmt = CoreMediaFormatToSDL(CMFormatDescriptionGetMediaSubType(fmt.formatDescription));
if (sdlfmt == SDL_PIXELFORMAT_UNKNOWN) {
continue;
}
diff --git a/test/testcamera.c b/test/testcamera.c
index 71207e5e76..93fa77bb38 100644
--- a/test/testcamera.c
+++ b/test/testcamera.c
@@ -180,21 +180,6 @@ int SDL_AppEvent(void *appstate, const SDL_Event *event)
case SDL_EVENT_CAMERA_DEVICE_APPROVED:
SDL_Log("Camera approved!");
- if (SDL_GetCameraFormat(camera, &spec) < 0) {
- SDL_Log("Couldn't get camera spec: %s", SDL_GetError());
- return SDL_APP_FAILURE;
- }
-
- /* Resize the window to match */
- SDL_SetWindowSize(window, spec.width, spec.height);
-
- /* Create texture with appropriate format */
- SDL_assert(texture == NULL);
- texture = SDL_CreateTexture(renderer, spec.format, SDL_TEXTUREACCESS_STREAMING, spec.width, spec.height);
- if (!texture) {
- SDL_Log("Couldn't create texture: %s", SDL_GetError());
- return SDL_APP_FAILURE;
- }
break;
case SDL_EVENT_CAMERA_DEVICE_DENIED:
@@ -213,30 +198,48 @@ int SDL_AppIterate(void *appstate)
SDL_SetRenderDrawColor(renderer, 0x99, 0x99, 0x99, 255);
SDL_RenderClear(renderer);
- if (texture) { /* if not NULL, camera is ready to go. */
- int win_w, win_h, tw, th;
- SDL_FRect d;
- Uint64 timestampNS = 0;
- SDL_Surface *frame_next = camera ? SDL_AcquireCameraFrame(camera, &timestampNS) : NULL;
+ int win_w, win_h, tw, th;
+ SDL_FRect d;
+ Uint64 timestampNS = 0;
+ SDL_Surface *frame_next = camera ? SDL_AcquireCameraFrame(camera, &timestampNS) : NULL;
- #if 0
- if (frame_next) {
- SDL_Log("frame: %p at %" SDL_PRIu64, (void*)frame_next->pixels, timestampNS);
+ #if 0
+ if (frame_next) {
+ SDL_Log("frame: %p at %" SDL_PRIu64, (void*)frame_next->pixels, timestampNS);
+ }
+ #endif
+
+ if (frame_next) {
+ if (frame_current) {
+ if (SDL_ReleaseCameraFrame(camera, frame_current) < 0) {
+ SDL_Log("err SDL_ReleaseCameraFrame: %s", SDL_GetError());
+ }
}
- #endif
- if (frame_next) {
- if (frame_current) {
- if (SDL_ReleaseCameraFrame(camera, frame_current) < 0) {
- SDL_Log("err SDL_ReleaseCameraFrame: %s", SDL_GetError());
- }
+ /* It's not needed to keep the frame once the texture is updated.
+ * But in case of 0-copy, it's needed to have the frame while using the texture.
+ */
+ frame_current = frame_next;
+ texture_updated = SDL_FALSE;
+ }
+
+ if (frame_current) {
+ if (!texture ||
+ SDL_QueryTexture(texture, NULL, NULL, &tw, &th) < 0 ||
+ tw != frame_current->w || th != frame_current->h) {
+ /* Resize the window to match */
+ SDL_SetWindowSize(window, frame_current->w, frame_current->h);
+
+ if (texture) {
+ SDL_DestroyTexture(texture);
}
- /* It's not needed to keep the frame once updated the texture is updated.
- * But in case of 0-copy, it's needed to have the frame while using the texture.
- */
- frame_current = frame_next;
- texture_updated = SDL_FALSE;
+ /* Create texture with appropriate format */
+ texture = SDL_CreateTexture(renderer, frame_current->format->format, SDL_TEXTUREACCESS_STREAMING, frame_current->w, frame_current->h);
+ if (!texture) {
+ SDL_Log("Couldn't create texture: %s", SDL_GetError());
+ return SDL_APP_FAILURE;
+ }
}
/* Update SDL_Texture with last video frame (only once per new frame) */