author    Andreas Rheinhardt <andreas.rheinhardt@outlook.com>  2024-02-11 19:46:17 +0100
committer Andreas Rheinhardt <andreas.rheinhardt@outlook.com>  2024-03-05 08:59:26 +0100
commit    a62c8558ae1604304ae8ed94e86a98954c015592
tree      67a5013395c46b645f23c828b82cf3d0ffec0d65
parent    44e5749462e0b7f804b1844206047404ee3e7526
avutil/hwcontext_dxva2: Allocate pub and priv frames hwctx together
This is possible because the lifetimes of both coincide. Besides reducing the
number of allocations, this also simplifies access to DXVA2FramesContext, as
one no longer has to go through AVHWFramesInternal.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
-rw-r--r--  libavutil/hwcontext_dxva2.c  24
1 file changed, 14 insertions(+), 10 deletions(-)
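The change relies on a simple struct-embedding pattern. The sketch below is
illustrative only and uses made-up names (PublicFramesContext and
PrivateFramesContext stand in for AVDXVA2FramesContext and DXVA2FramesContext;
the generic hwcontext allocator that would normally perform the allocation is
not shown): by making the public context the first member of the private one,
a single allocation backs both views, and the private fields are reachable
from the public pointer without a second indirection.

/* Minimal, self-contained sketch -- not the FFmpeg code. */
#include <stdlib.h>

typedef struct PublicFramesContext {
    int nb_surfaces;                /* stands in for the public fields */
} PublicFramesContext;

typedef struct PrivateFramesContext {
    PublicFramesContext p;          /* public part must come first     */
    int nb_surfaces_used;           /* private bookkeeping             */
} PrivateFramesContext;

int main(void)
{
    /* One allocation instead of two; both lifetimes coincide by construction. */
    PrivateFramesContext *s = calloc(1, sizeof(*s));
    if (!s)
        return 1;

    PublicFramesContext *pub = &s->p;   /* the public view */
    pub->nb_surfaces    = 4;
    s->nb_surfaces_used = 0;

    /* Because &s->p and s share the same address, code holding only the
     * public pointer can recover the private context with a cast. */
    PrivateFramesContext *priv = (PrivateFramesContext *)pub;
    priv->nb_surfaces_used++;

    free(s);
    return 0;
}

This is also why the patch below replaces the separate .frames_hwctx_size and
.frames_priv_size entries with a single sizeof(DXVA2FramesContext): one size
now covers both the public and the private part.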
diff --git a/libavutil/hwcontext_dxva2.c b/libavutil/hwcontext_dxva2.c
index 2c11f151ff..0922776342 100644
--- a/libavutil/hwcontext_dxva2.c
+++ b/libavutil/hwcontext_dxva2.c
@@ -57,6 +57,11 @@ typedef struct DXVA2Mapping {
} DXVA2Mapping;
typedef struct DXVA2FramesContext {
+ /**
+ * The public AVDXVA2FramesContext. See hwcontext_dxva2.h for it.
+ */
+ AVDXVA2FramesContext p;
+
IDirect3DSurface9 **surfaces_internal;
int nb_surfaces_used;
@@ -99,8 +104,8 @@ DEFINE_GUID(video_processor_service, 0xfc51a552, 0xd5e7, 0x11d9, 0xaf, 0x55, 0x0
static void dxva2_frames_uninit(AVHWFramesContext *ctx)
{
AVDXVA2DeviceContext *device_hwctx = ctx->device_ctx->hwctx;
- AVDXVA2FramesContext *frames_hwctx = ctx->hwctx;
- DXVA2FramesContext *s = ctx->internal->priv;
+ DXVA2FramesContext *s = ctx->hwctx;
+ AVDXVA2FramesContext *frames_hwctx = &s->p;
int i;
if (frames_hwctx->decoder_to_release)
@@ -135,8 +140,8 @@ static void dxva2_pool_release_dummy(void *opaque, uint8_t *data)
static AVBufferRef *dxva2_pool_alloc(void *opaque, size_t size)
{
AVHWFramesContext *ctx = (AVHWFramesContext*)opaque;
- DXVA2FramesContext *s = ctx->internal->priv;
- AVDXVA2FramesContext *hwctx = ctx->hwctx;
+ DXVA2FramesContext *s = ctx->hwctx;
+ AVDXVA2FramesContext *hwctx = &s->p;
if (s->nb_surfaces_used < hwctx->nb_surfaces) {
s->nb_surfaces_used++;
@@ -149,9 +154,9 @@ static AVBufferRef *dxva2_pool_alloc(void *opaque, size_t size)
static int dxva2_init_pool(AVHWFramesContext *ctx)
{
- AVDXVA2FramesContext *frames_hwctx = ctx->hwctx;
AVDXVA2DeviceContext *device_hwctx = ctx->device_ctx->hwctx;
- DXVA2FramesContext *s = ctx->internal->priv;
+ DXVA2FramesContext *s = ctx->hwctx;
+ AVDXVA2FramesContext *frames_hwctx = &s->p;
int decode = (frames_hwctx->surface_type == DXVA2_VideoDecoderRenderTarget);
int i;
@@ -216,8 +221,8 @@ static int dxva2_init_pool(AVHWFramesContext *ctx)
static int dxva2_frames_init(AVHWFramesContext *ctx)
{
- AVDXVA2FramesContext *hwctx = ctx->hwctx;
- DXVA2FramesContext *s = ctx->internal->priv;
+ DXVA2FramesContext *s = ctx->hwctx;
+ AVDXVA2FramesContext *hwctx = &s->p;
int ret;
if (hwctx->surface_type != DXVA2_VideoDecoderRenderTarget &&
@@ -586,8 +591,7 @@ const HWContextType ff_hwcontext_type_dxva2 = {
.name = "DXVA2",
.device_hwctx_size = sizeof(AVDXVA2DeviceContext),
- .frames_hwctx_size = sizeof(AVDXVA2FramesContext),
- .frames_priv_size = sizeof(DXVA2FramesContext),
+ .frames_hwctx_size = sizeof(DXVA2FramesContext),
.device_create = dxva2_device_create,
.frames_init = dxva2_frames_init,