Diffstat (limited to 'libavcodec/libdav1d.c')
-rw-r--r--  libavcodec/libdav1d.c  90
1 file changed, 29 insertions(+), 61 deletions(-)
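
The patch touches two user-visible knobs, both declared in libdav1d_options further down: tilethreads (now capped at DAV1D_MAX_TILE_THREADS) and a new filmgrain toggle. A hedged sketch of how such private decoder options are typically passed through the generic libavcodec API follows; only the option names and ranges come from this patch, the helper function and the chosen values are illustrative:

#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

/* Open the libdav1d decoder with the private options this patch defines.
 * Keys that the decoder does not recognize are left behind in `opts`,
 * so a caller could inspect the dictionary afterwards to detect typos. */
static AVCodecContext *open_libdav1d(void)
{
    const AVCodec *codec = avcodec_find_decoder_by_name("libdav1d");
    AVCodecContext *avctx;
    AVDictionary *opts = NULL;

    if (!codec)
        return NULL;

    avctx = avcodec_alloc_context3(codec);
    if (!avctx)
        return NULL;

    av_dict_set(&opts, "tilethreads", "4", 0); /* 1..DAV1D_MAX_TILE_THREADS after this patch */
    av_dict_set(&opts, "filmgrain",   "0", 0); /* new option: disable film grain synthesis */

    if (avcodec_open2(avctx, codec, &opts) < 0) {
        avcodec_free_context(&avctx);
        avctx = NULL;
    }

    av_dict_free(&opts);
    return avctx;
}
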
diff --git a/libavcodec/libdav1d.c b/libavcodec/libdav1d.c
index 3501c15e22..99390d527c 100644
--- a/libavcodec/libdav1d.c
+++ b/libavcodec/libdav1d.c
@@ -22,7 +22,6 @@
#include <dav1d/dav1d.h>
#include "libavutil/avassert.h"
-#include "libavutil/fifo.h"
#include "libavutil/common.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
@@ -35,9 +34,9 @@ typedef struct Libdav1dContext {
AVClass *class;
Dav1dContext *c;
- AVFifoBuffer *cache;
Dav1dData data;
int tile_threads;
+ int apply_grain;
} Libdav1dContext;
static av_cold int libdav1d_init(AVCodecContext *c)
@@ -50,11 +49,8 @@ static av_cold int libdav1d_init(AVCodecContext *c)
dav1d_default_settings(&s);
s.n_tile_threads = dav1d->tile_threads;
- s.n_frame_threads = FFMIN(c->thread_count ? c->thread_count : av_cpu_count(), 256);
-
- dav1d->cache = av_fifo_alloc(8 * sizeof(AVPacket));
- if (!dav1d->cache)
- return AVERROR(ENOMEM);
+ s.apply_grain = dav1d->apply_grain;
+ s.n_frame_threads = FFMIN(c->thread_count ? c->thread_count : av_cpu_count(), DAV1D_MAX_FRAME_THREADS);
res = dav1d_open(&dav1d->c, &s);
if (res < 0)
@@ -67,23 +63,10 @@ static void libdav1d_flush(AVCodecContext *c)
{
Libdav1dContext *dav1d = c->priv_data;
- av_fifo_reset(dav1d->cache);
dav1d_data_unref(&dav1d->data);
dav1d_flush(dav1d->c);
}
-static int libdav1d_fifo_write(void *src, void *dst, int dst_size) {
- AVPacket *pkt_dst = dst, *pkt_src = src;
-
- av_assert2(dst_size >= sizeof(AVPacket));
-
- pkt_src->buf = NULL;
- av_packet_free_side_data(pkt_src);
- *pkt_dst = *pkt_src;
-
- return sizeof(AVPacket);
-}
-
static void libdav1d_data_free(const uint8_t *data, void *opaque) {
AVBufferRef *buf = opaque;
@@ -98,50 +81,38 @@ static void libdav1d_frame_free(void *opaque, uint8_t *data) {
dav1d_picture_unref(&p);
}
-static const enum AVPixelFormat pix_fmt[][2] = {
- [DAV1D_PIXEL_LAYOUT_I400] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10 },
- [DAV1D_PIXEL_LAYOUT_I420] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV420P10 },
- [DAV1D_PIXEL_LAYOUT_I422] = { AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV422P10 },
- [DAV1D_PIXEL_LAYOUT_I444] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV444P10 },
-};
-
-// TODO: Update once 12bit support is added.
-static const int profile[] = {
- [DAV1D_PIXEL_LAYOUT_I400] = FF_PROFILE_AV1_MAIN,
- [DAV1D_PIXEL_LAYOUT_I420] = FF_PROFILE_AV1_MAIN,
- [DAV1D_PIXEL_LAYOUT_I422] = FF_PROFILE_AV1_PROFESSIONAL,
- [DAV1D_PIXEL_LAYOUT_I444] = FF_PROFILE_AV1_HIGH,
+static const enum AVPixelFormat pix_fmt[][3] = {
+ [DAV1D_PIXEL_LAYOUT_I400] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12 },
+ [DAV1D_PIXEL_LAYOUT_I420] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12 },
+ [DAV1D_PIXEL_LAYOUT_I422] = { AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12 },
+ [DAV1D_PIXEL_LAYOUT_I444] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12 },
};
static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
{
Libdav1dContext *dav1d = c->priv_data;
Dav1dData *data = &dav1d->data;
- AVPacket pkt = { 0 };
Dav1dPicture p = { 0 };
int res;
if (!data->sz) {
+ AVPacket pkt = { 0 };
+
res = ff_decode_get_packet(c, &pkt);
if (res < 0 && res != AVERROR_EOF)
return res;
if (pkt.size) {
- if (!av_fifo_space(dav1d->cache)) {
- res = av_fifo_realloc2(dav1d->cache, av_fifo_size(dav1d->cache) + 8 * sizeof(pkt));
- if (res < 0) {
- av_packet_unref(&pkt);
- return res;
- }
- }
-
res = dav1d_data_wrap(data, pkt.data, pkt.size, libdav1d_data_free, pkt.buf);
if (res < 0) {
av_packet_unref(&pkt);
return res;
}
- av_fifo_generic_write(dav1d->cache, &pkt, sizeof(pkt), libdav1d_fifo_write);
+ data->m.timestamp = pkt.pts;
+
+ pkt.buf = NULL;
+ av_packet_unref(&pkt);
}
}
@@ -165,8 +136,6 @@ static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
av_assert0(p.data[0] != NULL);
- av_fifo_generic_read(dav1d->cache, &pkt, sizeof(pkt), NULL);
-
frame->buf[0] = av_buffer_create(NULL, 0, libdav1d_frame_free,
p.ref, AV_BUFFER_FLAG_READONLY);
if (!frame->buf[0]) {
@@ -181,8 +150,8 @@ static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
frame->linesize[1] = p.stride[1];
frame->linesize[2] = p.stride[1];
- c->profile = profile[p.p.layout];
- frame->format = c->pix_fmt = pix_fmt[p.p.layout][p.p.bpc == 10];
+ c->profile = p.seq_hdr->profile;
+ frame->format = c->pix_fmt = pix_fmt[p.p.layout][p.seq_hdr->hbd];
frame->width = p.p.w;
frame->height = p.p.h;
if (c->width != p.p.w || c->height != p.p.h) {
@@ -191,7 +160,7 @@ static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
return res;
}
- switch (p.p.chr) {
+ switch (p.seq_hdr->chr) {
case DAV1D_CHR_VERTICAL:
frame->chroma_location = c->chroma_sample_location = AVCHROMA_LOC_LEFT;
break;
@@ -199,22 +168,22 @@ static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
frame->chroma_location = c->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
break;
}
- frame->colorspace = c->colorspace = (enum AVColorSpace) p.p.mtrx;
- frame->color_primaries = c->color_primaries = (enum AVColorPrimaries) p.p.pri;
- frame->color_trc = c->color_trc = (enum AVColorTransferCharacteristic) p.p.trc;
- frame->color_range = c->color_range = p.p.fullrange ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
+ frame->colorspace = c->colorspace = (enum AVColorSpace) p.seq_hdr->mtrx;
+ frame->color_primaries = c->color_primaries = (enum AVColorPrimaries) p.seq_hdr->pri;
+ frame->color_trc = c->color_trc = (enum AVColorTransferCharacteristic) p.seq_hdr->trc;
+ frame->color_range = c->color_range = p.seq_hdr->color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
// match timestamps and packet size
- frame->pts = pkt.pts;
+ frame->pts = p.m.timestamp;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
- frame->pkt_pts = pkt.pts;
+ frame->pkt_pts = p.m.timestamp;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
- frame->pkt_dts = pkt.dts;
- frame->key_frame = p.p.type == DAV1D_FRAME_TYPE_KEY;
+ frame->pkt_dts = p.m.timestamp;
+ frame->key_frame = p.frame_hdr->frame_type == DAV1D_FRAME_TYPE_KEY;
- switch (p.p.type) {
+ switch (p.frame_hdr->frame_type) {
case DAV1D_FRAME_TYPE_KEY:
case DAV1D_FRAME_TYPE_INTRA:
frame->pict_type = AV_PICTURE_TYPE_I;
@@ -236,7 +205,6 @@ static av_cold int libdav1d_close(AVCodecContext *c)
{
Libdav1dContext *dav1d = c->priv_data;
- av_fifo_free(dav1d->cache);
dav1d_data_unref(&dav1d->data);
dav1d_close(&dav1d->c);
@@ -246,7 +214,8 @@ static av_cold int libdav1d_close(AVCodecContext *c)
#define OFFSET(x) offsetof(Libdav1dContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption libdav1d_options[] = {
- { "tilethreads", "Tile threads", OFFSET(tile_threads), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 64, VD, NULL },
+ { "tilethreads", "Tile threads", OFFSET(tile_threads), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, DAV1D_MAX_TILE_THREADS, VD },
+ { "filmgrain", "Apply Film Grain", OFFSET(apply_grain), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, VD },
{ NULL }
};
@@ -268,8 +237,7 @@ AVCodec ff_libdav1d_decoder = {
.flush = libdav1d_flush,
.receive_frame = libdav1d_receive_frame,
.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS,
- .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP |
- FF_CODEC_CAP_SETS_PKT_DTS,
+ .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_SETS_PKT_DTS,
.priv_class = &libdav1d_class,
.wrapper_name = "libdav1d",
};
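
Taken together, the receive_frame changes replace the AVPacket FIFO with dav1d's own metadata passthrough: the packet's pts is stored into Dav1dData.m.timestamp when the bitstream buffer is wrapped, and the decoder hands it back in Dav1dPicture.m.timestamp, so no side queue has to stay in sync with the decoder's reordering delay. Below is a minimal standalone sketch of that round trip, assuming the pre-1.0 Dav1dSettings fields used above (n_tile_threads, n_frame_threads, apply_grain); the zero-filled obu buffer, free_cb and the timestamp value are placeholders, and a real AV1 temporal unit would be needed before dav1d_get_picture() actually returns a frame:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dav1d/dav1d.h>

/* Illustrative free callback: dav1d invokes it once it no longer needs the
 * wrapped buffer. The cookie is whatever was passed to dav1d_data_wrap();
 * here it is the malloc'd buffer itself. */
static void free_cb(const uint8_t *data, void *cookie)
{
    free(cookie);
}

int main(void)
{
    Dav1dSettings s;
    Dav1dContext *ctx = NULL;
    Dav1dData data = { 0 };
    Dav1dPicture pic = { 0 };
    int res;

    /* Same settings the patch configures: tile/frame threads and film grain. */
    dav1d_default_settings(&s);
    s.n_tile_threads  = 1;
    s.n_frame_threads = 1;
    s.apply_grain     = 1;

    if (dav1d_open(&ctx, &s) < 0)
        return 1;

    /* obu/obu_size stand in for one temporal unit of AV1 OBUs
     * (in the wrapper this comes from ff_decode_get_packet()). */
    size_t obu_size = 1024;
    uint8_t *obu = malloc(obu_size);
    if (!obu) {
        dav1d_close(&ctx);
        return 1;
    }
    memset(obu, 0, obu_size);

    /* Wrap the buffer without copying and attach the timestamp,
     * exactly as the patch does with pkt.data / pkt.pts. */
    res = dav1d_data_wrap(&data, obu, obu_size, free_cb, obu);
    if (res < 0) {
        free(obu);
        dav1d_close(&ctx);
        return 1;
    }
    data.m.timestamp = 42; /* pkt.pts in the wrapper */

    res = dav1d_send_data(ctx, &data);
    if (res < 0)
        fprintf(stderr, "dav1d_send_data failed: %d\n", res);

    /* dav1d copies Dav1dData.m into Dav1dPicture.m, so the timestamp set
     * above comes back attached to the decoded frame. */
    res = dav1d_get_picture(ctx, &pic);
    if (res == 0) {
        printf("decoded %dx%d, timestamp %" PRId64 "\n",
               pic.p.w, pic.p.h, pic.m.timestamp);
        dav1d_picture_unref(&pic);
    }

    /* Mirrors libdav1d_close(): dropping any remaining data reference is safe. */
    dav1d_data_unref(&data);
    dav1d_close(&ctx);
    return 0;
}

Dropping the FIFO also removes libdav1d_fifo_write() and the realloc bookkeeping on every packet, which is why the hunks above are mostly deletions.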