author    Ivan Uskov <ivan.uskov@nablet.com>            2015-07-20 11:07:34 -0400
committer Michael Niedermayer <michael@niedermayer.cc>  2015-07-23 03:48:00 +0200
commit    1acb19d12bcd259c3b2be39fb3149ced5916b56e (patch)
tree      4f6b9ba92ab7378d08f54b2427dcdbdafb5abdef /libavcodec
parent    f467fc02b475cd45b68aa5fb5f7c78286110ba86 (diff)
libavcodec/qsvdec_h264.c: SPS parsing is now performed by MFXVideoDECODE_DecodeHeader() in libavcodec/qsvdec.c
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
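For context, the probing this patch moves into ff_qsv_decode_init() follows the usual libmfx pattern: wrap the first packet in an mfxBitstream and let MFXVideoDECODE_DecodeHeader() fill the mfxVideoParam. The standalone sketch below illustrates that pattern only; the helper name probe_avc_header and the include path are illustrative and depend on the Media SDK installation, they are not part of the patch.

    /* Sketch of SPS probing via MFXVideoDECODE_DecodeHeader(); illustrative,
     * not the patch itself. */
    #include <string.h>
    #include <mfx/mfxvideo.h>

    static mfxStatus probe_avc_header(mfxSession session,
                                      mfxU8 *data, mfxU32 size,
                                      mfxVideoParam *param)
    {
        mfxBitstream bs;

        /* Hand the undecoded packet bytes to libmfx. */
        memset(&bs, 0, sizeof(bs));
        bs.Data       = data;
        bs.DataLength = size;
        bs.MaxLength  = size;

        memset(param, 0, sizeof(*param));
        param->mfx.CodecId = MFX_CODEC_AVC;

        /* Returns MFX_ERR_MORE_DATA until an SPS has been seen; on success the
         * profile, level and frame dimensions in *param are filled in, so the
         * caller can pass the same structure on to MFXVideoDECODE_Init(). */
        return MFXVideoDECODE_DecodeHeader(session, &bs, param);
    }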
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/qsvdec.c       80
-rw-r--r--  libavcodec/qsvdec.h        2
-rw-r--r--  libavcodec/qsvdec_h264.c  90
3 files changed, 58 insertions, 114 deletions
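On the caller side (qsvdec_h264.c in the diff below), the parser-based reinit is dropped and initialization is instead retried on each packet until a header is found. A condensed sketch of that flow, with error paths trimmed; the wrapper name process_data_sketch is illustrative, while the ff_qsv_* calls are the ones this patch changes and the includes assume FFmpeg's source tree:

    /* Condensed sketch of the new lazy-init flow in qsv_process_data(). */
    #include "avcodec.h"
    #include "qsvdec.h"

    static int process_data_sketch(AVCodecContext *avctx, QSVContext *q,
                                   AVFrame *frame, int *got_frame, AVPacket *pkt)
    {
        int ret;

        if (!q->session || avctx->pix_fmt == AV_PIX_FMT_NONE) {
            ret = ff_qsv_decode_init(avctx, q, pkt);
            if (ret == AVERROR(EAGAIN))
                return pkt->size;   /* no SPS yet: consume the packet and wait */
            if (ret < 0)
                return ret;
        }

        return ff_qsv_decode(avctx, q, frame, got_frame, pkt);
    }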
diff --git a/libavcodec/qsvdec.c b/libavcodec/qsvdec.c
index 26467313fc..8b06611f1d 100644
--- a/libavcodec/qsvdec.c
+++ b/libavcodec/qsvdec.c
@@ -49,54 +49,60 @@ int ff_qsv_map_pixfmt(enum AVPixelFormat format)
}
}
-int ff_qsv_decode_init(AVCodecContext *avctx, QSVContext *q)
+int ff_qsv_decode_init(AVCodecContext *avctx, QSVContext *q, AVPacket *avpkt)
{
mfxVideoParam param = { { 0 } };
+ mfxBitstream bs = { { { 0 } } };
int ret;
- q->async_fifo = av_fifo_alloc((1 + q->async_depth) *
- (sizeof(mfxSyncPoint) + sizeof(QSVFrame*)));
- if (!q->async_fifo)
- return AVERROR(ENOMEM);
-
q->iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
+ if (!q->session) {
+ if (avctx->hwaccel_context) {
+ AVQSVContext *qsv = avctx->hwaccel_context;
- if (avctx->hwaccel_context) {
- AVQSVContext *qsv = avctx->hwaccel_context;
+ q->session = qsv->session;
+ q->iopattern = qsv->iopattern;
+ q->ext_buffers = qsv->ext_buffers;
+ q->nb_ext_buffers = qsv->nb_ext_buffers;
+ }
+ if (!q->session) {
+ ret = ff_qsv_init_internal_session(avctx, &q->internal_qs, NULL);
+ if (ret < 0)
+ return ret;
- q->session = qsv->session;
- q->iopattern = qsv->iopattern;
- q->ext_buffers = qsv->ext_buffers;
- q->nb_ext_buffers = qsv->nb_ext_buffers;
+ q->session = q->internal_qs.session;
+ }
}
- if (!q->session) {
- ret = ff_qsv_init_internal_session(avctx, &q->internal_qs, NULL);
- if (ret < 0)
- return ret;
- q->session = q->internal_qs.session;
- }
+ if (avpkt->size) {
+ bs.Data = avpkt->data;
+ bs.DataLength = avpkt->size;
+ bs.MaxLength = bs.DataLength;
+ bs.TimeStamp = avpkt->pts;
+ } else
+ return AVERROR_INVALIDDATA;
ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
- if (ret < 0)
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Unsupported codec_id %08x\n", avctx->codec_id);
return ret;
+ }
- param.mfx.CodecId = ret;
- param.mfx.CodecProfile = avctx->profile;
- param.mfx.CodecLevel = avctx->level;
-
- param.mfx.FrameInfo.BitDepthLuma = 8;
- param.mfx.FrameInfo.BitDepthChroma = 8;
- param.mfx.FrameInfo.Shift = 0;
- param.mfx.FrameInfo.FourCC = MFX_FOURCC_NV12;
- param.mfx.FrameInfo.Width = avctx->coded_width;
- param.mfx.FrameInfo.Height = avctx->coded_height;
- param.mfx.FrameInfo.ChromaFormat = MFX_CHROMAFORMAT_YUV420;
+ param.mfx.CodecId = ret;
+ ret = MFXVideoDECODE_DecodeHeader(q->session, &bs, &param);
+ if (MFX_ERR_MORE_DATA==ret) {
+ return AVERROR(EAGAIN);
+ } else if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Decode header error %d\n", ret);
+ return ff_qsv_error(ret);
+ }
param.IOPattern = q->iopattern;
param.AsyncDepth = q->async_depth;
param.ExtParam = q->ext_buffers;
param.NumExtParam = q->nb_ext_buffers;
+ param.mfx.FrameInfo.BitDepthLuma = 8;
+ param.mfx.FrameInfo.BitDepthChroma = 8;
ret = MFXVideoDECODE_Init(q->session, &param);
if (ret < 0) {
@@ -104,6 +110,20 @@ int ff_qsv_decode_init(AVCodecContext *avctx, QSVContext *q)
return ff_qsv_error(ret);
}
+ avctx->pix_fmt = AV_PIX_FMT_NV12;
+ avctx->profile = param.mfx.CodecProfile;
+ avctx->level = param.mfx.CodecLevel;
+ avctx->coded_width = param.mfx.FrameInfo.Width;
+ avctx->coded_height = param.mfx.FrameInfo.Height;
+ avctx->width = param.mfx.FrameInfo.CropW - param.mfx.FrameInfo.CropX;
+ avctx->height = param.mfx.FrameInfo.CropH - param.mfx.FrameInfo.CropY;
+
+ q->async_fifo = av_fifo_alloc((1 + q->async_depth) *
+ (sizeof(mfxSyncPoint) + sizeof(QSVFrame*)));
+ if (!q->async_fifo)
+ return AVERROR(ENOMEM);
+
+
return 0;
}
diff --git a/libavcodec/qsvdec.h b/libavcodec/qsvdec.h
index 5b40bb1734..4d3c50563d 100644
--- a/libavcodec/qsvdec.h
+++ b/libavcodec/qsvdec.h
@@ -60,7 +60,7 @@ typedef struct QSVContext {
int ff_qsv_map_pixfmt(enum AVPixelFormat format);
-int ff_qsv_decode_init(AVCodecContext *s, QSVContext *q);
+int ff_qsv_decode_init(AVCodecContext *s, QSVContext *q, AVPacket *avpkt);
int ff_qsv_decode(AVCodecContext *s, QSVContext *q,
AVFrame *frame, int *got_frame,
diff --git a/libavcodec/qsvdec_h264.c b/libavcodec/qsvdec_h264.c
index 1e9dff13c1..8b3f916189 100644
--- a/libavcodec/qsvdec_h264.c
+++ b/libavcodec/qsvdec_h264.c
@@ -39,11 +39,6 @@ typedef struct QSVH264Context {
AVClass *class;
QSVContext qsv;
- // the internal parser and codec context for parsing the data
- AVCodecParserContext *parser;
- AVCodecContext *avctx_internal;
- enum AVPixelFormat orig_pix_fmt;
-
// the filter for converting to Annex B
AVBitStreamFilterContext *bsf;
@@ -79,8 +74,6 @@ static av_cold int qsv_decode_close(AVCodecContext *avctx)
av_fifo_free(s->packet_fifo);
av_bitstream_filter_close(s->bsf);
- av_parser_close(s->parser);
- avcodec_free_context(&s->avctx_internal);
return 0;
}
@@ -90,8 +83,6 @@ static av_cold int qsv_decode_init(AVCodecContext *avctx)
QSVH264Context *s = avctx->priv_data;
int ret;
- s->orig_pix_fmt = AV_PIX_FMT_NONE;
-
s->packet_fifo = av_fifo_alloc(sizeof(AVPacket));
if (!s->packet_fifo) {
ret = AVERROR(ENOMEM);
@@ -104,30 +95,6 @@ static av_cold int qsv_decode_init(AVCodecContext *avctx)
goto fail;
}
- s->avctx_internal = avcodec_alloc_context3(NULL);
- if (!s->avctx_internal) {
- ret = AVERROR(ENOMEM);
- goto fail;
- }
-
- if (avctx->extradata) {
- s->avctx_internal->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
- if (!s->avctx_internal->extradata) {
- ret = AVERROR(ENOMEM);
- goto fail;
- }
- memcpy(s->avctx_internal->extradata, avctx->extradata,
- avctx->extradata_size);
- s->avctx_internal->extradata_size = avctx->extradata_size;
- }
-
- s->parser = av_parser_init(AV_CODEC_ID_H264);
- if (!s->parser) {
- ret = AVERROR(ENOMEM);
- goto fail;
- }
- s->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
-
return 0;
fail:
qsv_decode_close(avctx);
@@ -138,60 +105,18 @@ static int qsv_process_data(AVCodecContext *avctx, AVFrame *frame,
int *got_frame, AVPacket *pkt)
{
QSVH264Context *s = avctx->priv_data;
- uint8_t *dummy_data;
- int dummy_size;
int ret;
- /* we assume the packets are already split properly and want
- * just the codec parameters here */
- av_parser_parse2(s->parser, s->avctx_internal,
- &dummy_data, &dummy_size,
- pkt->data, pkt->size, pkt->pts, pkt->dts,
- pkt->pos);
-
- /* TODO: flush delayed frames on reinit */
- if (s->parser->format != s->orig_pix_fmt ||
- s->parser->coded_width != avctx->coded_width ||
- s->parser->coded_height != avctx->coded_height) {
-
- enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_QSV,
- AV_PIX_FMT_NONE,
- AV_PIX_FMT_NONE };
- enum AVPixelFormat qsv_format;
-
- qsv_format = ff_qsv_map_pixfmt(s->parser->format);
- if (qsv_format < 0) {
- av_log(avctx, AV_LOG_ERROR,
- "Only 8-bit YUV420 streams are supported.\n");
- ret = AVERROR(ENOSYS);
- goto reinit_fail;
- }
-
- s->orig_pix_fmt = s->parser->format;
- avctx->pix_fmt = pix_fmts[1] = qsv_format;
- avctx->width = s->parser->width;
- avctx->height = s->parser->height;
- avctx->coded_width = s->parser->coded_width;
- avctx->coded_height = s->parser->coded_height;
- avctx->level = s->avctx_internal->level;
- avctx->profile = s->avctx_internal->profile;
-
- ret = ff_get_format(avctx, pix_fmts);
+ if (!s->qsv.session || AV_PIX_FMT_NONE==avctx->pix_fmt) {
+ ret = ff_qsv_decode_init(avctx, &s->qsv, pkt);
+ /* consume packet without a header */
+ if (AVERROR(EAGAIN)==ret)
+ return pkt->size;
if (ret < 0)
- goto reinit_fail;
-
- avctx->pix_fmt = ret;
-
- ret = ff_qsv_decode_init(avctx, &s->qsv);
- if (ret < 0)
- goto reinit_fail;
+ return ret;
}
- return ff_qsv_decode(avctx, &s->qsv, frame, got_frame, &s->pkt_filtered);
-
-reinit_fail:
- s->orig_pix_fmt = s->parser->format = avctx->pix_fmt = AV_PIX_FMT_NONE;
- return ret;
+ return ff_qsv_decode(avctx, &s->qsv, frame, got_frame, pkt);
}
static int qsv_decode_frame(AVCodecContext *avctx, void *data,
@@ -262,7 +187,6 @@ static void qsv_decode_flush(AVCodecContext *avctx)
QSVH264Context *s = avctx->priv_data;
qsv_clear_buffers(s);
- s->orig_pix_fmt = AV_PIX_FMT_NONE;
}
AVHWAccel ff_h264_qsv_hwaccel = {