diff options
Diffstat (limited to 'libavcodec/mpeg12dec.c')
-rw-r--r-- | libavcodec/mpeg12dec.c | 776 |
1 file changed, 550 insertions, 226 deletions
diff --git a/libavcodec/mpeg12dec.c b/libavcodec/mpeg12dec.c index 532934ccba..83e537884b 100644 --- a/libavcodec/mpeg12dec.c +++ b/libavcodec/mpeg12dec.c @@ -1,22 +1,22 @@ /* * MPEG-1/2 decoder * Copyright (c) 2000, 2001 Fabrice Bellard - * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> + * Copyright (c) 2002-2013 Michael Niedermayer <michaelni@gmx.at> * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -25,9 +25,11 @@ * MPEG-1/2 decoder */ +#define UNCHECKED_BITSTREAM_READER 1 #include <inttypes.h> #include "libavutil/attributes.h" +#include "libavutil/imgutils.h" #include "libavutil/internal.h" #include "libavutil/stereo3d.h" @@ -46,6 +48,7 @@ #include "profiles.h" #include "thread.h" #include "version.h" +#include "xvmc_internal.h" typedef struct Mpeg1Context { MpegEncContext mpeg_enc_ctx; @@ -59,11 +62,11 @@ typedef struct Mpeg1Context { uint8_t afd; int has_afd; int slice_count; - int save_aspect_info; + AVRational save_aspect; int save_width, save_height, save_progressive_seq; AVRational frame_rate_ext; /* MPEG-2 specific framerate modificator */ int sync; /* Did we reach a sync point like a GOP/SEQ/KEYFrame? */ - int closed_gop; /* GOP is closed */ + int tmpgexs; int first_slice; int extradata_decoded; } Mpeg1Context; @@ -94,13 +97,6 @@ static const uint32_t btype2mb_type[11] = { MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_CBP, }; -static const uint8_t non_linear_qscale[32] = { - 0, 1, 2, 3, 4, 5, 6, 7, - 8, 10, 12, 14, 16, 18, 20, 22, - 24, 28, 32, 36, 40, 44, 48, 52, - 56, 64, 72, 80, 88, 96, 104, 112, -}; - /* as H.263, but only 17 codes */ static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred) { @@ -223,6 +219,11 @@ end: return 0; } +/** + * Note: this function can read out of range and crash for corrupt streams. + * Changing this would eat up any speed benefits it has. + * Do not use "fast" flag if you need the code to be robust. + */ static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, int16_t *block, int n) { @@ -394,6 +395,11 @@ end: return 0; } +/** + * Note: this function can read out of range and crash for corrupt streams. 
+ * Changing this would eat up any speed benefits it has. + * Do not use "fast" flag if you need the code to be robust. + */ static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n) { @@ -451,8 +457,9 @@ static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, } block[j] = level; - if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF) + if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF || i > 63) break; + UPDATE_CACHE(re, &s->gb); } end: @@ -490,8 +497,8 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, dc = s->last_dc[component]; dc += diff; s->last_dc[component] = dc; - block[0] = dc << (3 - s->intra_dc_precision); - ff_dlog(s->avctx, "dc=%d\n", block[0]); + block[0] = dc * (1 << (3 - s->intra_dc_precision)); + ff_tlog(s->avctx, "dc=%d\n", block[0]); mismatch = block[0] ^ 1; i = 0; if (s->intra_vlc_format) @@ -550,6 +557,11 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, return 0; } +/** + * Note: this function can read out of range and crash for corrupt streams. + * Changing this would eat up any speed benefits it has. + * Do not use "fast" flag if you need the code to be robust. 
+ */ static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, int16_t *block, int n) { @@ -589,12 +601,10 @@ static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); - if (level == 127) { + if (level >= 64 || i > 63) { break; } else if (level != 0) { i += run; - if (i > MAX_INDEX) - break; j = scantable[i]; level = (level * qscale * quant_matrix[j]) >> 4; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - @@ -608,8 +618,6 @@ static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12); i += run; - if (i > MAX_INDEX) - break; j = scantable[i]; if (level < 0) { level = (-level * qscale * quant_matrix[j]) >> 4; @@ -641,15 +649,6 @@ static inline int get_dmv(MpegEncContext *s) return 0; } -static inline int get_qscale(MpegEncContext *s) -{ - int qscale = get_bits(&s->gb, 5); - if (s->q_scale_type) - return non_linear_qscale[qscale]; - else - return qscale << 1; -} - /* motion type (for MPEG-2) */ #define MT_FIELD 1 #define MT_FRAME 2 @@ -662,9 +661,9 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64]) const int mb_block_count = 4 + (1 << s->chroma_format); int ret; - ff_dlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y); + ff_tlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y); - assert(s->mb_skipped == 0); + av_assert2(s->mb_skipped == 0); if (s->mb_skip_run-- != 0) { if (s->pict_type == AV_PICTURE_TYPE_P) { @@ -679,11 +678,12 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64]) else // FIXME not sure if this is allowed in MPEG at all mb_type = s->current_picture.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1]; - if (IS_INTRA(mb_type)) + if (IS_INTRA(mb_type)) { + av_log(s->avctx, AV_LOG_ERROR, "skip with previntra\n"); return AVERROR_INVALIDDATA; + } s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type | MB_TYPE_SKIP; -// 
assert(s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1] & (MB_TYPE_16x16 | MB_TYPE_16x8)); if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0) s->mb_skipped = 1; @@ -726,7 +726,7 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64]) mb_type = btype2mb_type[mb_type]; break; } - ff_dlog(s->avctx, "mb_type=%x\n", mb_type); + ff_tlog(s->avctx, "mb_type=%x\n", mb_type); // motion_type = 0; /* avoid warning */ if (IS_INTRA(mb_type)) { s->bdsp.clear_blocks(s->block[0]); @@ -741,7 +741,7 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64]) s->interlaced_dct = get_bits1(&s->gb); if (IS_QUANT(mb_type)) - s->qscale = get_qscale(s); + s->qscale = mpeg_get_qscale(s); if (s->concealment_motion_vectors) { /* just parse them */ @@ -757,12 +757,15 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64]) s->last_mv[0][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[0][1], s->last_mv[0][0][1]); - skip_bits1(&s->gb); /* marker */ + check_marker(s->avctx, &s->gb, "after concealment_motion_vectors"); } else { /* reset mv prediction */ memset(s->last_mv, 0, sizeof(s->last_mv)); } s->mb_intra = 1; + // if 1, we memcpy blocks in xvmcvideo + if ((CONFIG_MPEG1_XVMC_HWACCEL || CONFIG_MPEG2_XVMC_HWACCEL) && s->pack_pblocks) + ff_xvmc_pack_pblocks(s, -1); // inter are always full blocks if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) { if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) { @@ -791,11 +794,12 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64]) } } else { if (mb_type & MB_TYPE_ZERO_MV) { - assert(mb_type & MB_TYPE_CBP); + av_assert2(mb_type & MB_TYPE_CBP); s->mv_dir = MV_DIR_FORWARD; if (s->picture_structure == PICT_FRAME) { - if (!s->frame_pred_frame_dct) + if (s->picture_structure == PICT_FRAME + && !s->frame_pred_frame_dct) s->interlaced_dct = get_bits1(&s->gb); s->mv_type = MV_TYPE_16X16; } else { @@ -805,7 +809,7 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64]) 
} if (IS_QUANT(mb_type)) - s->qscale = get_qscale(s); + s->qscale = mpeg_get_qscale(s); s->last_mv[0][0][0] = 0; s->last_mv[0][0][1] = 0; @@ -814,10 +818,10 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64]) s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; } else { - assert(mb_type & MB_TYPE_L0L1); + av_assert2(mb_type & MB_TYPE_L0L1); // FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED /* get additional motion vector type */ - if (s->frame_pred_frame_dct) { + if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct) { motion_type = MT_FRAME; } else { motion_type = get_bits(&s->gb, 2); @@ -826,11 +830,11 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64]) } if (IS_QUANT(mb_type)) - s->qscale = get_qscale(s); + s->qscale = mpeg_get_qscale(s); /* motion vectors */ s->mv_dir = (mb_type >> 13) & 3; - ff_dlog(s->avctx, "motion_type=%d\n", motion_type); + ff_tlog(s->avctx, "motion_type=%d\n", motion_type); switch (motion_type) { case MT_FRAME: /* or MT_16X8 */ if (s->picture_structure == PICT_FRAME) { @@ -851,8 +855,8 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64]) s->last_mv[i][0][1]); /* full_pel: only for MPEG-1 */ if (s->full_pel[i]) { - s->mv[i][0][0] <<= 1; - s->mv[i][0][1] <<= 1; + s->mv[i][0][0] *= 2; + s->mv[i][0][1] *= 2; } } } @@ -887,16 +891,17 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64]) s->last_mv[i][j][0]); s->last_mv[i][j][0] = val; s->mv[i][j][0] = val; - ff_dlog(s->avctx, "fmx=%d\n", val); + ff_tlog(s->avctx, "fmx=%d\n", val); val = mpeg_decode_motion(s, s->mpeg_f_code[i][1], s->last_mv[i][j][1] >> 1); - s->last_mv[i][j][1] = val << 1; + s->last_mv[i][j][1] = 2 * val; s->mv[i][j][1] = val; - ff_dlog(s->avctx, "fmy=%d\n", val); + ff_tlog(s->avctx, "fmy=%d\n", val); } } } } else { + av_assert0(!s->progressive_sequence); mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED; for (i = 0; i < 2; i++) { if (USES_LIST(mb_type, i)) { @@ -913,6 +918,10 @@ static int 
mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64]) } break; case MT_DMV: + if (s->progressive_sequence){ + av_log(s->avctx, AV_LOG_ERROR, "MT_DMV in progressive_sequence\n"); + return AVERROR_INVALIDDATA; + } s->mv_type = MV_TYPE_DMV; for (i = 0; i < 2; i++) { if (USES_LIST(mb_type, i)) { @@ -929,8 +938,8 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64]) dmy = get_dmv(s); - s->last_mv[i][0][1] = my << my_shift; - s->last_mv[i][1][1] = my << my_shift; + s->last_mv[i][0][1] = my * (1 << my_shift); + s->last_mv[i][1][1] = my * (1 << my_shift); s->mv[i][0][0] = mx; s->mv[i][0][1] = my; @@ -975,16 +984,20 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64]) cbp = get_vlc2(&s->gb, ff_mb_pat_vlc.table, MB_PAT_VLC_BITS, 1); if (mb_block_count > 6) { - cbp <<= mb_block_count - 6; + cbp *= 1 << mb_block_count - 6; cbp |= get_bits(&s->gb, mb_block_count - 6); s->bdsp.clear_blocks(s->block[6]); } if (cbp <= 0) { av_log(s->avctx, AV_LOG_ERROR, - "invalid cbp at %d %d\n", s->mb_x, s->mb_y); + "invalid cbp %d at %d %d\n", cbp, s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } + // if 1, we memcpy blocks in xvmcvideo + if ((CONFIG_MPEG1_XVMC_HWACCEL || CONFIG_MPEG2_XVMC_HWACCEL) && s->pack_pblocks) + ff_xvmc_pack_pblocks(s, cbp); + if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) { if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) { for (i = 0; i < 6; i++) { @@ -1046,6 +1059,11 @@ static av_cold int mpeg_decode_init(AVCodecContext *avctx) ff_mpv_decode_defaults(s2); + if ( avctx->codec_tag != AV_RL32("VCR2") + && avctx->codec_tag != AV_RL32("BW10")) + avctx->coded_width = avctx->coded_height = 0; // do not trust dimensions from input + ff_mpv_decode_init(s2, avctx); + s->mpeg_enc_ctx.avctx = avctx; /* we need some permutation to store matrices, @@ -1054,18 +1072,16 @@ static av_cold int mpeg_decode_init(AVCodecContext *avctx) ff_mpeg12_common_init(&s->mpeg_enc_ctx); ff_mpeg12_init_vlcs(); + s2->chroma_format = 1; s->mpeg_enc_ctx_allocated = 0; 
s->mpeg_enc_ctx.picture_number = 0; s->repeat_field = 0; s->mpeg_enc_ctx.codec_id = avctx->codec->id; avctx->color_range = AVCOL_RANGE_MPEG; - if (avctx->codec->id == AV_CODEC_ID_MPEG1VIDEO) - avctx->chroma_sample_location = AVCHROMA_LOC_CENTER; - else - avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; return 0; } +#if HAVE_THREADS static int mpeg_decode_update_thread_context(AVCodecContext *avctx, const AVCodecContext *avctx_from) { @@ -1082,17 +1098,15 @@ static int mpeg_decode_update_thread_context(AVCodecContext *avctx, if (err) return err; - if (!ctx->mpeg_enc_ctx_allocated) { - // copy the whole context after the initial MpegEncContext structure - memcpy(ctx, ctx_from, sizeof(*ctx)); - memset(&ctx->mpeg_enc_ctx, 0, sizeof(ctx->mpeg_enc_ctx)); - } + if (!ctx->mpeg_enc_ctx_allocated) + memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext)); if (!(s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)) s->picture_number++; return 0; } +#endif static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm, const uint8_t *new_perm) @@ -1106,7 +1120,30 @@ static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm, matrix[new_perm[i]] = temp_matrix[old_perm[i]]; } -static const enum AVPixelFormat mpeg12_hwaccel_pixfmt_list_420[] = { +static const enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[] = { +#if CONFIG_MPEG1_NVDEC_HWACCEL + AV_PIX_FMT_CUDA, +#endif +#if CONFIG_MPEG1_XVMC_HWACCEL + AV_PIX_FMT_XVMC, +#endif +#if CONFIG_MPEG1_VDPAU_HWACCEL + AV_PIX_FMT_VDPAU, +#endif + AV_PIX_FMT_YUV420P, + AV_PIX_FMT_NONE +}; + +static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = { +#if CONFIG_MPEG2_NVDEC_HWACCEL + AV_PIX_FMT_CUDA, +#endif +#if CONFIG_MPEG2_XVMC_HWACCEL + AV_PIX_FMT_XVMC, +#endif +#if CONFIG_MPEG2_VDPAU_HWACCEL + AV_PIX_FMT_VDPAU, +#endif #if CONFIG_MPEG2_DXVA2_HWACCEL AV_PIX_FMT_DXVA2_VLD, #endif @@ -1117,8 +1154,8 @@ static const enum AVPixelFormat mpeg12_hwaccel_pixfmt_list_420[] = { #if 
CONFIG_MPEG2_VAAPI_HWACCEL AV_PIX_FMT_VAAPI, #endif -#if CONFIG_MPEG1_VDPAU_HWACCEL | CONFIG_MPEG2_VDPAU_HWACCEL - AV_PIX_FMT_VDPAU, +#if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL + AV_PIX_FMT_VIDEOTOOLBOX, #endif AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE @@ -1140,14 +1177,34 @@ static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx) MpegEncContext *s = &s1->mpeg_enc_ctx; const enum AVPixelFormat *pix_fmts; + if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY)) + return AV_PIX_FMT_GRAY8; + if (s->chroma_format < 2) - pix_fmts = mpeg12_hwaccel_pixfmt_list_420; + pix_fmts = avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ? + mpeg1_hwaccel_pixfmt_list_420 : + mpeg2_hwaccel_pixfmt_list_420; else if (s->chroma_format == 2) pix_fmts = mpeg12_pixfmt_list_422; else pix_fmts = mpeg12_pixfmt_list_444; - return ff_get_format(avctx, pix_fmts); + return ff_thread_get_format(avctx, pix_fmts); +} + +static void setup_hwaccel_for_pixfmt(AVCodecContext *avctx) +{ + // until then pix_fmt may be changed right after codec init + if (avctx->hwaccel) + if (avctx->idct_algo == FF_IDCT_AUTO) + avctx->idct_algo = FF_IDCT_NONE; + + if (avctx->hwaccel && avctx->pix_fmt == AV_PIX_FMT_XVMC) { + Mpeg1Context *s1 = avctx->priv_data; + MpegEncContext *s = &s1->mpeg_enc_ctx; + + s->pack_pblocks = 1; + } } /* Call this function when we know all parameters. 
@@ -1159,27 +1216,84 @@ static int mpeg_decode_postinit(AVCodecContext *avctx) uint8_t old_permutation[64]; int ret; + if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) { + // MPEG-1 aspect + AVRational aspect_inv = av_d2q(ff_mpeg1_aspect[s->aspect_ratio_info], 255); + avctx->sample_aspect_ratio = (AVRational) { aspect_inv.den, aspect_inv.num }; + } else { // MPEG-2 + // MPEG-2 aspect + if (s->aspect_ratio_info > 1) { + AVRational dar = + av_mul_q(av_div_q(ff_mpeg2_aspect[s->aspect_ratio_info], + (AVRational) { s1->pan_scan.width, + s1->pan_scan.height }), + (AVRational) { s->width, s->height }); + + /* We ignore the spec here and guess a bit as reality does not + * match the spec, see for example res_change_ffmpeg_aspect.ts + * and sequence-display-aspect.mpg. + * issue1613, 621, 562 */ + if ((s1->pan_scan.width == 0) || (s1->pan_scan.height == 0) || + (av_cmp_q(dar, (AVRational) { 4, 3 }) && + av_cmp_q(dar, (AVRational) { 16, 9 }))) { + s->avctx->sample_aspect_ratio = + av_div_q(ff_mpeg2_aspect[s->aspect_ratio_info], + (AVRational) { s->width, s->height }); + } else { + s->avctx->sample_aspect_ratio = + av_div_q(ff_mpeg2_aspect[s->aspect_ratio_info], + (AVRational) { s1->pan_scan.width, s1->pan_scan.height }); +// issue1613 4/3 16/9 -> 16/9 +// res_change_ffmpeg_aspect.ts 4/3 225/44 ->4/3 +// widescreen-issue562.mpg 4/3 16/9 -> 16/9 +// s->avctx->sample_aspect_ratio = av_mul_q(s->avctx->sample_aspect_ratio, (AVRational) {s->width, s->height}); + ff_dlog(avctx, "aspect A %d/%d\n", + ff_mpeg2_aspect[s->aspect_ratio_info].num, + ff_mpeg2_aspect[s->aspect_ratio_info].den); + ff_dlog(avctx, "aspect B %d/%d\n", s->avctx->sample_aspect_ratio.num, + s->avctx->sample_aspect_ratio.den); + } + } else { + s->avctx->sample_aspect_ratio = + ff_mpeg2_aspect[s->aspect_ratio_info]; + } + } // MPEG-2 + + if (av_image_check_sar(s->width, s->height, + avctx->sample_aspect_ratio) < 0) { + av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n", + avctx->sample_aspect_ratio.num, + 
avctx->sample_aspect_ratio.den); + avctx->sample_aspect_ratio = (AVRational){ 0, 1 }; + } + if ((s1->mpeg_enc_ctx_allocated == 0) || avctx->coded_width != s->width || avctx->coded_height != s->height || s1->save_width != s->width || s1->save_height != s->height || - s1->save_aspect_info != s->aspect_ratio_info || - s1->save_progressive_seq != s->progressive_sequence || + av_cmp_q(s1->save_aspect, s->avctx->sample_aspect_ratio) || + (s1->save_progressive_seq != s->progressive_sequence && FFALIGN(s->height, 16) != FFALIGN(s->height, 32)) || 0) { if (s1->mpeg_enc_ctx_allocated) { ParseContext pc = s->parse_context; s->parse_context.buffer = 0; ff_mpv_common_end(s); s->parse_context = pc; + s1->mpeg_enc_ctx_allocated = 0; } ret = ff_set_dimensions(avctx, s->width, s->height); if (ret < 0) return ret; - avctx->bit_rate = s->bit_rate; - s1->save_aspect_info = s->aspect_ratio_info; + if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->bit_rate) { + avctx->rc_max_rate = s->bit_rate; + } else if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && s->bit_rate && + (s->bit_rate != 0x3FFFF*400 || s->vbv_delay != 0xFFFF)) { + avctx->bit_rate = s->bit_rate; + } + s1->save_aspect = s->avctx->sample_aspect_ratio; s1->save_width = s->width; s1->save_height = s->height; s1->save_progressive_seq = s->progressive_sequence; @@ -1191,61 +1305,28 @@ static int mpeg_decode_postinit(AVCodecContext *avctx) if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) { // MPEG-1 fps avctx->framerate = ff_mpeg12_frame_rate_tab[s->frame_rate_index]; - // MPEG-1 aspect - avctx->sample_aspect_ratio = av_d2q(1.0 / ff_mpeg1_aspect[s->aspect_ratio_info], 255); avctx->ticks_per_frame = 1; + + avctx->chroma_sample_location = AVCHROMA_LOC_CENTER; } else { // MPEG-2 // MPEG-2 fps av_reduce(&s->avctx->framerate.num, &s->avctx->framerate.den, - ff_mpeg12_frame_rate_tab[s->frame_rate_index].num * s1->frame_rate_ext.num * 2, + ff_mpeg12_frame_rate_tab[s->frame_rate_index].num * s1->frame_rate_ext.num, 
ff_mpeg12_frame_rate_tab[s->frame_rate_index].den * s1->frame_rate_ext.den, 1 << 30); avctx->ticks_per_frame = 2; - // MPEG-2 aspect - if (s->aspect_ratio_info > 1) { - AVRational dar = - av_mul_q(av_div_q(ff_mpeg2_aspect[s->aspect_ratio_info], - (AVRational) { s1->pan_scan.width, - s1->pan_scan.height }), - (AVRational) { s->width, s->height }); - /* We ignore the spec here and guess a bit as reality does not - * match the spec, see for example res_change_ffmpeg_aspect.ts - * and sequence-display-aspect.mpg. - * issue1613, 621, 562 */ - if ((s1->pan_scan.width == 0) || (s1->pan_scan.height == 0) || - (av_cmp_q(dar, (AVRational) { 4, 3 }) && - av_cmp_q(dar, (AVRational) { 16, 9 }))) { - s->avctx->sample_aspect_ratio = - av_div_q(ff_mpeg2_aspect[s->aspect_ratio_info], - (AVRational) { s->width, s->height }); - } else { - s->avctx->sample_aspect_ratio = - av_div_q(ff_mpeg2_aspect[s->aspect_ratio_info], - (AVRational) { s1->pan_scan.width, s1->pan_scan.height }); -// issue1613 4/3 16/9 -> 16/9 -// res_change_ffmpeg_aspect.ts 4/3 225/44 ->4/3 -// widescreen-issue562.mpg 4/3 16/9 -> 16/9 -// s->avctx->sample_aspect_ratio = av_mul_q(s->avctx->sample_aspect_ratio, (AVRational) {s->width, s->height}); - ff_dlog(avctx, "A %d/%d\n", - ff_mpeg2_aspect[s->aspect_ratio_info].num, - ff_mpeg2_aspect[s->aspect_ratio_info].den); - ff_dlog(avctx, "B %d/%d\n", s->avctx->sample_aspect_ratio.num, - s->avctx->sample_aspect_ratio.den); - } - } else { - s->avctx->sample_aspect_ratio = - ff_mpeg2_aspect[s->aspect_ratio_info]; + switch (s->chroma_format) { + case 1: avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; break; + case 2: + case 3: avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; break; + default: av_assert0(0); } } // MPEG-2 - ff_set_sar(s->avctx, s->avctx->sample_aspect_ratio); - avctx->pix_fmt = mpeg_get_pixelformat(avctx); - // until then pix_fmt may be changed right after codec init - if (avctx->hwaccel && avctx->idct_algo == FF_IDCT_AUTO) - avctx->idct_algo = 
FF_IDCT_SIMPLE; + setup_hwaccel_for_pixfmt(avctx); /* Quantization matrices may need reordering * if DCT permutation is changed. */ @@ -1280,20 +1361,23 @@ static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf, return AVERROR_INVALIDDATA; vbv_delay = get_bits(&s->gb, 16); + s->vbv_delay = vbv_delay; if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_B) { s->full_pel[0] = get_bits1(&s->gb); f_code = get_bits(&s->gb, 3); - if (f_code == 0 && (avctx->err_recognition & AV_EF_BITSTREAM)) + if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT))) return AVERROR_INVALIDDATA; + f_code += !f_code; s->mpeg_f_code[0][0] = f_code; s->mpeg_f_code[0][1] = f_code; } if (s->pict_type == AV_PICTURE_TYPE_B) { s->full_pel[1] = get_bits1(&s->gb); f_code = get_bits(&s->gb, 3); - if (f_code == 0 && (avctx->err_recognition & AV_EF_BITSTREAM)) + if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT))) return AVERROR_INVALIDDATA; + f_code += !f_code; s->mpeg_f_code[1][0] = f_code; s->mpeg_f_code[1][1] = f_code; } @@ -1320,22 +1404,19 @@ static void mpeg_decode_sequence_extension(Mpeg1Context *s1) s->avctx->level = get_bits(&s->gb, 4); s->progressive_sequence = get_bits1(&s->gb); /* progressive_sequence */ s->chroma_format = get_bits(&s->gb, 2); /* chroma_format 1=420, 2=422, 3=444 */ + + if (!s->chroma_format) { + s->chroma_format = 1; + av_log(s->avctx, AV_LOG_WARNING, "Chroma format invalid\n"); + } + horiz_size_ext = get_bits(&s->gb, 2); vert_size_ext = get_bits(&s->gb, 2); s->width |= (horiz_size_ext << 12); s->height |= (vert_size_ext << 12); - - bit_rate_ext = get_bits(&s->gb, 12) << 18; - if (bit_rate_ext < INT_MAX / 400 && - bit_rate_ext * 400 < INT_MAX - s->bit_rate) { - s->bit_rate += bit_rate_ext * 400; - } else { - av_log(s->avctx, AV_LOG_WARNING, "Invalid bit rate extension value: %d\n", - bit_rate_ext >> 18); - s->bit_rate = 0; - } - - skip_bits1(&s->gb); /* marker */ + bit_rate_ext = 
get_bits(&s->gb, 12); /* XXX: handle it */ + s->bit_rate += (bit_rate_ext << 18) * 400LL; + check_marker(s->avctx, &s->gb, "after bit rate extension"); s->avctx->rc_buffer_size += get_bits(&s->gb, 8) * 1024 * 16 << 10; s->low_delay = get_bits1(&s->gb); @@ -1350,8 +1431,8 @@ static void mpeg_decode_sequence_extension(Mpeg1Context *s1) if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_DEBUG, - "profile: %d, level: %d vbv buffer: %d, bitrate:%d\n", - s->avctx->profile, s->avctx->level, + "profile: %d, level: %d ps: %d cf:%d vbv buffer: %d, bitrate:%"PRId64"\n", + s->avctx->profile, s->avctx->level, s->progressive_sequence, s->chroma_format, s->avctx->rc_buffer_size, s->bit_rate); } @@ -1426,7 +1507,7 @@ static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], return AVERROR_INVALIDDATA; } if (intra && i == 0 && v != 8) { - av_log(s->avctx, AV_LOG_ERROR, "intra matrix invalid, ignoring\n"); + av_log(s->avctx, AV_LOG_DEBUG, "intra matrix specifies invalid DC quantizer %d, ignoring\n", v); v = 8; // needed by pink.mpg / issue1046 } matrix0[j] = v; @@ -1472,6 +1553,11 @@ static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1) s->current_picture.f->pict_type = s->pict_type; s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; } + s->mpeg_f_code[0][0] += !s->mpeg_f_code[0][0]; + s->mpeg_f_code[0][1] += !s->mpeg_f_code[0][1]; + s->mpeg_f_code[1][0] += !s->mpeg_f_code[1][0]; + s->mpeg_f_code[1][1] += !s->mpeg_f_code[1][1]; + s->intra_dc_precision = get_bits(&s->gb, 2); s->picture_structure = get_bits(&s->gb, 2); s->top_field_first = get_bits1(&s->gb); @@ -1484,30 +1570,6 @@ static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1) s->chroma_420_type = get_bits1(&s->gb); s->progressive_frame = get_bits1(&s->gb); - if (s->progressive_sequence && !s->progressive_frame) { - s->progressive_frame = 1; - av_log(s->avctx, AV_LOG_ERROR, - "interlaced frame in progressive sequence, ignoring\n"); - } - - if (s->picture_structure 
== 0 || - (s->progressive_frame && s->picture_structure != PICT_FRAME)) { - av_log(s->avctx, AV_LOG_ERROR, - "picture_structure %d invalid, ignoring\n", - s->picture_structure); - s->picture_structure = PICT_FRAME; - } - - if (s->progressive_sequence && !s->frame_pred_frame_dct) - av_log(s->avctx, AV_LOG_WARNING, "invalid frame_pred_frame_dct\n"); - - if (s->picture_structure == PICT_FRAME) { - s->v_edge_pos = 16 * s->mb_height; - } else { - s->v_edge_pos = 8 * s->mb_height; - memset(s->mbskip_table, 0, s->mb_stride * s->mb_height); - } - if (s->alternate_scan) { ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan); @@ -1534,11 +1596,6 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size) Mpeg1Context *s1 = (Mpeg1Context *) s; int ret; - if (s->picture_structure == PICT_FRAME) - s->first_field = 0; - else - s->first_field ^= 1; - /* start frame decoding */ if (s->first_field || s->picture_structure == PICT_FRAME) { AVFrameSideData *pan_scan; @@ -1609,9 +1666,11 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size) if (s->avctx->hwaccel && (s->avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD)) { - if (s->avctx->hwaccel->end_frame(s->avctx) < 0) + if ((ret = s->avctx->hwaccel->end_frame(s->avctx)) < 0) { av_log(avctx, AV_LOG_ERROR, "hardware accelerator failed to decode first field\n"); + return ret; + } } for (i = 0; i < 4; i++) { @@ -1643,20 +1702,23 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y, const uint8_t **buf, int buf_size) { AVCodecContext *avctx = s->avctx; + const int lowres = s->avctx->lowres; const int field_pic = s->picture_structure != PICT_FRAME; int ret; s->resync_mb_x = s->resync_mb_y = -1; - assert(mb_y < s->mb_height); + av_assert0(mb_y < s->mb_height); init_get_bits(&s->gb, *buf, buf_size * 8); + if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && 
s->mb_height > 2800/16) + skip_bits(&s->gb, 3); ff_mpeg1_clean_buffers(s); s->interlaced_dct = 0; - s->qscale = get_qscale(s); + s->qscale = mpeg_get_qscale(s); if (s->qscale == 0) { av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n"); @@ -1664,8 +1726,8 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y, } /* extra slice info */ - while (get_bits1(&s->gb) != 0) - skip_bits(&s->gb, 8); + if (skip_1stop_8data_bits(&s->gb) < 0) + return AVERROR_INVALIDDATA; s->mb_x = 0; @@ -1695,7 +1757,7 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y, return AVERROR_INVALIDDATA; } - if (avctx->hwaccel) { + if (avctx->hwaccel && avctx->hwaccel->decode_slice) { const uint8_t *buf_end, *buf_start = *buf - 4; /* include start_code */ int start_code = -1; buf_end = avpriv_find_start_code(buf_start + 2, *buf + buf_size, &start_code); @@ -1735,6 +1797,10 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y, } for (;;) { + // If 1, we memcpy blocks in xvmcvideo. + if ((CONFIG_MPEG1_XVMC_HWACCEL || CONFIG_MPEG2_XVMC_HWACCEL) && s->pack_pblocks) + ff_xvmc_init_block(s); // set s->block + if ((ret = mpeg_decode_mb(s, s->block)) < 0) return ret; @@ -1765,22 +1831,23 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y, s->current_picture.motion_val[dir][xy + 1][1] = motion_y; s->current_picture.ref_index [dir][b8_xy] = s->current_picture.ref_index [dir][b8_xy + 1] = s->field_select[dir][i]; - assert(s->field_select[dir][i] == 0 || - s->field_select[dir][i] == 1); + av_assert2(s->field_select[dir][i] == 0 || + s->field_select[dir][i] == 1); } xy += wrap; b8_xy += 2; } } - s->dest[0] += 16; - s->dest[1] += 16 >> s->chroma_x_shift; - s->dest[2] += 16 >> s->chroma_x_shift; + s->dest[0] += 16 >> lowres; + s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift; + s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift; - ff_mpv_decode_mb(s, s->block); + ff_mpv_reconstruct_mb(s, s->block); if (++s->mb_x >= s->mb_width) { - const int mb_size = 16; + const int mb_size = 16 >> 
s->avctx->lowres; + int left; ff_mpeg_draw_horiz_band(s, mb_size * (s->mb_y >> field_pic), mb_size); ff_mpv_report_decode_progress(s); @@ -1798,15 +1865,40 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y, s->progressive_frame == 0 /* vbv_delay == 0xBBB || 0xE10 */; + if (left >= 32 && !is_d10) { + GetBitContext gb = s->gb; + align_get_bits(&gb); + if (show_bits(&gb, 24) == 0x060E2B) { + av_log(avctx, AV_LOG_DEBUG, "Invalid MXF data found in video stream\n"); + is_d10 = 1; + } + if (left > 32 && show_bits_long(&gb, 32) == 0x201) { + av_log(avctx, AV_LOG_DEBUG, "skipping m704 alpha (unsupported)\n"); + goto eos; + } + } + if (left < 0 || (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10) || - ((avctx->err_recognition & AV_EF_BUFFER) && left > 8)) { - av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X\n", - left, show_bits(&s->gb, FFMIN(left, 23))); + ((avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) && left > 8)) { + av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X at %d %d\n", + left, left>0 ? 
show_bits(&s->gb, FFMIN(left, 23)) : 0, s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } else goto eos; } + // There are some files out there which are missing the last slice + // in cases where the slice is completely outside the visible + // area, we detect this here instead of running into the end expecting + // more data + left = get_bits_left(&s->gb); + if (s->mb_y >= ((s->height + 15) >> 4) && + !s->progressive_sequence && + left <= 25 && + left >= 0 && + s->mb_skip_run == -1 && + (!left || show_bits(&s->gb, left) == 0)) + goto eos; ff_init_block_index(s); } @@ -1867,13 +1959,19 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y, s->mv[0][0][1] = s->last_mv[0][0][1]; s->mv[1][0][0] = s->last_mv[1][0][0]; s->mv[1][0][1] = s->last_mv[1][0][1]; + s->field_select[0][0] = (s->picture_structure - 1) & 1; + s->field_select[1][0] = (s->picture_structure - 1) & 1; } } } } eos: // end of slice + if (get_bits_left(&s->gb) < 0) { + av_log(s, AV_LOG_ERROR, "overread %d\n", -get_bits_left(&s->gb)); + return AVERROR_INVALIDDATA; + } *buf += (get_bits_count(&s->gb) - 1) / 8; - ff_dlog(s, "y %d %d %d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y); + ff_dlog(s, "Slice start:%d %d end:%d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y); return 0; } @@ -1913,7 +2011,10 @@ static int slice_decode_thread(AVCodecContext *c, void *arg) start_code = -1; buf = avpriv_find_start_code(buf, s->gb.buffer_end, &start_code); - mb_y = (start_code - SLICE_MIN_START_CODE) << field_pic; + mb_y = start_code - SLICE_MIN_START_CODE; + if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16) + mb_y += (*buf&0xE0)<<2; + mb_y <<= field_pic; if (s->picture_structure == PICT_BOTTOM_FIELD) mb_y++; if (mb_y < 0 || mb_y >= s->end_mb_y) @@ -1934,13 +2035,16 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict) return 0; if (s->avctx->hwaccel) { - if (s->avctx->hwaccel->end_frame(s->avctx) < 0) + int ret = s->avctx->hwaccel->end_frame(s->avctx); + if (ret < 0) 
{ av_log(avctx, AV_LOG_ERROR, "hardware accelerator failed to decode picture\n"); + return ret; + } } /* end of slice reached */ - if (/* s->mb_y << field_pic == s->mb_height && */ !s->first_field) { + if (/* s->mb_y << field_pic == s->mb_height && */ !s->first_field && !s1->first_slice) { /* end of image */ ff_er_frame_end(&s->er); @@ -1951,7 +2055,8 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict) int ret = av_frame_ref(pict, s->current_picture_ptr->f); if (ret < 0) return ret; - ff_print_debug_info(s, s->current_picture_ptr); + ff_print_debug_info(s, s->current_picture_ptr, pict); + ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_QSCALE_TYPE_MPEG2); } else { if (avctx->active_thread_type & FF_THREAD_FRAME) s->picture_number++; @@ -1961,7 +2066,8 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict) int ret = av_frame_ref(pict, s->last_picture_ptr->f); if (ret < 0) return ret; - ff_print_debug_info(s, s->last_picture_ptr); + ff_print_debug_info(s, s->last_picture_ptr, pict); + ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_QSCALE_TYPE_MPEG2); } } @@ -1986,28 +2092,25 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx, if (width == 0 || height == 0) { av_log(avctx, AV_LOG_WARNING, "Invalid horizontal or vertical size value.\n"); - if (avctx->err_recognition & AV_EF_BITSTREAM) + if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT)) return AVERROR_INVALIDDATA; } s->aspect_ratio_info = get_bits(&s->gb, 4); if (s->aspect_ratio_info == 0) { av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n"); - if (avctx->err_recognition & AV_EF_BITSTREAM) + if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT)) return AVERROR_INVALIDDATA; } s->frame_rate_index = get_bits(&s->gb, 4); if (s->frame_rate_index == 0 || s->frame_rate_index > 13) { av_log(avctx, AV_LOG_WARNING, "frame_rate_index %d is invalid\n", s->frame_rate_index); - return AVERROR_INVALIDDATA; + s->frame_rate_index = 1; } - s->bit_rate 
= get_bits(&s->gb, 18) * 400; - if (get_bits1(&s->gb) == 0) { /* marker */ - av_log(avctx, AV_LOG_ERROR, "Marker in sequence header missing\n"); + s->bit_rate = get_bits(&s->gb, 18) * 400LL; + if (check_marker(s->avctx, &s->gb, "in sequence header") == 0) { return AVERROR_INVALIDDATA; } - s->width = width; - s->height = height; s->avctx->rc_buffer_size = get_bits(&s->gb, 10) * 1024 * 16; skip_bits(&s->gb, 1); @@ -2039,21 +2142,26 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx, return AVERROR_INVALIDDATA; } + s->width = width; + s->height = height; + /* We set MPEG-2 parameters so that it emulates MPEG-1. */ s->progressive_sequence = 1; s->progressive_frame = 1; s->picture_structure = PICT_FRAME; + s->first_field = 0; s->frame_pred_frame_dct = 1; s->chroma_format = 1; s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO; s->out_format = FMT_MPEG1; + s->swap_uv = 0; // AFAIK VCR2 does not have SEQ_HEADER if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) s->low_delay = 1; if (s->avctx->debug & FF_DEBUG_PICT_INFO) - av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%d\n", - s->avctx->rc_buffer_size, s->bit_rate); + av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%"PRId64", aspect_ratio_info: %d \n", + s->avctx->rc_buffer_size, s->bit_rate, s->aspect_ratio_info); return 0; } @@ -2068,6 +2176,7 @@ static int vcr2_init_sequence(AVCodecContext *avctx) s->out_format = FMT_MPEG1; if (s1->mpeg_enc_ctx_allocated) { ff_mpv_common_end(s); + s1->mpeg_enc_ctx_allocated = 0; } s->width = avctx->coded_width; s->height = avctx->coded_height; @@ -2075,9 +2184,7 @@ static int vcr2_init_sequence(AVCodecContext *avctx) s->low_delay = 1; avctx->pix_fmt = mpeg_get_pixelformat(avctx); - - if (avctx->hwaccel && avctx->idct_algo == FF_IDCT_AUTO) - avctx->idct_algo = FF_IDCT_SIMPLE; + setup_hwaccel_for_pixfmt(avctx); ff_mpv_idct_init(s); if ((ret = ff_mpv_common_init(s)) < 0) @@ -2098,9 +2205,15 @@ static int vcr2_init_sequence(AVCodecContext *avctx) 
s->progressive_sequence = 1; s->progressive_frame = 1; s->picture_structure = PICT_FRAME; + s->first_field = 0; s->frame_pred_frame_dct = 1; s->chroma_format = 1; - s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO; + if (s->codec_tag == AV_RL32("BW10")) { + s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO; + } else { + s->swap_uv = 1; // in case of xvmc we need to swap uv for each MB + s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO; + } s1->save_width = s->width; s1->save_height = s->height; s1->save_progressive_seq = s->progressive_sequence; @@ -2121,13 +2234,82 @@ static int mpeg_decode_a53_cc(AVCodecContext *avctx, av_freep(&s1->a53_caption); s1->a53_caption_size = cc_count * 3; s1->a53_caption = av_malloc(s1->a53_caption_size); - if (s1->a53_caption) + if (!s1->a53_caption) { + s1->a53_caption_size = 0; + } else { memcpy(s1->a53_caption, p + 7, s1->a53_caption_size); + } + avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS; + } + return 1; + } else if (buf_size >= 2 && + p[0] == 0x03 && (p[1]&0x7f) == 0x01) { + /* extract SCTE-20 CC data */ + GetBitContext gb; + int cc_count = 0; + int i; + + init_get_bits(&gb, p + 2, buf_size - 2); + cc_count = get_bits(&gb, 5); + if (cc_count > 0) { + av_freep(&s1->a53_caption); + s1->a53_caption_size = cc_count * 3; + s1->a53_caption = av_mallocz(s1->a53_caption_size); + if (!s1->a53_caption) { + s1->a53_caption_size = 0; + } else { + uint8_t field, cc1, cc2; + uint8_t *cap = s1->a53_caption; + for (i = 0; i < cc_count && get_bits_left(&gb) >= 26; i++) { + skip_bits(&gb, 2); // priority + field = get_bits(&gb, 2); + skip_bits(&gb, 5); // line_offset + cc1 = get_bits(&gb, 8); + cc2 = get_bits(&gb, 8); + skip_bits(&gb, 1); // marker + + if (!field) { // forbidden + cap[0] = cap[1] = cap[2] = 0x00; + } else { + field = (field == 2 ? 
1 : 0); + if (!s1->mpeg_enc_ctx.top_field_first) field = !field; + cap[0] = 0x04 | field; + cap[1] = ff_reverse[cc1]; + cap[2] = ff_reverse[cc2]; + } + cap += 3; + } + } + avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS; } return 1; } else if (buf_size >= 11 && p[0] == 'C' && p[1] == 'C' && p[2] == 0x01 && p[3] == 0xf8) { - /* extract DVD CC data */ + /* extract DVD CC data + * + * uint32_t user_data_start_code 0x000001B2 (big endian) + * uint16_t user_identifier 0x4343 "CC" + * uint8_t user_data_type_code 0x01 + * uint8_t caption_block_size 0xF8 + * uint8_t + * bit 7 caption_odd_field_first 1=odd field (CC1/CC2) first 0=even field (CC3/CC4) first + * bit 6 caption_filler 0 + * bit 5:1 caption_block_count number of caption blocks (pairs of caption words = frames). Most DVDs use 15 per start of GOP. + * bit 0 caption_extra_field_added 1=one additional caption word + * + * struct caption_field_block { + * uint8_t + * bit 7:1 caption_filler 0x7F (all 1s) + * bit 0 caption_field_odd 1=odd field (this is CC1/CC2) 0=even field (this is CC3/CC4) + * uint8_t caption_first_byte + * uint8_t caption_second_byte + * } caption_block[(caption_block_count * 2) + caption_extra_field_added]; + * + * Some DVDs encode caption data for both fields with caption_field_odd=1. The only way to decode the fields + * correctly is to start on the field indicated by caption_odd_field_first and count between odd/even fields. + * Don't assume that the first caption word is the odd field. There do exist MPEG files in the wild that start + * on the even field. There also exist DVDs in the wild that encode an odd field count and the + * caption_extra_field_added/caption_odd_field_first bits change per packet to allow that. 
*/ int cc_count = 0; int i; // There is a caption count field in the data, but it is often @@ -2139,7 +2321,9 @@ static int mpeg_decode_a53_cc(AVCodecContext *avctx, av_freep(&s1->a53_caption); s1->a53_caption_size = cc_count * 6; s1->a53_caption = av_malloc(s1->a53_caption_size); - if (s1->a53_caption) { + if (!s1->a53_caption) { + s1->a53_caption_size = 0; + } else { uint8_t field1 = !!(p[4] & 0x80); uint8_t *cap = s1->a53_caption; p += 5; @@ -2154,6 +2338,7 @@ static int mpeg_decode_a53_cc(AVCodecContext *avctx, p += 6; } } + avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS; } return 1; } @@ -2163,9 +2348,25 @@ static int mpeg_decode_a53_cc(AVCodecContext *avctx, static void mpeg_decode_user_data(AVCodecContext *avctx, const uint8_t *p, int buf_size) { + Mpeg1Context *s = avctx->priv_data; const uint8_t *buf_end = p + buf_size; Mpeg1Context *s1 = avctx->priv_data; +#if 0 + int i; + for(i=0; !(!p[i-2] && !p[i-1] && p[i]==1) && i<buf_size; i++){ + av_log(avctx, AV_LOG_ERROR, "%c", p[i]); + } + av_log(avctx, AV_LOG_ERROR, "\n"); +#endif + + if (buf_size > 29){ + int i; + for(i=0; i<20; i++) + if (!memcmp(p+i, "\0TMPGEXS\0", 9)){ + s->tmpgexs= 1; + } + } /* we parse the DTG active format information */ if (buf_end - p >= 5 && p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') { @@ -2219,32 +2420,32 @@ static void mpeg_decode_gop(AVCodecContext *avctx, { Mpeg1Context *s1 = avctx->priv_data; MpegEncContext *s = &s1->mpeg_enc_ctx; - - int time_code_hours, time_code_minutes; - int time_code_seconds, time_code_pictures; int broken_link; + int64_t tc; init_get_bits(&s->gb, buf, buf_size * 8); - skip_bits1(&s->gb); /* drop_frame_flag */ + tc = s-> timecode_frame_start = get_bits(&s->gb, 25); - time_code_hours = get_bits(&s->gb, 5); - time_code_minutes = get_bits(&s->gb, 6); - skip_bits1(&s->gb); // marker bit - time_code_seconds = get_bits(&s->gb, 6); - time_code_pictures = get_bits(&s->gb, 6); +#if FF_API_PRIVATE_OPT +FF_DISABLE_DEPRECATION_WARNINGS + 
avctx->timecode_frame_start = tc; +FF_ENABLE_DEPRECATION_WARNINGS +#endif - s1->closed_gop = get_bits1(&s->gb); - /* broken_link indicate that after editing the + s->closed_gop = get_bits1(&s->gb); + /* broken_link indicates that after editing the * reference frames of the first B-Frames after GOP I-Frame * are missing (open gop) */ broken_link = get_bits1(&s->gb); - if (s->avctx->debug & FF_DEBUG_PICT_INFO) + if (s->avctx->debug & FF_DEBUG_PICT_INFO) { + char tcbuf[AV_TIMECODE_STR_SIZE]; + av_timecode_make_mpeg_tc_string(tcbuf, tc); av_log(s->avctx, AV_LOG_DEBUG, - "GOP (%2d:%02d:%02d.[%02d]) closed_gop=%d broken_link=%d\n", - time_code_hours, time_code_minutes, time_code_seconds, - time_code_pictures, s1->closed_gop, broken_link); + "GOP (%s) closed_gop=%d broken_link=%d\n", + tcbuf, s->closed_gop, broken_link); + } } static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, @@ -2256,6 +2457,7 @@ static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, const uint8_t *buf_end = buf + buf_size; int ret, input_size; int last_code = 0, skip_frame = 0; + int picture_start_code_seen = 0; for (;;) { /* find next start code */ @@ -2267,6 +2469,7 @@ static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, (avctx->active_thread_type & FF_THREAD_SLICE) && !avctx->hwaccel) { int i; + av_assert0(avctx->thread_count > 1); avctx->execute(avctx, slice_decode_thread, &s2->thread_context[0], NULL, @@ -2285,13 +2488,17 @@ static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, } } s2->pict_type = 0; + + if (avctx->err_recognition & AV_EF_EXPLODE && s2->er.error_count) + return AVERROR_INVALIDDATA; + return FFMAX(0, buf_ptr - buf - s2->parse_context.last_index); } input_size = buf_end - buf_ptr; if (avctx->debug & FF_DEBUG_STARTCODE) - av_log(avctx, AV_LOG_DEBUG, "%3"PRIX32" at %td left %d\n", + av_log(avctx, AV_LOG_DEBUG, "%3"PRIX32" at %"PTRDIFF_SPECIFIER" left %d\n", start_code, buf_ptr - buf, input_size); /* prepare data for next start code */ 
@@ -2299,7 +2506,8 @@ static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, case SEQ_START_CODE: if (last_code == 0) { mpeg1_decode_sequence(avctx, buf_ptr, input_size); - s->sync = 1; + if (buf != avctx->extradata) + s->sync = 1; } else { av_log(avctx, AV_LOG_ERROR, "ignoring SEQ_START_CODE after %X\n", last_code); @@ -2309,12 +2517,24 @@ static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, break; case PICTURE_START_CODE: + if (picture_start_code_seen && s2->picture_structure == PICT_FRAME) { + /* If it's a frame picture, there can't be more than one picture header. + Yet, it does happen and we need to handle it. */ + av_log(avctx, AV_LOG_WARNING, "ignoring extra picture following a frame-picture\n"); + break; + } + picture_start_code_seen = 1; + if (s2->width <= 0 || s2->height <= 0) { av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d.\n", s2->width, s2->height); return AVERROR_INVALIDDATA; } + if (s->tmpgexs){ + s2->intra_dc_precision= 3; + s2->intra_matrix[0]= 1; + } if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) && !avctx->hwaccel && s->slice_count) { int i; @@ -2398,14 +2618,50 @@ static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, break; default: if (start_code >= SLICE_MIN_START_CODE && + start_code <= SLICE_MAX_START_CODE && last_code == PICTURE_START_CODE) { + if (s2->progressive_sequence && !s2->progressive_frame) { + s2->progressive_frame = 1; + av_log(s2->avctx, AV_LOG_ERROR, + "interlaced frame in progressive sequence, ignoring\n"); + } + + if (s2->picture_structure == 0 || + (s2->progressive_frame && s2->picture_structure != PICT_FRAME)) { + av_log(s2->avctx, AV_LOG_ERROR, + "picture_structure %d invalid, ignoring\n", + s2->picture_structure); + s2->picture_structure = PICT_FRAME; + } + + if (s2->progressive_sequence && !s2->frame_pred_frame_dct) + av_log(s2->avctx, AV_LOG_WARNING, "invalid frame_pred_frame_dct\n"); + + if (s2->picture_structure == PICT_FRAME) { + s2->first_field = 
0; + s2->v_edge_pos = 16 * s2->mb_height; + } else { + s2->first_field ^= 1; + s2->v_edge_pos = 8 * s2->mb_height; + memset(s2->mbskip_table, 0, s2->mb_stride * s2->mb_height); + } + } + if (start_code >= SLICE_MIN_START_CODE && start_code <= SLICE_MAX_START_CODE && last_code != 0) { const int field_pic = s2->picture_structure != PICT_FRAME; - int mb_y = (start_code - SLICE_MIN_START_CODE) << field_pic; + int mb_y = start_code - SLICE_MIN_START_CODE; last_code = SLICE_MIN_START_CODE; + if (s2->codec_id != AV_CODEC_ID_MPEG1VIDEO && s2->mb_height > 2800/16) + mb_y += (*buf_ptr&0xE0)<<2; + mb_y <<= field_pic; if (s2->picture_structure == PICT_BOTTOM_FIELD) mb_y++; + if (buf_end - buf_ptr < 2) { + av_log(s2->avctx, AV_LOG_ERROR, "slice too small\n"); + return AVERROR_INVALIDDATA; + } + if (mb_y >= s2->mb_height) { av_log(s2->avctx, AV_LOG_ERROR, "slice below image (%d >= %d)\n", mb_y, s2->mb_height); @@ -2416,19 +2672,23 @@ static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, /* Skip B-frames if we do not have reference frames and * GOP is not closed. */ if (s2->pict_type == AV_PICTURE_TYPE_B) { - if (!s->closed_gop) { + if (!s2->closed_gop) { skip_frame = 1; + av_log(s2->avctx, AV_LOG_DEBUG, + "Skipping B slice due to open GOP\n"); break; } } } - if (s2->pict_type == AV_PICTURE_TYPE_I) + if (s2->pict_type == AV_PICTURE_TYPE_I || (s2->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL)) s->sync = 1; if (!s2->next_picture_ptr) { /* Skip P-frames if we do not have a reference frame or * we have an invalid header. 
*/ if (s2->pict_type == AV_PICTURE_TYPE_P && !s->sync) { skip_frame = 1; + av_log(s2->avctx, AV_LOG_DEBUG, + "Skipping P slice due to !sync\n"); break; } } @@ -2475,6 +2735,7 @@ static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, int threshold = (s2->mb_height * s->slice_count + s2->slice_context_count / 2) / s2->slice_context_count; + av_assert0(avctx->thread_count > 1); if (threshold <= mb_y) { MpegEncContext *thread_context = s2->thread_context[s->slice_count]; @@ -2517,11 +2778,11 @@ static int mpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_output, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; + int ret; int buf_size = avpkt->size; Mpeg1Context *s = avctx->priv_data; AVFrame *picture = data; MpegEncContext *s2 = &s->mpeg_enc_ctx; - ff_dlog(avctx, "fill_buffer\n"); if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) { /* special case for last picture */ @@ -2546,20 +2807,46 @@ static int mpeg_decode_frame(AVCodecContext *avctx, void *data, return buf_size; } - if (s->mpeg_enc_ctx_allocated == 0 && avctx->codec_tag == AV_RL32("VCR2")) + s2->codec_tag = avpriv_toupper4(avctx->codec_tag); + if (s->mpeg_enc_ctx_allocated == 0 && ( s2->codec_tag == AV_RL32("VCR2") + || s2->codec_tag == AV_RL32("BW10") + )) vcr2_init_sequence(avctx); s->slice_count = 0; if (avctx->extradata && !s->extradata_decoded) { - int ret = decode_chunks(avctx, picture, got_output, - avctx->extradata, avctx->extradata_size); + ret = decode_chunks(avctx, picture, got_output, + avctx->extradata, avctx->extradata_size); + if (*got_output) { + av_log(avctx, AV_LOG_ERROR, "picture in extradata\n"); + av_frame_unref(picture); + *got_output = 0; + } s->extradata_decoded = 1; - if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) + if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) { + s2->current_picture_ptr = NULL; return ret; + } + } + + ret = decode_chunks(avctx, picture, got_output, buf, buf_size); + if (ret<0 || *got_output) { + 
s2->current_picture_ptr = NULL; + + if (s2->timecode_frame_start != -1 && *got_output) { + AVFrameSideData *tcside = av_frame_new_side_data(picture, + AV_FRAME_DATA_GOP_TIMECODE, + sizeof(int64_t)); + if (!tcside) + return AVERROR(ENOMEM); + memcpy(tcside->data, &s2->timecode_frame_start, sizeof(int64_t)); + + s2->timecode_frame_start = -1; + } } - return decode_chunks(avctx, picture, got_output, buf, buf_size); + return ret; } static void flush(AVCodecContext *avctx) @@ -2567,7 +2854,6 @@ static void flush(AVCodecContext *avctx) Mpeg1Context *s = avctx->priv_data; s->sync = 0; - s->closed_gop = 0; ff_mpeg_flush(avctx); } @@ -2594,12 +2880,23 @@ AVCodec ff_mpeg1video_decoder = { .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS, + .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM, .flush = flush, + .max_lowres = 3, .update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg_decode_update_thread_context), .hw_configs = (const AVCodecHWConfigInternal*[]) { +#if CONFIG_MPEG1_NVDEC_HWACCEL + HWACCEL_NVDEC(mpeg1), +#endif #if CONFIG_MPEG1_VDPAU_HWACCEL HWACCEL_VDPAU(mpeg1), #endif +#if CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL + HWACCEL_VIDEOTOOLBOX(mpeg1), +#endif +#if CONFIG_MPEG1_XVMC_HWACCEL + HWACCEL_XVMC(mpeg1), +#endif NULL }, }; @@ -2616,7 +2913,9 @@ AVCodec ff_mpeg2video_decoder = { .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS, + .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM, .flush = flush, + .max_lowres = 3, .profiles = NULL_IF_CONFIG_SMALL(ff_mpeg2_video_profiles), .hw_configs = (const AVCodecHWConfigInternal*[]) { #if CONFIG_MPEG2_DXVA2_HWACCEL @@ -2628,12 +2927,37 @@ AVCodec ff_mpeg2video_decoder = { #if CONFIG_MPEG2_D3D11VA2_HWACCEL HWACCEL_D3D11VA2(mpeg2), #endif +#if CONFIG_MPEG2_NVDEC_HWACCEL + HWACCEL_NVDEC(mpeg2), +#endif #if CONFIG_MPEG2_VAAPI_HWACCEL HWACCEL_VAAPI(mpeg2), 
#endif #if CONFIG_MPEG2_VDPAU_HWACCEL HWACCEL_VDPAU(mpeg2), #endif +#if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL + HWACCEL_VIDEOTOOLBOX(mpeg2), +#endif +#if CONFIG_MPEG2_XVMC_HWACCEL + HWACCEL_XVMC(mpeg2), +#endif NULL }, }; + +//legacy decoder +AVCodec ff_mpegvideo_decoder = { + .name = "mpegvideo", + .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video"), + .type = AVMEDIA_TYPE_VIDEO, + .id = AV_CODEC_ID_MPEG2VIDEO, + .priv_data_size = sizeof(Mpeg1Context), + .init = mpeg_decode_init, + .close = mpeg_decode_end, + .decode = mpeg_decode_frame, + .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS, + .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM, + .flush = flush, + .max_lowres = 3, +}; |