Diffstat (limited to 'libavcodec/mpeg12dec.c')
-rw-r--r--  libavcodec/mpeg12dec.c  367
1 file changed, 251 insertions(+), 116 deletions(-)
diff --git a/libavcodec/mpeg12dec.c b/libavcodec/mpeg12dec.c
index 2fcb78005d..4795a1c451 100644
--- a/libavcodec/mpeg12dec.c
+++ b/libavcodec/mpeg12dec.c
@@ -1,22 +1,22 @@
/*
* MPEG-1/2 decoder
* Copyright (c) 2000, 2001 Fabrice Bellard
- * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2002-2013 Michael Niedermayer <michaelni@gmx.at>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -51,7 +51,7 @@ typedef struct Mpeg1Context {
int save_width, save_height, save_progressive_seq;
AVRational frame_rate_ext; ///< MPEG-2 specific framerate modificator
int sync; ///< Did we reach a sync point like a GOP/SEQ/KEYFrame?
- int closed_gop; ///< GOP is closed
+ int tmpgexs;
int extradata_decoded;
} Mpeg1Context;
@@ -235,6 +235,11 @@ end:
return 0;
}
+/**
+ * Note: this function can read out of range and crash for corrupt streams.
+ * Changing this would eat up any speed benefits it has.
+ * Do not use "fast" flag if you need the code to be robust.
+ */
static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
{
int level, i, j, run;
@@ -386,6 +391,11 @@ end:
return 0;
}
+/**
+ * Note: this function can read out of range and crash for corrupt streams.
+ * Changing this would eat up any speed benefits it has.
+ * Do not use "fast" flag if you need the code to be robust.
+ */
static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s,
int16_t *block, int n)
{
@@ -526,6 +536,11 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, int16_t *block, in
return 0;
}
+/**
+ * Note: this function can read out of range and crash for corrupt streams.
+ * Changing this would eat up any speed benefits it has.
+ * Do not use "fast" flag if you need the code to be robust.
+ */
static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
{
int level, dc, diff, j, run;
@@ -637,7 +652,7 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
av_dlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);
- assert(s->mb_skipped == 0);
+ av_assert2(s->mb_skipped == 0);
if (s->mb_skip_run-- != 0) {
if (s->pict_type == AV_PICTURE_TYPE_P) {
@@ -650,11 +665,12 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
mb_type = s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
else
mb_type = s->current_picture.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
- if (IS_INTRA(mb_type))
+ if (IS_INTRA(mb_type)) {
+ av_log(s->avctx, AV_LOG_ERROR, "skip with previntra\n");
return -1;
+ }
s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
mb_type | MB_TYPE_SKIP;
-// assert(s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1] & (MB_TYPE_16x16 | MB_TYPE_16x8));
if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0)
s->mb_skipped = 1;
@@ -752,11 +768,12 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
}
} else {
if (mb_type & MB_TYPE_ZERO_MV) {
- assert(mb_type & MB_TYPE_CBP);
+ av_assert2(mb_type & MB_TYPE_CBP);
s->mv_dir = MV_DIR_FORWARD;
if (s->picture_structure == PICT_FRAME) {
- if (!s->frame_pred_frame_dct)
+ if (s->picture_structure == PICT_FRAME
+ && !s->frame_pred_frame_dct)
s->interlaced_dct = get_bits1(&s->gb);
s->mv_type = MV_TYPE_16X16;
} else {
@@ -775,10 +792,10 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
} else {
- assert(mb_type & MB_TYPE_L0L1);
+ av_assert2(mb_type & MB_TYPE_L0L1);
// FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
/* get additional motion vector type */
- if (s->frame_pred_frame_dct)
+ if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct)
motion_type = MT_FRAME;
else {
motion_type = get_bits(&s->gb, 2);
@@ -852,6 +869,7 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
}
}
} else {
+ av_assert0(!s->progressive_sequence);
mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
for (i = 0; i < 2; i++) {
if (USES_LIST(mb_type, i)) {
@@ -868,6 +886,10 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
}
break;
case MT_DMV:
+ if(s->progressive_sequence){
+ av_log(s->avctx, AV_LOG_ERROR, "MT_DMV in progressive_sequence\n");
+ return -1;
+ }
s->mv_type = MV_TYPE_DMV;
for (i = 0; i < 2; i++) {
if (USES_LIST(mb_type, i)) {
@@ -934,7 +956,7 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
s->dsp.clear_blocks(s->block[6]);
}
if (cbp <= 0) {
- av_log(s->avctx, AV_LOG_ERROR, "invalid cbp at %d %d\n", s->mb_x, s->mb_y);
+ av_log(s->avctx, AV_LOG_ERROR, "invalid cbp %d at %d %d\n", cbp, s->mb_x, s->mb_y);
return -1;
}
@@ -1067,45 +1089,71 @@ static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm,
}
}
-static const enum AVPixelFormat pixfmt_xvmc_mpg2_420[] = {
+static const enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[] = {
+#if CONFIG_MPEG_XVMC_DECODER
AV_PIX_FMT_XVMC_MPEG2_IDCT,
AV_PIX_FMT_XVMC_MPEG2_MC,
- AV_PIX_FMT_NONE };
+#endif
+#if CONFIG_MPEG1_VDPAU_HWACCEL
+ AV_PIX_FMT_VDPAU_MPEG1,
+ AV_PIX_FMT_VDPAU,
+#endif
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_NONE
+};
-static const enum AVPixelFormat mpeg12_hwaccel_pixfmt_list_420[] = {
+static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = {
+#if CONFIG_MPEG_XVMC_DECODER
+ AV_PIX_FMT_XVMC_MPEG2_IDCT,
+ AV_PIX_FMT_XVMC_MPEG2_MC,
+#endif
+#if CONFIG_MPEG2_VDPAU_HWACCEL
+ AV_PIX_FMT_VDPAU_MPEG2,
+ AV_PIX_FMT_VDPAU,
+#endif
#if CONFIG_MPEG2_DXVA2_HWACCEL
AV_PIX_FMT_DXVA2_VLD,
#endif
#if CONFIG_MPEG2_VAAPI_HWACCEL
AV_PIX_FMT_VAAPI_VLD,
#endif
-#if CONFIG_MPEG1_VDPAU_HWACCEL | CONFIG_MPEG2_VDPAU_HWACCEL
- AV_PIX_FMT_VDPAU,
-#endif
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE
};
+static inline int uses_vdpau(AVCodecContext *avctx) {
+ return avctx->pix_fmt == AV_PIX_FMT_VDPAU_MPEG1 || avctx->pix_fmt == AV_PIX_FMT_VDPAU_MPEG2;
+}
+
static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
{
Mpeg1Context *s1 = avctx->priv_data;
MpegEncContext *s = &s1->mpeg_enc_ctx;
- if (avctx->xvmc_acceleration)
- return avctx->get_format(avctx, pixfmt_xvmc_mpg2_420);
- else if (avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) {
- if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO)
- return AV_PIX_FMT_VDPAU_MPEG1;
- else
- return AV_PIX_FMT_VDPAU_MPEG2;
- } else {
- if (s->chroma_format < 2)
- return avctx->get_format(avctx, mpeg12_hwaccel_pixfmt_list_420);
- else if (s->chroma_format == 2)
- return AV_PIX_FMT_YUV422P;
- else
- return AV_PIX_FMT_YUV444P;
+ if(s->chroma_format < 2) {
+ return ff_thread_get_format(avctx,
+ avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ?
+ mpeg1_hwaccel_pixfmt_list_420 :
+ mpeg2_hwaccel_pixfmt_list_420);
+ } else if(s->chroma_format == 2)
+ return AV_PIX_FMT_YUV422P;
+ else
+ return AV_PIX_FMT_YUV444P;
+}
+
+static void setup_hwaccel_for_pixfmt(AVCodecContext *avctx)
+{
+ if (avctx->pix_fmt != AV_PIX_FMT_XVMC_MPEG2_IDCT && avctx->pix_fmt != AV_PIX_FMT_XVMC_MPEG2_MC) {
+ avctx->xvmc_acceleration = 0;
+ } else if (!avctx->xvmc_acceleration) {
+ avctx->xvmc_acceleration = 2;
}
+ avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
+ // until then pix_fmt may be changed right after codec init
+ if (avctx->pix_fmt == AV_PIX_FMT_XVMC_MPEG2_IDCT ||
+ avctx->hwaccel || uses_vdpau(avctx))
+ if (avctx->idct_algo == FF_IDCT_AUTO)
+ avctx->idct_algo = FF_IDCT_SIMPLE;
}
/* Call this function when we know all parameters.
@@ -1122,7 +1170,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
s1->save_width != s->width ||
s1->save_height != s->height ||
s1->save_aspect_info != s->aspect_ratio_info ||
- s1->save_progressive_seq != s->progressive_sequence ||
+ (s1->save_progressive_seq != s->progressive_sequence && (s->height&31)) ||
0)
{
@@ -1137,7 +1185,12 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
return -2;
avcodec_set_dimensions(avctx, s->width, s->height);
- avctx->bit_rate = s->bit_rate;
+ if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->bit_rate) {
+ avctx->rc_max_rate = s->bit_rate;
+ } else if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && s->bit_rate &&
+ (s->bit_rate != 0x3FFFF*400 || s->vbv_delay != 0xFFFF)) {
+ avctx->bit_rate = s->bit_rate;
+ }
s1->save_aspect_info = s->aspect_ratio_info;
s1->save_width = s->width;
s1->save_height = s->height;
@@ -1197,13 +1250,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
} // MPEG-2
avctx->pix_fmt = mpeg_get_pixelformat(avctx);
- avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
- // until then pix_fmt may be changed right after codec init
- if (avctx->pix_fmt == AV_PIX_FMT_XVMC_MPEG2_IDCT ||
- avctx->hwaccel ||
- s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
- if (avctx->idct_algo == FF_IDCT_AUTO)
- avctx->idct_algo = FF_IDCT_SIMPLE;
+ setup_hwaccel_for_pixfmt(avctx);
/* Quantization matrices may need reordering
* if DCT permutation is changed. */
@@ -1237,19 +1284,22 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
return -1;
vbv_delay = get_bits(&s->gb, 16);
+ s->vbv_delay = vbv_delay;
if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_B) {
s->full_pel[0] = get_bits1(&s->gb);
f_code = get_bits(&s->gb, 3);
- if (f_code == 0 && (avctx->err_recognition & AV_EF_BITSTREAM))
+ if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
return -1;
+ f_code += !f_code;
s->mpeg_f_code[0][0] = f_code;
s->mpeg_f_code[0][1] = f_code;
}
if (s->pict_type == AV_PICTURE_TYPE_B) {
s->full_pel[1] = get_bits1(&s->gb);
f_code = get_bits(&s->gb, 3);
- if (f_code == 0 && (avctx->err_recognition & AV_EF_BITSTREAM))
+ if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
return -1;
+ f_code += !f_code;
s->mpeg_f_code[1][0] = f_code;
s->mpeg_f_code[1][1] = f_code;
}
@@ -1369,7 +1419,7 @@ static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1
return -1;
}
if (intra && i == 0 && v != 8) {
- av_log(s->avctx, AV_LOG_ERROR, "intra matrix invalid, ignoring\n");
+ av_log(s->avctx, AV_LOG_DEBUG, "intra matrix specifies invalid DC quantizer %d, ignoring\n", v);
v = 8; // needed by pink.mpg / issue1046
}
matrix0[j] = v;
@@ -1410,6 +1460,11 @@ static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
s->current_picture.f.pict_type = s->pict_type;
s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
}
+ s->mpeg_f_code[0][0] += !s->mpeg_f_code[0][0];
+ s->mpeg_f_code[0][1] += !s->mpeg_f_code[0][1];
+ s->mpeg_f_code[1][0] += !s->mpeg_f_code[1][0];
+ s->mpeg_f_code[1][1] += !s->mpeg_f_code[1][1];
+
s->intra_dc_precision = get_bits(&s->gb, 2);
s->picture_structure = get_bits(&s->gb, 2);
s->top_field_first = get_bits1(&s->gb);
@@ -1422,28 +1477,6 @@ static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
s->chroma_420_type = get_bits1(&s->gb);
s->progressive_frame = get_bits1(&s->gb);
- if (s->progressive_sequence && !s->progressive_frame) {
- s->progressive_frame = 1;
- av_log(s->avctx, AV_LOG_ERROR, "interlaced frame in progressive sequence, ignoring\n");
- }
-
- if (s->picture_structure == 0 || (s->progressive_frame && s->picture_structure != PICT_FRAME)) {
- av_log(s->avctx, AV_LOG_ERROR, "picture_structure %d invalid, ignoring\n", s->picture_structure);
- s->picture_structure = PICT_FRAME;
- }
-
- if (s->progressive_sequence && !s->frame_pred_frame_dct) {
- av_log(s->avctx, AV_LOG_WARNING, "invalid frame_pred_frame_dct\n");
- }
-
- if (s->picture_structure == PICT_FRAME) {
- s->first_field = 0;
- s->v_edge_pos = 16 * s->mb_height;
- } else {
- s->first_field ^= 1;
- s->v_edge_pos = 8 * s->mb_height;
- memset(s->mbskip_table, 0, s->mb_stride * s->mb_height);
- }
if (s->alternate_scan) {
ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
@@ -1550,14 +1583,17 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
const uint8_t **buf, int buf_size)
{
AVCodecContext *avctx = s->avctx;
+ const int lowres = s->avctx->lowres;
const int field_pic = s->picture_structure != PICT_FRAME;
s->resync_mb_x =
s->resync_mb_y = -1;
- assert(mb_y < s->mb_height);
+ av_assert0(mb_y < s->mb_height);
init_get_bits(&s->gb, *buf, buf_size * 8);
+ if(s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
+ skip_bits(&s->gb, 3);
ff_mpeg1_clean_buffers(s);
s->interlaced_dct = 0;
@@ -1664,21 +1700,21 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
s->current_picture.ref_index [dir][b8_xy ] =
s->current_picture.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
- assert(s->field_select[dir][i] == 0 || s->field_select[dir][i] == 1);
+ av_assert2(s->field_select[dir][i] == 0 || s->field_select[dir][i] == 1);
}
xy += wrap;
b8_xy +=2;
}
}
- s->dest[0] += 16;
- s->dest[1] += 16 >> s->chroma_x_shift;
- s->dest[2] += 16 >> s->chroma_x_shift;
+ s->dest[0] += 16 >> lowres;
+ s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
+ s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
ff_MPV_decode_mb(s, s->block);
if (++s->mb_x >= s->mb_width) {
- const int mb_size = 16;
+ const int mb_size = 16 >> s->avctx->lowres;
ff_mpeg_draw_horiz_band(s, mb_size*(s->mb_y >> field_pic), mb_size);
ff_MPV_report_decode_progress(s);
@@ -1692,8 +1728,17 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
&& s->intra_dc_precision == 2 && s->q_scale_type == 1 && s->alternate_scan == 0
&& s->progressive_frame == 0 /* vbv_delay == 0xBBB || 0xE10*/;
+ if (left >= 32 && !is_d10) {
+ GetBitContext gb = s->gb;
+ align_get_bits(&gb);
+ if (show_bits(&gb, 24) == 0x060E2B) {
+ av_log(avctx, AV_LOG_DEBUG, "Invalid MXF data found in video stream\n");
+ is_d10 = 1;
+ }
+ }
+
if (left < 0 || (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10)
- || ((avctx->err_recognition & AV_EF_BUFFER) && left > 8)) {
+ || ((avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) && left > 8)) {
av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X\n", left, show_bits(&s->gb, FFMIN(left, 23)));
return -1;
} else
@@ -1800,7 +1845,10 @@ static int slice_decode_thread(AVCodecContext *c, void *arg)
start_code = -1;
buf = avpriv_find_start_code(buf, s->gb.buffer_end, &start_code);
- mb_y= (start_code - SLICE_MIN_START_CODE) << field_pic;
+ mb_y= start_code - SLICE_MIN_START_CODE;
+ if(s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
+ mb_y += (*buf&0xE0)<<2;
+ mb_y <<= field_pic;
if (s->picture_structure == PICT_BOTTOM_FIELD)
mb_y++;
if (mb_y < 0 || mb_y >= s->end_mb_y)
@@ -1829,7 +1877,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
ff_xvmc_field_end(s);
/* end of slice reached */
- if (/*s->mb_y << field_pic == s->mb_height &&*/ !s->first_field) {
+ if (/*s->mb_y << field_pic == s->mb_height &&*/ !s->first_field && !s->first_slice) {
/* end of image */
ff_er_frame_end(&s->er);
@@ -1840,7 +1888,8 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
int ret = av_frame_ref(pict, &s->current_picture_ptr->f);
if (ret < 0)
return ret;
- ff_print_debug_info(s, s->current_picture_ptr);
+ ff_print_debug_info(s, s->current_picture_ptr, pict);
+ ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_QSCALE_TYPE_MPEG2);
} else {
if (avctx->active_thread_type & FF_THREAD_FRAME)
s->picture_number++;
@@ -1850,7 +1899,8 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
int ret = av_frame_ref(pict, &s->last_picture_ptr->f);
if (ret < 0)
return ret;
- ff_print_debug_info(s, s->last_picture_ptr);
+ ff_print_debug_info(s, s->last_picture_ptr, pict);
+ ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_QSCALE_TYPE_MPEG2);
}
}
@@ -1875,13 +1925,13 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx,
if (width == 0 || height == 0) {
av_log(avctx, AV_LOG_WARNING, "Invalid horizontal or vertical size "
"value.\n");
- if (avctx->err_recognition & AV_EF_BITSTREAM)
+ if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
return AVERROR_INVALIDDATA;
}
s->aspect_ratio_info = get_bits(&s->gb, 4);
if (s->aspect_ratio_info == 0) {
av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n");
- if (avctx->err_recognition & AV_EF_BITSTREAM)
+ if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
return -1;
}
s->frame_rate_index = get_bits(&s->gb, 4);
@@ -1927,6 +1977,7 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx,
s->progressive_sequence = 1;
s->progressive_frame = 1;
s->picture_structure = PICT_FRAME;
+ s->first_field = 0;
s->frame_pred_frame_dct = 1;
s->chroma_format = 1;
s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
@@ -1959,17 +2010,10 @@ static int vcr2_init_sequence(AVCodecContext *avctx)
s->low_delay = 1;
avctx->pix_fmt = mpeg_get_pixelformat(avctx);
- avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
-
- if (avctx->pix_fmt == AV_PIX_FMT_XVMC_MPEG2_IDCT || avctx->hwaccel ||
- s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
- if (avctx->idct_algo == FF_IDCT_AUTO)
- avctx->idct_algo = FF_IDCT_SIMPLE;
+ setup_hwaccel_for_pixfmt(avctx);
if (ff_MPV_common_init(s) < 0)
return -1;
- exchange_uv(s); // common init reset pblocks, so we swap them here
- s->swap_uv = 1; // in case of xvmc we need to swap uv for each MB
s1->mpeg_enc_ctx_allocated = 1;
for (i = 0; i < 64; i++) {
@@ -1986,9 +2030,16 @@ static int vcr2_init_sequence(AVCodecContext *avctx)
s->progressive_sequence = 1;
s->progressive_frame = 1;
s->picture_structure = PICT_FRAME;
+ s->first_field = 0;
s->frame_pred_frame_dct = 1;
s->chroma_format = 1;
- s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
+ if (s->codec_tag == AV_RL32("BW10")) {
+ s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
+ } else {
+ exchange_uv(s); // common init reset pblocks, so we swap them here
+ s->swap_uv = 1; // in case of xvmc we need to swap uv for each MB
+ s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
+ }
s1->save_width = s->width;
s1->save_height = s->height;
s1->save_progressive_seq = s->progressive_sequence;
@@ -1999,8 +2050,22 @@ static int vcr2_init_sequence(AVCodecContext *avctx)
static void mpeg_decode_user_data(AVCodecContext *avctx,
const uint8_t *p, int buf_size)
{
+ Mpeg1Context *s = avctx->priv_data;
const uint8_t *buf_end = p + buf_size;
+ if(buf_size > 29){
+ int i;
+ for(i=0; i<20; i++)
+ if(!memcmp(p+i, "\0TMPGEXS\0", 9)){
+ s->tmpgexs= 1;
+ }
+
+/* for(i=0; !(!p[i-2] && !p[i-1] && p[i]==1) && i<buf_size; i++){
+ av_log(avctx, AV_LOG_ERROR, "%c", p[i]);
+ }
+ av_log(avctx, AV_LOG_ERROR, "\n");*/
+ }
+
/* we parse the DTG active format information */
if (buf_end - p >= 5 &&
p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
@@ -2023,31 +2088,26 @@ static void mpeg_decode_gop(AVCodecContext *avctx,
{
Mpeg1Context *s1 = avctx->priv_data;
MpegEncContext *s = &s1->mpeg_enc_ctx;
-
- int time_code_hours, time_code_minutes;
- int time_code_seconds, time_code_pictures;
int broken_link;
+ int64_t tc;
init_get_bits(&s->gb, buf, buf_size*8);
- skip_bits1(&s->gb); /* drop_frame_flag */
-
- time_code_hours = get_bits(&s->gb, 5);
- time_code_minutes = get_bits(&s->gb, 6);
- skip_bits1(&s->gb); // marker bit
- time_code_seconds = get_bits(&s->gb, 6);
- time_code_pictures = get_bits(&s->gb, 6);
+ tc = avctx->timecode_frame_start = get_bits(&s->gb, 25);
- s1->closed_gop = get_bits1(&s->gb);
+ s->closed_gop = get_bits1(&s->gb);
/*broken_link indicate that after editing the
reference frames of the first B-Frames after GOP I-Frame
are missing (open gop)*/
broken_link = get_bits1(&s->gb);
- if (s->avctx->debug & FF_DEBUG_PICT_INFO)
- av_log(s->avctx, AV_LOG_DEBUG, "GOP (%2d:%02d:%02d.[%02d]) closed_gop=%d broken_link=%d\n",
- time_code_hours, time_code_minutes, time_code_seconds,
- time_code_pictures, s1->closed_gop, broken_link);
+ if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
+ char tcbuf[AV_TIMECODE_STR_SIZE];
+ av_timecode_make_mpeg_tc_string(tcbuf, tc);
+ av_log(s->avctx, AV_LOG_DEBUG,
+ "GOP (%s) closed_gop=%d broken_link=%d\n",
+ tcbuf, s->closed_gop, broken_link);
+ }
}
static int decode_chunks(AVCodecContext *avctx,
@@ -2060,6 +2120,7 @@ static int decode_chunks(AVCodecContext *avctx,
const uint8_t *buf_end = buf + buf_size;
int ret, input_size;
int last_code = 0, skip_frame = 0;
+ int picture_start_code_seen = 0;
for (;;) {
/* find next start code */
@@ -2070,6 +2131,7 @@ static int decode_chunks(AVCodecContext *avctx,
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
!avctx->hwaccel) {
int i;
+ av_assert0(avctx->thread_count > 1);
avctx->execute(avctx, slice_decode_thread, &s2->thread_context[0], NULL, s->slice_count, sizeof(void*));
for (i = 0; i < s->slice_count; i++)
@@ -2077,7 +2139,7 @@ static int decode_chunks(AVCodecContext *avctx,
}
if ((CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER)
- && avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
+ && uses_vdpau(avctx))
ff_vdpau_mpeg_picture_complete(s2, buf, buf_size, s->slice_count);
ret = slice_end(avctx, picture);
@@ -2103,7 +2165,8 @@ static int decode_chunks(AVCodecContext *avctx,
case SEQ_START_CODE:
if (last_code == 0) {
mpeg1_decode_sequence(avctx, buf_ptr, input_size);
- s->sync=1;
+ if(buf != avctx->extradata)
+ s->sync=1;
} else {
av_log(avctx, AV_LOG_ERROR, "ignoring SEQ_START_CODE after %X\n", last_code);
if (avctx->err_recognition & AV_EF_EXPLODE)
@@ -2112,12 +2175,24 @@ static int decode_chunks(AVCodecContext *avctx,
break;
case PICTURE_START_CODE:
+ if (picture_start_code_seen && s2->picture_structure == PICT_FRAME) {
+ /* If it's a frame picture, there can't be more than one picture header.
+ Yet, it does happen and we need to handle it. */
+ av_log(avctx, AV_LOG_WARNING, "ignoring extra picture following a frame-picture\n");
+ break;
+ }
+ picture_start_code_seen = 1;
+
if (s2->width <= 0 || s2->height <= 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d.\n",
s2->width, s2->height);
return AVERROR_INVALIDDATA;
}
+ if(s->tmpgexs){
+ s2->intra_dc_precision= 3;
+ s2->intra_matrix[0]= 1;
+ }
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
!avctx->hwaccel && s->slice_count) {
int i;
@@ -2196,11 +2271,40 @@ static int decode_chunks(AVCodecContext *avctx,
break;
default:
if (start_code >= SLICE_MIN_START_CODE &&
+ start_code <= SLICE_MAX_START_CODE && last_code == PICTURE_START_CODE) {
+
+ if (s2->progressive_sequence && !s2->progressive_frame) {
+ s2->progressive_frame = 1;
+ av_log(s2->avctx, AV_LOG_ERROR, "interlaced frame in progressive sequence, ignoring\n");
+ }
+
+ if (s2->picture_structure == 0 || (s2->progressive_frame && s2->picture_structure != PICT_FRAME)) {
+ av_log(s2->avctx, AV_LOG_ERROR, "picture_structure %d invalid, ignoring\n", s2->picture_structure);
+ s2->picture_structure = PICT_FRAME;
+ }
+
+ if (s2->progressive_sequence && !s2->frame_pred_frame_dct) {
+ av_log(s2->avctx, AV_LOG_WARNING, "invalid frame_pred_frame_dct\n");
+ }
+
+ if (s2->picture_structure == PICT_FRAME) {
+ s2->first_field = 0;
+ s2->v_edge_pos = 16 * s2->mb_height;
+ } else {
+ s2->first_field ^= 1;
+ s2->v_edge_pos = 8 * s2->mb_height;
+ memset(s2->mbskip_table, 0, s2->mb_stride * s2->mb_height);
+ }
+ }
+ if (start_code >= SLICE_MIN_START_CODE &&
start_code <= SLICE_MAX_START_CODE && last_code != 0) {
const int field_pic = s2->picture_structure != PICT_FRAME;
- int mb_y = (start_code - SLICE_MIN_START_CODE) << field_pic;
+ int mb_y = start_code - SLICE_MIN_START_CODE;
last_code = SLICE_MIN_START_CODE;
+ if(s2->codec_id != AV_CODEC_ID_MPEG1VIDEO && s2->mb_height > 2800/16)
+ mb_y += (*buf_ptr&0xE0)<<2;
+ mb_y <<= field_pic;
if (s2->picture_structure == PICT_BOTTOM_FIELD)
mb_y++;
@@ -2212,13 +2316,13 @@ static int decode_chunks(AVCodecContext *avctx,
if (s2->last_picture_ptr == NULL) {
/* Skip B-frames if we do not have reference frames and gop is not closed */
if (s2->pict_type == AV_PICTURE_TYPE_B) {
- if (!s->closed_gop) {
+ if (!s2->closed_gop) {
skip_frame = 1;
break;
}
}
}
- if (s2->pict_type == AV_PICTURE_TYPE_I)
+ if (s2->pict_type == AV_PICTURE_TYPE_I || (s2->flags2 & CODEC_FLAG2_SHOW_ALL))
s->sync=1;
if (s2->next_picture_ptr == NULL) {
/* Skip P-frames if we do not have a reference frame or we have an invalid header. */
@@ -2260,7 +2364,7 @@ static int decode_chunks(AVCodecContext *avctx,
return AVERROR_INVALIDDATA;
}
- if (avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) {
+ if (uses_vdpau(avctx)) {
s->slice_count++;
break;
}
@@ -2270,6 +2374,7 @@ static int decode_chunks(AVCodecContext *avctx,
int threshold = (s2->mb_height * s->slice_count +
s2->slice_context_count / 2) /
s2->slice_context_count;
+ av_assert0(avctx->thread_count > 1);
if (threshold <= mb_y) {
MpegEncContext *thread_context = s2->thread_context[s->slice_count];
@@ -2310,6 +2415,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
+ int ret;
int buf_size = avpkt->size;
Mpeg1Context *s = avctx->priv_data;
AVFrame *picture = data;
@@ -2337,19 +2443,32 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
return buf_size;
}
- if (s->mpeg_enc_ctx_allocated == 0 && avctx->codec_tag == AV_RL32("VCR2"))
+ s2->codec_tag = avpriv_toupper4(avctx->codec_tag);
+ if (s->mpeg_enc_ctx_allocated == 0 && ( s2->codec_tag == AV_RL32("VCR2")
+ || s2->codec_tag == AV_RL32("BW10")
+ ))
vcr2_init_sequence(avctx);
s->slice_count = 0;
if (avctx->extradata && !s->extradata_decoded) {
- int ret = decode_chunks(avctx, picture, got_output, avctx->extradata, avctx->extradata_size);
+ ret = decode_chunks(avctx, picture, got_output, avctx->extradata, avctx->extradata_size);
+ if(*got_output) {
+ av_log(avctx, AV_LOG_ERROR, "picture in extradata\n");
+ *got_output = 0;
+ }
s->extradata_decoded = 1;
- if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE))
+ if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) {
+ s2->current_picture_ptr = NULL;
return ret;
+ }
}
- return decode_chunks(avctx, picture, got_output, buf, buf_size);
+ ret = decode_chunks(avctx, picture, got_output, buf, buf_size);
+ if (ret<0 || *got_output)
+ s2->current_picture_ptr = NULL;
+
+ return ret;
}
@@ -2358,7 +2477,6 @@ static void flush(AVCodecContext *avctx)
Mpeg1Context *s = avctx->priv_data;
s->sync=0;
- s->closed_gop = 0;
ff_mpeg_flush(avctx);
}
@@ -2397,6 +2515,7 @@ AVCodec ff_mpeg1video_decoder = {
CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
CODEC_CAP_SLICE_THREADS,
.flush = flush,
+ .max_lowres = 3,
.long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
.update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg_decode_update_thread_context)
};
@@ -2413,10 +2532,26 @@ AVCodec ff_mpeg2video_decoder = {
CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
CODEC_CAP_SLICE_THREADS,
.flush = flush,
+ .max_lowres = 3,
.long_name = NULL_IF_CONFIG_SMALL("MPEG-2 video"),
.profiles = NULL_IF_CONFIG_SMALL(mpeg2_video_profiles),
};
+//legacy decoder
+AVCodec ff_mpegvideo_decoder = {
+ .name = "mpegvideo",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_MPEG2VIDEO,
+ .priv_data_size = sizeof(Mpeg1Context),
+ .init = mpeg_decode_init,
+ .close = mpeg_decode_end,
+ .decode = mpeg_decode_frame,
+ .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
+ .flush = flush,
+ .max_lowres = 3,
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
+};
+
#if CONFIG_MPEG_XVMC_DECODER
static av_cold int mpeg_mc_decode_init(AVCodecContext *avctx)
{