Diffstat (limited to 'libavcodec/mpegvideo_enc.c')
-rw-r--r--  libavcodec/mpegvideo_enc.c  471
1 file changed, 329 insertions(+), 142 deletions(-)
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index 90da3f465c..c84837b71a 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -5,20 +5,20 @@
*
* 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -52,6 +52,7 @@
#include "internal.h"
#include "bytestream.h"
#include <limits.h>
+#include "sp5x.h"
static int encode_picture(MpegEncContext *s, int picture_number);
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
@@ -101,8 +102,7 @@ void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
* 3444240 >= (1 << 36) / (x) >= 275 */
qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
- (ff_aanscales[i] * qscale *
- quant_matrix[j]));
+ (ff_aanscales[i] * (int64_t)qscale * quant_matrix[j]));
}
} else {
for (i = 0; i < 64; i++) {
@@ -222,6 +222,22 @@ static void MPV_encode_defaults(MpegEncContext *s)
s->picture_in_gop_number = 0;
}
+av_cold int ff_dct_encode_init(MpegEncContext *s) {
+ if (ARCH_X86)
+ ff_dct_encode_init_x86(s);
+
+ ff_h263dsp_init(&s->h263dsp);
+ if (!s->dct_quantize)
+ s->dct_quantize = ff_dct_quantize_c;
+ if (!s->denoise_dct)
+ s->denoise_dct = denoise_dct_c;
+ s->fast_dct_quantize = s->dct_quantize;
+ if (s->avctx->trellis)
+ s->dct_quantize = dct_quantize_trellis_c;
+
+ return 0;
+}
+
/* init video encoder */
av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
{
@@ -240,10 +256,13 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
}
break;
case AV_CODEC_ID_MJPEG:
+ case AV_CODEC_ID_AMV:
if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
+ avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
- avctx->pix_fmt != AV_PIX_FMT_YUV422P) ||
+ avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
+ avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
return -1;
@@ -257,6 +276,10 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
}
switch (avctx->pix_fmt) {
+ case AV_PIX_FMT_YUVJ444P:
+ case AV_PIX_FMT_YUV444P:
+ s->chroma_format = CHROMA_444;
+ break;
case AV_PIX_FMT_YUVJ422P:
case AV_PIX_FMT_YUV422P:
s->chroma_format = CHROMA_422;
@@ -273,8 +296,9 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
s->height = avctx->height;
if (avctx->gop_size > 600 &&
avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
- av_log(avctx, AV_LOG_ERROR,
- "Warning keyframe interval too large! reducing it ...\n");
+ av_log(avctx, AV_LOG_WARNING,
+ "keyframe interval too large!, reducing it from %d to %d\n",
+ avctx->gop_size, 600);
avctx->gop_size = 600;
}
s->gop_size = avctx->gop_size;
@@ -284,6 +308,7 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
if (avctx->max_b_frames > MAX_B_FRAMES) {
av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
"is %d.\n", MAX_B_FRAMES);
+ avctx->max_b_frames = MAX_B_FRAMES;
}
s->max_b_frames = avctx->max_b_frames;
s->codec_id = avctx->codec->id;
@@ -318,10 +343,35 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
- av_log(avctx, AV_LOG_ERROR,
- "a vbv buffer size is needed, "
- "for encoding with a maximum bitrate\n");
- return -1;
+ switch(avctx->codec_id) {
+ case AV_CODEC_ID_MPEG1VIDEO:
+ case AV_CODEC_ID_MPEG2VIDEO:
+ avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112L / 15000000 * 16384;
+ break;
+ case AV_CODEC_ID_MPEG4:
+ case AV_CODEC_ID_MSMPEG4V1:
+ case AV_CODEC_ID_MSMPEG4V2:
+ case AV_CODEC_ID_MSMPEG4V3:
+ if (avctx->rc_max_rate >= 15000000) {
+ avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000L) * (760-320) / (38400000 - 15000000);
+ } else if(avctx->rc_max_rate >= 2000000) {
+ avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000L) * (320- 80) / (15000000 - 2000000);
+ } else if(avctx->rc_max_rate >= 384000) {
+ avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000L) * ( 80- 40) / ( 2000000 - 384000);
+ } else
+ avctx->rc_buffer_size = 40;
+ avctx->rc_buffer_size *= 16384;
+ break;
+ }
+ if (avctx->rc_buffer_size) {
+ av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
+ }
+ }
+
+ if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
+ av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
+ if (avctx->rc_max_rate && !avctx->rc_buffer_size)
+ return -1;
}
if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
@@ -335,7 +385,7 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
}
if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
- av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
+ av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
return -1;
}
@@ -397,18 +447,74 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
return -1;
}
+ if (s->max_b_frames < 0) {
+ av_log(avctx, AV_LOG_ERROR,
+ "max b frames must be 0 or positive for mpegvideo based encoders\n");
+ return -1;
+ }
if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
s->codec_id == AV_CODEC_ID_H263 ||
s->codec_id == AV_CODEC_ID_H263P) &&
(avctx->sample_aspect_ratio.num > 255 ||
avctx->sample_aspect_ratio.den > 255)) {
- av_log(avctx, AV_LOG_ERROR,
- "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
+ av_log(avctx, AV_LOG_WARNING,
+ "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
+ av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
+ avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
+ }
+
+ if ((s->codec_id == AV_CODEC_ID_H263 ||
+ s->codec_id == AV_CODEC_ID_H263P) &&
+ (avctx->width > 2048 ||
+ avctx->height > 1152 )) {
+ av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
+ return -1;
+ }
+ if ((s->codec_id == AV_CODEC_ID_H263 ||
+ s->codec_id == AV_CODEC_ID_H263P) &&
+ ((avctx->width &3) ||
+ (avctx->height&3) )) {
+ av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
+ return -1;
+ }
+
+ if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
+ (avctx->width > 4095 ||
+ avctx->height > 4095 )) {
+ av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
+ return -1;
+ }
+
+ if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
+ (avctx->width > 16383 ||
+ avctx->height > 16383 )) {
+ av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
return -1;
}
+ if (s->codec_id == AV_CODEC_ID_RV10 &&
+ (avctx->width &15 ||
+ avctx->height&15 )) {
+ av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (s->codec_id == AV_CODEC_ID_RV20 &&
+ (avctx->width &3 ||
+ avctx->height&3 )) {
+ av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
+ return AVERROR(EINVAL);
+ }
+
+ if ((s->codec_id == AV_CODEC_ID_WMV1 ||
+ s->codec_id == AV_CODEC_ID_WMV2) &&
+ avctx->width & 1) {
+ av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
+ return -1;
+ }
+
if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
@@ -416,7 +522,8 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
}
// FIXME mpeg2 uses that too
- if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
+ if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
+ && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
av_log(avctx, AV_LOG_ERROR,
"mpeg2 style quantization not supported by codec\n");
return -1;
@@ -466,6 +573,7 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
s->codec_id != AV_CODEC_ID_MPEG4 &&
s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
+ s->codec_id != AV_CODEC_ID_MJPEG &&
(s->codec_id != AV_CODEC_ID_H263P)) {
av_log(avctx, AV_LOG_ERROR,
"multi threaded encoding not supported by codec\n");
@@ -474,14 +582,17 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
if (s->avctx->thread_count < 1) {
av_log(avctx, AV_LOG_ERROR,
- "automatic thread number detection not supported by codec,"
+ "automatic thread number detection not supported by codec, "
"patch welcome\n");
return -1;
}
- if (s->avctx->thread_count > 1)
+ if (s->avctx->slices > 1 || s->avctx->thread_count > 1)
s->rtp_mode = 1;
+ if (s->avctx->thread_count > 1 && s->codec_id == AV_CODEC_ID_H263P)
+ s->h263_slice_structured = 1;
+
if (!avctx->time_base.den || !avctx->time_base.num) {
av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
return -1;
@@ -508,8 +619,7 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
//return -1;
}
- if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
- s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
+ if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
// (a + x * 3 / 8) / x
s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
s->inter_quant_bias = 0;
@@ -519,11 +629,18 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
}
+ if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
+ av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
+ return AVERROR(EINVAL);
+ }
+
if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
s->intra_quant_bias = avctx->intra_quant_bias;
if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
s->inter_quant_bias = avctx->inter_quant_bias;
+ av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
+
if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
s->avctx->time_base.den > (1 << 16) - 1) {
av_log(avctx, AV_LOG_ERROR,
@@ -548,6 +665,7 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
s->rtp_mode = 1;
break;
case AV_CODEC_ID_MJPEG:
+ case AV_CODEC_ID_AMV:
s->out_format = FMT_MJPEG;
s->intra_only = 1; /* force intra only for jpeg */
if (!CONFIG_MJPEG_ENCODER ||
@@ -572,13 +690,13 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
break;
case AV_CODEC_ID_H263:
if (!CONFIG_H263_ENCODER)
- return -1;
+ return -1;
if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
s->width, s->height) == 8) {
- av_log(avctx, AV_LOG_INFO,
+ av_log(avctx, AV_LOG_ERROR,
"The specified picture size of %dx%d is not valid for "
"the H.263 codec.\nValid sizes are 128x96, 176x144, "
- "352x288, 704x576, and 1408x1152."
+ "352x288, 704x576, and 1408x1152. "
"Try H.263+.\n", s->width, s->height);
return -1;
}
@@ -682,9 +800,6 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
if (ff_MPV_common_init(s) < 0)
return -1;
- if (ARCH_X86)
- ff_MPV_encode_init_x86(s);
-
s->avctx->coded_frame = &s->current_picture.f;
if (s->msmpeg4_version) {
@@ -695,8 +810,10 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
MAX_PICTURE_COUNT * sizeof(Picture *), fail);
@@ -708,14 +825,7 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
2 * 64 * sizeof(uint16_t), fail);
}
- ff_h263dsp_init(&s->h263dsp);
- if (!s->dct_quantize)
- s->dct_quantize = ff_dct_quantize_c;
- if (!s->denoise_dct)
- s->denoise_dct = denoise_dct_c;
- s->fast_dct_quantize = s->dct_quantize;
- if (avctx->trellis)
- s->dct_quantize = dct_quantize_trellis_c;
+ ff_dct_encode_init(s);
if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
s->chroma_qscale_table = ff_h263_chroma_qscale_table;
@@ -822,6 +932,10 @@ av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
av_freep(&s->avctx->stats_out);
av_freep(&s->ac_stats);
+ if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
+ if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
+ s->q_chroma_intra_matrix= NULL;
+ s->q_chroma_intra_matrix16= NULL;
av_freep(&s->q_intra_matrix);
av_freep(&s->q_inter_matrix);
av_freep(&s->q_intra_matrix16);
@@ -886,18 +1000,17 @@ static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
if (pts != AV_NOPTS_VALUE) {
if (s->user_specified_pts != AV_NOPTS_VALUE) {
- int64_t time = pts;
int64_t last = s->user_specified_pts;
- if (time <= last) {
+ if (pts <= last) {
av_log(s->avctx, AV_LOG_ERROR,
- "Error, Invalid timestamp=%"PRId64", "
- "last=%"PRId64"\n", pts, s->user_specified_pts);
- return -1;
+ "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
+ pts, last);
+ return AVERROR(EINVAL);
}
if (!s->low_delay && display_picture_number == 1)
- s->dts_delta = time - last;
+ s->dts_delta = pts - last;
}
s->user_specified_pts = pts;
} else {
@@ -914,7 +1027,7 @@ static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
}
if (pic_arg) {
- if (!pic_arg->buf[0]);
+ if (!pic_arg->buf[0])
direct = 0;
if (pic_arg->linesize[0] != s->linesize)
direct = 0;
@@ -971,18 +1084,31 @@ static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
uint8_t *src = pic_arg->data[i];
uint8_t *dst = pic->f.data[i];
+ if (s->codec_id == AV_CODEC_ID_AMV && !(s->avctx->flags & CODEC_FLAG_EMU_EDGE)) {
+ h = ((s->height + 15)/16*16) >> v_shift;
+ }
+
if (!s->avctx->rc_buffer_size)
dst += INPLACE_OFFSET;
if (src_stride == dst_stride)
memcpy(dst, src, src_stride * h);
else {
- while (h--) {
- memcpy(dst, src, w);
- dst += dst_stride;
+ int h2 = h;
+ uint8_t *dst2 = dst;
+ while (h2--) {
+ memcpy(dst2, src, w);
+ dst2 += dst_stride;
src += src_stride;
}
}
+ if ((s->width & 15) || (s->height & 15)) {
+ s->dsp.draw_edges(dst, dst_stride,
+ w, h,
+ 16>>h_shift,
+ 16>>v_shift,
+ EDGE_BOTTOM);
+ }
}
}
}
@@ -1019,19 +1145,23 @@ static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
- switch (s->avctx->frame_skip_exp) {
+ switch (FFABS(s->avctx->frame_skip_exp)) {
case 0: score = FFMAX(score, v); break;
case 1: score += FFABS(v); break;
- case 2: score += v * v; break;
- case 3: score64 += FFABS(v * v * (int64_t)v); break;
- case 4: score64 += v * v * (int64_t)(v * v); break;
+ case 2: score64 += v * (int64_t)v; break;
+ case 3: score64 += FFABS(v * (int64_t)v * v); break;
+ case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
}
}
}
}
+ emms_c();
if (score)
score64 = score;
+ if (s->avctx->frame_skip_exp < 0)
+ score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
+ -1.0/s->avctx->frame_skip_exp);
if (score64 < s->avctx->frame_skip_threshold)
return 1;
@@ -1064,7 +1194,7 @@ static int estimate_best_b_count(MpegEncContext *s)
int64_t best_rd = INT64_MAX;
int best_b_count = -1;
- assert(scale >= 0 && scale <= 3);
+ av_assert0(scale >= 0 && scale <= 3);
//emms_c();
//s->next_picture_ptr->quality;
@@ -1174,6 +1304,19 @@ static int select_input_picture(MpegEncContext *s)
/* set next picture type & ordering */
if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
+ if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
+ if (s->picture_in_gop_number < s->gop_size &&
+ s->next_picture_ptr &&
+ skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
+ // FIXME check that te gop check above is +-1 correct
+ av_frame_unref(&s->input_picture[0]->f);
+
+ ff_vbv_update(s, 0);
+
+ goto no_output_pic;
+ }
+ }
+
if (/*s->picture_in_gop_number >= s->gop_size ||*/
s->next_picture_ptr == NULL || s->intra_only) {
s->reordered_input_picture[0] = s->input_picture[0];
@@ -1183,19 +1326,6 @@ static int select_input_picture(MpegEncContext *s)
} else {
int b_frames;
- if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
- if (s->picture_in_gop_number < s->gop_size &&
- skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
- // FIXME check that te gop check above is +-1 correct
- av_frame_unref(&s->input_picture[0]->f);
-
- emms_c();
- ff_vbv_update(s, 0);
-
- goto no_output_pic;
- }
- }
-
if (s->flags & CODEC_FLAG_PASS2) {
for (i = 0; i < s->max_b_frames + 1; i++) {
int pict_num = s->input_picture[0]->f.display_picture_number + i;
@@ -1344,23 +1474,21 @@ no_output_pic:
static void frame_end(MpegEncContext *s)
{
- int i;
-
if (s->unrestricted_mv &&
s->current_picture.reference &&
!s->intra_only) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
int hshift = desc->log2_chroma_w;
int vshift = desc->log2_chroma_h;
- s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
+ s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
s->h_edge_pos, s->v_edge_pos,
EDGE_WIDTH, EDGE_WIDTH,
EDGE_TOP | EDGE_BOTTOM);
- s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
+ s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
EDGE_TOP | EDGE_BOTTOM);
- s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
+ s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
EDGE_TOP | EDGE_BOTTOM);
@@ -1373,14 +1501,6 @@ static void frame_end(MpegEncContext *s)
if (s->pict_type!= AV_PICTURE_TYPE_B)
s->last_non_b_pict_type = s->pict_type;
- if (s->encoding) {
- /* release non-reference frames */
- for (i = 0; i < MAX_PICTURE_COUNT; i++) {
- if (!s->picture[i].reference)
- ff_mpeg_unref_picture(s, &s->picture[i]);
- }
- }
-
s->avctx->coded_frame = &s->current_picture_ptr->f;
}
@@ -1471,7 +1591,7 @@ static int frame_start(MpegEncContext *s)
}
if (s->dct_error_sum) {
- assert(s->avctx->noise_reduction && s->encoding);
+ av_assert2(s->avctx->noise_reduction && s->encoding);
update_noise_reduction(s);
}
@@ -1479,7 +1599,7 @@ static int frame_start(MpegEncContext *s)
}
int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
- const AVFrame *pic_arg, int *got_packet)
+ AVFrame *pic_arg, int *got_packet)
{
MpegEncContext *s = avctx->priv_data;
int i, stuffing_count, ret;
@@ -1496,8 +1616,7 @@ int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
/* output? */
if (s->new_picture.f.data[0]) {
- if (!pkt->data &&
- (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
+ if ((ret = ff_alloc_packet2(avctx, pkt, s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000)) < 0)
return ret;
if (s->mb_info) {
s->mb_info_ptr = av_packet_new_side_data(pkt,
@@ -1594,6 +1713,7 @@ vbv_retry:
s->frame_bits = put_bits_count(&s->pb);
stuffing_count = ff_vbv_update(s, s->frame_bits);
+ s->stuffing_bits = 8*stuffing_count;
if (stuffing_count) {
if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
stuffing_count + 50) {
@@ -1648,7 +1768,7 @@ vbv_retry:
vbv_delay = FFMAX(vbv_delay, min_delay);
- assert(vbv_delay < 0xFFFF);
+ av_assert0(vbv_delay < 0xFFFF);
s->vbv_delay_ptr[0] &= 0xF8;
s->vbv_delay_ptr[0] |= vbv_delay >> 13;
@@ -1661,12 +1781,12 @@ vbv_retry:
avctx->frame_bits = s->frame_bits;
pkt->pts = s->current_picture.f.pts;
- if (!s->low_delay) {
+ if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
if (!s->current_picture.f.coded_picture_number)
pkt->dts = pkt->pts - s->dts_delta;
else
pkt->dts = s->reordered_pts;
- s->reordered_pts = s->input_picture[0]->f.pts;
+ s->reordered_pts = pkt->pts;
} else
pkt->dts = pkt->pts;
if (s->current_picture.f.key_frame)
@@ -1676,6 +1796,13 @@ vbv_retry:
} else {
s->frame_bits = 0;
}
+
+ /* release non-reference frames */
+ for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+ if (!s->picture[i].reference)
+ ff_mpeg_unref_picture(s, &s->picture[i]);
+ }
+
assert((s->frame_bits & 7) == 0);
pkt->size = s->frame_bits / 8;
@@ -1800,15 +1927,17 @@ static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
static av_always_inline void encode_mb_internal(MpegEncContext *s,
int motion_x, int motion_y,
int mb_block_height,
+ int mb_block_width,
int mb_block_count)
{
- int16_t weight[8][64];
- int16_t orig[8][64];
+ int16_t weight[12][64];
+ int16_t orig[12][64];
const int mb_x = s->mb_x;
const int mb_y = s->mb_y;
int i;
- int skip_dct[8];
+ int skip_dct[12];
int dct_offset = s->linesize * 8; // default for progressive frames
+ int uv_dct_offset = s->uvlinesize * 8;
uint8_t *ptr_y, *ptr_cb, *ptr_cr;
ptrdiff_t wrap_y, wrap_c;
@@ -1850,12 +1979,14 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
ptr_y = s->new_picture.f.data[0] +
(mb_y * 16 * wrap_y) + mb_x * 16;
ptr_cb = s->new_picture.f.data[1] +
- (mb_y * mb_block_height * wrap_c) + mb_x * 8;
+ (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
ptr_cr = s->new_picture.f.data[2] +
- (mb_y * mb_block_height * wrap_c) + mb_x * 8;
+ (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
- if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
+ if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
uint8_t *ebuf = s->edge_emu_buffer + 32;
+ int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
+ int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
s->vdsp.emulated_edge_mc(ebuf, ptr_y,
wrap_y, wrap_y,
16, 16, mb_x * 16, mb_y * 16,
@@ -1863,14 +1994,16 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
ptr_y = ebuf;
s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb,
wrap_c, wrap_c,
- 8, mb_block_height, mb_x * 8, mb_y * 8,
- s->width >> 1, s->height >> 1);
+ mb_block_width, mb_block_height,
+ mb_x * mb_block_width, mb_y * mb_block_height,
+ cw, ch);
ptr_cb = ebuf + 18 * wrap_y;
- s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr,
+ s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 16, ptr_cr,
wrap_c, wrap_c,
- 8, mb_block_height, mb_x * 8, mb_y * 8,
- s->width >> 1, s->height >> 1);
- ptr_cr = ebuf + 18 * wrap_y + 8;
+ mb_block_width, mb_block_height,
+ mb_x * mb_block_width, mb_y * mb_block_height,
+ cw, ch);
+ ptr_cr = ebuf + 18 * wrap_y + 16;
}
if (s->mb_intra) {
@@ -1892,8 +2025,10 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
s->interlaced_dct = 1;
dct_offset = wrap_y;
+ uv_dct_offset = wrap_c;
wrap_y <<= 1;
- if (s->chroma_format == CHROMA_422)
+ if (s->chroma_format == CHROMA_422 ||
+ s->chroma_format == CHROMA_444)
wrap_c <<= 1;
}
}
@@ -1910,11 +2045,16 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
} else {
s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
- if (!s->chroma_y_shift) { /* 422 */
- s->dsp.get_pixels(s->block[6],
- ptr_cb + (dct_offset >> 1), wrap_c);
- s->dsp.get_pixels(s->block[7],
- ptr_cr + (dct_offset >> 1), wrap_c);
+ if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
+ s->dsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
+ s->dsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
+ } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
+ s->dsp.get_pixels(s->block[6], ptr_cb + 8, wrap_c);
+ s->dsp.get_pixels(s->block[7], ptr_cr + 8, wrap_c);
+ s->dsp.get_pixels(s->block[8], ptr_cb + uv_dct_offset, wrap_c);
+ s->dsp.get_pixels(s->block[9], ptr_cr + uv_dct_offset, wrap_c);
+ s->dsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
+ s->dsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
}
}
} else {
@@ -1973,6 +2113,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
s->interlaced_dct = 1;
dct_offset = wrap_y;
+ uv_dct_offset = wrap_c;
wrap_y <<= 1;
if (s->chroma_format == CHROMA_422)
wrap_c <<= 1;
@@ -1994,10 +2135,10 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
if (!s->chroma_y_shift) { /* 422 */
- s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
- dest_cb + (dct_offset >> 1), wrap_c);
- s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
- dest_cr + (dct_offset >> 1), wrap_c);
+ s->dsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
+ dest_cb + uv_dct_offset, wrap_c);
+ s->dsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
+ dest_cr + uv_dct_offset, wrap_c);
}
}
/* pre quantization */
@@ -2024,12 +2165,12 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
wrap_c, 8) < 20 * s->qscale)
skip_dct[5] = 1;
if (!s->chroma_y_shift) { /* 422 */
- if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
- dest_cb + (dct_offset >> 1),
+ if (s->dsp.sad[1](NULL, ptr_cb + uv_dct_offset,
+ dest_cb + uv_dct_offset,
wrap_c, 8) < 20 * s->qscale)
skip_dct[6] = 1;
- if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
- dest_cr + (dct_offset >> 1),
+ if (s->dsp.sad[1](NULL, ptr_cr + uv_dct_offset,
+ dest_cr + uv_dct_offset,
wrap_c, 8) < 20 * s->qscale)
skip_dct[7] = 1;
}
@@ -2051,17 +2192,17 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
get_visual_weight(weight[5], ptr_cr , wrap_c);
if (!s->chroma_y_shift) { /* 422 */
if (!skip_dct[6])
- get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
+ get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
wrap_c);
if (!skip_dct[7])
- get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
+ get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
wrap_c);
}
memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
}
/* DCT & quantize */
- assert(s->out_format != FMT_MJPEG || s->qscale == 8);
+ av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
{
for (i = 0; i < mb_block_count; i++) {
if (!skip_dct[i]) {
@@ -2107,6 +2248,12 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
s->block_last_index[5] = 0;
s->block[4][0] =
s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
+ if (!s->chroma_y_shift) { /* 422 / 444 */
+ for (i=6; i<12; i++) {
+ s->block_last_index[i] = 0;
+ s->block[i][0] = s->block[4][0];
+ }
+ }
}
// non c quantize code returns incorrect block_last_index FIXME
@@ -2157,18 +2304,20 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
ff_h263_encode_mb(s, s->block, motion_x, motion_y);
break;
case AV_CODEC_ID_MJPEG:
+ case AV_CODEC_ID_AMV:
if (CONFIG_MJPEG_ENCODER)
ff_mjpeg_encode_mb(s, s->block);
break;
default:
- assert(0);
+ av_assert1(0);
}
}
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
{
- if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
- else encode_mb_internal(s, motion_x, motion_y, 16, 8);
+ if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
+ else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
+ else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
}
static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
@@ -2305,7 +2454,7 @@ static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, in
}
}
- assert(acc>=0);
+ av_assert2(acc>=0);
return acc;
}
@@ -2411,7 +2560,7 @@ static void write_slice_end(MpegEncContext *s){
ff_mpeg4_stuffing(&s->pb);
}else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
- ff_mjpeg_encode_stuffing(&s->pb);
+ ff_mjpeg_encode_stuffing(s);
}
avpriv_align_put_bits(&s->pb);
@@ -2500,6 +2649,11 @@ static int encode_thread(AVCodecContext *c, void *arg){
s->current_picture.f.error[i] = 0;
}
+ if(s->codec_id==AV_CODEC_ID_AMV){
+ s->last_dc[0] = 128*8/13;
+ s->last_dc[1] = 128*8/14;
+ s->last_dc[2] = 128*8/14;
+ }
s->mb_skip_run = 0;
memset(s->last_mv, 0, sizeof(s->last_mv));
@@ -2543,7 +2697,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
if(s->data_partitioning){
if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
|| s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
- av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+ av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
return -1;
}
}
@@ -2579,6 +2733,9 @@ static int encode_thread(AVCodecContext *c, void *arg){
case AV_CODEC_ID_MPEG1VIDEO:
if(s->mb_skip_run) is_gob_start=0;
break;
+ case AV_CODEC_ID_MJPEG:
+ if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
+ break;
}
if(is_gob_start){
@@ -2590,7 +2747,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
}
}
- assert((put_bits_count(&s->pb)&7) == 0);
+ av_assert2((put_bits_count(&s->pb)&7) == 0);
current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
@@ -2797,8 +2954,9 @@ static int encode_thread(AVCodecContext *c, void *arg){
int16_t ac[6][16];
const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
static const int dquant_tab[4]={-1,1,-2,2};
+ int storecoefs = s->mb_intra && s->dc_val[0];
- assert(backup_s.dquant == 0);
+ av_assert2(backup_s.dquant == 0);
//FIXME intra
s->mv_dir= best_s.mv_dir;
@@ -2816,7 +2974,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
if(qp < s->avctx->qmin || qp > s->avctx->qmax)
continue;
backup_s.dquant= dquant;
- if(s->mb_intra && s->dc_val[0]){
+ if(storecoefs){
for(i=0; i<6; i++){
dc[i]= s->dc_val[0][ s->block_index[i] ];
memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
@@ -2826,7 +2984,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
&dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
if(best_s.qscale != qp){
- if(s->mb_intra && s->dc_val[0]){
+ if(storecoefs){
for(i=0; i<6; i++){
s->dc_val[0][ s->block_index[i] ]= dc[i];
memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
@@ -3226,6 +3384,13 @@ static int encode_picture(MpegEncContext *s, int picture_number)
update_qscale(s);
}
+ if(s->codec_id != AV_CODEC_ID_AMV){
+ if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
+ if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
+ s->q_chroma_intra_matrix = s->q_intra_matrix;
+ s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
+ }
+
s->mb_intra=0; //for the rate distortion & bit compare functions
for(i=1; i<context_count; i++){
ret = ff_update_duplicate_context(s->thread_context[i], s);
@@ -3268,6 +3433,8 @@ static int encode_picture(MpegEncContext *s, int picture_number)
s->pict_type= AV_PICTURE_TYPE_I;
for(i=0; i<s->mb_stride*s->mb_height; i++)
s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
+ if(s->msmpeg4_version >= 3)
+ s->no_rounding=1;
av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
}
@@ -3346,6 +3513,25 @@ static int encode_picture(MpegEncContext *s, int picture_number)
s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
s->qscale= 8;
}
+ if(s->codec_id == AV_CODEC_ID_AMV){
+ static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
+ static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
+ for(i=1;i<64;i++){
+ int j= s->dsp.idct_permutation[ff_zigzag_direct[i]];
+
+ s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
+ s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
+ }
+ s->y_dc_scale_table= y;
+ s->c_dc_scale_table= c;
+ s->intra_matrix[0] = 13;
+ s->chroma_intra_matrix[0] = 14;
+ ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
+ s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
+ ff_convert_matrix(&s->dsp, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
+ s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
+ s->qscale= 8;
+ }
//FIXME var duplication
s->current_picture_ptr->f.key_frame =
@@ -3356,6 +3542,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
if (s->current_picture.f.key_frame)
s->picture_in_gop_number=0;
+ s->mb_x = s->mb_y = 0;
s->last_bits= put_bits_count(&s->pb);
switch(s->out_format) {
case FMT_MJPEG:
@@ -3388,7 +3575,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
ff_mpeg1_encode_picture_header(s, picture_number);
break;
default:
- assert(0);
+ av_assert0(0);
}
bits= put_bits_count(&s->pb);
s->header_bits= bits - s->last_bits;
@@ -3479,7 +3666,7 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
block[0] = (block[0] + (q >> 1)) / q;
start_i = 1;
last_non_zero = 0;
- qmat = s->q_intra_matrix[qscale];
+ qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
if(s->mpeg_quant || s->out_format == FMT_MPEG1)
bias= 1<<(QMAT_SHIFT-1);
length = s->intra_ac_vlc_length;
@@ -3525,7 +3712,7 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
// coeff[2][k]= -level+2;
}
coeff_count[i]= FFMIN(level, 2);
- assert(coeff_count[i]);
+ av_assert2(coeff_count[i]);
max |=level;
}else{
coeff[0][i]= (level>>31)|1;
@@ -3559,9 +3746,9 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
const int alevel= FFABS(level);
int unquant_coeff;
- assert(level);
+ av_assert2(level);
- if(s->out_format == FMT_H263){
+ if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
unquant_coeff= alevel*qmul + qadd;
}else{ //MPEG1
j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
@@ -3590,7 +3777,7 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
}
}
- if(s->out_format == FMT_H263){
+ if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
for(j=survivor_count-1; j>=0; j--){
int run= i - survivor[j];
int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
@@ -3616,7 +3803,7 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
}
}
- if(s->out_format == FMT_H263){
+ if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
for(j=survivor_count-1; j>=0; j--){
int run= i - survivor[j];
int score= distortion + score_tab[i-run];
@@ -3649,7 +3836,7 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
survivor[ survivor_count++ ]= i+1;
}
- if(s->out_format != FMT_H263){
+ if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
last_score= 256*256*256*120;
for(i= survivor[0]; i<=last_non_zero + 1; i++){
int score= score_tab[i];
@@ -3682,7 +3869,7 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
int alevel= FFABS(level);
int unquant_coeff, score, distortion;
- if(s->out_format == FMT_H263){
+ if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
unquant_coeff= (alevel*qmul + qadd)>>3;
}else{ //MPEG1
unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
@@ -3708,7 +3895,7 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
}
i= last_i;
- assert(last_level);
+ av_assert2(last_level);
block[ perm_scantable[last_non_zero] ]= last_level;
i -= last_run + 1;
@@ -3824,8 +4011,8 @@ STOP_TIMER("memset rem[]")}
weight[i] = w;
// w=weight[i] = (63*qns + (w/2)) / w;
- assert(w>0);
- assert(w<(1<<6));
+ av_assert2(w>0);
+ av_assert2(w<(1<<6));
sum += w*w;
}
lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
@@ -3891,7 +4078,7 @@ STOP_TIMER("dct")}
const int level= block[0];
int change, old_coeff;
- assert(s->mb_intra);
+ av_assert2(s->mb_intra);
old_coeff= q*level;
@@ -3934,7 +4121,7 @@ STOP_TIMER("dct")}
}else{
old_coeff=0;
run2--;
- assert(run2>=0 || i >= last_non_zero );
+ av_assert2(run2>=0 || i >= last_non_zero );
}
for(change=-1; change<=1; change+=2){
@@ -3962,7 +4149,7 @@ STOP_TIMER("dct")}
- last_length[UNI_AC_ENC_INDEX(run, level+64)];
}
}else{
- assert(FFABS(new_level)==1);
+ av_assert2(FFABS(new_level)==1);
if(analyze_gradient){
int g= d1[ scantable[i] ];
@@ -3995,7 +4182,7 @@ STOP_TIMER("dct")}
}
}else{
new_coeff=0;
- assert(FFABS(level)==1);
+ av_assert2(FFABS(level)==1);
if(i < last_non_zero){
int next_i= i + run2 + 1;
@@ -4024,7 +4211,7 @@ STOP_TIMER("dct")}
score *= lambda;
unquant_change= new_coeff - old_coeff;
- assert((score < 100*lambda && score > -100*lambda) || lambda==0);
+ av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
if(score<best_score){
@@ -4055,7 +4242,7 @@ STOP_TIMER("iterative step")}
if(best_coeff > last_non_zero){
last_non_zero= best_coeff;
- assert(block[j]);
+ av_assert2(block[j]);
#ifdef REFINE_STATS
after_last++;
#endif
@@ -4083,7 +4270,7 @@ if(block[j]){
#ifdef REFINE_STATS
count++;
if(256*256*256*64 % count == 0){
- printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
+ av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
}
#endif
run=0;
@@ -4146,7 +4333,7 @@ int ff_dct_quantize_c(MpegEncContext *s,
block[0] = (block[0] + (q >> 1)) / q;
start_i = 1;
last_non_zero = 0;
- qmat = s->q_intra_matrix[qscale];
+ qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
} else {
start_i = 0;