From 835f798c7d20bca89eb4f3593846251ad0d84e4b Mon Sep 17 00:00:00 2001
From: Diego Biurrun
Date: Sun, 10 Aug 2014 08:25:12 -0700
Subject: mpegvideo: cosmetics: Lowercase ugly uppercase MPV_ function name prefixes

---
 libavcodec/arm/mpegvideo_arm.c     |  4 +--
 libavcodec/arm/mpegvideo_arm.h     |  2 +-
 libavcodec/arm/mpegvideo_armv5te.c |  2 +-
 libavcodec/flvenc.c                |  6 ++--
 libavcodec/h261dec.c               | 16 +++++-----
 libavcodec/h261enc.c               |  6 ++--
 libavcodec/h263dec.c               | 24 +++++++--------
 libavcodec/h264_slice.c            |  2 +-
 libavcodec/intrax8.c               |  6 ++--
 libavcodec/mjpegenc.c              |  6 ++--
 libavcodec/mpeg12dec.c             | 22 +++++++-------
 libavcodec/mpeg12enc.c             | 10 +++----
 libavcodec/mpeg4videoenc.c         |  6 ++--
 libavcodec/mpegvideo.c             | 57 ++++++++++++++++++------------------
 libavcodec/mpegvideo.h             | 47 ++++++++++++++++-------------
 libavcodec/mpegvideo_enc.c         | 60 +++++++++++++++++++-------------------
 libavcodec/mpegvideo_motion.c      |  8 ++---
 libavcodec/mpegvideo_xvmc.c        |  2 +-
 libavcodec/mss2.c                  |  6 ++--
 libavcodec/neon/mpegvideo.c        |  2 +-
 libavcodec/ppc/mpegvideo_altivec.c |  2 +-
 libavcodec/rv10.c                  | 18 ++++++------
 libavcodec/rv10enc.c               |  6 ++--
 libavcodec/rv20enc.c               |  6 ++--
 libavcodec/rv34.c                  | 24 +++++++--------
 libavcodec/svq1enc.c               |  4 +--
 libavcodec/vc1dec.c                |  8 ++---
 libavcodec/wmv2enc.c               |  6 ++--
 libavcodec/x86/mpegvideo.c         |  2 +-
 libavcodec/x86/mpegvideoenc.c      |  2 +-
 30 files changed, 190 insertions(+), 182 deletions(-)

diff --git a/libavcodec/arm/mpegvideo_arm.c b/libavcodec/arm/mpegvideo_arm.c
index 7567127078..34e9cf18b5 100644
--- a/libavcodec/arm/mpegvideo_arm.c
+++ b/libavcodec/arm/mpegvideo_arm.c
@@ -40,12 +40,12 @@ void ff_dct_unquantize_h263_inter_neon(MpegEncContext *s, int16_t *block,
 void ff_dct_unquantize_h263_intra_neon(MpegEncContext *s, int16_t *block,
                                        int n, int qscale);
 
-av_cold void ff_MPV_common_init_arm(MpegEncContext *s)
+av_cold void ff_mpv_common_init_arm(MpegEncContext *s)
 {
     int cpu_flags = av_get_cpu_flags();
 
     if (have_armv5te(cpu_flags))
-        ff_MPV_common_init_armv5te(s);
+        ff_mpv_common_init_armv5te(s);
 
     if (have_neon(cpu_flags)) {
         s->dct_unquantize_h263_intra = ff_dct_unquantize_h263_intra_neon;
diff --git a/libavcodec/arm/mpegvideo_arm.h b/libavcodec/arm/mpegvideo_arm.h
index 226ba690bb..17e3a5b024 100644
--- a/libavcodec/arm/mpegvideo_arm.h
+++ b/libavcodec/arm/mpegvideo_arm.h
@@ -21,6 +21,6 @@
 
 #include "libavcodec/mpegvideo.h"
 
-void ff_MPV_common_init_armv5te(MpegEncContext *s);
+void ff_mpv_common_init_armv5te(MpegEncContext *s);
 
 #endif /* AVCODEC_ARM_MPEGVIDEO_ARM_H */
diff --git a/libavcodec/arm/mpegvideo_armv5te.c b/libavcodec/arm/mpegvideo_armv5te.c
index 2066cbc425..3c44cd80b5 100644
--- a/libavcodec/arm/mpegvideo_armv5te.c
+++ b/libavcodec/arm/mpegvideo_armv5te.c
@@ -94,7 +94,7 @@ static void dct_unquantize_h263_inter_armv5te(MpegEncContext *s,
     ff_dct_unquantize_h263_armv5te(block, qmul, qadd, nCoeffs + 1);
 }
 
-av_cold void ff_MPV_common_init_armv5te(MpegEncContext *s)
+av_cold void ff_mpv_common_init_armv5te(MpegEncContext *s)
 {
     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_armv5te;
     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_armv5te;
diff --git a/libavcodec/flvenc.c b/libavcodec/flvenc.c
index fbdb23d79b..4d5eb1dd9b 100644
--- a/libavcodec/flvenc.c
+++ b/libavcodec/flvenc.c
@@ -92,9 +92,9 @@ AVCodec ff_flv_encoder = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_FLV1,
     .priv_data_size = sizeof(MpegEncContext),
-    .init           = ff_MPV_encode_init,
-    .encode2        = ff_MPV_encode_picture,
-    .close          = ff_MPV_encode_end,
+    .init           = ff_mpv_encode_init,
+    .encode2
= ff_mpv_encode_picture, + .close = ff_mpv_encode_end, .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }, .priv_class = &flv_class, }; diff --git a/libavcodec/h261dec.c b/libavcodec/h261dec.c index 6b358a6d5c..eefb5d33f4 100644 --- a/libavcodec/h261dec.c +++ b/libavcodec/h261dec.c @@ -74,7 +74,7 @@ static av_cold int h261_decode_init(AVCodecContext *avctx) MpegEncContext *const s = &h->s; // set defaults - ff_MPV_decode_defaults(s); + ff_mpv_decode_defaults(s); s->avctx = avctx; s->width = s->avctx->coded_width; s->height = s->avctx->coded_height; @@ -218,7 +218,7 @@ static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2) s->mb_skipped = 1; h->mtype &= ~MB_TYPE_H261_FIL; - ff_MPV_decode_mb(s, s->block); + ff_mpv_decode_mb(s, s->block); } return 0; @@ -445,7 +445,7 @@ intra: s->block_last_index[i] = -1; } - ff_MPV_decode_mb(s, s->block); + ff_mpv_decode_mb(s, s->block); return SLICE_OK; } @@ -596,12 +596,12 @@ retry: if (s->width != avctx->coded_width || s->height != avctx->coded_height) { ParseContext pc = s->parse_context; // FIXME move this demuxing hack to libavformat s->parse_context.buffer = 0; - ff_MPV_common_end(s); + ff_mpv_common_end(s); s->parse_context = pc; } if (!s->context_initialized) - if ((ret = ff_MPV_common_init(s)) < 0) + if ((ret = ff_mpv_common_init(s)) < 0) return ret; if (!s->context_initialized) { @@ -621,7 +621,7 @@ retry: avctx->skip_frame >= AVDISCARD_ALL) return get_consumed_bytes(s, buf_size); - if (ff_MPV_frame_start(s, avctx) < 0) + if (ff_mpv_frame_start(s, avctx) < 0) return -1; ff_mpeg_er_frame_start(s); @@ -635,7 +635,7 @@ retry: break; h261_decode_gob(h); } - ff_MPV_frame_end(s); + ff_mpv_frame_end(s); assert(s->current_picture.f->pict_type == s->current_picture_ptr->f->pict_type); assert(s->current_picture.f->pict_type == s->pict_type); @@ -654,7 +654,7 @@ static av_cold int h261_decode_end(AVCodecContext *avctx) H261Context *h = avctx->priv_data; MpegEncContext *s = &h->s; - ff_MPV_common_end(s); + ff_mpv_common_end(s); return 0; } diff --git a/libavcodec/h261enc.c b/libavcodec/h261enc.c index 5f0baba465..f24e590f92 100644 --- a/libavcodec/h261enc.c +++ b/libavcodec/h261enc.c @@ -331,9 +331,9 @@ AVCodec ff_h261_encoder = { .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_H261, .priv_data_size = sizeof(H261Context), - .init = ff_MPV_encode_init, - .encode2 = ff_MPV_encode_picture, - .close = ff_MPV_encode_end, + .init = ff_mpv_encode_init, + .encode2 = ff_mpv_encode_picture, + .close = ff_mpv_encode_end, .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }, .priv_class = &h261_class, diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c index c4db99e6c3..a9cdb9e37d 100644 --- a/libavcodec/h263dec.c +++ b/libavcodec/h263dec.c @@ -52,7 +52,7 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx) s->workaround_bugs = avctx->workaround_bugs; // set defaults - ff_MPV_decode_defaults(s); + ff_mpv_decode_defaults(s); s->quant_precision = 5; s->decode_mb = ff_h263_decode_mb; s->low_delay = 1; @@ -115,7 +115,7 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx) if (avctx->codec->id != AV_CODEC_ID_H263 && avctx->codec->id != AV_CODEC_ID_MPEG4) { ff_mpv_idct_init(s); - if ((ret = ff_MPV_common_init(s)) < 0) + if ((ret = ff_mpv_common_init(s)) < 0) return ret; } @@ -130,7 +130,7 @@ av_cold int ff_h263_decode_end(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; - ff_MPV_common_end(s); + ff_mpv_common_end(s); return 0; } @@ -239,7 +239,7 @@ static int 
decode_slice(MpegEncContext *s) if (ret < 0) { const int xy = s->mb_x + s->mb_y * s->mb_stride; if (ret == SLICE_END) { - ff_MPV_decode_mb(s, s->block); + ff_mpv_decode_mb(s, s->block); if (s->loop_filter) ff_h263_loop_filter(s); @@ -251,7 +251,7 @@ static int decode_slice(MpegEncContext *s) if (++s->mb_x >= s->mb_width) { s->mb_x = 0; ff_mpeg_draw_horiz_band(s, s->mb_y * mb_size, mb_size); - ff_MPV_report_decode_progress(s); + ff_mpv_report_decode_progress(s); s->mb_y++; } return 0; @@ -270,13 +270,13 @@ static int decode_slice(MpegEncContext *s) return AVERROR_INVALIDDATA; } - ff_MPV_decode_mb(s, s->block); + ff_mpv_decode_mb(s, s->block); if (s->loop_filter) ff_h263_loop_filter(s); } ff_mpeg_draw_horiz_band(s, s->mb_y * mb_size, mb_size); - ff_MPV_report_decode_progress(s); + ff_mpv_report_decode_progress(s); s->mb_x = 0; } @@ -454,7 +454,7 @@ int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, } if (!s->context_initialized) - if ((ret = ff_MPV_common_init(s)) < 0) + if ((ret = ff_mpv_common_init(s)) < 0) return ret; if (s->current_picture_ptr == NULL || s->current_picture_ptr->f->data[0]) { @@ -503,7 +503,7 @@ int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, ff_set_sar(avctx, avctx->sample_aspect_ratio); - if ((ret = ff_MPV_common_frame_size_change(s))) + if ((ret = ff_mpv_common_frame_size_change(s))) return ret; } @@ -542,7 +542,7 @@ int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab; } - if ((ret = ff_MPV_frame_start(s, avctx)) < 0) + if ((ret = ff_mpv_frame_start(s, avctx)) < 0) return ret; if (!s->divx_packed && !avctx->hwaccel) @@ -559,7 +559,7 @@ int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, /* the second part of the wmv2 header contains the MB skip bits which * are stored in current_picture->mb_type which is not available before - * ff_MPV_frame_start() */ + * ff_mpv_frame_start() */ if (CONFIG_WMV2_DECODER && s->msmpeg4_version == 5) { ret = ff_wmv2_decode_secondary_picture_header(s); if (ret < 0) @@ -613,7 +613,7 @@ intrax8_decoded: return ret; } - ff_MPV_frame_end(s); + ff_mpv_frame_end(s); if (!s->divx_packed && avctx->hwaccel) ff_thread_finish_setup(avctx); diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c index ce62fbf68e..83d3426353 100644 --- a/libavcodec/h264_slice.c +++ b/libavcodec/h264_slice.c @@ -733,7 +733,7 @@ static int h264_frame_start(H264Context *h) /* We mark the current picture as non-reference after allocating it, so * that if we break out due to an error it can be released automatically - * in the next ff_MPV_frame_start(). + * in the next ff_mpv_frame_start(). */ h->cur_pic_ptr->reference = 0; diff --git a/libavcodec/intrax8.c b/libavcodec/intrax8.c index d37eb793cd..303b9263be 100644 --- a/libavcodec/intrax8.c +++ b/libavcodec/intrax8.c @@ -718,9 +718,9 @@ av_cold void ff_intrax8_common_end(IntraX8Context * w) /** * Decode single IntraX8 frame. * The parent codec must fill s->loopfilter and s->gb (bitstream). - * The parent codec must call MPV_frame_start(), ff_er_frame_start() before calling this function. - * The parent codec must call ff_er_frame_end(), MPV_frame_end() after calling this function. - * This function does not use MPV_decode_mb(). + * The parent codec must call ff_mpv_frame_start(), ff_er_frame_start() before calling this function. + * The parent codec must call ff_er_frame_end(), ff_mpv_frame_end() after calling this function. 
+ * This function does not use ff_mpv_decode_mb(). * @param w pointer to IntraX8Context * @param dquant doubled quantizer, it would be odd in case of VC-1 halfpq==1. * @param quant_offset offset away from zero diff --git a/libavcodec/mjpegenc.c b/libavcodec/mjpegenc.c index fdb882e71c..8ac5cfe87d 100644 --- a/libavcodec/mjpegenc.c +++ b/libavcodec/mjpegenc.c @@ -159,9 +159,9 @@ AVCodec ff_mjpeg_encoder = { .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_MJPEG, .priv_data_size = sizeof(MpegEncContext), - .init = ff_MPV_encode_init, - .encode2 = ff_MPV_encode_picture, - .close = ff_MPV_encode_end, + .init = ff_mpv_encode_init, + .encode2 = ff_mpv_encode_picture, + .close = ff_mpv_encode_end, .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_NONE }, diff --git a/libavcodec/mpeg12dec.c b/libavcodec/mpeg12dec.c index 29f8980913..5a875c2518 100644 --- a/libavcodec/mpeg12dec.c +++ b/libavcodec/mpeg12dec.c @@ -1098,7 +1098,7 @@ static av_cold int mpeg_decode_init(AVCodecContext *avctx) Mpeg1Context *s = avctx->priv_data; MpegEncContext *s2 = &s->mpeg_enc_ctx; - ff_MPV_decode_defaults(s2); + ff_mpv_decode_defaults(s2); s->mpeg_enc_ctx.avctx = avctx; s->mpeg_enc_ctx.flags = avctx->flags; @@ -1221,7 +1221,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx) if (s1->mpeg_enc_ctx_allocated) { ParseContext pc = s->parse_context; s->parse_context.buffer = 0; - ff_MPV_common_end(s); + ff_mpv_common_end(s); s->parse_context = pc; } @@ -1312,7 +1312,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx) memcpy(old_permutation, s->idsp.idct_permutation, 64 * sizeof(uint8_t)); ff_mpv_idct_init(s); - if (ff_MPV_common_init(s) < 0) + if (ff_mpv_common_init(s) < 0) return -2; quant_matrix_rebuild(s->intra_matrix, old_permutation, s->idsp.idct_permutation); @@ -1590,7 +1590,7 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size) if (s->first_field || s->picture_structure == PICT_FRAME) { AVFrameSideData *pan_scan; - if (ff_MPV_frame_start(s, avctx) < 0) + if (ff_mpv_frame_start(s, avctx) < 0) return -1; ff_mpeg_er_frame_start(s); @@ -1676,7 +1676,7 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size) #if FF_API_XVMC FF_DISABLE_DEPRECATION_WARNINGS -// MPV_frame_start will call this function too, +// ff_mpv_frame_start will call this function too, // but we need to call it on every field if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) if (ff_xvmc_field_start(s, avctx) < 0) @@ -1841,13 +1841,13 @@ FF_ENABLE_DEPRECATION_WARNINGS s->dest[1] += 16 >> s->chroma_x_shift; s->dest[2] += 16 >> s->chroma_x_shift; - ff_MPV_decode_mb(s, s->block); + ff_mpv_decode_mb(s, s->block); if (++s->mb_x >= s->mb_width) { const int mb_size = 16; ff_mpeg_draw_horiz_band(s, mb_size * (s->mb_y >> field_pic), mb_size); - ff_MPV_report_decode_progress(s); + ff_mpv_report_decode_progress(s); s->mb_x = 0; s->mb_y += 1 << field_pic; @@ -2016,7 +2016,7 @@ FF_ENABLE_DEPRECATION_WARNINGS ff_er_frame_end(&s->er); - ff_MPV_frame_end(s); + ff_mpv_frame_end(s); if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) { int ret = av_frame_ref(pict, s->current_picture_ptr->f); @@ -2133,7 +2133,7 @@ static int vcr2_init_sequence(AVCodecContext *avctx) /* start new MPEG-1 context decoding */ s->out_format = FMT_MPEG1; if (s1->mpeg_enc_ctx_allocated) { - ff_MPV_common_end(s); + ff_mpv_common_end(s); } s->width = avctx->coded_width; s->height = avctx->coded_height; @@ -2151,7 +2151,7 @@ static int vcr2_init_sequence(AVCodecContext 
*avctx) avctx->idct_algo = FF_IDCT_SIMPLE; ff_mpv_idct_init(s); - if (ff_MPV_common_init(s) < 0) + if (ff_mpv_common_init(s) < 0) return -1; s1->mpeg_enc_ctx_allocated = 1; @@ -2653,7 +2653,7 @@ static av_cold int mpeg_decode_end(AVCodecContext *avctx) Mpeg1Context *s = avctx->priv_data; if (s->mpeg_enc_ctx_allocated) - ff_MPV_common_end(&s->mpeg_enc_ctx); + ff_mpv_common_end(&s->mpeg_enc_ctx); av_freep(&s->a53_caption); return 0; } diff --git a/libavcodec/mpeg12enc.c b/libavcodec/mpeg12enc.c index f87286aa57..c4089c9582 100644 --- a/libavcodec/mpeg12enc.c +++ b/libavcodec/mpeg12enc.c @@ -130,7 +130,7 @@ static av_cold int encode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; - if (ff_MPV_encode_init(avctx) < 0) + if (ff_mpv_encode_init(avctx) < 0) return -1; if (find_frame_rate_index(s) < 0) { @@ -1082,8 +1082,8 @@ AVCodec ff_mpeg1video_encoder = { .id = AV_CODEC_ID_MPEG1VIDEO, .priv_data_size = sizeof(MpegEncContext), .init = encode_init, - .encode2 = ff_MPV_encode_picture, - .close = ff_MPV_encode_end, + .encode2 = ff_mpv_encode_picture, + .close = ff_mpv_encode_end, .supported_framerates = ff_mpeg12_frame_rate_tab + 1, .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }, @@ -1098,8 +1098,8 @@ AVCodec ff_mpeg2video_encoder = { .id = AV_CODEC_ID_MPEG2VIDEO, .priv_data_size = sizeof(MpegEncContext), .init = encode_init, - .encode2 = ff_MPV_encode_picture, - .close = ff_MPV_encode_end, + .encode2 = ff_mpv_encode_picture, + .close = ff_mpv_encode_end, .supported_framerates = ff_mpeg12_frame_rate_tab + 1, .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, diff --git a/libavcodec/mpeg4videoenc.c b/libavcodec/mpeg4videoenc.c index 6b87ec7edb..e9bc85f878 100644 --- a/libavcodec/mpeg4videoenc.c +++ b/libavcodec/mpeg4videoenc.c @@ -1287,7 +1287,7 @@ static av_cold int encode_init(AVCodecContext *avctx) int ret; static int done = 0; - if ((ret = ff_MPV_encode_init(avctx)) < 0) + if ((ret = ff_mpv_encode_init(avctx)) < 0) return ret; if (!done) { @@ -1401,8 +1401,8 @@ AVCodec ff_mpeg4_encoder = { .id = AV_CODEC_ID_MPEG4, .priv_data_size = sizeof(MpegEncContext), .init = encode_init, - .encode2 = ff_MPV_encode_picture, - .close = ff_MPV_encode_end, + .encode2 = ff_mpv_encode_picture, + .close = ff_mpv_encode_end, .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }, .capabilities = CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS, .priv_class = &mpeg4enc_class, diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c index 540c959ddb..b0136c7431 100644 --- a/libavcodec/mpegvideo.c +++ b/libavcodec/mpegvideo.c @@ -371,7 +371,7 @@ static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift); assert(ref == 0); - ff_MPV_decode_mb(s, s->block); + ff_mpv_decode_mb(s, s->block); } /* init common dct for both encoder and decoder */ @@ -393,14 +393,14 @@ static av_cold int dct_init(MpegEncContext *s) s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c; if (HAVE_INTRINSICS_NEON) - ff_MPV_common_init_neon(s); + ff_mpv_common_init_neon(s); if (ARCH_ARM) - ff_MPV_common_init_arm(s); + ff_mpv_common_init_arm(s); if (ARCH_PPC) - ff_MPV_common_init_ppc(s); + ff_mpv_common_init_ppc(s); if (ARCH_X86) - ff_MPV_common_init_x86(s); + ff_mpv_common_init_x86(s); return 0; } @@ -821,7 +821,7 @@ static int init_duplicate_context(MpegEncContext *s) return 0; 
fail: - return -1; // free() through ff_MPV_common_end() + return -1; // free() through ff_mpv_common_end() } static void free_duplicate_context(MpegEncContext *s) @@ -915,7 +915,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst, s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0; ff_mpv_idct_init(s); - ff_MPV_common_init(s); + ff_mpv_common_init(s); } if (s->height != s1->height || s->width != s1->width || s->context_reinit) { @@ -923,7 +923,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst, s->context_reinit = 0; s->height = s1->height; s->width = s1->width; - if ((err = ff_MPV_common_frame_size_change(s)) < 0) + if ((err = ff_mpv_common_frame_size_change(s)) < 0) return err; } @@ -1024,7 +1024,7 @@ do {\ * The changed fields will not depend upon the * prior state of the MpegEncContext. */ -void ff_MPV_common_defaults(MpegEncContext *s) +void ff_mpv_common_defaults(MpegEncContext *s) { s->y_dc_scale_table = s->c_dc_scale_table = ff_mpeg1_dc_scale_table; @@ -1047,9 +1047,9 @@ void ff_MPV_common_defaults(MpegEncContext *s) * the changed fields will not depend upon * the prior state of the MpegEncContext. */ -void ff_MPV_decode_defaults(MpegEncContext *s) +void ff_mpv_decode_defaults(MpegEncContext *s) { - ff_MPV_common_defaults(s); + ff_mpv_common_defaults(s); } static int init_er(MpegEncContext *s) @@ -1232,7 +1232,7 @@ fail: * init common structure for both encoder and decoder. * this assumes that some variables like width/height are already set */ -av_cold int ff_MPV_common_init(MpegEncContext *s) +av_cold int ff_mpv_common_init(MpegEncContext *s) { int i; int nb_slices = (HAVE_THREADS && @@ -1343,7 +1343,7 @@ av_cold int ff_MPV_common_init(MpegEncContext *s) return 0; fail: - ff_MPV_common_end(s); + ff_mpv_common_end(s); return -1; } @@ -1402,7 +1402,7 @@ static int free_context_frame(MpegEncContext *s) return 0; } -int ff_MPV_common_frame_size_change(MpegEncContext *s) +int ff_mpv_common_frame_size_change(MpegEncContext *s) { int i, err = 0; @@ -1470,12 +1470,12 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s) return 0; fail: - ff_MPV_common_end(s); + ff_mpv_common_end(s); return err; } /* init common structure for both encoder and decoder */ -void ff_MPV_common_end(MpegEncContext *s) +void ff_mpv_common_end(MpegEncContext *s) { int i; @@ -1674,7 +1674,7 @@ int ff_find_unused_picture(MpegEncContext *s, int shared) * generic function called after decoding * the header and before a frame is decoded. */ -int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) +int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx) { int i, ret; Picture *pic; @@ -1889,7 +1889,7 @@ FF_ENABLE_DEPRECATION_WARNINGS } /* called after a frame has been decoded. 
*/ -void ff_MPV_frame_end(MpegEncContext *s) +void ff_mpv_frame_end(MpegEncContext *s) { #if FF_API_XVMC FF_DISABLE_DEPRECATION_WARNINGS @@ -2010,7 +2010,7 @@ void ff_print_debug_info(MpegEncContext *s, Picture *p) /** * find the lowest MB row referenced in the MVs */ -int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir) +int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir) { int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample; int my, off, i, mvs; @@ -2116,7 +2116,7 @@ void ff_clean_intra_table_entries(MpegEncContext *s) s->interlaced_dct : true if interlaced dct used (mpeg2) */ static av_always_inline -void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64], +void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64], int is_mpeg12) { const int mb_xy = s->mb_y * s->mb_stride + s->mb_x; @@ -2206,12 +2206,12 @@ FF_ENABLE_DEPRECATION_WARNINGS if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) { if (s->mv_dir & MV_DIR_FORWARD) { ff_thread_await_progress(&s->last_picture_ptr->tf, - ff_MPV_lowest_referenced_row(s, 0), + ff_mpv_lowest_referenced_row(s, 0), 0); } if (s->mv_dir & MV_DIR_BACKWARD) { ff_thread_await_progress(&s->next_picture_ptr->tf, - ff_MPV_lowest_referenced_row(s, 1), + ff_mpv_lowest_referenced_row(s, 1), 0); } } @@ -2223,12 +2223,12 @@ FF_ENABLE_DEPRECATION_WARNINGS op_pix = s->hdsp.put_no_rnd_pixels_tab; } if (s->mv_dir & MV_DIR_FORWARD) { - ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix); + ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix); op_pix = s->hdsp.avg_pixels_tab; op_qpix= s->me.qpel_avg; } if (s->mv_dir & MV_DIR_BACKWARD) { - ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix); + ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix); } } @@ -2351,13 +2351,14 @@ skip_idct: } } -void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){ +void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64]) +{ #if !CONFIG_SMALL if(s->out_format == FMT_MPEG1) { - MPV_decode_mb_internal(s, block, 1); + mpv_decode_mb_internal(s, block, 1); } else #endif - MPV_decode_mb_internal(s, block, 0); + mpv_decode_mb_internal(s, block, 0); } void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h) @@ -2472,7 +2473,7 @@ void ff_set_qscale(MpegEncContext * s, int qscale) s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ]; } -void ff_MPV_report_decode_progress(MpegEncContext *s) +void ff_mpv_report_decode_progress(MpegEncContext *s) { if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred) ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0); diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h index e8c3581274..6df828837a 100644 --- a/libavcodec/mpegvideo.h +++ b/libavcodec/mpegvideo.h @@ -689,24 +689,33 @@ static const AVClass name ## _class = {\ * and decoding). The changed fields will not depend upon the prior * state of the MpegEncContext. 
*/ -void ff_MPV_common_defaults(MpegEncContext *s); - -void ff_MPV_decode_defaults(MpegEncContext *s); -int ff_MPV_common_init(MpegEncContext *s); -int ff_MPV_common_frame_size_change(MpegEncContext *s); -void ff_MPV_common_end(MpegEncContext *s); -void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]); -int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx); -void ff_MPV_frame_end(MpegEncContext *s); -int ff_MPV_encode_init(AVCodecContext *avctx); -int ff_MPV_encode_end(AVCodecContext *avctx); -int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt, +void ff_mpv_common_defaults(MpegEncContext *s); + +int ff_mpv_common_init(MpegEncContext *s); +void ff_mpv_common_init_arm(MpegEncContext *s); +void ff_mpv_common_init_neon(MpegEncContext *s); +void ff_mpv_common_init_ppc(MpegEncContext *s); +void ff_mpv_common_init_x86(MpegEncContext *s); + +int ff_mpv_common_frame_size_change(MpegEncContext *s); +void ff_mpv_common_end(MpegEncContext *s); + +void ff_mpv_decode_defaults(MpegEncContext *s); +void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64]); +void ff_mpv_report_decode_progress(MpegEncContext *s); + +int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx); +void ff_mpv_frame_end(MpegEncContext *s); + +int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir); + +int ff_mpv_encode_init(AVCodecContext *avctx); +void ff_mpv_encode_init_x86(MpegEncContext *s); + +int ff_mpv_encode_end(AVCodecContext *avctx); +int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet); -void ff_MPV_encode_init_x86(MpegEncContext *s); -void ff_MPV_common_init_x86(MpegEncContext *s); -void ff_MPV_common_init_arm(MpegEncContext *s); -void ff_MPV_common_init_neon(MpegEncContext *s); -void ff_MPV_common_init_ppc(MpegEncContext *s); + void ff_clean_intra_table_entries(MpegEncContext *s); void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h); void ff_mpeg_flush(AVCodecContext *avctx); @@ -715,8 +724,6 @@ void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix); int ff_find_unused_picture(MpegEncContext *s, int shared); void ff_denoise_dct(MpegEncContext *s, int16_t *block); int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src); -int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir); -void ff_MPV_report_decode_progress(MpegEncContext *s); int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src); void ff_set_qscale(MpegEncContext * s, int qscale); @@ -727,7 +734,7 @@ int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int void ff_init_block_index(MpegEncContext *s); -void ff_MPV_motion(MpegEncContext *s, +void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c index 7355ebf355..fd84deb584 100644 --- a/libavcodec/mpegvideo_enc.c +++ b/libavcodec/mpegvideo_enc.c @@ -218,10 +218,10 @@ static void update_duplicate_context_after_me(MpegEncContext *dst, * Set the given MpegEncContext to defaults for encoding. * the changed fields will not depend upon the prior state of the MpegEncContext. 
*/ -static void MPV_encode_defaults(MpegEncContext *s) +static void mpv_encode_defaults(MpegEncContext *s) { int i; - ff_MPV_common_defaults(s); + ff_mpv_common_defaults(s); for (i = -16; i < 16; i++) { default_fcode_tab[i + MAX_MV] = 1; @@ -234,12 +234,12 @@ static void MPV_encode_defaults(MpegEncContext *s) } /* init video encoder */ -av_cold int ff_MPV_encode_init(AVCodecContext *avctx) +av_cold int ff_mpv_encode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; int i, ret, format_supported; - MPV_encode_defaults(s); + mpv_encode_defaults(s); switch (avctx->codec_id) { case AV_CODEC_ID_MPEG2VIDEO: @@ -701,11 +701,11 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx) /* init */ ff_mpv_idct_init(s); - if (ff_MPV_common_init(s) < 0) + if (ff_mpv_common_init(s) < 0) return -1; if (ARCH_X86) - ff_MPV_encode_init_x86(s); + ff_mpv_encode_init_x86(s); ff_fdctdsp_init(&s->fdsp, avctx); ff_me_cmp_init(&s->mecc, avctx); @@ -838,18 +838,18 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx) return 0; fail: - ff_MPV_encode_end(avctx); + ff_mpv_encode_end(avctx); return AVERROR_UNKNOWN; } -av_cold int ff_MPV_encode_end(AVCodecContext *avctx) +av_cold int ff_mpv_encode_end(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; int i; ff_rate_control_uninit(s); - ff_MPV_common_end(s); + ff_mpv_common_end(s); if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG) ff_mjpeg_encode_close(s); @@ -1530,7 +1530,7 @@ static int frame_start(MpegEncContext *s) return 0; } -int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt, +int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet) { MpegEncContext *s = avctx->priv_data; @@ -1986,14 +1986,14 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, } if (s->mv_dir & MV_DIR_FORWARD) { - ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, + ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix); op_pix = s->hdsp.avg_pixels_tab; op_qpix = s->qdsp.avg_qpel_pixels_tab; } if (s->mv_dir & MV_DIR_BACKWARD) { - ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, + ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix); } @@ -2314,7 +2314,7 @@ static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegE } if(s->avctx->mb_decision == FF_MB_DECISION_RD){ - ff_MPV_decode_mb(s, s->block); + ff_mpv_decode_mb(s, s->block); score *= s->lambda2; score += sse_mb(s) << FF_LAMBDA_SHIFT; @@ -2959,7 +2959,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ } if(s->avctx->mb_decision == FF_MB_DECISION_BITS) - ff_MPV_decode_mb(s, s->block); + ff_mpv_decode_mb(s, s->block); } else { int motion_x = 0, motion_y = 0; s->mv_type=MV_TYPE_16X16; @@ -3078,7 +3078,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B) ff_h263_update_motion_val(s); - ff_MPV_decode_mb(s, s->block); + ff_mpv_decode_mb(s, s->block); } /* clean the MV table in IPS frames for direct mode in B frames */ @@ -4259,9 +4259,9 @@ AVCodec ff_h263_encoder = { .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_H263, .priv_data_size = sizeof(MpegEncContext), - .init = ff_MPV_encode_init, - .encode2 = ff_MPV_encode_picture, - .close = ff_MPV_encode_end, + .init = ff_mpv_encode_init, + .encode2 = ff_mpv_encode_picture, + .close = ff_mpv_encode_end, .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE}, .priv_class = &h263_class, }; @@ -4287,9 +4287,9 @@ 
AVCodec ff_h263p_encoder = { .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_H263P, .priv_data_size = sizeof(MpegEncContext), - .init = ff_MPV_encode_init, - .encode2 = ff_MPV_encode_picture, - .close = ff_MPV_encode_end, + .init = ff_mpv_encode_init, + .encode2 = ff_mpv_encode_picture, + .close = ff_mpv_encode_end, .capabilities = CODEC_CAP_SLICE_THREADS, .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }, .priv_class = &h263p_class, @@ -4303,9 +4303,9 @@ AVCodec ff_msmpeg4v2_encoder = { .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_MSMPEG4V2, .priv_data_size = sizeof(MpegEncContext), - .init = ff_MPV_encode_init, - .encode2 = ff_MPV_encode_picture, - .close = ff_MPV_encode_end, + .init = ff_mpv_encode_init, + .encode2 = ff_mpv_encode_picture, + .close = ff_mpv_encode_end, .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }, .priv_class = &msmpeg4v2_class, }; @@ -4318,9 +4318,9 @@ AVCodec ff_msmpeg4v3_encoder = { .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_MSMPEG4V3, .priv_data_size = sizeof(MpegEncContext), - .init = ff_MPV_encode_init, - .encode2 = ff_MPV_encode_picture, - .close = ff_MPV_encode_end, + .init = ff_mpv_encode_init, + .encode2 = ff_mpv_encode_picture, + .close = ff_mpv_encode_end, .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }, .priv_class = &msmpeg4v3_class, }; @@ -4333,9 +4333,9 @@ AVCodec ff_wmv1_encoder = { .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_WMV1, .priv_data_size = sizeof(MpegEncContext), - .init = ff_MPV_encode_init, - .encode2 = ff_MPV_encode_picture, - .close = ff_MPV_encode_end, + .init = ff_mpv_encode_init, + .encode2 = ff_mpv_encode_picture, + .close = ff_mpv_encode_end, .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }, .priv_class = &wmv1_class, }; diff --git a/libavcodec/mpegvideo_motion.c b/libavcodec/mpegvideo_motion.c index b399db844b..f33db341de 100644 --- a/libavcodec/mpegvideo_motion.c +++ b/libavcodec/mpegvideo_motion.c @@ -826,7 +826,7 @@ static inline void apply_8x8(MpegEncContext *s, * @param qpix_op qpel motion compensation function (average or put normally) * the motion vectors are taken from s->mv and the MV type from s->mv_type */ -static av_always_inline void MPV_motion_internal(MpegEncContext *s, +static av_always_inline void mpv_motion_internal(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, @@ -965,7 +965,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s, } } -void ff_MPV_motion(MpegEncContext *s, +void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, @@ -974,10 +974,10 @@ void ff_MPV_motion(MpegEncContext *s, { #if !CONFIG_SMALL if (s->out_format == FMT_MPEG1) - MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir, + mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir, ref_picture, pix_op, qpix_op, 1); else #endif - MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir, + mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir, ref_picture, pix_op, qpix_op, 0); } diff --git a/libavcodec/mpegvideo_xvmc.c b/libavcodec/mpegvideo_xvmc.c index a8e068b76c..fb2dd80140 100644 --- a/libavcodec/mpegvideo_xvmc.c +++ b/libavcodec/mpegvideo_xvmc.c @@ -169,7 +169,7 @@ void ff_xvmc_decode_mb(MpegEncContext *s) return; } - // from MPV_decode_mb(), update DC predictors for P macroblocks + // from ff_mpv_decode_mb(), update DC predictors for P macroblocks if (!s->mb_intra) { s->last_dc[0] = s->last_dc[1] = diff --git 
a/libavcodec/mss2.c b/libavcodec/mss2.c index ffbba6d0fd..d71d30844a 100644 --- a/libavcodec/mss2.c +++ b/libavcodec/mss2.c @@ -396,8 +396,8 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size, avctx->pix_fmt = AV_PIX_FMT_YUV420P; - if ((ret = ff_MPV_frame_start(s, avctx)) < 0) { - av_log(v->s.avctx, AV_LOG_ERROR, "ff_MPV_frame_start error\n"); + if ((ret = ff_mpv_frame_start(s, avctx)) < 0) { + av_log(v->s.avctx, AV_LOG_ERROR, "ff_mpv_frame_start error\n"); avctx->pix_fmt = AV_PIX_FMT_RGB24; return ret; } @@ -417,7 +417,7 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size, ff_er_frame_end(&s->er); - ff_MPV_frame_end(s); + ff_mpv_frame_end(s); f = s->current_picture.f; diff --git a/libavcodec/neon/mpegvideo.c b/libavcodec/neon/mpegvideo.c index cb9bd66ed0..fe952ae256 100644 --- a/libavcodec/neon/mpegvideo.c +++ b/libavcodec/neon/mpegvideo.c @@ -123,7 +123,7 @@ static void dct_unquantize_h263_intra_neon(MpegEncContext *s, int16_t *block, } -av_cold void ff_MPV_common_init_neon(MpegEncContext *s) +av_cold void ff_mpv_common_init_neon(MpegEncContext *s) { int cpu_flags = av_get_cpu_flags(); diff --git a/libavcodec/ppc/mpegvideo_altivec.c b/libavcodec/ppc/mpegvideo_altivec.c index 98ef143eb9..9ae849f173 100644 --- a/libavcodec/ppc/mpegvideo_altivec.c +++ b/libavcodec/ppc/mpegvideo_altivec.c @@ -115,7 +115,7 @@ static void dct_unquantize_h263_altivec(MpegEncContext *s, #endif /* HAVE_ALTIVEC */ -av_cold void ff_MPV_common_init_ppc(MpegEncContext *s) +av_cold void ff_mpv_common_init_ppc(MpegEncContext *s) { #if HAVE_ALTIVEC if (!PPC_ALTIVEC(av_get_cpu_flags())) diff --git a/libavcodec/rv10.c b/libavcodec/rv10.c index e3e572099c..1787a7e78e 100644 --- a/libavcodec/rv10.c +++ b/libavcodec/rv10.c @@ -372,7 +372,7 @@ static int rv20_decode_picture_header(RVDecContext *rv) if (new_w != s->width || new_h != s->height) { av_log(s->avctx, AV_LOG_DEBUG, "attempting to change resolution to %dx%d\n", new_w, new_h); - ff_MPV_common_end(s); + ff_mpv_common_end(s); ret = ff_set_dimensions(s->avctx, new_w, new_h); if (ret < 0) @@ -380,7 +380,7 @@ static int rv20_decode_picture_header(RVDecContext *rv) s->width = new_w; s->height = new_h; - if ((ret = ff_MPV_common_init(s)) < 0) + if ((ret = ff_mpv_common_init(s)) < 0) return ret; } @@ -455,7 +455,7 @@ static av_cold int rv10_decode_init(AVCodecContext *avctx) avctx->coded_height, 0, avctx)) < 0) return ret; - ff_MPV_decode_defaults(s); + ff_mpv_decode_defaults(s); s->avctx = avctx; s->out_format = FMT_H263; @@ -499,7 +499,7 @@ static av_cold int rv10_decode_init(AVCodecContext *avctx) avctx->pix_fmt = AV_PIX_FMT_YUV420P; ff_mpv_idct_init(s); - if ((ret = ff_MPV_common_init(s)) < 0) + if ((ret = ff_mpv_common_init(s)) < 0) return ret; ff_h263dsp_init(&s->h263dsp); @@ -523,7 +523,7 @@ static av_cold int rv10_decode_end(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; - ff_MPV_common_end(s); + ff_mpv_common_end(s); return 0; } @@ -561,10 +561,10 @@ static int rv10_decode_packet(AVCodecContext *avctx, const uint8_t *buf, // FIXME write parser so we always have complete frames? 
if (s->current_picture_ptr) { ff_er_frame_end(&s->er); - ff_MPV_frame_end(s); + ff_mpv_frame_end(s); s->mb_x = s->mb_y = s->resync_mb_x = s->resync_mb_y = 0; } - if ((ret = ff_MPV_frame_start(s, avctx)) < 0) + if ((ret = ff_mpv_frame_start(s, avctx)) < 0) return ret; ff_mpeg_er_frame_start(s); } else { @@ -646,7 +646,7 @@ static int rv10_decode_packet(AVCodecContext *avctx, const uint8_t *buf, } if (s->pict_type != AV_PICTURE_TYPE_B) ff_h263_update_motion_val(s); - ff_MPV_decode_mb(s, s->block); + ff_mpv_decode_mb(s, s->block); if (s->loop_filter) ff_h263_loop_filter(s); @@ -739,7 +739,7 @@ static int rv10_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, if (s->current_picture_ptr != NULL && s->mb_y >= s->mb_height) { ff_er_frame_end(&s->er); - ff_MPV_frame_end(s); + ff_mpv_frame_end(s); if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) { if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0) diff --git a/libavcodec/rv10enc.c b/libavcodec/rv10enc.c index 9b23d7d92a..ca760524a8 100644 --- a/libavcodec/rv10enc.c +++ b/libavcodec/rv10enc.c @@ -64,9 +64,9 @@ AVCodec ff_rv10_encoder = { .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_RV10, .priv_data_size = sizeof(MpegEncContext), - .init = ff_MPV_encode_init, - .encode2 = ff_MPV_encode_picture, - .close = ff_MPV_encode_end, + .init = ff_mpv_encode_init, + .encode2 = ff_mpv_encode_picture, + .close = ff_mpv_encode_end, .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }, .priv_class = &rv10_class, }; diff --git a/libavcodec/rv20enc.c b/libavcodec/rv20enc.c index 67879e2e98..4462bde01d 100644 --- a/libavcodec/rv20enc.c +++ b/libavcodec/rv20enc.c @@ -65,9 +65,9 @@ AVCodec ff_rv20_encoder = { .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_RV20, .priv_data_size = sizeof(MpegEncContext), - .init = ff_MPV_encode_init, - .encode2 = ff_MPV_encode_picture, - .close = ff_MPV_encode_end, + .init = ff_mpv_encode_init, + .encode2 = ff_mpv_encode_picture, + .close = ff_mpv_encode_end, .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }, .priv_class = &rv20_class, }; diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c index 7411f6fa4b..75f321a430 100644 --- a/libavcodec/rv34.c +++ b/libavcodec/rv34.c @@ -1475,7 +1475,7 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx) MpegEncContext *s = &r->s; int ret; - ff_MPV_decode_defaults(s); + ff_mpv_decode_defaults(s); s->avctx = avctx; s->out_format = FMT_H263; s->codec_id = avctx->codec_id; @@ -1489,7 +1489,7 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx) s->low_delay = 0; ff_mpv_idct_init(s); - if ((ret = ff_MPV_common_init(s)) < 0) + if ((ret = ff_mpv_common_init(s)) < 0) return ret; ff_h264_pred_init(&r->h, AV_CODEC_ID_RV40, 8, 1); @@ -1504,7 +1504,7 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx) #endif if ((ret = rv34_decoder_alloc(r)) < 0) { - ff_MPV_common_end(&r->s); + ff_mpv_common_end(&r->s); return ret; } @@ -1526,10 +1526,10 @@ int ff_rv34_decode_init_thread_copy(AVCodecContext *avctx) if (avctx->internal->is_copy) { r->tmp_b_block_base = NULL; ff_mpv_idct_init(&r->s); - if ((err = ff_MPV_common_init(&r->s)) < 0) + if ((err = ff_mpv_common_init(&r->s)) < 0) return err; if ((err = rv34_decoder_alloc(r)) < 0) { - ff_MPV_common_end(&r->s); + ff_mpv_common_end(&r->s); return err; } } @@ -1549,7 +1549,7 @@ int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecConte if (s->height != s1->height || s->width != s1->width) { s->height = s1->height; s->width = s1->width; - if ((err = 
ff_MPV_common_frame_size_change(s)) < 0) + if ((err = ff_mpv_common_frame_size_change(s)) < 0) return err; if ((err = rv34_decoder_realloc(r)) < 0) return err; @@ -1580,7 +1580,7 @@ static int finish_frame(AVCodecContext *avctx, AVFrame *pict) int got_picture = 0, ret; ff_er_frame_end(&s->er); - ff_MPV_frame_end(s); + ff_mpv_frame_end(s); s->mb_num_left = 0; if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) @@ -1665,7 +1665,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx, av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.", s->mb_num_left); ff_er_frame_end(&s->er); - ff_MPV_frame_end(s); + ff_mpv_frame_end(s); } if (s->width != si.width || s->height != si.height) { @@ -1681,13 +1681,13 @@ int ff_rv34_decode_frame(AVCodecContext *avctx, if (err < 0) return err; - if ((err = ff_MPV_common_frame_size_change(s)) < 0) + if ((err = ff_mpv_common_frame_size_change(s)) < 0) return err; if ((err = rv34_decoder_realloc(r)) < 0) return err; } s->pict_type = si.type ? si.type : AV_PICTURE_TYPE_I; - if (ff_MPV_frame_start(s, s->avctx) < 0) + if (ff_mpv_frame_start(s, s->avctx) < 0) return -1; ff_mpeg_er_frame_start(s); if (!r->tmp_b_block_base) { @@ -1792,7 +1792,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx, /* always mark the current frame as finished, frame-mt supports * only complete frames */ ff_er_frame_end(&s->er); - ff_MPV_frame_end(s); + ff_mpv_frame_end(s); s->mb_num_left = 0; ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0); return AVERROR_INVALIDDATA; @@ -1806,7 +1806,7 @@ av_cold int ff_rv34_decode_end(AVCodecContext *avctx) { RV34DecContext *r = avctx->priv_data; - ff_MPV_common_end(&r->s); + ff_mpv_common_end(&r->s); rv34_decoder_free(r); return 0; diff --git a/libavcodec/svq1enc.c b/libavcodec/svq1enc.c index 506ee9be9e..361c465569 100644 --- a/libavcodec/svq1enc.c +++ b/libavcodec/svq1enc.c @@ -483,7 +483,7 @@ static av_cold int svq1_encode_end(AVCodecContext *avctx) avctx->frame_number)); s->m.mb_type = NULL; - ff_MPV_common_end(&s->m); + ff_mpv_common_end(&s->m); av_freep(&s->m.me.scratchpad); av_freep(&s->m.me.map); @@ -533,7 +533,7 @@ static av_cold int svq1_encode_init(AVCodecContext *avctx) s->avctx = avctx; s->m.avctx = avctx; - if ((ret = ff_MPV_common_init(&s->m)) < 0) { + if ((ret = ff_mpv_common_init(&s->m)) < 0) { svq1_encode_end(avctx); return ret; } diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c index f7f6a9f121..85ee9de941 100644 --- a/libavcodec/vc1dec.c +++ b/libavcodec/vc1dec.c @@ -5749,7 +5749,7 @@ av_cold int ff_vc1_decode_end(AVCodecContext *avctx) av_freep(&v->sr_rows[i >> 1][i & 1]); av_freep(&v->hrd_rate); av_freep(&v->hrd_buffer); - ff_MPV_common_end(&v->s); + ff_mpv_common_end(&v->s); av_freep(&v->mv_type_mb_plane); av_freep(&v->direct_mb_plane); av_freep(&v->forward_mb_plane); @@ -5927,7 +5927,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, if (ff_msmpeg4_decode_init(avctx) < 0) goto err; if (ff_vc1_decode_init_alloc_tables(v) < 0) { - ff_MPV_common_end(s); + ff_mpv_common_end(s); goto err; } @@ -5980,7 +5980,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, s->next_p_frame_damaged = 0; } - if (ff_MPV_frame_start(s, avctx) < 0) { + if (ff_mpv_frame_start(s, avctx) < 0) { goto err; } @@ -6093,7 +6093,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, ff_er_frame_end(&s->er); } - ff_MPV_frame_end(s); + ff_mpv_frame_end(s); if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) { image: diff --git 
a/libavcodec/wmv2enc.c b/libavcodec/wmv2enc.c
index 256c4e546f..9da2b3f324 100644
--- a/libavcodec/wmv2enc.c
+++ b/libavcodec/wmv2enc.c
@@ -54,7 +54,7 @@ static int encode_ext_header(Wmv2Context *w){
 static av_cold int wmv2_encode_init(AVCodecContext *avctx){
     Wmv2Context * const w= avctx->priv_data;
 
-    if(ff_MPV_encode_init(avctx) < 0)
+    if (ff_mpv_encode_init(avctx) < 0)
         return -1;
 
     ff_wmv2_common_init(w);
@@ -217,7 +217,7 @@ AVCodec ff_wmv2_encoder = {
     .id             = AV_CODEC_ID_WMV2,
     .priv_data_size = sizeof(Wmv2Context),
     .init           = wmv2_encode_init,
-    .encode2        = ff_MPV_encode_picture,
-    .close          = ff_MPV_encode_end,
+    .encode2        = ff_mpv_encode_picture,
+    .close          = ff_mpv_encode_end,
     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
 };
diff --git a/libavcodec/x86/mpegvideo.c b/libavcodec/x86/mpegvideo.c
index db94ffabb0..1395156a36 100644
--- a/libavcodec/x86/mpegvideo.c
+++ b/libavcodec/x86/mpegvideo.c
@@ -444,7 +444,7 @@ __asm__ volatile(
 
 #endif /* HAVE_INLINE_ASM */
 
-av_cold void ff_MPV_common_init_x86(MpegEncContext *s)
+av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
 {
 #if HAVE_INLINE_ASM
     int cpu_flags = av_get_cpu_flags();
diff --git a/libavcodec/x86/mpegvideoenc.c b/libavcodec/x86/mpegvideoenc.c
index 2e4f06c8dc..47349d17ec 100644
--- a/libavcodec/x86/mpegvideoenc.c
+++ b/libavcodec/x86/mpegvideoenc.c
@@ -193,7 +193,7 @@ static void denoise_dct_sse2(MpegEncContext *s, int16_t *block){
 }
 #endif /* HAVE_INLINE_ASM */
 
-av_cold void ff_MPV_encode_init_x86(MpegEncContext *s)
+av_cold void ff_mpv_encode_init_x86(MpegEncContext *s)
 {
     const int dct_algo = s->avctx->dct_algo;
     int i;
-- 
cgit v1.2.3