From b073819bc974965056f435d69dc51e9ec5877395 Mon Sep 17 00:00:00 2001
From: Justin Ruggles
Date: Sun, 8 Apr 2012 15:00:01 -0400
Subject: avconv: allow '-async -1' to disable timestamp sync for audio encoding

This will allow a workaround for cases where input timestamps are invalid or
when decoder delay of 1 packet or more confuses avconv into using the wrong
timestamps as a sync reference.
---
 avconv.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/avconv.c b/avconv.c
index 2dce66f7f7..7344028bce 100644
--- a/avconv.c
+++ b/avconv.c
@@ -1101,7 +1101,7 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
         ost->reformat_pair = MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt);
     }
 
-    if (audio_sync_method) {
+    if (audio_sync_method > 0) {
         double delta = get_sync_ipts(ost, ist->last_dts) * enc->sample_rate - ost->sync_opts -
                        av_fifo_size(ost->fifo) / (enc->channels * osize);
         int idelta = delta * dec->sample_rate / enc->sample_rate;
@@ -1148,7 +1148,7 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
                 av_resample_compensate(*(struct AVResampleContext**)ost->resample,
                                        comp, enc->sample_rate);
             }
         }
-    } else
+    } else if (audio_sync_method == 0)
         ost->sync_opts = lrintf(get_sync_ipts(ost, ist->last_dts) * enc->sample_rate) -
                          av_fifo_size(ost->fifo) / (enc->channels * osize); // FIXME wrong
--
cgit v1.2.3


From 02c39f056a77427850f43aaa19b8856534a1693a Mon Sep 17 00:00:00 2001
From: Diego Biurrun
Date: Mon, 2 Apr 2012 19:03:30 +0200
Subject: ppc: Add/remove a number of const qualifiers to fix related warnings.

---
 libavcodec/ppc/gmc_altivec.c     |  2 +-
 libavcodec/ppc/int_altivec.c     |  3 ++-
 libswscale/ppc/yuv2rgb_altivec.c | 10 +++++-----
 3 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/libavcodec/ppc/gmc_altivec.c b/libavcodec/ppc/gmc_altivec.c
index 965921ab7f..fb67b9ec36 100644
--- a/libavcodec/ppc/gmc_altivec.c
+++ b/libavcodec/ppc/gmc_altivec.c
@@ -48,7 +48,7 @@ void ff_gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int
     unsigned long dst_odd        = (unsigned long)dst & 0x0000000F;
     unsigned long src_really_odd = (unsigned long)src & 0x0000000F;
 
-    tempA = vec_ld(0, (unsigned short*)ABCD);
+    tempA = vec_ld(0, (const unsigned short*)ABCD);
     Av = vec_splat(tempA, 0);
     Bv = vec_splat(tempA, 1);
     Cv = vec_splat(tempA, 2);
diff --git a/libavcodec/ppc/int_altivec.c b/libavcodec/ppc/int_altivec.c
index ce996fa68a..f81b478449 100644
--- a/libavcodec/ppc/int_altivec.c
+++ b/libavcodec/ppc/int_altivec.c
@@ -79,7 +79,8 @@ static int ssd_int8_vs_int16_altivec(const int8_t *pix1, const int16_t *pix2,
     return u.score[3];
 }
 
-static int32_t scalarproduct_int16_altivec(const int16_t * v1, const int16_t * v2, int order, const int shift)
+static int32_t scalarproduct_int16_altivec(int16_t *v1, const int16_t *v2,
+                                           int order, const int shift)
 {
     int i;
     LOAD_ZERO;
diff --git a/libswscale/ppc/yuv2rgb_altivec.c b/libswscale/ppc/yuv2rgb_altivec.c
index 523d9966c6..3a140cb1f8 100644
--- a/libswscale/ppc/yuv2rgb_altivec.c
+++ b/libswscale/ppc/yuv2rgb_altivec.c
@@ -300,7 +300,7 @@ static int altivec_ ## name(SwsContext *c, const unsigned char **in, \
     vector signed short R1, G1, B1;                                 \
     vector unsigned char R, G, B;                                   \
                                                                     \
-    vector unsigned char *y1ivP, *y2ivP, *uivP, *vivP;              \
+    const vector unsigned char *y1ivP, *y2ivP, *uivP, *vivP;        \
     vector unsigned char align_perm;                                \
                                                                     \
     vector signed short lCY = c->CY;                                \
@@ -335,10 +335,10 @@ static int altivec_ ## name(SwsContext *c, const unsigned char **in, \
     vec_dstst(oute, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 1); \
                                                                     \
for (j = 0; j < w / 16; j++) { \ - y1ivP = (vector unsigned char *) y1i; \ - y2ivP = (vector unsigned char *) y2i; \ - uivP = (vector unsigned char *) ui; \ - vivP = (vector unsigned char *) vi; \ + y1ivP = (const vector unsigned char *) y1i; \ + y2ivP = (const vector unsigned char *) y2i; \ + uivP = (const vector unsigned char *) ui; \ + vivP = (const vector unsigned char *) vi; \ \ align_perm = vec_lvsl(0, y1i); \ y0 = (vector unsigned char) \ -- cgit v1.2.3 From 294b3a5074820d4c02488ef6dee40bd872272189 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Sun, 8 Apr 2012 12:12:33 +0200 Subject: lavc doxy: add core functions/definitions to a doxy group. --- libavcodec/avcodec.h | 342 ++++++++++++++++++++++++++------------------------- 1 file changed, 177 insertions(+), 165 deletions(-) diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index ef43962c2d..d44151f24d 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -67,6 +67,14 @@ * */ +/** + * @defgroup lavc_core Core functions/structures. + * @ingroup libavc + * + * Basic definitions, functions for querying libavcodec capabilities, + * allocating core structures, etc. + * @{ + */ /** * Identify the syntax and semantics of the bitstream. @@ -3038,6 +3046,175 @@ typedef struct AVSubtitle { int64_t pts; ///< Same as packet pts, in AV_TIME_BASE } AVSubtitle; +/** + * If c is NULL, returns the first registered codec, + * if c is non-NULL, returns the next registered codec after c, + * or NULL if c is the last one. + */ +AVCodec *av_codec_next(AVCodec *c); + +/** + * Return the LIBAVCODEC_VERSION_INT constant. + */ +unsigned avcodec_version(void); + +/** + * Return the libavcodec build-time configuration. + */ +const char *avcodec_configuration(void); + +/** + * Return the libavcodec license. + */ +const char *avcodec_license(void); + +/** + * Register the codec codec and initialize libavcodec. + * + * @warning either this function or avcodec_register_all() must be called + * before any other libavcodec functions. + * + * @see avcodec_register_all() + */ +void avcodec_register(AVCodec *codec); + +/** + * Register all the codecs, parsers and bitstream filters which were enabled at + * configuration time. If you do not call this function you can select exactly + * which formats you want to support, by using the individual registration + * functions. + * + * @see avcodec_register + * @see av_register_codec_parser + * @see av_register_bitstream_filter + */ +void avcodec_register_all(void); + +/** + * Allocate an AVCodecContext and set its fields to default values. The + * resulting struct can be deallocated by calling avcodec_close() on it followed + * by av_free(). + * + * @param codec if non-NULL, allocate private data and initialize defaults + * for the given codec. It is illegal to then call avcodec_open2() + * with a different codec. + * If NULL, then the codec-specific defaults won't be initialized, + * which may result in suboptimal default settings (this is + * important mainly for encoders, e.g. libx264). + * + * @return An AVCodecContext filled with default values or NULL on failure. + * @see avcodec_get_context_defaults + */ +AVCodecContext *avcodec_alloc_context3(AVCodec *codec); + +/** + * Set the fields of the given AVCodecContext to default values corresponding + * to the given codec (defaults may be codec-dependent). + * + * Do not call this function if a non-NULL codec has been passed + * to avcodec_alloc_context3() that allocated this AVCodecContext. 
+ * If codec is non-NULL, it is illegal to call avcodec_open2() with a + * different codec on this AVCodecContext. + */ +int avcodec_get_context_defaults3(AVCodecContext *s, AVCodec *codec); + +/** + * Get the AVClass for AVCodecContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avcodec_get_class(void); + +/** + * Copy the settings of the source AVCodecContext into the destination + * AVCodecContext. The resulting destination codec context will be + * unopened, i.e. you are required to call avcodec_open2() before you + * can use this AVCodecContext to decode/encode video/audio data. + * + * @param dest target codec context, should be initialized with + * avcodec_alloc_context3(), but otherwise uninitialized + * @param src source codec context + * @return AVERROR() on error (e.g. memory allocation error), 0 on success + */ +int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src); + +/** + * Allocate an AVFrame and set its fields to default values. The resulting + * struct can be deallocated by simply calling av_free(). + * + * @return An AVFrame filled with default values or NULL on failure. + * @see avcodec_get_frame_defaults + */ +AVFrame *avcodec_alloc_frame(void); + +/** + * Set the fields of the given AVFrame to default values. + * + * @param pic The AVFrame of which the fields should be set to default values. + */ +void avcodec_get_frame_defaults(AVFrame *pic); + +/** + * Initialize the AVCodecContext to use the given AVCodec. Prior to using this + * function the context has to be allocated with avcodec_alloc_context3(). + * + * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(), + * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for + * retrieving a codec. + * + * @warning This function is not thread safe! + * + * @code + * avcodec_register_all(); + * av_dict_set(&opts, "b", "2.5M", 0); + * codec = avcodec_find_decoder(CODEC_ID_H264); + * if (!codec) + * exit(1); + * + * context = avcodec_alloc_context3(codec); + * + * if (avcodec_open2(context, codec, opts) < 0) + * exit(1); + * @endcode + * + * @param avctx The context to initialize. + * @param codec The codec to open this context for. If a non-NULL codec has been + * previously passed to avcodec_alloc_context3() or + * avcodec_get_context_defaults3() for this context, then this + * parameter MUST be either NULL or equal to the previously passed + * codec. + * @param options A dictionary filled with AVCodecContext and codec-private options. + * On return this object will be filled with options that were not found. + * + * @return zero on success, a negative value on error + * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(), + * av_dict_set(), av_opt_find(). + */ +int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options); + +/** + * Close a given AVCodecContext and free all the data associated with it + * (but not the AVCodecContext itself). + * + * Calling this function on an AVCodecContext that hasn't been opened will free + * the codec-specific data allocated in avcodec_alloc_context3() / + * avcodec_get_context_defaults3() with a non-NULL codec. Subsequent calls will + * do nothing. + */ +int avcodec_close(AVCodecContext *avctx); + +/** + * Free all allocated data in the given subtitle struct. + * + * @param sub AVSubtitle to free. 
+ */ +void avsubtitle_free(AVSubtitle *sub); + +/** + * @} + */ + /* packet functions */ /** @@ -3363,38 +3540,6 @@ int avpicture_deinterlace(AVPicture *dst, const AVPicture *src, /* external high level API */ -/** - * If c is NULL, returns the first registered codec, - * if c is non-NULL, returns the next registered codec after c, - * or NULL if c is the last one. - */ -AVCodec *av_codec_next(AVCodec *c); - -/** - * Return the LIBAVCODEC_VERSION_INT constant. - */ -unsigned avcodec_version(void); - -/** - * Return the libavcodec build-time configuration. - */ -const char *avcodec_configuration(void); - -/** - * Return the libavcodec license. - */ -const char *avcodec_license(void); - -/** - * Register the codec codec and initialize libavcodec. - * - * @warning either this function or avcodec_register_all() must be called - * before any other libavcodec functions. - * - * @see avcodec_register_all() - */ -void avcodec_register(AVCodec *codec); - /** * Find a registered encoder with a matching codec ID. * @@ -3437,63 +3582,6 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode); */ const char *av_get_profile_name(const AVCodec *codec, int profile); -/** - * Set the fields of the given AVCodecContext to default values corresponding - * to the given codec (defaults may be codec-dependent). - * - * Do not call this function if a non-NULL codec has been passed - * to avcodec_alloc_context3() that allocated this AVCodecContext. - * If codec is non-NULL, it is illegal to call avcodec_open2() with a - * different codec on this AVCodecContext. - */ -int avcodec_get_context_defaults3(AVCodecContext *s, AVCodec *codec); - -/** - * Allocate an AVCodecContext and set its fields to default values. The - * resulting struct can be deallocated by calling avcodec_close() on it followed - * by av_free(). - * - * @param codec if non-NULL, allocate private data and initialize defaults - * for the given codec. It is illegal to then call avcodec_open2() - * with a different codec. - * If NULL, then the codec-specific defaults won't be initialized, - * which may result in suboptimal default settings (this is - * important mainly for encoders, e.g. libx264). - * - * @return An AVCodecContext filled with default values or NULL on failure. - * @see avcodec_get_context_defaults - */ -AVCodecContext *avcodec_alloc_context3(AVCodec *codec); - -/** - * Copy the settings of the source AVCodecContext into the destination - * AVCodecContext. The resulting destination codec context will be - * unopened, i.e. you are required to call avcodec_open2() before you - * can use this AVCodecContext to decode/encode video/audio data. - * - * @param dest target codec context, should be initialized with - * avcodec_alloc_context3(), but otherwise uninitialized - * @param src source codec context - * @return AVERROR() on error (e.g. memory allocation error), 0 on success - */ -int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src); - -/** - * Set the fields of the given AVFrame to default values. - * - * @param pic The AVFrame of which the fields should be set to default values. - */ -void avcodec_get_frame_defaults(AVFrame *pic); - -/** - * Allocate an AVFrame and set its fields to default values. The resulting - * struct can be deallocated by simply calling av_free(). - * - * @return An AVFrame filled with default values or NULL on failure. 
- * @see avcodec_get_frame_defaults - */ -AVFrame *avcodec_alloc_frame(void); - int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic); void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic); int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic); @@ -3534,44 +3622,6 @@ int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, v int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count); //FIXME func typedef -/** - * Initialize the AVCodecContext to use the given AVCodec. Prior to using this - * function the context has to be allocated with avcodec_alloc_context3(). - * - * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(), - * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for - * retrieving a codec. - * - * @warning This function is not thread safe! - * - * @code - * avcodec_register_all(); - * av_dict_set(&opts, "b", "2.5M", 0); - * codec = avcodec_find_decoder(CODEC_ID_H264); - * if (!codec) - * exit(1); - * - * context = avcodec_alloc_context3(codec); - * - * if (avcodec_open2(context, codec, opts) < 0) - * exit(1); - * @endcode - * - * @param avctx The context to initialize. - * @param codec The codec to open this context for. If a non-NULL codec has been - * previously passed to avcodec_alloc_context3() or - * avcodec_get_context_defaults3() for this context, then this - * parameter MUST be either NULL or equal to the previously passed - * codec. - * @param options A dictionary filled with AVCodecContext and codec-private options. - * On return this object will be filled with options that were not found. - * - * @return zero on success, a negative value on error - * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(), - * av_dict_set(), av_opt_find(). - */ -int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options); - #if FF_API_OLD_DECODE_AUDIO /** * Wrapper function which calls avcodec_decode_audio4. @@ -3733,13 +3783,6 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt); -/** - * Free all allocated data in the given subtitle struct. - * - * @param sub AVSubtitle to free. - */ -void avsubtitle_free(AVSubtitle *sub); - #if FF_API_OLD_ENCODE_AUDIO /** * Encode an audio frame from samples into buf. @@ -3895,29 +3938,6 @@ int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub); -/** - * Close a given AVCodecContext and free all the data associated with it - * (but not the AVCodecContext itself). - * - * Calling this function on an AVCodecContext that hasn't been opened will free - * the codec-specific data allocated in avcodec_alloc_context3() / - * avcodec_get_context_defaults3() with a non-NULL codec. Subsequent calls will - * do nothing. - */ -int avcodec_close(AVCodecContext *avctx); - -/** - * Register all the codecs, parsers and bitstream filters which were enabled at - * configuration time. If you do not call this function you can select exactly - * which formats you want to support, by using the individual registration - * functions. - * - * @see avcodec_register - * @see av_register_codec_parser - * @see av_register_bitstream_filter - */ -void avcodec_register_all(void); - /** * Flush buffers, should be called when seeking or when switching to a different stream. 
*/ @@ -4306,14 +4326,6 @@ int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op)); */ enum AVMediaType avcodec_get_type(enum CodecID codec_id); -/** - * Get the AVClass for AVCodecContext. It can be used in combination with - * AV_OPT_SEARCH_FAKE_OBJ for examining options. - * - * @see av_opt_find(). - */ -const AVClass *avcodec_get_class(void); - /** * @return a positive value if s is open (i.e. avcodec_open2() was called on it * with no corresponding avcodec_close()), 0 otherwise. -- cgit v1.2.3 From 30f3f625632e5a29cb0141a805474e4078a5ab53 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Sun, 8 Apr 2012 12:28:46 +0200 Subject: lavc doxy: add AVPacket-related stuff to a separate doxy group. Also move AV_PKT_DATA_PARAM_CHANGE/AV_PKT_DATA_H263_MB_INFO to the proper place. --- libavcodec/avcodec.h | 82 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 49 insertions(+), 33 deletions(-) diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index d44151f24d..93779d2b42 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -750,10 +750,48 @@ typedef struct AVPanScan{ #define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content. #define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update). +/** + * @defgroup lavc_packet AVPacket + * + * Types and functions for working with AVPacket. + * @{ + */ enum AVPacketSideDataType { AV_PKT_DATA_PALETTE, AV_PKT_DATA_NEW_EXTRADATA, + + /** + * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows: + * u32le param_flags + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) + * s32le channel_count + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) + * u64le channel_layout + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) + * s32le sample_rate + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) + * s32le width + * s32le height + */ AV_PKT_DATA_PARAM_CHANGE, + + /** + * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of + * structures with info about macroblocks relevant to splitting the + * packet into smaller packets on macroblock edges (e.g. as for RFC 2190). + * That is, it does not necessarily contain info about all macroblocks, + * as long as the distance between macroblocks in the info is smaller + * than the target payload size. 
+ * Each MB info structure is 12 bytes, and is laid out as follows: + * u32le bit offset from the start of the packet + * u8 current quantizer at the start of the macroblock + * u8 GOB number + * u16le macroblock address within the GOB + * u8 horizontal MV predictor + * u8 vertical MV predictor + * u8 horizontal MV predictor for block number 3 + * u8 vertical MV predictor for block number 3 + */ AV_PKT_DATA_H263_MB_INFO, }; @@ -823,44 +861,15 @@ typedef struct AVPacket { #define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe #define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted -/** - * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows: - * u32le param_flags - * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) - * s32le channel_count - * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) - * u64le channel_layout - * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) - * s32le sample_rate - * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) - * s32le width - * s32le height - */ - -/** - * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of - * structures with info about macroblocks relevant to splitting the - * packet into smaller packets on macroblock edges (e.g. as for RFC 2190). - * That is, it does not necessarily contain info about all macroblocks, - * as long as the distance between macroblocks in the info is smaller - * than the target payload size. - * Each MB info structure is 12 bytes, and is laid out as follows: - * u32le bit offset from the start of the packet - * u8 current quantizer at the start of the macroblock - * u8 GOB number - * u16le macroblock address within the GOB - * u8 horizontal MV predictor - * u8 vertical MV predictor - * u8 horizontal MV predictor for block number 3 - * u8 vertical MV predictor for block number 3 - */ - enum AVSideDataParamChangeFlags { AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001, AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002, AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004, AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008, }; +/** + * @} + */ /** * Audio Video Frame. 
@@ -3215,7 +3224,10 @@ void avsubtitle_free(AVSubtitle *sub); * @} */ -/* packet functions */ +/** + * @addtogroup lavc_packet + * @{ + */ /** * @deprecated use NULL instead @@ -3306,6 +3318,10 @@ int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int *size); +/** + * @} + */ + /* resample.c */ struct ReSampleContext; -- cgit v1.2.3 From 199ada494452d2af92cd07928d1a7d752badf3fb Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Sun, 8 Apr 2012 12:31:56 +0200 Subject: lavc doxy: fix formatting of AV_PKT_DATA_{PARAM_CHANGE,H263_MB_INFO} --- libavcodec/avcodec.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index 93779d2b42..7bcd7d2f01 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -762,6 +762,7 @@ enum AVPacketSideDataType { /** * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows: + * @code * u32le param_flags * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) * s32le channel_count @@ -772,6 +773,7 @@ enum AVPacketSideDataType { * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) * s32le width * s32le height + * @endcode */ AV_PKT_DATA_PARAM_CHANGE, @@ -783,6 +785,7 @@ enum AVPacketSideDataType { * as long as the distance between macroblocks in the info is smaller * than the target payload size. * Each MB info structure is 12 bytes, and is laid out as follows: + * @code * u32le bit offset from the start of the packet * u8 current quantizer at the start of the macroblock * u8 GOB number @@ -791,6 +794,7 @@ enum AVPacketSideDataType { * u8 vertical MV predictor * u8 horizontal MV predictor for block number 3 * u8 vertical MV predictor for block number 3 + * @endcode */ AV_PKT_DATA_H263_MB_INFO, }; -- cgit v1.2.3 From c8ef8464c277d0544691246ec07ca193f8cd3776 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Sun, 8 Apr 2012 12:45:58 +0200 Subject: lavc doxy: add decoding functions to a doxy group. --- libavcodec/avcodec.h | 1151 +++++++++++++++++++++++++------------------------- 1 file changed, 586 insertions(+), 565 deletions(-) diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index 7bcd7d2f01..aa02cbba2f 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -425,6 +425,7 @@ enum CodecID { #endif /** + * @ingroup lavc_decoding * Required number of additionally allocated bytes at the end of the input bitstream for decoding. * This is mainly needed because some optimized bitstream readers read * 32 or 64 bit at once and could read over the end.
@@ -456,6 +457,9 @@ enum Motion_Est_ID { ME_TESA, ///< transformed exhaustive search algorithm }; +/** + * @ingroup lavc_decoding + */ enum AVDiscard{ /* We leave some space between them for extensions (drop some * keyframes for intra-only or drop just some bidir frames). */ @@ -3326,174 +3330,596 @@ uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, * @} */ -/* resample.c */ - -struct ReSampleContext; -struct AVResampleContext; - -typedef struct ReSampleContext ReSampleContext; - /** - * Initialize audio resampling context. - * - * @param output_channels number of output channels - * @param input_channels number of input channels - * @param output_rate output sample rate - * @param input_rate input sample rate - * @param sample_fmt_out requested output sample format - * @param sample_fmt_in input sample format - * @param filter_length length of each FIR filter in the filterbank relative to the cutoff frequency - * @param log2_phase_count log2 of the number of entries in the polyphase filterbank - * @param linear if 1 then the used FIR filter will be linearly interpolated - between the 2 closest, if 0 the closest will be used - * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate - * @return allocated ReSampleContext, NULL if error occurred + * @addtogroup lavc_decoding + * @{ */ -ReSampleContext *av_audio_resample_init(int output_channels, int input_channels, - int output_rate, int input_rate, - enum AVSampleFormat sample_fmt_out, - enum AVSampleFormat sample_fmt_in, - int filter_length, int log2_phase_count, - int linear, double cutoff); - -int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples); /** - * Free resample context. + * Find a registered decoder with a matching codec ID. * - * @param s a non-NULL pointer to a resample context previously - * created with av_audio_resample_init() - */ -void audio_resample_close(ReSampleContext *s); - - -/** - * Initialize an audio resampler. - * Note, if either rate is not an integer then simply scale both rates up so they are. - * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq - * @param log2_phase_count log2 of the number of entries in the polyphase filterbank - * @param linear If 1 then the used FIR filter will be linearly interpolated - between the 2 closest, if 0 the closest will be used - * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate + * @param id CodecID of the requested decoder + * @return A decoder if one was found, NULL otherwise. */ -struct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff); +AVCodec *avcodec_find_decoder(enum CodecID id); /** - * Resample an array of samples using a previously configured context. - * @param src an array of unconsumed samples - * @param consumed the number of samples of src which have been consumed are returned here - * @param src_size the number of unconsumed samples available - * @param dst_size the amount of space in samples available in dst - * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context. - * @return the number of samples written in dst or -1 if an error occurred + * Find a registered decoder with the specified name. + * + * @param name name of the requested decoder + * @return A decoder if one was found, NULL otherwise. 
*/ -int av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx); +AVCodec *avcodec_find_decoder_by_name(const char *name); +int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic); +void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic); +int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic); /** - * Compensate samplerate/timestamp drift. The compensation is done by changing - * the resampler parameters, so no audible clicks or similar distortions occur - * @param compensation_distance distance in output samples over which the compensation should be performed - * @param sample_delta number of output samples which should be output less - * - * example: av_resample_compensate(c, 10, 500) - * here instead of 510 samples only 500 samples would be output + * Return the amount of padding in pixels which the get_buffer callback must + * provide around the edge of the image for codecs which do not have the + * CODEC_FLAG_EMU_EDGE flag. * - * note, due to rounding the actual compensation might be slightly different, - * especially if the compensation_distance is large and the in_rate used during init is small + * @return Required padding in pixels. */ -void av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance); -void av_resample_close(struct AVResampleContext *c); +unsigned avcodec_get_edge_width(void); /** - * Allocate memory for a picture. Call avpicture_free() to free it. - * - * @see avpicture_fill() + * Modify width and height values so that they will result in a memory + * buffer that is acceptable for the codec if you do not use any horizontal + * padding. * - * @param picture the picture to be filled in - * @param pix_fmt the format of the picture - * @param width the width of the picture - * @param height the height of the picture - * @return zero if successful, a negative value if not + * May only be used if a codec with CODEC_CAP_DR1 has been opened. + * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased + * according to avcodec_get_edge_width() before. */ -int avpicture_alloc(AVPicture *picture, enum PixelFormat pix_fmt, int width, int height); +void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height); /** - * Free a picture previously allocated by avpicture_alloc(). - * The data buffer used by the AVPicture is freed, but the AVPicture structure - * itself is not. + * Modify width and height values so that they will result in a memory + * buffer that is acceptable for the codec if you also ensure that all + * line sizes are a multiple of the respective linesize_align[i]. * - * @param picture the AVPicture to be freed + * May only be used if a codec with CODEC_CAP_DR1 has been opened. + * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased + * according to avcodec_get_edge_width() before. */ -void avpicture_free(AVPicture *picture); +void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, + int linesize_align[AV_NUM_DATA_POINTERS]); +#if FF_API_OLD_DECODE_AUDIO /** - * Fill in the AVPicture fields. - * The fields of the given AVPicture are filled in by using the 'ptr' address - * which points to the image data buffer. Depending on the specified picture - * format, one or multiple image data pointers and line sizes will be set. 
- * If a planar format is specified, several pointers will be set pointing to - * the different picture planes and the line sizes of the different planes - * will be stored in the lines_sizes array. - * Call with ptr == NULL to get the required size for the ptr buffer. + * Wrapper function which calls avcodec_decode_audio4. * - * To allocate the buffer and fill in the AVPicture fields in one call, - * use avpicture_alloc(). + * @deprecated Use avcodec_decode_audio4 instead. * - * @param picture AVPicture whose fields are to be filled in - * @param ptr Buffer which will contain or contains the actual image data - * @param pix_fmt The format in which the picture data is stored. - * @param width the width of the image in pixels - * @param height the height of the image in pixels - * @return size of the image data in bytes - */ -int avpicture_fill(AVPicture *picture, uint8_t *ptr, - enum PixelFormat pix_fmt, int width, int height); - -/** - * Copy pixel data from an AVPicture into a buffer. - * The data is stored compactly, without any gaps for alignment or padding - * which may be applied by avpicture_fill(). + * Decode the audio frame of size avpkt->size from avpkt->data into samples. + * Some decoders may support multiple frames in a single AVPacket, such + * decoders would then just decode the first frame. In this case, + * avcodec_decode_audio3 has to be called again with an AVPacket that contains + * the remaining data in order to decode the second frame etc. + * If no frame + * could be outputted, frame_size_ptr is zero. Otherwise, it is the + * decompressed frame size in bytes. * - * @see avpicture_get_size() + * @warning You must set frame_size_ptr to the allocated size of the + * output buffer before calling avcodec_decode_audio3(). * - * @param[in] src AVPicture containing image data - * @param[in] pix_fmt The format in which the picture data is stored. - * @param[in] width the width of the image in pixels. - * @param[in] height the height of the image in pixels. - * @param[out] dest A buffer into which picture data will be copied. - * @param[in] dest_size The size of 'dest'. - * @return The number of bytes written to dest, or a negative value (error code) on error. - */ -int avpicture_layout(const AVPicture* src, enum PixelFormat pix_fmt, int width, int height, - unsigned char *dest, int dest_size); - -/** - * Calculate the size in bytes that a picture of the given width and height - * would occupy if stored in the given picture format. - * Note that this returns the size of a compact representation as generated - * by avpicture_layout(), which can be smaller than the size required for e.g. - * avpicture_fill(). + * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than + * the actual read bytes because some optimized bitstream readers read 32 or 64 + * bits at once and could read over the end. * - * @param pix_fmt the given picture format - * @param width the width of the image - * @param height the height of the image - * @return Image data size in bytes or -1 on error (e.g. too large dimensions). + * @warning The end of the input buffer avpkt->data should be set to 0 to ensure that + * no overreading happens for damaged MPEG streams. + * + * @warning You must not provide a custom get_buffer() when using + * avcodec_decode_audio3(). Doing so will override it with + * avcodec_default_get_buffer. Use avcodec_decode_audio4() instead, + * which does allow the application to provide a custom get_buffer(). 
+ * + * @note You might have to align the input buffer avpkt->data and output buffer + * samples. The alignment requirements depend on the CPU: On some CPUs it isn't + * necessary at all, on others it won't work at all if not aligned and on others + * it will work but it will have an impact on performance. + * + * In practice, avpkt->data should have 4 byte alignment at minimum and + * samples should be 16 byte aligned unless the CPU doesn't need it + * (AltiVec and SSE do). + * + * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay + * between input and output, these need to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to return the remaining frames. + * + * @param avctx the codec context + * @param[out] samples the output buffer, sample type in avctx->sample_fmt + * If the sample format is planar, each channel plane will + * be the same size, with no padding between channels. + * @param[in,out] frame_size_ptr the output buffer size in bytes + * @param[in] avpkt The input AVPacket containing the input buffer. + * You can create such packet with av_init_packet() and by then setting + * data and size, some decoders might in addition need other fields. + * All decoders are designed to use the least fields possible though. + * @return On error a negative value is returned, otherwise the number of bytes + * used or zero if no frame data was decompressed (used) from the input AVPacket. */ -int avpicture_get_size(enum PixelFormat pix_fmt, int width, int height); -void avcodec_get_chroma_sub_sample(enum PixelFormat pix_fmt, int *h_shift, int *v_shift); - -void avcodec_set_dimensions(AVCodecContext *s, int width, int height); +attribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, + int *frame_size_ptr, + AVPacket *avpkt); +#endif /** - * Return a value representing the fourCC code associated to the - * pixel format pix_fmt, or 0 if no associated fourCC code can be - * found. + * Decode the audio frame of size avpkt->size from avpkt->data into frame. + * + * Some decoders may support multiple frames in a single AVPacket. Such + * decoders would then just decode the first frame. In this case, + * avcodec_decode_audio4 has to be called again with an AVPacket containing + * the remaining data in order to decode the second frame, etc... + * Even if no frames are returned, the packet needs to be fed to the decoder + * with remaining data until it is completely consumed or an error occurs. + * + * @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE + * larger than the actual read bytes because some optimized bitstream + * readers read 32 or 64 bits at once and could read over the end. + * + * @note You might have to align the input buffer. The alignment requirements + * depend on the CPU and the decoder. + * + * @param avctx the codec context + * @param[out] frame The AVFrame in which to store decoded audio samples. + * Decoders request a buffer of a particular size by setting + * AVFrame.nb_samples prior to calling get_buffer(). The + * decoder may, however, only utilize part of the buffer by + * setting AVFrame.nb_samples to a smaller value in the + * output frame. + * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is + * non-zero. + * @param[in] avpkt The input AVPacket containing the input buffer. + * At least avpkt->data and avpkt->size should be set. Some + * decoders might also require additional fields to be set. 
+ * @return A negative error code is returned if an error occurred during + * decoding, otherwise the number of bytes consumed from the input + * AVPacket is returned. */ -unsigned int avcodec_pix_fmt_to_codec_tag(enum PixelFormat pix_fmt); +int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, + int *got_frame_ptr, AVPacket *avpkt); /** - * Put a string representing the codec tag codec_tag in buf. + * Decode the video frame of size avpkt->size from avpkt->data into picture. + * Some decoders may support multiple frames in a single AVPacket, such + * decoders would then just decode the first frame. + * + * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than + * the actual read bytes because some optimized bitstream readers read 32 or 64 + * bits at once and could read over the end. + * + * @warning The end of the input buffer buf should be set to 0 to ensure that + * no overreading happens for damaged MPEG streams. + * + * @note You might have to align the input buffer avpkt->data. + * The alignment requirements depend on the CPU: on some CPUs it isn't + * necessary at all, on others it won't work at all if not aligned and on others + * it will work but it will have an impact on performance. + * + * In practice, avpkt->data should have 4 byte alignment at minimum. + * + * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay + * between input and output, these need to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to return the remaining frames. + * + * @param avctx the codec context + * @param[out] picture The AVFrame in which the decoded video frame will be stored. + * Use avcodec_alloc_frame to get an AVFrame, the codec will + * allocate memory for the actual bitmap. + * with default get/release_buffer(), the decoder frees/reuses the bitmap as it sees fit. + * with overridden get/release_buffer() (needs CODEC_CAP_DR1) the user decides into what buffer the decoder + * decodes and the decoder tells the user once it does not need the data anymore, + * the user app can at this point free/reuse/keep the memory as it sees fit. + * + * @param[in] avpkt The input AVpacket containing the input buffer. + * You can create such packet with av_init_packet() and by then setting + * data and size, some decoders might in addition need other fields like + * flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least + * fields possible. + * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero. + * @return On error a negative value is returned, otherwise the number of bytes + * used or zero if no frame could be decompressed. + */ +int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, + int *got_picture_ptr, + AVPacket *avpkt); + +/** + * Decode a subtitle message. + * Return a negative value on error, otherwise return the number of bytes used. + * If no subtitle could be decompressed, got_sub_ptr is zero. + * Otherwise, the subtitle is stored in *sub. + * Note that CODEC_CAP_DR1 is not available for subtitle codecs. This is for + * simplicity, because the performance difference is expect to be negligible + * and reusing a get_buffer written for video codecs would probably perform badly + * due to a potentially very different allocation pattern. + * + * @param avctx the codec context + * @param[out] sub The AVSubtitle in which the decoded subtitle will be stored, must be + freed with avsubtitle_free if *got_sub_ptr is set. 
+ * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero. + * @param[in] avpkt The input AVPacket containing the input buffer. + */ +int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, + int *got_sub_ptr, + AVPacket *avpkt); + +/** + * @defgroup lavc_parsing Frame parsing + * @{ + */ + +typedef struct AVCodecParserContext { + void *priv_data; + struct AVCodecParser *parser; + int64_t frame_offset; /* offset of the current frame */ + int64_t cur_offset; /* current offset + (incremented by each av_parser_parse()) */ + int64_t next_frame_offset; /* offset of the next frame */ + /* video info */ + int pict_type; /* XXX: Put it back in AVCodecContext. */ + /** + * This field is used for proper frame duration computation in lavf. + * It signals, how much longer the frame duration of the current frame + * is compared to normal frame duration. + * + * frame_duration = (1 + repeat_pict) * time_base + * + * It is used by codecs like H.264 to display telecined material. + */ + int repeat_pict; /* XXX: Put it back in AVCodecContext. */ + int64_t pts; /* pts of the current frame */ + int64_t dts; /* dts of the current frame */ + + /* private data */ + int64_t last_pts; + int64_t last_dts; + int fetch_timestamp; + +#define AV_PARSER_PTS_NB 4 + int cur_frame_start_index; + int64_t cur_frame_offset[AV_PARSER_PTS_NB]; + int64_t cur_frame_pts[AV_PARSER_PTS_NB]; + int64_t cur_frame_dts[AV_PARSER_PTS_NB]; + + int flags; +#define PARSER_FLAG_COMPLETE_FRAMES 0x0001 +#define PARSER_FLAG_ONCE 0x0002 +/// Set if the parser has a valid file offset +#define PARSER_FLAG_FETCHED_OFFSET 0x0004 + + int64_t offset; ///< byte offset from starting packet start + int64_t cur_frame_end[AV_PARSER_PTS_NB]; + + /** + * Set by parser to 1 for key frames and 0 for non-key frames. + * It is initialized to -1, so if the parser doesn't set this flag, + * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames + * will be used. + */ + int key_frame; + + /** + * Time difference in stream time base units from the pts of this + * packet to the point at which the output from the decoder has converged + * independent from the availability of previous frames. That is, the + * frames are virtually identical no matter if decoding started from + * the very first frame or from this keyframe. + * Is AV_NOPTS_VALUE if unknown. + * This field is not the display duration of the current frame. + * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY + * set. + * + * The purpose of this field is to allow seeking in streams that have no + * keyframes in the conventional sense. It corresponds to the + * recovery point SEI in H.264 and match_time_delta in NUT. It is also + * essential for some types of subtitle streams to ensure that all + * subtitles are correctly displayed after seeking. + */ + int64_t convergence_duration; + + // Timestamp generation support: + /** + * Synchronization point for start of timestamp generation. + * + * Set to >0 for sync point, 0 for no sync point and <0 for undefined + * (default). + * + * For example, this corresponds to presence of H.264 buffering period + * SEI message. + */ + int dts_sync_point; + + /** + * Offset of the current timestamp against last timestamp sync point in + * units of AVCodecContext.time_base. + * + * Set to INT_MIN when dts_sync_point unused. Otherwise, it must + * contain a valid timestamp offset. 
+ * + * Note that the timestamp of sync point has usually a nonzero + * dts_ref_dts_delta, which refers to the previous sync point. Offset of + * the next frame after timestamp sync point will be usually 1. + * + * For example, this corresponds to H.264 cpb_removal_delay. + */ + int dts_ref_dts_delta; + + /** + * Presentation delay of current frame in units of AVCodecContext.time_base. + * + * Set to INT_MIN when dts_sync_point unused. Otherwise, it must + * contain valid non-negative timestamp delta (presentation time of a frame + * must not lie in the past). + * + * This delay represents the difference between decoding and presentation + * time of the frame. + * + * For example, this corresponds to H.264 dpb_output_delay. + */ + int pts_dts_delta; + + /** + * Position of the packet in file. + * + * Analogous to cur_frame_pts/dts + */ + int64_t cur_frame_pos[AV_PARSER_PTS_NB]; + + /** + * Byte position of currently parsed frame in stream. + */ + int64_t pos; + + /** + * Previous frame byte position. + */ + int64_t last_pos; + + /** + * Duration of the current frame. + * For audio, this is in units of 1 / AVCodecContext.sample_rate. + * For all other types, this is in units of AVCodecContext.time_base. + */ + int duration; +} AVCodecParserContext; + +typedef struct AVCodecParser { + int codec_ids[5]; /* several codec IDs are permitted */ + int priv_data_size; + int (*parser_init)(AVCodecParserContext *s); + int (*parser_parse)(AVCodecParserContext *s, + AVCodecContext *avctx, + const uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size); + void (*parser_close)(AVCodecParserContext *s); + int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size); + struct AVCodecParser *next; +} AVCodecParser; + +AVCodecParser *av_parser_next(AVCodecParser *c); + +void av_register_codec_parser(AVCodecParser *parser); +AVCodecParserContext *av_parser_init(int codec_id); + +/** + * Parse a packet. + * + * @param s parser context. + * @param avctx codec context. + * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished. + * @param poutbuf_size set to size of parsed buffer or zero if not yet finished. + * @param buf input buffer. + * @param buf_size input length, to signal EOF, this should be 0 (so that the last frame can be output). + * @param pts input presentation timestamp. + * @param dts input decoding timestamp. + * @param pos input byte position in stream. + * @return the number of bytes of the input bitstream used. + * + * Example: + * @code + * while(in_len){ + * len = av_parser_parse2(myparser, AVCodecContext, &data, &size, + * in_data, in_len, + * pts, dts, pos); + * in_data += len; + * in_len -= len; + * + * if(size) + * decode_frame(data, size); + * } + * @endcode + */ +int av_parser_parse2(AVCodecParserContext *s, + AVCodecContext *avctx, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, + int64_t pts, int64_t dts, + int64_t pos); + +int av_parser_change(AVCodecParserContext *s, + AVCodecContext *avctx, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, int keyframe); +void av_parser_close(AVCodecParserContext *s); + +/** + * @} + * @} + */ + +/* resample.c */ + +struct ReSampleContext; +struct AVResampleContext; + +typedef struct ReSampleContext ReSampleContext; + +/** + * Initialize audio resampling context. 
+ * + * @param output_channels number of output channels + * @param input_channels number of input channels + * @param output_rate output sample rate + * @param input_rate input sample rate + * @param sample_fmt_out requested output sample format + * @param sample_fmt_in input sample format + * @param filter_length length of each FIR filter in the filterbank relative to the cutoff frequency + * @param log2_phase_count log2 of the number of entries in the polyphase filterbank + * @param linear if 1 then the used FIR filter will be linearly interpolated + between the 2 closest, if 0 the closest will be used + * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate + * @return allocated ReSampleContext, NULL if error occurred + */ +ReSampleContext *av_audio_resample_init(int output_channels, int input_channels, + int output_rate, int input_rate, + enum AVSampleFormat sample_fmt_out, + enum AVSampleFormat sample_fmt_in, + int filter_length, int log2_phase_count, + int linear, double cutoff); + +int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples); + +/** + * Free resample context. + * + * @param s a non-NULL pointer to a resample context previously + * created with av_audio_resample_init() + */ +void audio_resample_close(ReSampleContext *s); + + +/** + * Initialize an audio resampler. + * Note, if either rate is not an integer then simply scale both rates up so they are. + * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq + * @param log2_phase_count log2 of the number of entries in the polyphase filterbank + * @param linear If 1 then the used FIR filter will be linearly interpolated + between the 2 closest, if 0 the closest will be used + * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate + */ +struct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff); + +/** + * Resample an array of samples using a previously configured context. + * @param src an array of unconsumed samples + * @param consumed the number of samples of src which have been consumed are returned here + * @param src_size the number of unconsumed samples available + * @param dst_size the amount of space in samples available in dst + * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context. + * @return the number of samples written in dst or -1 if an error occurred + */ +int av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx); + + +/** + * Compensate samplerate/timestamp drift. The compensation is done by changing + * the resampler parameters, so no audible clicks or similar distortions occur + * @param compensation_distance distance in output samples over which the compensation should be performed + * @param sample_delta number of output samples which should be output less + * + * example: av_resample_compensate(c, 10, 500) + * here instead of 510 samples only 500 samples would be output + * + * note, due to rounding the actual compensation might be slightly different, + * especially if the compensation_distance is large and the in_rate used during init is small + */ +void av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance); +void av_resample_close(struct AVResampleContext *c); + +/** + * Allocate memory for a picture. 
Call avpicture_free() to free it. + * + * @see avpicture_fill() + * + * @param picture the picture to be filled in + * @param pix_fmt the format of the picture + * @param width the width of the picture + * @param height the height of the picture + * @return zero if successful, a negative value if not + */ +int avpicture_alloc(AVPicture *picture, enum PixelFormat pix_fmt, int width, int height); + +/** + * Free a picture previously allocated by avpicture_alloc(). + * The data buffer used by the AVPicture is freed, but the AVPicture structure + * itself is not. + * + * @param picture the AVPicture to be freed + */ +void avpicture_free(AVPicture *picture); + +/** + * Fill in the AVPicture fields. + * The fields of the given AVPicture are filled in by using the 'ptr' address + * which points to the image data buffer. Depending on the specified picture + * format, one or multiple image data pointers and line sizes will be set. + * If a planar format is specified, several pointers will be set pointing to + * the different picture planes and the line sizes of the different planes + * will be stored in the lines_sizes array. + * Call with ptr == NULL to get the required size for the ptr buffer. + * + * To allocate the buffer and fill in the AVPicture fields in one call, + * use avpicture_alloc(). + * + * @param picture AVPicture whose fields are to be filled in + * @param ptr Buffer which will contain or contains the actual image data + * @param pix_fmt The format in which the picture data is stored. + * @param width the width of the image in pixels + * @param height the height of the image in pixels + * @return size of the image data in bytes + */ +int avpicture_fill(AVPicture *picture, uint8_t *ptr, + enum PixelFormat pix_fmt, int width, int height); + +/** + * Copy pixel data from an AVPicture into a buffer. + * The data is stored compactly, without any gaps for alignment or padding + * which may be applied by avpicture_fill(). + * + * @see avpicture_get_size() + * + * @param[in] src AVPicture containing image data + * @param[in] pix_fmt The format in which the picture data is stored. + * @param[in] width the width of the image in pixels. + * @param[in] height the height of the image in pixels. + * @param[out] dest A buffer into which picture data will be copied. + * @param[in] dest_size The size of 'dest'. + * @return The number of bytes written to dest, or a negative value (error code) on error. + */ +int avpicture_layout(const AVPicture* src, enum PixelFormat pix_fmt, int width, int height, + unsigned char *dest, int dest_size); + +/** + * Calculate the size in bytes that a picture of the given width and height + * would occupy if stored in the given picture format. + * Note that this returns the size of a compact representation as generated + * by avpicture_layout(), which can be smaller than the size required for e.g. + * avpicture_fill(). + * + * @param pix_fmt the given picture format + * @param width the width of the image + * @param height the height of the image + * @return Image data size in bytes or -1 on error (e.g. too large dimensions). + */ +int avpicture_get_size(enum PixelFormat pix_fmt, int width, int height); +void avcodec_get_chroma_sub_sample(enum PixelFormat pix_fmt, int *h_shift, int *v_shift); + +void avcodec_set_dimensions(AVCodecContext *s, int width, int height); + +/** + * Return a value representing the fourCC code associated to the + * pixel format pix_fmt, or 0 if no associated fourCC code can be + * found. 
+ */ +unsigned int avcodec_pix_fmt_to_codec_tag(enum PixelFormat pix_fmt); + +/** + * Put a string representing the codec tag codec_tag in buf. * * @param buf_size size in bytes of buf * @return the length of the string that would have been generated if @@ -3558,250 +3984,40 @@ enum PixelFormat avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, enum PixelForma int avpicture_deinterlace(AVPicture *dst, const AVPicture *src, enum PixelFormat pix_fmt, int width, int height); -/* external high level API */ - -/** - * Find a registered encoder with a matching codec ID. - * - * @param id CodecID of the requested encoder - * @return An encoder if one was found, NULL otherwise. - */ -AVCodec *avcodec_find_encoder(enum CodecID id); - -/** - * Find a registered encoder with the specified name. - * - * @param name name of the requested encoder - * @return An encoder if one was found, NULL otherwise. - */ -AVCodec *avcodec_find_encoder_by_name(const char *name); - -/** - * Find a registered decoder with a matching codec ID. - * - * @param id CodecID of the requested decoder - * @return A decoder if one was found, NULL otherwise. - */ -AVCodec *avcodec_find_decoder(enum CodecID id); - -/** - * Find a registered decoder with the specified name. - * - * @param name name of the requested decoder - * @return A decoder if one was found, NULL otherwise. - */ -AVCodec *avcodec_find_decoder_by_name(const char *name); -void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode); - -/** - * Return a name for the specified profile, if available. - * - * @param codec the codec that is searched for the given profile - * @param profile the profile value for which a name is requested - * @return A name for the profile if found, NULL otherwise. - */ -const char *av_get_profile_name(const AVCodec *codec, int profile); - -int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic); -void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic); -int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic); - -/** - * Return the amount of padding in pixels which the get_buffer callback must - * provide around the edge of the image for codecs which do not have the - * CODEC_FLAG_EMU_EDGE flag. - * - * @return Required padding in pixels. - */ -unsigned avcodec_get_edge_width(void); -/** - * Modify width and height values so that they will result in a memory - * buffer that is acceptable for the codec if you do not use any horizontal - * padding. - * - * May only be used if a codec with CODEC_CAP_DR1 has been opened. - * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased - * according to avcodec_get_edge_width() before. - */ -void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height); -/** - * Modify width and height values so that they will result in a memory - * buffer that is acceptable for the codec if you also ensure that all - * line sizes are a multiple of the respective linesize_align[i]. - * - * May only be used if a codec with CODEC_CAP_DR1 has been opened. - * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased - * according to avcodec_get_edge_width() before. 
- */ -void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, - int linesize_align[AV_NUM_DATA_POINTERS]); - -enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt); - -int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size); -int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count); -//FIXME func typedef - -#if FF_API_OLD_DECODE_AUDIO -/** - * Wrapper function which calls avcodec_decode_audio4. - * - * @deprecated Use avcodec_decode_audio4 instead. - * - * Decode the audio frame of size avpkt->size from avpkt->data into samples. - * Some decoders may support multiple frames in a single AVPacket, such - * decoders would then just decode the first frame. In this case, - * avcodec_decode_audio3 has to be called again with an AVPacket that contains - * the remaining data in order to decode the second frame etc. - * If no frame - * could be outputted, frame_size_ptr is zero. Otherwise, it is the - * decompressed frame size in bytes. - * - * @warning You must set frame_size_ptr to the allocated size of the - * output buffer before calling avcodec_decode_audio3(). - * - * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than - * the actual read bytes because some optimized bitstream readers read 32 or 64 - * bits at once and could read over the end. - * - * @warning The end of the input buffer avpkt->data should be set to 0 to ensure that - * no overreading happens for damaged MPEG streams. - * - * @warning You must not provide a custom get_buffer() when using - * avcodec_decode_audio3(). Doing so will override it with - * avcodec_default_get_buffer. Use avcodec_decode_audio4() instead, - * which does allow the application to provide a custom get_buffer(). - * - * @note You might have to align the input buffer avpkt->data and output buffer - * samples. The alignment requirements depend on the CPU: On some CPUs it isn't - * necessary at all, on others it won't work at all if not aligned and on others - * it will work but it will have an impact on performance. - * - * In practice, avpkt->data should have 4 byte alignment at minimum and - * samples should be 16 byte aligned unless the CPU doesn't need it - * (AltiVec and SSE do). - * - * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay - * between input and output, these need to be fed with avpkt->data=NULL, - * avpkt->size=0 at the end to return the remaining frames. - * - * @param avctx the codec context - * @param[out] samples the output buffer, sample type in avctx->sample_fmt - * If the sample format is planar, each channel plane will - * be the same size, with no padding between channels. - * @param[in,out] frame_size_ptr the output buffer size in bytes - * @param[in] avpkt The input AVPacket containing the input buffer. - * You can create such packet with av_init_packet() and by then setting - * data and size, some decoders might in addition need other fields. - * All decoders are designed to use the least fields possible though. - * @return On error a negative value is returned, otherwise the number of bytes - * used or zero if no frame data was decompressed (used) from the input AVPacket. 
- */ -attribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, - int *frame_size_ptr, - AVPacket *avpkt); -#endif - -/** - * Decode the audio frame of size avpkt->size from avpkt->data into frame. - * - * Some decoders may support multiple frames in a single AVPacket. Such - * decoders would then just decode the first frame. In this case, - * avcodec_decode_audio4 has to be called again with an AVPacket containing - * the remaining data in order to decode the second frame, etc... - * Even if no frames are returned, the packet needs to be fed to the decoder - * with remaining data until it is completely consumed or an error occurs. - * - * @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE - * larger than the actual read bytes because some optimized bitstream - * readers read 32 or 64 bits at once and could read over the end. - * - * @note You might have to align the input buffer. The alignment requirements - * depend on the CPU and the decoder. - * - * @param avctx the codec context - * @param[out] frame The AVFrame in which to store decoded audio samples. - * Decoders request a buffer of a particular size by setting - * AVFrame.nb_samples prior to calling get_buffer(). The - * decoder may, however, only utilize part of the buffer by - * setting AVFrame.nb_samples to a smaller value in the - * output frame. - * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is - * non-zero. - * @param[in] avpkt The input AVPacket containing the input buffer. - * At least avpkt->data and avpkt->size should be set. Some - * decoders might also require additional fields to be set. - * @return A negative error code is returned if an error occurred during - * decoding, otherwise the number of bytes consumed from the input - * AVPacket is returned. - */ -int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, - int *got_frame_ptr, AVPacket *avpkt); - -/** - * Decode the video frame of size avpkt->size from avpkt->data into picture. - * Some decoders may support multiple frames in a single AVPacket, such - * decoders would then just decode the first frame. - * - * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than - * the actual read bytes because some optimized bitstream readers read 32 or 64 - * bits at once and could read over the end. - * - * @warning The end of the input buffer buf should be set to 0 to ensure that - * no overreading happens for damaged MPEG streams. - * - * @note You might have to align the input buffer avpkt->data. - * The alignment requirements depend on the CPU: on some CPUs it isn't - * necessary at all, on others it won't work at all if not aligned and on others - * it will work but it will have an impact on performance. - * - * In practice, avpkt->data should have 4 byte alignment at minimum. - * - * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay - * between input and output, these need to be fed with avpkt->data=NULL, - * avpkt->size=0 at the end to return the remaining frames. +/* external high level API */ + +/** + * Find a registered encoder with a matching codec ID. * - * @param avctx the codec context - * @param[out] picture The AVFrame in which the decoded video frame will be stored. - * Use avcodec_alloc_frame to get an AVFrame, the codec will - * allocate memory for the actual bitmap. - * with default get/release_buffer(), the decoder frees/reuses the bitmap as it sees fit. 
- * with overridden get/release_buffer() (needs CODEC_CAP_DR1) the user decides into what buffer the decoder - * decodes and the decoder tells the user once it does not need the data anymore, - * the user app can at this point free/reuse/keep the memory as it sees fit. + * @param id CodecID of the requested encoder + * @return An encoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_encoder(enum CodecID id); + +/** + * Find a registered encoder with the specified name. * - * @param[in] avpkt The input AVpacket containing the input buffer. - * You can create such packet with av_init_packet() and by then setting - * data and size, some decoders might in addition need other fields like - * flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least - * fields possible. - * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero. - * @return On error a negative value is returned, otherwise the number of bytes - * used or zero if no frame could be decompressed. + * @param name name of the requested encoder + * @return An encoder if one was found, NULL otherwise. */ -int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, - int *got_picture_ptr, - AVPacket *avpkt); +AVCodec *avcodec_find_encoder_by_name(const char *name); + +void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode); /** - * Decode a subtitle message. - * Return a negative value on error, otherwise return the number of bytes used. - * If no subtitle could be decompressed, got_sub_ptr is zero. - * Otherwise, the subtitle is stored in *sub. - * Note that CODEC_CAP_DR1 is not available for subtitle codecs. This is for - * simplicity, because the performance difference is expect to be negligible - * and reusing a get_buffer written for video codecs would probably perform badly - * due to a potentially very different allocation pattern. + * Return a name for the specified profile, if available. * - * @param avctx the codec context - * @param[out] sub The AVSubtitle in which the decoded subtitle will be stored, must be - freed with avsubtitle_free if *got_sub_ptr is set. - * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero. - * @param[in] avpkt The input AVPacket containing the input buffer. + * @param codec the codec that is searched for the given profile + * @param profile the profile value for which a name is requested + * @return A name for the profile if found, NULL otherwise. 
*/ -int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, - int *got_sub_ptr, - AVPacket *avpkt); +const char *av_get_profile_name(const AVCodec *codec, int profile); + +enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt); + +int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size); +int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count); +//FIXME func typedef #if FF_API_OLD_ENCODE_AUDIO /** @@ -3995,201 +4211,6 @@ int av_get_exact_bits_per_sample(enum CodecID codec_id); */ int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes); -/* frame parsing */ -typedef struct AVCodecParserContext { - void *priv_data; - struct AVCodecParser *parser; - int64_t frame_offset; /* offset of the current frame */ - int64_t cur_offset; /* current offset - (incremented by each av_parser_parse()) */ - int64_t next_frame_offset; /* offset of the next frame */ - /* video info */ - int pict_type; /* XXX: Put it back in AVCodecContext. */ - /** - * This field is used for proper frame duration computation in lavf. - * It signals, how much longer the frame duration of the current frame - * is compared to normal frame duration. - * - * frame_duration = (1 + repeat_pict) * time_base - * - * It is used by codecs like H.264 to display telecined material. - */ - int repeat_pict; /* XXX: Put it back in AVCodecContext. */ - int64_t pts; /* pts of the current frame */ - int64_t dts; /* dts of the current frame */ - - /* private data */ - int64_t last_pts; - int64_t last_dts; - int fetch_timestamp; - -#define AV_PARSER_PTS_NB 4 - int cur_frame_start_index; - int64_t cur_frame_offset[AV_PARSER_PTS_NB]; - int64_t cur_frame_pts[AV_PARSER_PTS_NB]; - int64_t cur_frame_dts[AV_PARSER_PTS_NB]; - - int flags; -#define PARSER_FLAG_COMPLETE_FRAMES 0x0001 -#define PARSER_FLAG_ONCE 0x0002 -/// Set if the parser has a valid file offset -#define PARSER_FLAG_FETCHED_OFFSET 0x0004 - - int64_t offset; ///< byte offset from starting packet start - int64_t cur_frame_end[AV_PARSER_PTS_NB]; - - /** - * Set by parser to 1 for key frames and 0 for non-key frames. - * It is initialized to -1, so if the parser doesn't set this flag, - * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames - * will be used. - */ - int key_frame; - - /** - * Time difference in stream time base units from the pts of this - * packet to the point at which the output from the decoder has converged - * independent from the availability of previous frames. That is, the - * frames are virtually identical no matter if decoding started from - * the very first frame or from this keyframe. - * Is AV_NOPTS_VALUE if unknown. - * This field is not the display duration of the current frame. - * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY - * set. - * - * The purpose of this field is to allow seeking in streams that have no - * keyframes in the conventional sense. It corresponds to the - * recovery point SEI in H.264 and match_time_delta in NUT. It is also - * essential for some types of subtitle streams to ensure that all - * subtitles are correctly displayed after seeking. - */ - int64_t convergence_duration; - - // Timestamp generation support: - /** - * Synchronization point for start of timestamp generation. - * - * Set to >0 for sync point, 0 for no sync point and <0 for undefined - * (default). 
- * - * For example, this corresponds to presence of H.264 buffering period - * SEI message. - */ - int dts_sync_point; - - /** - * Offset of the current timestamp against last timestamp sync point in - * units of AVCodecContext.time_base. - * - * Set to INT_MIN when dts_sync_point unused. Otherwise, it must - * contain a valid timestamp offset. - * - * Note that the timestamp of sync point has usually a nonzero - * dts_ref_dts_delta, which refers to the previous sync point. Offset of - * the next frame after timestamp sync point will be usually 1. - * - * For example, this corresponds to H.264 cpb_removal_delay. - */ - int dts_ref_dts_delta; - - /** - * Presentation delay of current frame in units of AVCodecContext.time_base. - * - * Set to INT_MIN when dts_sync_point unused. Otherwise, it must - * contain valid non-negative timestamp delta (presentation time of a frame - * must not lie in the past). - * - * This delay represents the difference between decoding and presentation - * time of the frame. - * - * For example, this corresponds to H.264 dpb_output_delay. - */ - int pts_dts_delta; - - /** - * Position of the packet in file. - * - * Analogous to cur_frame_pts/dts - */ - int64_t cur_frame_pos[AV_PARSER_PTS_NB]; - - /** - * Byte position of currently parsed frame in stream. - */ - int64_t pos; - - /** - * Previous frame byte position. - */ - int64_t last_pos; - - /** - * Duration of the current frame. - * For audio, this is in units of 1 / AVCodecContext.sample_rate. - * For all other types, this is in units of AVCodecContext.time_base. - */ - int duration; -} AVCodecParserContext; - -typedef struct AVCodecParser { - int codec_ids[5]; /* several codec IDs are permitted */ - int priv_data_size; - int (*parser_init)(AVCodecParserContext *s); - int (*parser_parse)(AVCodecParserContext *s, - AVCodecContext *avctx, - const uint8_t **poutbuf, int *poutbuf_size, - const uint8_t *buf, int buf_size); - void (*parser_close)(AVCodecParserContext *s); - int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size); - struct AVCodecParser *next; -} AVCodecParser; - -AVCodecParser *av_parser_next(AVCodecParser *c); - -void av_register_codec_parser(AVCodecParser *parser); -AVCodecParserContext *av_parser_init(int codec_id); - -/** - * Parse a packet. - * - * @param s parser context. - * @param avctx codec context. - * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished. - * @param poutbuf_size set to size of parsed buffer or zero if not yet finished. - * @param buf input buffer. - * @param buf_size input length, to signal EOF, this should be 0 (so that the last frame can be output). - * @param pts input presentation timestamp. - * @param dts input decoding timestamp. - * @param pos input byte position in stream. - * @return the number of bytes of the input bitstream used. 
- * - * Example: - * @code - * while(in_len){ - * len = av_parser_parse2(myparser, AVCodecContext, &data, &size, - * in_data, in_len, - * pts, dts, pos); - * in_data += len; - * in_len -= len; - * - * if(size) - * decode_frame(data, size); - * } - * @endcode - */ -int av_parser_parse2(AVCodecParserContext *s, - AVCodecContext *avctx, - uint8_t **poutbuf, int *poutbuf_size, - const uint8_t *buf, int buf_size, - int64_t pts, int64_t dts, - int64_t pos); - -int av_parser_change(AVCodecParserContext *s, - AVCodecContext *avctx, - uint8_t **poutbuf, int *poutbuf_size, - const uint8_t *buf, int buf_size, int keyframe); -void av_parser_close(AVCodecParserContext *s); - typedef struct AVBitStreamFilterContext { void *priv_data; -- cgit v1.2.3 From f038515f0a58fd02528380d8cfec67e43f7e3dec Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Sun, 8 Apr 2012 12:45:58 +0200 Subject: lavc doxy: add encoding functions to a doxy group. --- libavcodec/avcodec.h | 317 +++++++++++++++++++++++++++------------------------ 1 file changed, 166 insertions(+), 151 deletions(-) diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index aa02cbba2f..25828e50fa 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -435,6 +435,7 @@ enum CodecID { #define FF_INPUT_BUFFER_PADDING_SIZE 8 /** + * @ingroup lavc_encoding * minimum encoding buffer size * Used to avoid some checks during header writing. */ @@ -442,6 +443,7 @@ enum CodecID { /** + * @ingroup lavc_encoding * motion estimation type. */ enum Motion_Est_ID { @@ -537,6 +539,9 @@ enum AVAudioServiceType { AV_AUDIO_SERVICE_TYPE_NB , ///< Not part of ABI }; +/** + * @ingroup lavc_encoding + */ typedef struct RcOverride{ int start_frame; int end_frame; @@ -3752,6 +3757,167 @@ void av_parser_close(AVCodecParserContext *s); * @} */ +/** + * @addtogroup lavc_encoding + * @{ + */ + +/** + * Find a registered encoder with a matching codec ID. + * + * @param id CodecID of the requested encoder + * @return An encoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_encoder(enum CodecID id); + +/** + * Find a registered encoder with the specified name. + * + * @param name name of the requested encoder + * @return An encoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_encoder_by_name(const char *name); + +#if FF_API_OLD_ENCODE_AUDIO +/** + * Encode an audio frame from samples into buf. + * + * @deprecated Use avcodec_encode_audio2 instead. + * + * @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large. + * However, for codecs with avctx->frame_size equal to 0 (e.g. PCM) the user + * will know how much space is needed because it depends on the value passed + * in buf_size as described below. In that case a lower value can be used. + * + * @param avctx the codec context + * @param[out] buf the output buffer + * @param[in] buf_size the output buffer size + * @param[in] samples the input buffer containing the samples + * The number of samples read from this buffer is frame_size*channels, + * both of which are defined in avctx. + * For codecs which have avctx->frame_size equal to 0 (e.g. PCM) the number of + * samples read from samples is equal to: + * buf_size * 8 / (avctx->channels * av_get_bits_per_sample(avctx->codec_id)) + * This also implies that av_get_bits_per_sample() must not return 0 for these + * codecs. + * @return On error a negative value is returned, on success zero or the number + * of bytes used to encode the data read from the input buffer. 
+ */ +int attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx, + uint8_t *buf, int buf_size, + const short *samples); +#endif + +/** + * Encode a frame of audio. + * + * Takes input samples from frame and writes the next output packet, if + * available, to avpkt. The output packet does not necessarily contain data for + * the most recent frame, as encoders can delay, split, and combine input frames + * internally as needed. + * + * @param avctx codec context + * @param avpkt output AVPacket. + * The user can supply an output buffer by setting + * avpkt->data and avpkt->size prior to calling the + * function, but if the size of the user-provided data is not + * large enough, encoding will fail. All other AVPacket fields + * will be reset by the encoder using av_init_packet(). If + * avpkt->data is NULL, the encoder will allocate it. + * The encoder will set avpkt->size to the size of the + * output packet. + * + * If this function fails or produces no output, avpkt will be + * freed using av_free_packet() (i.e. avpkt->destruct will be + * called to free the user supplied buffer). + * @param[in] frame AVFrame containing the raw audio data to be encoded. + * May be NULL when flushing an encoder that has the + * CODEC_CAP_DELAY capability set. + * There are 2 codec capabilities that affect the allowed + * values of frame->nb_samples. + * If CODEC_CAP_SMALL_LAST_FRAME is set, then only the final + * frame may be smaller than avctx->frame_size, and all other + * frames must be equal to avctx->frame_size. + * If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame + * can have any number of samples. + * If neither is set, frame->nb_samples must be equal to + * avctx->frame_size for all frames. + * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the + * output packet is non-empty, and to 0 if it is + * empty. If the function returns an error, the + * packet can be assumed to be invalid, and the + * value of got_packet_ptr is undefined and should + * not be used. + * @return 0 on success, negative error code on failure + */ +int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, + const AVFrame *frame, int *got_packet_ptr); + +#if FF_API_OLD_ENCODE_VIDEO +/** + * @deprecated use avcodec_encode_video2() instead. + * + * Encode a video frame from pict into buf. + * The input picture should be + * stored using a specific format, namely avctx.pix_fmt. + * + * @param avctx the codec context + * @param[out] buf the output buffer for the bitstream of encoded frame + * @param[in] buf_size the size of the output buffer in bytes + * @param[in] pict the input picture to encode + * @return On error a negative value is returned, on success zero or the number + * of bytes used from the output buffer. + */ +attribute_deprecated +int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size, + const AVFrame *pict); +#endif + +/** + * Encode a frame of video. + * + * Takes input raw video data from frame and writes the next output packet, if + * available, to avpkt. The output packet does not necessarily contain data for + * the most recent frame, as encoders can delay and reorder input frames + * internally as needed. + * + * @param avctx codec context + * @param avpkt output AVPacket. + * The user can supply an output buffer by setting + * avpkt->data and avpkt->size prior to calling the + * function, but if the size of the user-provided data is not + * large enough, encoding will fail. 
All other AVPacket fields + * will be reset by the encoder using av_init_packet(). If + * avpkt->data is NULL, the encoder will allocate it. + * The encoder will set avpkt->size to the size of the + * output packet. The returned data (if any) belongs to the + * caller, he is responsible for freeing it. + * + * If this function fails or produces no output, avpkt will be + * freed using av_free_packet() (i.e. avpkt->destruct will be + * called to free the user supplied buffer). + * @param[in] frame AVFrame containing the raw video data to be encoded. + * May be NULL when flushing an encoder that has the + * CODEC_CAP_DELAY capability set. + * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the + * output packet is non-empty, and to 0 if it is + * empty. If the function returns an error, the + * packet can be assumed to be invalid, and the + * value of got_packet_ptr is undefined and should + * not be used. + * @return 0 on success, negative error code on failure + */ +int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, + const AVFrame *frame, int *got_packet_ptr); + +int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, + const AVSubtitle *sub); + + +/** + * @} + */ + /* resample.c */ struct ReSampleContext; @@ -3986,22 +4152,6 @@ int avpicture_deinterlace(AVPicture *dst, const AVPicture *src, /* external high level API */ -/** - * Find a registered encoder with a matching codec ID. - * - * @param id CodecID of the requested encoder - * @return An encoder if one was found, NULL otherwise. - */ -AVCodec *avcodec_find_encoder(enum CodecID id); - -/** - * Find a registered encoder with the specified name. - * - * @param name name of the requested encoder - * @return An encoder if one was found, NULL otherwise. - */ -AVCodec *avcodec_find_encoder_by_name(const char *name); - void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode); /** @@ -4019,81 +4169,6 @@ int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, v int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count); //FIXME func typedef -#if FF_API_OLD_ENCODE_AUDIO -/** - * Encode an audio frame from samples into buf. - * - * @deprecated Use avcodec_encode_audio2 instead. - * - * @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large. - * However, for codecs with avctx->frame_size equal to 0 (e.g. PCM) the user - * will know how much space is needed because it depends on the value passed - * in buf_size as described below. In that case a lower value can be used. - * - * @param avctx the codec context - * @param[out] buf the output buffer - * @param[in] buf_size the output buffer size - * @param[in] samples the input buffer containing the samples - * The number of samples read from this buffer is frame_size*channels, - * both of which are defined in avctx. - * For codecs which have avctx->frame_size equal to 0 (e.g. PCM) the number of - * samples read from samples is equal to: - * buf_size * 8 / (avctx->channels * av_get_bits_per_sample(avctx->codec_id)) - * This also implies that av_get_bits_per_sample() must not return 0 for these - * codecs. - * @return On error a negative value is returned, on success zero or the number - * of bytes used to encode the data read from the input buffer. 
- */ -int attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx, - uint8_t *buf, int buf_size, - const short *samples); -#endif - -/** - * Encode a frame of audio. - * - * Takes input samples from frame and writes the next output packet, if - * available, to avpkt. The output packet does not necessarily contain data for - * the most recent frame, as encoders can delay, split, and combine input frames - * internally as needed. - * - * @param avctx codec context - * @param avpkt output AVPacket. - * The user can supply an output buffer by setting - * avpkt->data and avpkt->size prior to calling the - * function, but if the size of the user-provided data is not - * large enough, encoding will fail. All other AVPacket fields - * will be reset by the encoder using av_init_packet(). If - * avpkt->data is NULL, the encoder will allocate it. - * The encoder will set avpkt->size to the size of the - * output packet. - * - * If this function fails or produces no output, avpkt will be - * freed using av_free_packet() (i.e. avpkt->destruct will be - * called to free the user supplied buffer). - * @param[in] frame AVFrame containing the raw audio data to be encoded. - * May be NULL when flushing an encoder that has the - * CODEC_CAP_DELAY capability set. - * There are 2 codec capabilities that affect the allowed - * values of frame->nb_samples. - * If CODEC_CAP_SMALL_LAST_FRAME is set, then only the final - * frame may be smaller than avctx->frame_size, and all other - * frames must be equal to avctx->frame_size. - * If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame - * can have any number of samples. - * If neither is set, frame->nb_samples must be equal to - * avctx->frame_size for all frames. - * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the - * output packet is non-empty, and to 0 if it is - * empty. If the function returns an error, the - * packet can be assumed to be invalid, and the - * value of got_packet_ptr is undefined and should - * not be used. - * @return 0 on success, negative error code on failure - */ -int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, - const AVFrame *frame, int *got_packet_ptr); - /** * Fill audio frame data and linesize. * AVFrame extended_data channel pointers are allocated if necessary for @@ -4114,66 +4189,6 @@ int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels, enum AVSampleFormat sample_fmt, const uint8_t *buf, int buf_size, int align); -#if FF_API_OLD_ENCODE_VIDEO -/** - * @deprecated use avcodec_encode_video2() instead. - * - * Encode a video frame from pict into buf. - * The input picture should be - * stored using a specific format, namely avctx.pix_fmt. - * - * @param avctx the codec context - * @param[out] buf the output buffer for the bitstream of encoded frame - * @param[in] buf_size the size of the output buffer in bytes - * @param[in] pict the input picture to encode - * @return On error a negative value is returned, on success zero or the number - * of bytes used from the output buffer. - */ -attribute_deprecated -int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size, - const AVFrame *pict); -#endif - -/** - * Encode a frame of video. - * - * Takes input raw video data from frame and writes the next output packet, if - * available, to avpkt. The output packet does not necessarily contain data for - * the most recent frame, as encoders can delay and reorder input frames - * internally as needed. - * - * @param avctx codec context - * @param avpkt output AVPacket. 
- * The user can supply an output buffer by setting - * avpkt->data and avpkt->size prior to calling the - * function, but if the size of the user-provided data is not - * large enough, encoding will fail. All other AVPacket fields - * will be reset by the encoder using av_init_packet(). If - * avpkt->data is NULL, the encoder will allocate it. - * The encoder will set avpkt->size to the size of the - * output packet. The returned data (if any) belongs to the - * caller, he is responsible for freeing it. - * - * If this function fails or produces no output, avpkt will be - * freed using av_free_packet() (i.e. avpkt->destruct will be - * called to free the user supplied buffer). - * @param[in] frame AVFrame containing the raw video data to be encoded. - * May be NULL when flushing an encoder that has the - * CODEC_CAP_DELAY capability set. - * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the - * output packet is non-empty, and to 0 if it is - * empty. If the function returns an error, the - * packet can be assumed to be invalid, and the - * value of got_packet_ptr is undefined and should - * not be used. - * @return 0 on success, negative error code on failure - */ -int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, - const AVFrame *frame, int *got_packet_ptr); - -int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, - const AVSubtitle *sub); - /** * Flush buffers, should be called when seeking or when switching to a different stream. */ -- cgit v1.2.3 From 40ca0e6a7593f22419a7c258fe7f3f32945d8503 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Sun, 8 Apr 2012 13:05:53 +0200 Subject: lavc doxy: replace \ with / It's the more proper symbol to use and it prevents doxygen from thinking it's a command. --- libavcodec/avcodec.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index 25828e50fa..55d62a1343 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -978,7 +978,7 @@ typedef struct AVFrame { uint8_t *base[AV_NUM_DATA_POINTERS]; /** - * sample aspect ratio for the video frame, 0/1 if unknown\unspecified + * sample aspect ratio for the video frame, 0/1 if unknown/unspecified * - encoding: unused * - decoding: Read by user. */ -- cgit v1.2.3 From 56512ce104e7f39409fee55fbf14085d5ab9c800 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Sun, 8 Apr 2012 13:22:19 +0200 Subject: lavc doxy: add resampling functions to a doxy group. --- libavcodec/avcodec.h | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index 55d62a1343..9cecf0c922 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -3918,8 +3918,12 @@ int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, * @} */ -/* resample.c */ - +/** + * @defgroup lavc_resample Audio resampling + * @ingroup libavc + * + * @{ + */ struct ReSampleContext; struct AVResampleContext; @@ -3997,6 +4001,10 @@ int av_resample(struct AVResampleContext *c, short *dst, short *src, int *consum void av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance); void av_resample_close(struct AVResampleContext *c); +/** + * @} + */ + /** * Allocate memory for a picture. Call avpicture_free() to free it. 
* -- cgit v1.2.3 From 6aadfbda09df9728e0957c7ca17b9ecadf276f56 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Sun, 8 Apr 2012 13:22:19 +0200 Subject: lavc doxy: add AVPicture functions to a doxy group. --- libavcodec/avcodec.h | 67 ++++++++++++++++++++++++++++++++++------------------ 1 file changed, 44 insertions(+), 23 deletions(-) diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index 9cecf0c922..a2e27dc973 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -3005,6 +3005,13 @@ typedef struct AVHWAccel { int priv_data_size; } AVHWAccel; +/** + * @defgroup lavc_picture AVPicture + * + * Functions for working with AVPicture + * @{ + */ + /** * four components are given, that's all. * the last component is alpha @@ -3014,6 +3021,10 @@ typedef struct AVPicture { int linesize[AV_NUM_DATA_POINTERS]; ///< number of bytes per line } AVPicture; +/** + * @} + */ + #define AVPALETTE_SIZE 1024 #define AVPALETTE_COUNT 256 @@ -4005,6 +4016,11 @@ void av_resample_close(struct AVResampleContext *c); * @} */ +/** + * @addtogroup lavc_picture + * @{ + */ + /** * Allocate memory for a picture. Call avpicture_free() to free it. * @@ -4081,6 +4097,34 @@ int avpicture_layout(const AVPicture* src, enum PixelFormat pix_fmt, int width, * @return Image data size in bytes or -1 on error (e.g. too large dimensions). */ int avpicture_get_size(enum PixelFormat pix_fmt, int width, int height); + +/** + * deinterlace - if not supported return -1 + */ +int avpicture_deinterlace(AVPicture *dst, const AVPicture *src, + enum PixelFormat pix_fmt, int width, int height); +/** + * Copy image src to dst. Wraps av_picture_data_copy() above. + */ +void av_picture_copy(AVPicture *dst, const AVPicture *src, + enum PixelFormat pix_fmt, int width, int height); + +/** + * Crop image top and left side. + */ +int av_picture_crop(AVPicture *dst, const AVPicture *src, + enum PixelFormat pix_fmt, int top_band, int left_band); + +/** + * Pad image. + */ +int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum PixelFormat pix_fmt, + int padtop, int padbottom, int padleft, int padright, int *color); + +/** + * @} + */ + void avcodec_get_chroma_sub_sample(enum PixelFormat pix_fmt, int *h_shift, int *v_shift); void avcodec_set_dimensions(AVCodecContext *s, int width, int height); @@ -4153,11 +4197,6 @@ int avcodec_get_pix_fmt_loss(enum PixelFormat dst_pix_fmt, enum PixelFormat src_ enum PixelFormat avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, enum PixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); -/* deinterlace a picture */ -/* deinterlace - if not supported return -1 */ -int avpicture_deinterlace(AVPicture *dst, const AVPicture *src, - enum PixelFormat pix_fmt, int width, int height); - /* external high level API */ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode); @@ -4296,24 +4335,6 @@ void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size); */ void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size); -/** - * Copy image src to dst. Wraps av_picture_data_copy() above. - */ -void av_picture_copy(AVPicture *dst, const AVPicture *src, - enum PixelFormat pix_fmt, int width, int height); - -/** - * Crop image top and left side. - */ -int av_picture_crop(AVPicture *dst, const AVPicture *src, - enum PixelFormat pix_fmt, int top_band, int left_band); - -/** - * Pad image. 
- */ -int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum PixelFormat pix_fmt, - int padtop, int padbottom, int padleft, int padright, int *color); - /** * Encode extradata length to a buffer. Used by xiph codecs. * -- cgit v1.2.3 From 8a74029ea2e9cf058b184dd33e6c49e54574a9e3 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Sun, 8 Apr 2012 13:35:50 +0200 Subject: lavc doxy: add remaining avcodec.h functions to a misc doxygen group. --- libavcodec/avcodec.h | 52 ++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 36 insertions(+), 16 deletions(-) diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index a2e27dc973..92a5d0a173 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -4125,9 +4125,23 @@ int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, * @} */ -void avcodec_get_chroma_sub_sample(enum PixelFormat pix_fmt, int *h_shift, int *v_shift); +/** + * @defgroup lavc_misc Utility functions + * @ingroup libavc + * + * Miscellaneous utility functions related to both encoding and decoding + * (or neither). + * @{ + */ -void avcodec_set_dimensions(AVCodecContext *s, int width, int height); +/** + * @defgroup lavc_misc_pixfmt Pixel formats + * + * Functions for working with pixel formats. + * @{ + */ + +void avcodec_get_chroma_sub_sample(enum PixelFormat pix_fmt, int *h_shift, int *v_shift); /** * Return a value representing the fourCC code associated to the @@ -4136,15 +4150,6 @@ void avcodec_set_dimensions(AVCodecContext *s, int width, int height); */ unsigned int avcodec_pix_fmt_to_codec_tag(enum PixelFormat pix_fmt); -/** - * Put a string representing the codec tag codec_tag in buf. - * - * @param buf_size size in bytes of buf - * @return the length of the string that would have been generated if - * enough space had been available, excluding the trailing null - */ -size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag); - #define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */ #define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */ #define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */ @@ -4197,7 +4202,22 @@ int avcodec_get_pix_fmt_loss(enum PixelFormat dst_pix_fmt, enum PixelFormat src_ enum PixelFormat avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, enum PixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); -/* external high level API */ +enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt); + +/** + * @} + */ + +void avcodec_set_dimensions(AVCodecContext *s, int width, int height); + +/** + * Put a string representing the codec tag codec_tag in buf. 
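+ *
+ * A minimal usage sketch, assuming an opened AVCodecContext named avctx:
+ * @code
+ * char tag_str[32];
+ * av_get_codec_tag_string(tag_str, sizeof(tag_str), avctx->codec_tag);
+ * av_log(avctx, AV_LOG_INFO, "codec tag: %s\n", tag_str);
+ * @endcode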
+ * + * @param buf_size size in bytes of buf + * @return the length of the string that would have been generated if + * enough space had been available, excluding the trailing null + */ +size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag); void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode); @@ -4210,8 +4230,6 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode); */ const char *av_get_profile_name(const AVCodec *codec, int profile); -enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt); - int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size); int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count); //FIXME func typedef @@ -4243,8 +4261,6 @@ void avcodec_flush_buffers(AVCodecContext *avctx); void avcodec_default_free_buffers(AVCodecContext *s); -/* misc useful functions */ - /** * Return codec bits per sample. * @@ -4427,4 +4443,8 @@ int av_codec_is_encoder(AVCodec *codec); */ int av_codec_is_decoder(AVCodec *codec); +/** + * @} + */ + #endif /* AVCODEC_AVCODEC_H */ -- cgit v1.2.3 From ec57b7de745c851ad7afa93cee659688a4ddd600 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Sun, 8 Apr 2012 13:49:15 +0200 Subject: lavc doxy: add avfft to the main lavc group. --- libavcodec/avfft.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/libavcodec/avfft.h b/libavcodec/avfft.h index 91fe2f4297..b89618258e 100644 --- a/libavcodec/avfft.h +++ b/libavcodec/avfft.h @@ -19,6 +19,19 @@ #ifndef AVCODEC_AVFFT_H #define AVCODEC_AVFFT_H +/** + * @file + * @ingroup lavc_fft + * FFT functions + */ + +/** + * @defgroup lavc_fft FFT functions + * @ingroup lavc_misc + * + * @{ + */ + typedef float FFTSample; typedef struct FFTComplex { @@ -96,4 +109,8 @@ DCTContext *av_dct_init(int nbits, enum DCTTransformType type); void av_dct_calc(DCTContext *s, FFTSample *data); void av_dct_end (DCTContext *s); +/** + * @} + */ + #endif /* AVCODEC_AVFFT_H */ -- cgit v1.2.3 From 7c59b5c2a5a19638c6c4411fb96fcd50066905c6 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Sun, 8 Apr 2012 14:08:05 +0200 Subject: lavc doxy: add all installed headers to doxy groups. --- libavcodec/dxva2.h | 17 +++++++++++++++++ libavcodec/vaapi.h | 10 ++++++++-- libavcodec/vda.h | 17 +++++++++++++++++ libavcodec/vdpau.h | 12 +++++++++--- libavcodec/version.h | 6 ++++++ libavcodec/xvmc.h | 17 +++++++++++++++++ 6 files changed, 74 insertions(+), 5 deletions(-) diff --git a/libavcodec/dxva2.h b/libavcodec/dxva2.h index 374ae039ac..c06f1f3332 100644 --- a/libavcodec/dxva2.h +++ b/libavcodec/dxva2.h @@ -23,11 +23,24 @@ #ifndef AVCODEC_DXVA_H #define AVCODEC_DXVA_H +/** + * @file + * @ingroup lavc_codec_hwaccel_dxva2 + * Public libavcodec DXVA2 header. 
+ */ + #include #include #include +/** + * @defgroup lavc_codec_hwaccel_dxva2 DXVA2 + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + #define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for DXVA2 and old UVD/UVD+ ATI video cards /** @@ -68,4 +81,8 @@ struct dxva_context { unsigned report_id; }; +/** + * @} + */ + #endif /* AVCODEC_DXVA_H */ diff --git a/libavcodec/vaapi.h b/libavcodec/vaapi.h index 36fb386acf..39e88259d6 100644 --- a/libavcodec/vaapi.h +++ b/libavcodec/vaapi.h @@ -24,11 +24,17 @@ #ifndef AVCODEC_VAAPI_H #define AVCODEC_VAAPI_H +/** + * @file + * @ingroup lavc_codec_hwaccel_vaapi + * Public libavcodec VA API header. + */ + #include /** - * @defgroup VAAPI_Decoding VA API Decoding - * @ingroup Decoder + * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding + * @ingroup lavc_codec_hwaccel * @{ */ diff --git a/libavcodec/vda.h b/libavcodec/vda.h index 2cb51c5f53..79fbfe86ac 100644 --- a/libavcodec/vda.h +++ b/libavcodec/vda.h @@ -23,6 +23,12 @@ #ifndef AVCODEC_VDA_H #define AVCODEC_VDA_H +/** + * @file + * @ingroup lavc_codec_hwaccel_vda + * Public libavcodec VDA header. + */ + #include #include @@ -34,6 +40,13 @@ #include #undef Picture +/** + * @defgroup lavc_codec_hwaccel_vda VDA + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + /** * This structure is used to store a decoded frame information and data. */ @@ -141,4 +154,8 @@ vda_frame *ff_vda_queue_pop(struct vda_context *vda_ctx); /** Release the given frame. */ void ff_vda_release_vda_frame(vda_frame *frame); +/** + * @} + */ + #endif /* AVCODEC_VDA_H */ diff --git a/libavcodec/vdpau.h b/libavcodec/vdpau.h index 6f1386067b..241ff19051 100644 --- a/libavcodec/vdpau.h +++ b/libavcodec/vdpau.h @@ -25,7 +25,15 @@ #define AVCODEC_VDPAU_H /** - * @defgroup Decoder VDPAU Decoder and Renderer + * @file + * @ingroup lavc_codec_hwaccel_vdpau + * Public libavcodec VDPAU header. + */ + + +/** + * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer + * @ingroup lavc_codec_hwaccel * * VDPAU hardware acceleration has two modules * - VDPAU decoding @@ -38,8 +46,6 @@ * and rendering (API calls) are done as part of the VDPAU * presentation (vo_vdpau.c) module. * - * @defgroup VDPAU_Decoding VDPAU Decoding - * @ingroup Decoder * @{ */ diff --git a/libavcodec/version.h b/libavcodec/version.h index 5719e7f564..c35fce40e3 100644 --- a/libavcodec/version.h +++ b/libavcodec/version.h @@ -20,6 +20,12 @@ #ifndef AVCODEC_VERSION_H #define AVCODEC_VERSION_H +/** + * @file + * @ingroup libavc + * Libavcodec version macros. + */ + #define LIBAVCODEC_VERSION_MAJOR 54 #define LIBAVCODEC_VERSION_MINOR 11 #define LIBAVCODEC_VERSION_MICRO 1 diff --git a/libavcodec/xvmc.h b/libavcodec/xvmc.h index 1239015fcd..cdec161c80 100644 --- a/libavcodec/xvmc.h +++ b/libavcodec/xvmc.h @@ -21,10 +21,23 @@ #ifndef AVCODEC_XVMC_H #define AVCODEC_XVMC_H +/** + * @file + * @ingroup lavc_codec_hwaccel_xvmc + * Public libavcodec XvMC header. 
+ */ + #include #include "avcodec.h" +/** + * @defgroup lavc_codec_hwaccel_xvmc XvMC + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + #define AV_XVMC_ID 0x1DC711C0 /**< special value to ensure that regular pixel routines haven't corrupted the struct the number is 1337 speak for the letters IDCT MCo (motion compensation) */ @@ -148,4 +161,8 @@ struct xvmc_pix_fmt { int next_free_data_block_num; }; +/** + * @} + */ + #endif /* AVCODEC_XVMC_H */ -- cgit v1.2.3 From 90f65dc6cf65d1c849a9fa372ac2dda427a258e2 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Mon, 9 Apr 2012 06:05:50 +0200 Subject: vf_scale: support named constants for sws flags. --- libavfilter/vf_scale.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/libavfilter/vf_scale.c b/libavfilter/vf_scale.c index a4da088936..85095b78d7 100644 --- a/libavfilter/vf_scale.c +++ b/libavfilter/vf_scale.c @@ -27,6 +27,7 @@ #include "libavutil/avstring.h" #include "libavutil/eval.h" #include "libavutil/mathematics.h" +#include "libavutil/opt.h" #include "libavutil/pixdesc.h" #include "libswscale/swscale.h" @@ -91,7 +92,15 @@ static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque) if (args) { sscanf(args, "%255[^:]:%255[^:]", scale->w_expr, scale->h_expr); p = strstr(args,"flags="); - if (p) scale->flags = strtoul(p+6, NULL, 0); + if (p) { + const AVClass *class = sws_get_class(); + const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0, + AV_OPT_SEARCH_FAKE_OBJ); + int ret = av_opt_eval_flags(&class, o, p + 6, &scale->flags); + + if (ret < 0) + return ret; + } } return 0; -- cgit v1.2.3 From e36b25d1df2ab30540c2d8939c5f2b0d6c059317 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Sun, 1 Apr 2012 18:51:27 +0200 Subject: vf_overlay: implement poll_frame() Signal that it can output a frame when there are frames on the main input and EOF on the overlay input, but a frame is buffered -- e.g. single picture overlay. --- libavfilter/vf_overlay.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c index 2115141b0e..e8171e0069 100644 --- a/libavfilter/vf_overlay.c +++ b/libavfilter/vf_overlay.c @@ -345,6 +345,18 @@ static void null_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) { static void null_end_frame(AVFilterLink *inlink) { } +static int poll_frame(AVFilterLink *link) +{ + AVFilterContext *s = link->src; + OverlayContext *over = s->priv; + int ret = avfilter_poll_frame(s->inputs[OVERLAY]); + + if (ret == AVERROR_EOF) + ret = !!over->overpicref; + + return ret && avfilter_poll_frame(s->inputs[MAIN]); +} + AVFilter avfilter_vf_overlay = { .name = "overlay", .description = NULL_IF_CONFIG_SMALL("Overlay a video source on top of the input."), @@ -376,6 +388,7 @@ AVFilter avfilter_vf_overlay = { { .name = NULL}}, .outputs = (AVFilterPad[]) {{ .name = "default", .type = AVMEDIA_TYPE_VIDEO, - .config_props = config_output, }, + .config_props = config_output, + .poll_frame = poll_frame }, { .name = NULL}}, }; -- cgit v1.2.3 From 14f063d294a18a31928d2167a66b1087910e14c8 Mon Sep 17 00:00:00 2001 From: Martin Storsjö Date: Sun, 8 Apr 2012 17:38:45 +0300 Subject: mem: Consistently return NULL for av_malloc(0) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Plain POSIX malloc(0) is allowed to return either NULL or a non-NULL pointer. 
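An illustrative sketch of the distinction a portable caller now has to
make (example identifiers only, not code taken from this patch):

    uint8_t *buf = av_malloc(size);
    if (!buf && size > 0)
        return AVERROR(ENOMEM);  /* real allocation failure */
    /* buf == NULL with size == 0 is a valid, empty result */
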
The calling code should be ready to handle a NULL return as a correct return (instead of a failure) if the size to allocate was 0 - this makes sure the condition is handled in a consistent way across platforms. This also avoids calling posix_memalign(&ptr, 32, 0) on OS X, which returns an invalid pointer (a non-NULL pointer that causes crashes when passed to av_free). Abort in debug mode, to help track down issues related to incorrect handling of this case. Signed-off-by: Martin Storsjö --- libavutil/mem.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/libavutil/mem.c b/libavutil/mem.c index b6230cf0e3..bf1a542db8 100644 --- a/libavutil/mem.c +++ b/libavutil/mem.c @@ -68,8 +68,10 @@ void *av_malloc(size_t size) long diff; #endif + assert(size); + /* let's disallow possible ambiguous cases */ - if(size > (INT_MAX-32) ) + if (size > (INT_MAX-32) || !size) return NULL; #if CONFIG_MEMALIGN_HACK -- cgit v1.2.3 From a40ba3afe8182c86ed6fc80437c843b3d88e0d00 Mon Sep 17 00:00:00 2001 From: Sebastien Zwickert Date: Tue, 10 Apr 2012 13:48:08 +0200 Subject: vda: Signal 4 byte NAL headers to the decoder regardless of what's in the extradata MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Martin Storsjö --- libavcodec/vda.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/libavcodec/vda.c b/libavcodec/vda.c index 34739f8e0c..3c03dcd3e0 100644 --- a/libavcodec/vda.c +++ b/libavcodec/vda.c @@ -142,6 +142,26 @@ int ff_vda_create_decoder(struct vda_context *vda_ctx, pthread_mutex_init(&vda_ctx->queue_mutex, NULL); + /* Each VCL NAL in the bistream sent to the decoder + * is preceeded by a 4 bytes length header. + * Change the avcC atom header if needed, to signal headers of 4 bytes. 
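+     * (In the avcC structure, the low two bits of byte 4 hold
+     * lengthSizeMinusOne; forcing them to 0x03 declares 4-byte lengths.)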
*/ + if (extradata_size >= 4 && (extradata[4] & 0x03) != 0x03) { + uint8_t *rw_extradata; + + if (!(rw_extradata = av_malloc(extradata_size))) + return AVERROR(ENOMEM); + + memcpy(rw_extradata, extradata, extradata_size); + + rw_extradata[4] |= 0x03; + + avc_data = CFDataCreate(kCFAllocatorDefault, rw_extradata, extradata_size); + + av_freep(&rw_extradata); + } else { + avc_data = CFDataCreate(kCFAllocatorDefault, extradata, extradata_size); + } + config_info = CFDictionaryCreateMutable(kCFAllocatorDefault, 4, &kCFTypeDictionaryKeyCallBacks, @@ -150,7 +170,6 @@ int ff_vda_create_decoder(struct vda_context *vda_ctx, height = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &vda_ctx->height); width = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &vda_ctx->width); format = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &vda_ctx->format); - avc_data = CFDataCreate(kCFAllocatorDefault, extradata, extradata_size); CFDictionarySetValue(config_info, kVDADecoderConfiguration_Height, height); CFDictionarySetValue(config_info, kVDADecoderConfiguration_Width, width); -- cgit v1.2.3 From a559d65c0711284fc63689991f69db01bcfbb140 Mon Sep 17 00:00:00 2001 From: Asen Lekov Date: Fri, 2 Dec 2011 16:14:05 +0000 Subject: nutdec: K&R formatting cosmetics Signed-off-by: Diego Biurrun --- libavformat/nutdec.c | 936 +++++++++++++++++++++++++++------------------------ 1 file changed, 500 insertions(+), 436 deletions(-) diff --git a/libavformat/nutdec.c b/libavformat/nutdec.c index a2104f181e..32db763a67 100644 --- a/libavformat/nutdec.c +++ b/libavformat/nutdec.c @@ -33,79 +33,97 @@ #define NUT_MAX_STREAMS 256 /* arbitrary sanity check value */ -static int get_str(AVIOContext *bc, char *string, unsigned int maxlen){ - unsigned int len= ffio_read_varlen(bc); +static int get_str(AVIOContext *bc, char *string, unsigned int maxlen) +{ + unsigned int len = ffio_read_varlen(bc); - if(len && maxlen) + if (len && maxlen) avio_read(bc, string, FFMIN(len, maxlen)); - while(len > maxlen){ + while (len > maxlen) { avio_r8(bc); len--; } - if(maxlen) - string[FFMIN(len, maxlen-1)]= 0; + if (maxlen) + string[FFMIN(len, maxlen - 1)] = 0; - if(maxlen == len) + if (maxlen == len) return -1; else return 0; } -static int64_t get_s(AVIOContext *bc){ +static int64_t get_s(AVIOContext *bc) +{ int64_t v = ffio_read_varlen(bc) + 1; - if (v&1) return -(v>>1); - else return (v>>1); + if (v & 1) + return -(v >> 1); + else + return (v >> 1); } -static uint64_t get_fourcc(AVIOContext *bc){ - unsigned int len= ffio_read_varlen(bc); +static uint64_t get_fourcc(AVIOContext *bc) +{ + unsigned int len = ffio_read_varlen(bc); - if (len==2) return avio_rl16(bc); - else if(len==4) return avio_rl32(bc); - else return -1; + if (len == 2) + return avio_rl16(bc); + else if (len == 4) + return avio_rl32(bc); + else + return -1; } #ifdef TRACE -static inline uint64_t get_v_trace(AVIOContext *bc, char *file, char *func, int line){ - uint64_t v= ffio_read_varlen(bc); +static inline uint64_t get_v_trace(AVIOContext *bc, char *file, + char *func, int line) +{ + uint64_t v = ffio_read_varlen(bc); - av_log(NULL, AV_LOG_DEBUG, "get_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line); + av_log(NULL, AV_LOG_DEBUG, "get_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", + v, v, file, func, line); return v; } -static inline int64_t get_s_trace(AVIOContext *bc, char *file, char *func, int line){ - int64_t v= get_s(bc); +static inline int64_t get_s_trace(AVIOContext *bc, char *file, + char *func, int line) +{ + int64_t v = get_s(bc); - 
av_log(NULL, AV_LOG_DEBUG, "get_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line); + av_log(NULL, AV_LOG_DEBUG, "get_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", + v, v, file, func, line); return v; } -static inline uint64_t get_vb_trace(AVIOContext *bc, char *file, char *func, int line){ - uint64_t v= get_vb(bc); +static inline uint64_t get_vb_trace(AVIOContext *bc, char *file, + char *func, int line) +{ + uint64_t v = get_vb(bc); - av_log(NULL, AV_LOG_DEBUG, "get_vb %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line); + av_log(NULL, AV_LOG_DEBUG, "get_vb %5"PRId64" / %"PRIX64" in %s %s:%d\n", + v, v, file, func, line); return v; } -#define ffio_read_varlen(bc) get_v_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__) -#define get_s(bc) get_s_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__) -#define get_vb(bc) get_vb_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__) +#define ffio_read_varlen(bc) get_v_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__) +#define get_s(bc) get_s_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__) +#define get_vb(bc) get_vb_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__) #endif -static int get_packetheader(NUTContext *nut, AVIOContext *bc, int calculate_checksum, uint64_t startcode) +static int get_packetheader(NUTContext *nut, AVIOContext *bc, + int calculate_checksum, uint64_t startcode) { int64_t size; -// start= avio_tell(bc) - 8; +// start = avio_tell(bc) - 8; - startcode= av_be2ne64(startcode); - startcode= ff_crc04C11DB7_update(0, (uint8_t*)&startcode, 8); + startcode = av_be2ne64(startcode); + startcode = ff_crc04C11DB7_update(0, (uint8_t*) &startcode, 8); ffio_init_checksum(bc, ff_crc04C11DB7_update, startcode); - size= ffio_read_varlen(bc); - if(size > 4096) + size = ffio_read_varlen(bc); + if (size > 4096) avio_rb32(bc); - if(ffio_get_checksum(bc) && size > 4096) + if (ffio_get_checksum(bc) && size > 4096) return -1; ffio_init_checksum(bc, calculate_checksum ? 
ff_crc04C11DB7_update : NULL, 0); @@ -113,17 +131,19 @@ static int get_packetheader(NUTContext *nut, AVIOContext *bc, int calculate_chec return size; } -static uint64_t find_any_startcode(AVIOContext *bc, int64_t pos){ - uint64_t state=0; - - if(pos >= 0) - avio_seek(bc, pos, SEEK_SET); //note, this may fail if the stream is not seekable, but that should not matter, as in this case we simply start where we currently are - - while(!bc->eof_reached){ - state= (state<<8) | avio_r8(bc); - if((state>>56) != 'N') +static uint64_t find_any_startcode(AVIOContext *bc, int64_t pos) +{ + uint64_t state = 0; + + if (pos >= 0) + /* Note, this may fail if the stream is not seekable, but that should + * not matter, as in this case we simply start where we currently are */ + avio_seek(bc, pos, SEEK_SET); + while (!bc->eof_reached) { + state = (state << 8) | avio_r8(bc); + if ((state >> 56) != 'N') continue; - switch(state){ + switch (state) { case MAIN_STARTCODE: case STREAM_STARTCODE: case SYNCPOINT_STARTCODE: @@ -142,20 +162,22 @@ static uint64_t find_any_startcode(AVIOContext *bc, int64_t pos){ * @param pos the start position of the search, or -1 if the current position * @return the position of the startcode or -1 if not found */ -static int64_t find_startcode(AVIOContext *bc, uint64_t code, int64_t pos){ - for(;;){ - uint64_t startcode= find_any_startcode(bc, pos); - if(startcode == code) +static int64_t find_startcode(AVIOContext *bc, uint64_t code, int64_t pos) +{ + for (;;) { + uint64_t startcode = find_any_startcode(bc, pos); + if (startcode == code) return avio_tell(bc) - 8; - else if(startcode == 0) + else if (startcode == 0) return -1; - pos=-1; + pos = -1; } } -static int nut_probe(AVProbeData *p){ +static int nut_probe(AVProbeData *p) +{ int i; - uint64_t code= 0; + uint64_t code = 0; for (i = 0; i < p->buf_size; i++) { code = (code << 8) | p->buf[i]; @@ -165,225 +187,248 @@ static int nut_probe(AVProbeData *p){ return 0; } -#define GET_V(dst, check) \ - tmp= ffio_read_varlen(bc);\ - if(!(check)){\ - av_log(s, AV_LOG_ERROR, "Error " #dst " is (%"PRId64")\n", tmp);\ - return -1;\ - }\ - dst= tmp; +#define GET_V(dst, check) \ + tmp = ffio_read_varlen(bc); \ + if (!(check)) { \ + av_log(s, AV_LOG_ERROR, "Error " #dst " is (%"PRId64")\n", tmp); \ + return -1; \ + } \ + dst = tmp; -static int skip_reserved(AVIOContext *bc, int64_t pos){ +static int skip_reserved(AVIOContext *bc, int64_t pos) +{ pos -= avio_tell(bc); - if(pos<0){ + if (pos < 0) { avio_seek(bc, pos, SEEK_CUR); return -1; - }else{ - while(pos--) + } else { + while (pos--) avio_r8(bc); return 0; } } -static int decode_main_header(NUTContext *nut){ - AVFormatContext *s= nut->avf; - AVIOContext *bc = s->pb; +static int decode_main_header(NUTContext *nut) +{ + AVFormatContext *s = nut->avf; + AVIOContext *bc = s->pb; uint64_t tmp, end; unsigned int stream_count; - int i, j, tmp_stream, tmp_mul, tmp_pts, tmp_size, count, tmp_res, tmp_head_idx; + int i, j, count; + int tmp_stream, tmp_mul, tmp_pts, tmp_size, tmp_res, tmp_head_idx; - end= get_packetheader(nut, bc, 1, MAIN_STARTCODE); + end = get_packetheader(nut, bc, 1, MAIN_STARTCODE); end += avio_tell(bc); - GET_V(tmp , tmp >=2 && tmp <= 3) - GET_V(stream_count , tmp > 0 && tmp <= NUT_MAX_STREAMS) + GET_V(tmp, tmp >= 2 && tmp <= 3) + GET_V(stream_count, tmp > 0 && tmp <= NUT_MAX_STREAMS) nut->max_distance = ffio_read_varlen(bc); - if(nut->max_distance > 65536){ + if (nut->max_distance > 65536) { av_log(s, AV_LOG_DEBUG, "max_distance %d\n", nut->max_distance); - nut->max_distance= 65536; 
+ nut->max_distance = 65536; } - GET_V(nut->time_base_count, tmp>0 && tmptime_base= av_malloc(nut->time_base_count * sizeof(AVRational)); + GET_V(nut->time_base_count, tmp > 0 && tmp < INT_MAX / sizeof(AVRational)) + nut->time_base = av_malloc(nut->time_base_count * sizeof(AVRational)); - for(i=0; itime_base_count; i++){ - GET_V(nut->time_base[i].num, tmp>0 && tmp<(1ULL<<31)) - GET_V(nut->time_base[i].den, tmp>0 && tmp<(1ULL<<31)) - if(av_gcd(nut->time_base[i].num, nut->time_base[i].den) != 1){ + for (i = 0; i < nut->time_base_count; i++) { + GET_V(nut->time_base[i].num, tmp > 0 && tmp < (1ULL << 31)) + GET_V(nut->time_base[i].den, tmp > 0 && tmp < (1ULL << 31)) + if (av_gcd(nut->time_base[i].num, nut->time_base[i].den) != 1) { av_log(s, AV_LOG_ERROR, "time base invalid\n"); return AVERROR_INVALIDDATA; } } - tmp_pts=0; - tmp_mul=1; - tmp_stream=0; - tmp_head_idx= 0; - for(i=0; i<256;){ - int tmp_flags = ffio_read_varlen(bc); - int tmp_fields= ffio_read_varlen(bc); - if(tmp_fields>0) tmp_pts = get_s(bc); - if(tmp_fields>1) tmp_mul = ffio_read_varlen(bc); - if(tmp_fields>2) tmp_stream= ffio_read_varlen(bc); - if(tmp_fields>3) tmp_size = ffio_read_varlen(bc); - else tmp_size = 0; - if(tmp_fields>4) tmp_res = ffio_read_varlen(bc); - else tmp_res = 0; - if(tmp_fields>5) count = ffio_read_varlen(bc); - else count = tmp_mul - tmp_size; - if(tmp_fields>6) get_s(bc); - if(tmp_fields>7) tmp_head_idx= ffio_read_varlen(bc); - - while(tmp_fields-- > 8) - ffio_read_varlen(bc); - - if(count == 0 || i+count > 256){ + tmp_pts = 0; + tmp_mul = 1; + tmp_stream = 0; + tmp_head_idx = 0; + for (i = 0; i < 256;) { + int tmp_flags = ffio_read_varlen(bc); + int tmp_fields = ffio_read_varlen(bc); + + if (tmp_fields > 0) + tmp_pts = get_s(bc); + if (tmp_fields > 1) + tmp_mul = ffio_read_varlen(bc); + if (tmp_fields > 2) + tmp_stream = ffio_read_varlen(bc); + if (tmp_fields > 3) + tmp_size = ffio_read_varlen(bc); + else + tmp_size = 0; + if (tmp_fields > 4) + tmp_res = ffio_read_varlen(bc); + else + tmp_res = 0; + if (tmp_fields > 5) + count = ffio_read_varlen(bc); + else + count = tmp_mul - tmp_size; + if (tmp_fields > 6) + get_s(bc); + if (tmp_fields > 7) + tmp_head_idx = ffio_read_varlen(bc); + + while (tmp_fields-- > 8) + ffio_read_varlen(bc); + + if (count == 0 || i + count > 256) { av_log(s, AV_LOG_ERROR, "illegal count %d at %d\n", count, i); return AVERROR_INVALIDDATA; } - if(tmp_stream >= stream_count){ + if (tmp_stream >= stream_count) { av_log(s, AV_LOG_ERROR, "illegal stream number\n"); return AVERROR_INVALIDDATA; } - for(j=0; jframe_code[i].flags= FLAG_INVALID; + nut->frame_code[i].flags = FLAG_INVALID; j--; continue; } - nut->frame_code[i].flags = tmp_flags ; - nut->frame_code[i].pts_delta = tmp_pts ; - nut->frame_code[i].stream_id = tmp_stream; - nut->frame_code[i].size_mul = tmp_mul ; - nut->frame_code[i].size_lsb = tmp_size+j; - nut->frame_code[i].reserved_count = tmp_res ; - nut->frame_code[i].header_idx = tmp_head_idx; + nut->frame_code[i].flags = tmp_flags; + nut->frame_code[i].pts_delta = tmp_pts; + nut->frame_code[i].stream_id = tmp_stream; + nut->frame_code[i].size_mul = tmp_mul; + nut->frame_code[i].size_lsb = tmp_size + j; + nut->frame_code[i].reserved_count = tmp_res; + nut->frame_code[i].header_idx = tmp_head_idx; } } assert(nut->frame_code['N'].flags == FLAG_INVALID); - if(end > avio_tell(bc) + 4){ - int rem= 1024; - GET_V(nut->header_count, tmp<128U) + if (end > avio_tell(bc) + 4) { + int rem = 1024; + GET_V(nut->header_count, tmp < 128U) nut->header_count++; - for(i=1; iheader_count; 
i++){ - GET_V(nut->header_len[i], tmp>0 && tmp<256); + for (i = 1; i < nut->header_count; i++) { + GET_V(nut->header_len[i], tmp > 0 && tmp < 256); rem -= nut->header_len[i]; - if(rem < 0){ + if (rem < 0) { av_log(s, AV_LOG_ERROR, "invalid elision header\n"); return AVERROR_INVALIDDATA; } - nut->header[i]= av_malloc(nut->header_len[i]); + nut->header[i] = av_malloc(nut->header_len[i]); avio_read(bc, nut->header[i], nut->header_len[i]); } - assert(nut->header_len[0]==0); + assert(nut->header_len[0] == 0); } - if(skip_reserved(bc, end) || ffio_get_checksum(bc)){ + if (skip_reserved(bc, end) || ffio_get_checksum(bc)) { av_log(s, AV_LOG_ERROR, "main header checksum mismatch\n"); return AVERROR_INVALIDDATA; } - nut->stream = av_mallocz(sizeof(StreamContext)*stream_count); - for(i=0; istream = av_mallocz(sizeof(StreamContext) * stream_count); + for (i = 0; i < stream_count; i++) avformat_new_stream(s, NULL); - } return 0; } -static int decode_stream_header(NUTContext *nut){ - AVFormatContext *s= nut->avf; - AVIOContext *bc = s->pb; +static int decode_stream_header(NUTContext *nut) +{ + AVFormatContext *s = nut->avf; + AVIOContext *bc = s->pb; StreamContext *stc; int class, stream_id; uint64_t tmp, end; AVStream *st; - end= get_packetheader(nut, bc, 1, STREAM_STARTCODE); + end = get_packetheader(nut, bc, 1, STREAM_STARTCODE); end += avio_tell(bc); GET_V(stream_id, tmp < s->nb_streams && !nut->stream[tmp].time_base); - stc= &nut->stream[stream_id]; - - st = s->streams[stream_id]; + stc = &nut->stream[stream_id]; + st = s->streams[stream_id]; if (!st) return AVERROR(ENOMEM); - class = ffio_read_varlen(bc); - tmp = get_fourcc(bc); - st->codec->codec_tag= tmp; - switch(class) - { - case 0: - st->codec->codec_type = AVMEDIA_TYPE_VIDEO; - st->codec->codec_id = av_codec_get_id( - (const AVCodecTag * const []) { ff_codec_bmp_tags, ff_nut_video_tags, 0 }, - tmp); - break; - case 1: - st->codec->codec_type = AVMEDIA_TYPE_AUDIO; - st->codec->codec_id = ff_codec_get_id(ff_codec_wav_tags, tmp); - break; - case 2: - st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE; - st->codec->codec_id = ff_codec_get_id(ff_nut_subtitle_tags, tmp); - break; - case 3: - st->codec->codec_type = AVMEDIA_TYPE_DATA; - break; - default: - av_log(s, AV_LOG_ERROR, "unknown stream class (%d)\n", class); - return -1; + class = ffio_read_varlen(bc); + tmp = get_fourcc(bc); + st->codec->codec_tag = tmp; + switch (class) { + case 0: + st->codec->codec_type = AVMEDIA_TYPE_VIDEO; + st->codec->codec_id = av_codec_get_id((const AVCodecTag * const []) { + ff_codec_bmp_tags, + ff_nut_video_tags, + 0 + }, + tmp); + break; + case 1: + st->codec->codec_type = AVMEDIA_TYPE_AUDIO; + st->codec->codec_id = ff_codec_get_id(ff_codec_wav_tags, tmp); + break; + case 2: + st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE; + st->codec->codec_id = ff_codec_get_id(ff_nut_subtitle_tags, tmp); + break; + case 3: + st->codec->codec_type = AVMEDIA_TYPE_DATA; + break; + default: + av_log(s, AV_LOG_ERROR, "unknown stream class (%d)\n", class); + return -1; } - if(class<3 && st->codec->codec_id == CODEC_ID_NONE) - av_log(s, AV_LOG_ERROR, "Unknown codec tag '0x%04x' for stream number %d\n", - (unsigned int)tmp, stream_id); - - GET_V(stc->time_base_id , tmp < nut->time_base_count); - GET_V(stc->msb_pts_shift , tmp < 16); - stc->max_pts_distance= ffio_read_varlen(bc); - GET_V(stc->decode_delay , tmp < 1000); //sanity limit, raise this if Moore's law is true - st->codec->has_b_frames= stc->decode_delay; - ffio_read_varlen(bc); //stream flags - - GET_V(st->codec->extradata_size, 
tmp < (1<<30)); - if(st->codec->extradata_size){ - st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); + if (class < 3 && st->codec->codec_id == CODEC_ID_NONE) + av_log(s, AV_LOG_ERROR, + "Unknown codec tag '0x%04x' for stream number %d\n", + (unsigned int) tmp, stream_id); + + GET_V(stc->time_base_id, tmp < nut->time_base_count); + GET_V(stc->msb_pts_shift, tmp < 16); + stc->max_pts_distance = ffio_read_varlen(bc); + GET_V(stc->decode_delay, tmp < 1000); // sanity limit, raise this if Moore's law is true + st->codec->has_b_frames = stc->decode_delay; + ffio_read_varlen(bc); // stream flags + + GET_V(st->codec->extradata_size, tmp < (1 << 30)); + if (st->codec->extradata_size) { + st->codec->extradata = av_mallocz(st->codec->extradata_size + + FF_INPUT_BUFFER_PADDING_SIZE); avio_read(bc, st->codec->extradata, st->codec->extradata_size); } - if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO){ - GET_V(st->codec->width , tmp > 0) + if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { + GET_V(st->codec->width, tmp > 0) GET_V(st->codec->height, tmp > 0) - st->sample_aspect_ratio.num= ffio_read_varlen(bc); - st->sample_aspect_ratio.den= ffio_read_varlen(bc); - if((!st->sample_aspect_ratio.num) != (!st->sample_aspect_ratio.den)){ - av_log(s, AV_LOG_ERROR, "invalid aspect ratio %d/%d\n", st->sample_aspect_ratio.num, st->sample_aspect_ratio.den); + st->sample_aspect_ratio.num = ffio_read_varlen(bc); + st->sample_aspect_ratio.den = ffio_read_varlen(bc); + if ((!st->sample_aspect_ratio.num) != (!st->sample_aspect_ratio.den)) { + av_log(s, AV_LOG_ERROR, "invalid aspect ratio %d/%d\n", + st->sample_aspect_ratio.num, st->sample_aspect_ratio.den); return -1; } ffio_read_varlen(bc); /* csp type */ - }else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO){ - GET_V(st->codec->sample_rate , tmp > 0) + } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { + GET_V(st->codec->sample_rate, tmp > 0) ffio_read_varlen(bc); // samplerate_den GET_V(st->codec->channels, tmp > 0) } - if(skip_reserved(bc, end) || ffio_get_checksum(bc)){ - av_log(s, AV_LOG_ERROR, "stream header %d checksum mismatch\n", stream_id); + if (skip_reserved(bc, end) || ffio_get_checksum(bc)) { + av_log(s, AV_LOG_ERROR, + "stream header %d checksum mismatch\n", stream_id); return -1; } - stc->time_base= &nut->time_base[stc->time_base_id]; - avpriv_set_pts_info(s->streams[stream_id], 63, stc->time_base->num, stc->time_base->den); + stc->time_base = &nut->time_base[stc->time_base_id]; + avpriv_set_pts_info(s->streams[stream_id], 63, stc->time_base->num, + stc->time_base->den); return 0; } -static void set_disposition_bits(AVFormatContext* avf, char* value, int stream_id){ +static void set_disposition_bits(AVFormatContext *avf, char *value, + int stream_id) +{ int flag = 0, i; - for (i=0; ff_nut_dispositions[i].flag; ++i) { + + for (i = 0; ff_nut_dispositions[i].flag; ++i) if (!strcmp(ff_nut_dispositions[i].str, value)) flag = ff_nut_dispositions[i].flag; - } if (!flag) av_log(avf, AV_LOG_INFO, "unknown disposition type '%s'\n", value); for (i = 0; i < avf->nb_streams; ++i) @@ -391,61 +436,63 @@ static void set_disposition_bits(AVFormatContext* avf, char* value, int stream_i avf->streams[i]->disposition |= flag; } -static int decode_info_header(NUTContext *nut){ - AVFormatContext *s= nut->avf; - AVIOContext *bc = s->pb; +static int decode_info_header(NUTContext *nut) +{ + AVFormatContext *s = nut->avf; + AVIOContext *bc = s->pb; uint64_t tmp, chapter_start, chapter_len; unsigned int stream_id_plus1, count; int 
chapter_id, i; int64_t value, end; char name[256], str_value[1024], type_str[256]; const char *type; - AVChapter *chapter= NULL; - AVStream *st= NULL; + AVChapter *chapter = NULL; + AVStream *st = NULL; AVDictionary **metadata = NULL; - end= get_packetheader(nut, bc, 1, INFO_STARTCODE); + end = get_packetheader(nut, bc, 1, INFO_STARTCODE); end += avio_tell(bc); GET_V(stream_id_plus1, tmp <= s->nb_streams) - chapter_id = get_s(bc); - chapter_start= ffio_read_varlen(bc); - chapter_len = ffio_read_varlen(bc); - count = ffio_read_varlen(bc); - - if(chapter_id && !stream_id_plus1){ - int64_t start= chapter_start / nut->time_base_count; - chapter= avpriv_new_chapter(s, chapter_id, - nut->time_base[chapter_start % nut->time_base_count], - start, start + chapter_len, NULL); + chapter_id = get_s(bc); + chapter_start = ffio_read_varlen(bc); + chapter_len = ffio_read_varlen(bc); + count = ffio_read_varlen(bc); + + if (chapter_id && !stream_id_plus1) { + int64_t start = chapter_start / nut->time_base_count; + chapter = avpriv_new_chapter(s, chapter_id, + nut->time_base[chapter_start % + nut->time_base_count], + start, start + chapter_len, NULL); metadata = &chapter->metadata; - } else if(stream_id_plus1) { - st= s->streams[stream_id_plus1 - 1]; + } else if (stream_id_plus1) { + st = s->streams[stream_id_plus1 - 1]; metadata = &st->metadata; } else metadata = &s->metadata; - for(i=0; i s->nb_streams) { @@ -453,143 +500,143 @@ static int decode_info_header(NUTContext *nut){ continue; } - if(!strcmp(type, "UTF-8")){ - if(chapter_id==0 && !strcmp(name, "Disposition")) { + if (!strcmp(type, "UTF-8")) { + if (chapter_id == 0 && !strcmp(name, "Disposition")) { set_disposition_bits(s, str_value, stream_id_plus1 - 1); continue; } - if(metadata && av_strcasecmp(name,"Uses") - && av_strcasecmp(name,"Depends") && av_strcasecmp(name,"Replaces")) + if (metadata && av_strcasecmp(name, "Uses") && + av_strcasecmp(name, "Depends") && av_strcasecmp(name, "Replaces")) av_dict_set(metadata, name, str_value, 0); } } - if(skip_reserved(bc, end) || ffio_get_checksum(bc)){ + if (skip_reserved(bc, end) || ffio_get_checksum(bc)) { av_log(s, AV_LOG_ERROR, "info header checksum mismatch\n"); return -1; } return 0; } -static int decode_syncpoint(NUTContext *nut, int64_t *ts, int64_t *back_ptr){ - AVFormatContext *s= nut->avf; - AVIOContext *bc = s->pb; +static int decode_syncpoint(NUTContext *nut, int64_t *ts, int64_t *back_ptr) +{ + AVFormatContext *s = nut->avf; + AVIOContext *bc = s->pb; int64_t end, tmp; - nut->last_syncpoint_pos= avio_tell(bc)-8; + nut->last_syncpoint_pos = avio_tell(bc) - 8; - end= get_packetheader(nut, bc, 1, SYNCPOINT_STARTCODE); + end = get_packetheader(nut, bc, 1, SYNCPOINT_STARTCODE); end += avio_tell(bc); - tmp= ffio_read_varlen(bc); - *back_ptr= nut->last_syncpoint_pos - 16*ffio_read_varlen(bc); - if(*back_ptr < 0) + tmp = ffio_read_varlen(bc); + *back_ptr = nut->last_syncpoint_pos - 16 * ffio_read_varlen(bc); + if (*back_ptr < 0) return -1; - ff_nut_reset_ts(nut, nut->time_base[tmp % nut->time_base_count], tmp / nut->time_base_count); + ff_nut_reset_ts(nut, nut->time_base[tmp % nut->time_base_count], + tmp / nut->time_base_count); - if(skip_reserved(bc, end) || ffio_get_checksum(bc)){ + if (skip_reserved(bc, end) || ffio_get_checksum(bc)) { av_log(s, AV_LOG_ERROR, "sync point checksum mismatch\n"); return -1; } - *ts= tmp / s->nb_streams * av_q2d(nut->time_base[tmp % s->nb_streams])*AV_TIME_BASE; + *ts = tmp / s->nb_streams * + av_q2d(nut->time_base[tmp % s->nb_streams]) * AV_TIME_BASE; 
ff_nut_add_sp(nut, nut->last_syncpoint_pos, *back_ptr, *ts); return 0; } -static int find_and_decode_index(NUTContext *nut){ - AVFormatContext *s= nut->avf; - AVIOContext *bc = s->pb; +static int find_and_decode_index(NUTContext *nut) +{ + AVFormatContext *s = nut->avf; + AVIOContext *bc = s->pb; uint64_t tmp, end; int i, j, syncpoint_count; - int64_t filesize= avio_size(bc); + int64_t filesize = avio_size(bc); int64_t *syncpoints; int8_t *has_keyframe; - int ret= -1; + int ret = -1; - avio_seek(bc, filesize-12, SEEK_SET); - avio_seek(bc, filesize-avio_rb64(bc), SEEK_SET); - if(avio_rb64(bc) != INDEX_STARTCODE){ + avio_seek(bc, filesize - 12, SEEK_SET); + avio_seek(bc, filesize - avio_rb64(bc), SEEK_SET); + if (avio_rb64(bc) != INDEX_STARTCODE) { av_log(s, AV_LOG_ERROR, "no index at the end\n"); return -1; } - end= get_packetheader(nut, bc, 1, INDEX_STARTCODE); + end = get_packetheader(nut, bc, 1, INDEX_STARTCODE); end += avio_tell(bc); - ffio_read_varlen(bc); //max_pts - GET_V(syncpoint_count, tmp < INT_MAX/8 && tmp > 0) - syncpoints= av_malloc(sizeof(int64_t)*syncpoint_count); - has_keyframe= av_malloc(sizeof(int8_t)*(syncpoint_count+1)); - for(i=0; i 0) + syncpoints = av_malloc(sizeof(int64_t) * syncpoint_count); + has_keyframe = av_malloc(sizeof(int8_t) * (syncpoint_count + 1)); + for (i = 0; i < syncpoint_count; i++) { syncpoints[i] = ffio_read_varlen(bc); - if(syncpoints[i] <= 0) + if (syncpoints[i] <= 0) goto fail; - if(i) - syncpoints[i] += syncpoints[i-1]; - } - - for(i=0; inb_streams; i++){ - int64_t last_pts= -1; - for(j=0; j>=1; - if(type){ - int flag= x&1; - x>>=1; - if(n+x >= syncpoint_count + 1){ + if (i) + syncpoints[i] += syncpoints[i - 1]; + } + + for (i = 0; i < s->nb_streams; i++) { + int64_t last_pts = -1; + for (j = 0; j < syncpoint_count;) { + uint64_t x = ffio_read_varlen(bc); + int type = x & 1; + int n = j; + x >>= 1; + if (type) { + int flag = x & 1; + x >>= 1; + if (n + x >= syncpoint_count + 1) { av_log(s, AV_LOG_ERROR, "index overflow A\n"); goto fail; } - while(x--) - has_keyframe[n++]= flag; - has_keyframe[n++]= !flag; - }else{ - while(x != 1){ - if(n>=syncpoint_count + 1){ + while (x--) + has_keyframe[n++] = flag; + has_keyframe[n++] = !flag; + } else { + while (x != 1) { + if (n >= syncpoint_count + 1) { av_log(s, AV_LOG_ERROR, "index overflow B\n"); goto fail; } - has_keyframe[n++]= x&1; - x>>=1; + has_keyframe[n++] = x & 1; + x >>= 1; } } - if(has_keyframe[0]){ + if (has_keyframe[0]) { av_log(s, AV_LOG_ERROR, "keyframe before first syncpoint in index\n"); goto fail; } - assert(n<=syncpoint_count+1); - for(; jstreams[i], - 16*syncpoints[j-1], - last_pts + A, - 0, - 0, - AVINDEX_KEYFRAME); + assert(n <= syncpoint_count + 1); + for (; j < n && j < syncpoint_count; j++) { + if (has_keyframe[j]) { + uint64_t B, A = ffio_read_varlen(bc); + if (!A) { + A = ffio_read_varlen(bc); + B = ffio_read_varlen(bc); + // eor_pts[j][i] = last_pts + A + B + } else + B = 0; + av_add_index_entry(s->streams[i], 16 * syncpoints[j - 1], + last_pts + A, 0, 0, AVINDEX_KEYFRAME); last_pts += A + B; } } } } - if(skip_reserved(bc, end) || ffio_get_checksum(bc)){ + if (skip_reserved(bc, end) || ffio_get_checksum(bc)) { av_log(s, AV_LOG_ERROR, "index checksum mismatch\n"); goto fail; } - ret= 0; + ret = 0; + fail: av_free(syncpoints); av_free(has_keyframe); @@ -603,53 +650,53 @@ static int nut_read_header(AVFormatContext *s) int64_t pos; int initialized_stream_count; - nut->avf= s; + nut->avf = s; /* main header */ - pos=0; - do{ - pos= find_startcode(bc, MAIN_STARTCODE, pos)+1; - if 
(pos<0+1){ + pos = 0; + do { + pos = find_startcode(bc, MAIN_STARTCODE, pos) + 1; + if (pos < 0 + 1) { av_log(s, AV_LOG_ERROR, "No main startcode found.\n"); return AVERROR_INVALIDDATA; } - }while(decode_main_header(nut) < 0); + } while (decode_main_header(nut) < 0); /* stream headers */ - pos=0; - for(initialized_stream_count=0; initialized_stream_count < s->nb_streams;){ - pos= find_startcode(bc, STREAM_STARTCODE, pos)+1; - if (pos<0+1){ + pos = 0; + for (initialized_stream_count = 0; initialized_stream_count < s->nb_streams;) { + pos = find_startcode(bc, STREAM_STARTCODE, pos) + 1; + if (pos < 0 + 1) { av_log(s, AV_LOG_ERROR, "Not all stream headers found.\n"); return AVERROR_INVALIDDATA; } - if(decode_stream_header(nut) >= 0) + if (decode_stream_header(nut) >= 0) initialized_stream_count++; } /* info headers */ - pos=0; - for(;;){ - uint64_t startcode= find_any_startcode(bc, pos); - pos= avio_tell(bc); + pos = 0; + for (;;) { + uint64_t startcode = find_any_startcode(bc, pos); + pos = avio_tell(bc); - if(startcode==0){ + if (startcode == 0) { av_log(s, AV_LOG_ERROR, "EOF before video frames\n"); return AVERROR_INVALIDDATA; - }else if(startcode == SYNCPOINT_STARTCODE){ - nut->next_startcode= startcode; + } else if (startcode == SYNCPOINT_STARTCODE) { + nut->next_startcode = startcode; break; - }else if(startcode != INFO_STARTCODE){ + } else if (startcode != INFO_STARTCODE) { continue; } decode_info_header(nut); } - s->data_offset= pos-8; + s->data_offset = pos - 8; - if(bc->seekable){ - int64_t orig_pos= avio_tell(bc); + if (bc->seekable) { + int64_t orig_pos = avio_tell(bc); find_and_decode_index(nut); avio_seek(bc, orig_pos, SEEK_SET); } @@ -660,15 +707,19 @@ static int nut_read_header(AVFormatContext *s) return 0; } -static int decode_frame_header(NUTContext *nut, int64_t *pts, int *stream_id, uint8_t *header_idx, int frame_code){ - AVFormatContext *s= nut->avf; - AVIOContext *bc = s->pb; +static int decode_frame_header(NUTContext *nut, int64_t *pts, int *stream_id, + uint8_t *header_idx, int frame_code) +{ + AVFormatContext *s = nut->avf; + AVIOContext *bc = s->pb; StreamContext *stc; int size, flags, size_mul, pts_delta, i, reserved_count; uint64_t tmp; - if(avio_tell(bc) > nut->last_syncpoint_pos + nut->max_distance){ - av_log(s, AV_LOG_ERROR, "Last frame must have been damaged %"PRId64" > %"PRId64" + %d\n", avio_tell(bc), nut->last_syncpoint_pos, nut->max_distance); + if (avio_tell(bc) > nut->last_syncpoint_pos + nut->max_distance) { + av_log(s, AV_LOG_ERROR, + "Last frame must have been damaged %"PRId64" > %"PRId64" + %d\n", + avio_tell(bc), nut->last_syncpoint_pos, nut->max_distance); return AVERROR_INVALIDDATA; } @@ -680,86 +731,88 @@ static int decode_frame_header(NUTContext *nut, int64_t *pts, int *stream_id, ui reserved_count = nut->frame_code[frame_code].reserved_count; *header_idx = nut->frame_code[frame_code].header_idx; - if(flags & FLAG_INVALID) + if (flags & FLAG_INVALID) return AVERROR_INVALIDDATA; - if(flags & FLAG_CODED) + if (flags & FLAG_CODED) flags ^= ffio_read_varlen(bc); - if(flags & FLAG_STREAM_ID){ + if (flags & FLAG_STREAM_ID) { GET_V(*stream_id, tmp < s->nb_streams) } - stc= &nut->stream[*stream_id]; - if(flags&FLAG_CODED_PTS){ - int coded_pts= ffio_read_varlen(bc); -//FIXME check last_pts validity? 
- if(coded_pts < (1<msb_pts_shift)){ - *pts=ff_lsb2full(stc, coded_pts); - }else - *pts=coded_pts - (1<msb_pts_shift); - }else - *pts= stc->last_pts + pts_delta; - if(flags&FLAG_SIZE_MSB){ - size += size_mul*ffio_read_varlen(bc); - } - if(flags&FLAG_MATCH_TIME) + stc = &nut->stream[*stream_id]; + if (flags & FLAG_CODED_PTS) { + int coded_pts = ffio_read_varlen(bc); + // FIXME check last_pts validity? + if (coded_pts < (1 << stc->msb_pts_shift)) { + *pts = ff_lsb2full(stc, coded_pts); + } else + *pts = coded_pts - (1 << stc->msb_pts_shift); + } else + *pts = stc->last_pts + pts_delta; + if (flags & FLAG_SIZE_MSB) + size += size_mul * ffio_read_varlen(bc); + if (flags & FLAG_MATCH_TIME) get_s(bc); - if(flags&FLAG_HEADER_IDX) - *header_idx= ffio_read_varlen(bc); - if(flags&FLAG_RESERVED) - reserved_count= ffio_read_varlen(bc); - for(i=0; i= (unsigned)nut->header_count){ + if (*header_idx >= (unsigned)nut->header_count) { av_log(s, AV_LOG_ERROR, "header_idx invalid\n"); return AVERROR_INVALIDDATA; } - if(size > 4096) - *header_idx=0; + if (size > 4096) + *header_idx = 0; size -= nut->header_len[*header_idx]; - if(flags&FLAG_CHECKSUM){ - avio_rb32(bc); //FIXME check this - }else if(size > 2*nut->max_distance || FFABS(stc->last_pts - *pts) > stc->max_pts_distance){ + if (flags & FLAG_CHECKSUM) { + avio_rb32(bc); // FIXME check this + } else if (size > 2 * nut->max_distance || FFABS(stc->last_pts - *pts) > + stc->max_pts_distance) { av_log(s, AV_LOG_ERROR, "frame size > 2max_distance and no checksum\n"); return AVERROR_INVALIDDATA; } - stc->last_pts= *pts; - stc->last_flags= flags; + stc->last_pts = *pts; + stc->last_flags = flags; return size; } -static int decode_frame(NUTContext *nut, AVPacket *pkt, int frame_code){ - AVFormatContext *s= nut->avf; - AVIOContext *bc = s->pb; +static int decode_frame(NUTContext *nut, AVPacket *pkt, int frame_code) +{ + AVFormatContext *s = nut->avf; + AVIOContext *bc = s->pb; int size, stream_id, discard; int64_t pts, last_IP_pts; StreamContext *stc; uint8_t header_idx; - size= decode_frame_header(nut, &pts, &stream_id, &header_idx, frame_code); - if(size < 0) + size = decode_frame_header(nut, &pts, &stream_id, &header_idx, frame_code); + if (size < 0) return size; - stc= &nut->stream[stream_id]; + stc = &nut->stream[stream_id]; if (stc->last_flags & FLAG_KEY) - stc->skip_until_key_frame=0; - - discard= s->streams[ stream_id ]->discard; - last_IP_pts= s->streams[ stream_id ]->last_IP_pts; - if( (discard >= AVDISCARD_NONKEY && !(stc->last_flags & FLAG_KEY)) - ||(discard >= AVDISCARD_BIDIR && last_IP_pts != AV_NOPTS_VALUE && last_IP_pts > pts) - || discard >= AVDISCARD_ALL - || stc->skip_until_key_frame){ + stc->skip_until_key_frame = 0; + + discard = s->streams[stream_id]->discard; + last_IP_pts = s->streams[stream_id]->last_IP_pts; + if ((discard >= AVDISCARD_NONKEY && !(stc->last_flags & FLAG_KEY)) || + (discard >= AVDISCARD_BIDIR && last_IP_pts != AV_NOPTS_VALUE && + last_IP_pts > pts) || + discard >= AVDISCARD_ALL || + stc->skip_until_key_frame) { avio_skip(bc, size); return 1; } av_new_packet(pkt, size + nut->header_len[header_idx]); memcpy(pkt->data, nut->header[header_idx], nut->header_len[header_idx]); - pkt->pos= avio_tell(bc); //FIXME + pkt->pos = avio_tell(bc); // FIXME avio_read(bc, pkt->data + nut->header_len[header_idx], size); pkt->stream_index = stream_id; @@ -774,135 +827,146 @@ static int nut_read_packet(AVFormatContext *s, AVPacket *pkt) { NUTContext *nut = s->priv_data; AVIOContext *bc = s->pb; - int i, frame_code=0, ret, skip; + int i, 
frame_code = 0, ret, skip; int64_t ts, back_ptr; - for(;;){ - int64_t pos= avio_tell(bc); - uint64_t tmp= nut->next_startcode; - nut->next_startcode=0; + for (;;) { + int64_t pos = avio_tell(bc); + uint64_t tmp = nut->next_startcode; + nut->next_startcode = 0; - if(tmp){ - pos-=8; - }else{ + if (tmp) { + pos -= 8; + } else { frame_code = avio_r8(bc); - if(bc->eof_reached) + if (bc->eof_reached) return -1; - if(frame_code == 'N'){ - tmp= frame_code; - for(i=1; i<8; i++) - tmp = (tmp<<8) + avio_r8(bc); + if (frame_code == 'N') { + tmp = frame_code; + for (i = 1; i < 8; i++) + tmp = (tmp << 8) + avio_r8(bc); } } - switch(tmp){ + switch (tmp) { case MAIN_STARTCODE: case STREAM_STARTCODE: case INDEX_STARTCODE: - skip= get_packetheader(nut, bc, 0, tmp); + skip = get_packetheader(nut, bc, 0, tmp); avio_skip(bc, skip); break; case INFO_STARTCODE: - if(decode_info_header(nut)<0) + if (decode_info_header(nut) < 0) goto resync; break; case SYNCPOINT_STARTCODE: - if(decode_syncpoint(nut, &ts, &back_ptr)<0) + if (decode_syncpoint(nut, &ts, &back_ptr) < 0) goto resync; frame_code = avio_r8(bc); case 0: - ret= decode_frame(nut, pkt, frame_code); - if(ret==0) + ret = decode_frame(nut, pkt, frame_code); + if (ret == 0) return 0; - else if(ret==1) //ok but discard packet + else if (ret == 1) // OK but discard packet break; default: resync: -av_log(s, AV_LOG_DEBUG, "syncing from %"PRId64"\n", pos); - tmp= find_any_startcode(bc, nut->last_syncpoint_pos+1); - if(tmp==0) + av_log(s, AV_LOG_DEBUG, "syncing from %"PRId64"\n", pos); + tmp = find_any_startcode(bc, nut->last_syncpoint_pos + 1); + if (tmp == 0) return AVERROR_INVALIDDATA; -av_log(s, AV_LOG_DEBUG, "sync\n"); - nut->next_startcode= tmp; + av_log(s, AV_LOG_DEBUG, "sync\n"); + nut->next_startcode = tmp; } } } -static int64_t nut_read_timestamp(AVFormatContext *s, int stream_index, int64_t *pos_arg, int64_t pos_limit){ +static int64_t nut_read_timestamp(AVFormatContext *s, int stream_index, + int64_t *pos_arg, int64_t pos_limit) +{ NUTContext *nut = s->priv_data; AVIOContext *bc = s->pb; int64_t pos, pts, back_ptr; -av_log(s, AV_LOG_DEBUG, "read_timestamp(X,%d,%"PRId64",%"PRId64")\n", stream_index, *pos_arg, pos_limit); + av_log(s, AV_LOG_DEBUG, "read_timestamp(X,%d,%"PRId64",%"PRId64")\n", + stream_index, *pos_arg, pos_limit); - pos= *pos_arg; - do{ - pos= find_startcode(bc, SYNCPOINT_STARTCODE, pos)+1; - if(pos < 1){ + pos = *pos_arg; + do { + pos = find_startcode(bc, SYNCPOINT_STARTCODE, pos) + 1; + if (pos < 1) { assert(nut->next_startcode == 0); av_log(s, AV_LOG_ERROR, "read_timestamp failed.\n"); return AV_NOPTS_VALUE; } - }while(decode_syncpoint(nut, &pts, &back_ptr) < 0); - *pos_arg = pos-1; + } while (decode_syncpoint(nut, &pts, &back_ptr) < 0); + *pos_arg = pos - 1; assert(nut->last_syncpoint_pos == *pos_arg); - av_log(s, AV_LOG_DEBUG, "return %"PRId64" %"PRId64"\n", pts,back_ptr ); - if (stream_index == -1) return pts; - else if(stream_index == -2) return back_ptr; + av_log(s, AV_LOG_DEBUG, "return %"PRId64" %"PRId64"\n", pts, back_ptr); + if (stream_index == -1) + return pts; + else if (stream_index == -2) + return back_ptr; -assert(0); + assert(0); } -static int read_seek(AVFormatContext *s, int stream_index, int64_t pts, int flags){ - NUTContext *nut = s->priv_data; - AVStream *st= s->streams[stream_index]; - Syncpoint dummy={.ts= pts*av_q2d(st->time_base)*AV_TIME_BASE}; - Syncpoint nopts_sp= {.ts= AV_NOPTS_VALUE, .back_ptr= AV_NOPTS_VALUE}; - Syncpoint *sp, *next_node[2]= {&nopts_sp, &nopts_sp}; +static int read_seek(AVFormatContext *s, int 
stream_index, + int64_t pts, int flags) +{ + NUTContext *nut = s->priv_data; + AVStream *st = s->streams[stream_index]; + Syncpoint dummy = { .ts = pts * av_q2d(st->time_base) * AV_TIME_BASE }; + Syncpoint nopts_sp = { .ts = AV_NOPTS_VALUE, .back_ptr = AV_NOPTS_VALUE }; + Syncpoint *sp, *next_node[2] = { &nopts_sp, &nopts_sp }; int64_t pos, pos2, ts; int i; - if(st->index_entries){ - int index= av_index_search_timestamp(st, pts, flags); - if(index<0) + if (st->index_entries) { + int index = av_index_search_timestamp(st, pts, flags); + if (index < 0) return -1; - pos2= st->index_entries[index].pos; - ts = st->index_entries[index].timestamp; - }else{ + pos2 = st->index_entries[index].pos; + ts = st->index_entries[index].timestamp; + } else { av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pts_cmp, (void **) next_node); - av_log(s, AV_LOG_DEBUG, "%"PRIu64"-%"PRIu64" %"PRId64"-%"PRId64"\n", next_node[0]->pos, next_node[1]->pos, - next_node[0]->ts , next_node[1]->ts); - pos = ff_gen_search(s, -1, dummy.ts, next_node[0]->pos, next_node[1]->pos, next_node[1]->pos, - next_node[0]->ts , next_node[1]->ts, AVSEEK_FLAG_BACKWARD, &ts, nut_read_timestamp); - - if(!(flags & AVSEEK_FLAG_BACKWARD)){ - dummy.pos= pos+16; - next_node[1]= &nopts_sp; + av_log(s, AV_LOG_DEBUG, "%"PRIu64"-%"PRIu64" %"PRId64"-%"PRId64"\n", + next_node[0]->pos, next_node[1]->pos, next_node[0]->ts, + next_node[1]->ts); + pos = ff_gen_search(s, -1, dummy.ts, next_node[0]->pos, + next_node[1]->pos, next_node[1]->pos, + next_node[0]->ts, next_node[1]->ts, + AVSEEK_FLAG_BACKWARD, &ts, nut_read_timestamp); + + if (!(flags & AVSEEK_FLAG_BACKWARD)) { + dummy.pos = pos + 16; + next_node[1] = &nopts_sp; av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp, (void **) next_node); - pos2 = ff_gen_search(s, -2, dummy.pos, next_node[0]->pos , next_node[1]->pos, next_node[1]->pos, - next_node[0]->back_ptr, next_node[1]->back_ptr, flags, &ts, nut_read_timestamp); - if(pos2>=0) - pos= pos2; - //FIXME dir but I think it does not matter + pos2 = ff_gen_search(s, -2, dummy.pos, next_node[0]->pos, + next_node[1]->pos, next_node[1]->pos, + next_node[0]->back_ptr, next_node[1]->back_ptr, + flags, &ts, nut_read_timestamp); + if (pos2 >= 0) + pos = pos2; + // FIXME dir but I think it does not matter } - dummy.pos= pos; - sp= av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp, - NULL); + dummy.pos = pos; + sp = av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp, + NULL); assert(sp); - pos2= sp->back_ptr - 15; + pos2 = sp->back_ptr - 15; } av_log(NULL, AV_LOG_DEBUG, "SEEKTO: %"PRId64"\n", pos2); - pos= find_startcode(s->pb, SYNCPOINT_STARTCODE, pos2); + pos = find_startcode(s->pb, SYNCPOINT_STARTCODE, pos2); avio_seek(s->pb, pos, SEEK_SET); av_log(NULL, AV_LOG_DEBUG, "SP: %"PRId64"\n", pos); - if(pos2 > pos || pos2 + 15 < pos){ + if (pos2 > pos || pos2 + 15 < pos) av_log(NULL, AV_LOG_ERROR, "no syncpoint at backptr pos\n"); - } - for(i=0; inb_streams; i++) - nut->stream[i].skip_until_key_frame=1; + for (i = 0; i < s->nb_streams; i++) + nut->stream[i].skip_until_key_frame = 1; return 0; } @@ -915,7 +979,7 @@ static int nut_read_close(AVFormatContext *s) av_freep(&nut->time_base); av_freep(&nut->stream); ff_nut_free_sp(nut); - for(i = 1; i < nut->header_count; i++) + for (i = 1; i < nut->header_count; i++) av_freep(&nut->header[i]); return 0; -- cgit v1.2.3 From 95a8dac57b1bc21891387c554e0e8b872bef6103 Mon Sep 17 00:00:00 2001 From: Justin Ruggles Date: Thu, 5 Apr 2012 17:47:20 -0400 Subject: avconv: parse 
channel layout string This allows the user to use channel layout names instead of having to use the channel mask values. --- avconv.c | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/avconv.c b/avconv.c index 7344028bce..2090565eca 100644 --- a/avconv.c +++ b/avconv.c @@ -4377,6 +4377,41 @@ static void parse_cpuflags(int argc, char **argv, const OptionDef *options) opt_cpuflags("cpuflags", argv[idx + 1]); } +static int opt_channel_layout(OptionsContext *o, const char *opt, const char *arg) +{ + char layout_str[32]; + char *stream_str; + char *ac_str; + int ret, channels, ac_str_size; + uint64_t layout; + + layout = av_get_channel_layout(arg); + if (!layout) { + av_log(NULL, AV_LOG_ERROR, "Unknown channel layout: %s\n", arg); + return AVERROR(EINVAL); + } + snprintf(layout_str, sizeof(layout_str), "%"PRIu64, layout); + ret = opt_default(opt, layout_str); + if (ret < 0) + return ret; + + /* set 'ac' option based on channel layout */ + channels = av_get_channel_layout_nb_channels(layout); + snprintf(layout_str, sizeof(layout_str), "%d", channels); + stream_str = strchr(opt, ':'); + ac_str_size = 3 + (stream_str ? strlen(stream_str) : 0); + ac_str = av_mallocz(ac_str_size); + if (!ac_str) + return AVERROR(ENOMEM); + av_strlcpy(ac_str, "ac", 3); + if (stream_str) + av_strlcat(ac_str, stream_str, ac_str_size); + ret = parse_option(o, ac_str, layout_str, options); + av_free(ac_str); + + return ret; +} + #define OFFSET(x) offsetof(OptionsContext, x) static const OptionDef options[] = { /* main options */ @@ -4465,6 +4500,7 @@ static const OptionDef options[] = { { "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_tag}, "force audio tag/fourcc", "fourcc/tag" }, { "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, // { "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_SPEC | OPT_STRING, {.off = OFFSET(sample_fmts)}, "set sample format", "format" }, + { "channel_layout", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_channel_layout}, "set channel layout", "layout" }, /* subtitle options */ { "sn", OPT_BOOL | OPT_SUBTITLE | OPT_OFFSET, {.off = OFFSET(subtitle_disable)}, "disable subtitle" }, -- cgit v1.2.3 From d3c59d5003a483f1a23e225fc71c19bd1116d11c Mon Sep 17 00:00:00 2001 From: Justin Ruggles Date: Thu, 5 Apr 2012 18:11:28 -0400 Subject: avconv: use default channel layouts when they are unknown If either input or output layout is known and the channel counts match, use the known layout for both. Otherwise choose the default layout based on av_get_default_channel_layout(). Changed some FATE references due to some WAVE files now having a non-zero channel mask. 
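
For context, a minimal sketch of the guessing rule described above, given purely as an illustration (the helper and header names follow the libavutil channel-layout API already used in this series; this is not code from the patch itself):

    #include <stdint.h>
    #include "libavutil/audioconvert.h"   /* av_get_default_channel_layout() */

    /* Pick a layout for a stream that reports a channel count but no mask.
     * If the other side of the transcode already has a layout and the channel
     * counts match, reuse it; otherwise fall back to libavutil's default
     * (e.g. 1 -> mono, 2 -> stereo). Returns 0 if no default exists. */
    static uint64_t guess_channel_layout(int channels,
                                         uint64_t other_layout,
                                         int other_channels)
    {
        if (other_layout && channels == other_channels)
            return other_layout;
        return av_get_default_channel_layout(channels);
    }
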
--- avconv.c | 56 ++++++++++++++++++++++++++++++++++++++++++++ tests/ref/acodec/pcm_f32le | 2 +- tests/ref/acodec/pcm_f64le | 2 +- tests/ref/acodec/pcm_s24daud | 2 +- tests/ref/acodec/pcm_s24le | 2 +- tests/ref/acodec/pcm_s32le | 2 +- 6 files changed, 61 insertions(+), 5 deletions(-) diff --git a/avconv.c b/avconv.c index 2090565eca..2cefe5d549 100644 --- a/avconv.c +++ b/avconv.c @@ -2145,6 +2145,51 @@ static void print_sdp(OutputFile *output_files, int n) av_freep(&avc); } +static void get_default_channel_layouts(OutputStream *ost, InputStream *ist) +{ + char layout_name[256]; + AVCodecContext *enc = ost->st->codec; + AVCodecContext *dec = ist->st->codec; + + if (!dec->channel_layout) { + if (enc->channel_layout && dec->channels == enc->channels) { + dec->channel_layout = enc->channel_layout; + } else { + dec->channel_layout = av_get_default_channel_layout(dec->channels); + + if (!dec->channel_layout) { + av_log(NULL, AV_LOG_FATAL, "Unable to find default channel " + "layout for Input Stream #%d.%d\n", ist->file_index, + ist->st->index); + exit_program(1); + } + } + av_get_channel_layout_string(layout_name, sizeof(layout_name), + dec->channels, dec->channel_layout); + av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream " + "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name); + } + if (!enc->channel_layout) { + if (dec->channels == enc->channels) { + enc->channel_layout = dec->channel_layout; + return; + } else { + enc->channel_layout = av_get_default_channel_layout(enc->channels); + } + if (!enc->channel_layout) { + av_log(NULL, AV_LOG_FATAL, "Unable to find default channel layout " + "for Output Stream #%d.%d\n", ost->file_index, + ost->st->index); + exit_program(1); + } + av_get_channel_layout_string(layout_name, sizeof(layout_name), + enc->channels, enc->channel_layout); + av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Output Stream " + "#%d.%d : %s\n", ost->file_index, ost->st->index, layout_name); + } +} + + static int init_input_stream(int ist_index, OutputStream *output_streams, int nb_output_streams, char *error, int error_len) { @@ -2183,6 +2228,17 @@ static int init_input_stream(int ist_index, OutputStream *output_streams, int nb } assert_codec_experimental(ist->st->codec, 0); assert_avoptions(ist->opts); + + if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { + for (i = 0; i < nb_output_streams; i++) { + OutputStream *ost = &output_streams[i]; + if (ost->source_index == ist_index) { + if (!ist->st->codec->channel_layout || !ost->st->codec->channel_layout) + get_default_channel_layouts(ost, ist); + break; + } + } + } } ist->last_dts = ist->st->avg_frame_rate.num ? 
- ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0; diff --git a/tests/ref/acodec/pcm_f32le b/tests/ref/acodec/pcm_f32le index 38e5c0b719..eb6ea93687 100644 --- a/tests/ref/acodec/pcm_f32le +++ b/tests/ref/acodec/pcm_f32le @@ -1,4 +1,4 @@ -46f44f86a18984a832206ab9e29a79f2 *./tests/data/acodec/pcm_f32le.wav +653d82a64b7bd96ac193e105e9f92d4c *./tests/data/acodec/pcm_f32le.wav 2116880 ./tests/data/acodec/pcm_f32le.wav 64151e4bcc2b717aa5a8454d424d6a1f *./tests/data/pcm_f32le.acodec.out.wav stddev: 0.00 PSNR:999.99 MAXDIFF: 0 bytes: 1058400/ 1058400 diff --git a/tests/ref/acodec/pcm_f64le b/tests/ref/acodec/pcm_f64le index 42875a8d2f..2f0576bf91 100644 --- a/tests/ref/acodec/pcm_f64le +++ b/tests/ref/acodec/pcm_f64le @@ -1,4 +1,4 @@ -ba17c6d1a270e1333e981f239bf7eb45 *./tests/data/acodec/pcm_f64le.wav +48b4cd378f47a50dc902aa03cc8280ed *./tests/data/acodec/pcm_f64le.wav 4233680 ./tests/data/acodec/pcm_f64le.wav 64151e4bcc2b717aa5a8454d424d6a1f *./tests/data/pcm_f64le.acodec.out.wav stddev: 0.00 PSNR:999.99 MAXDIFF: 0 bytes: 1058400/ 1058400 diff --git a/tests/ref/acodec/pcm_s24daud b/tests/ref/acodec/pcm_s24daud index eab6f8d28e..fb7cad1d89 100644 --- a/tests/ref/acodec/pcm_s24daud +++ b/tests/ref/acodec/pcm_s24daud @@ -1,4 +1,4 @@ 8168a5c1343553ef027541830f2cb879 *./tests/data/acodec/pcm_s24daud.302 10368730 ./tests/data/acodec/pcm_s24daud.302 -f552afadfdfcd6348a07095da6382de5 *./tests/data/pcm_s24daud.acodec.out.wav +7ce988d6c5b2bf0ebf0216ba15bc5cee *./tests/data/pcm_s24daud.acodec.out.wav stddev: 9416.28 PSNR: 16.85 MAXDIFF:42744 bytes: 6911796/ 1058400 diff --git a/tests/ref/acodec/pcm_s24le b/tests/ref/acodec/pcm_s24le index a724e8c189..0d86d1e7f7 100644 --- a/tests/ref/acodec/pcm_s24le +++ b/tests/ref/acodec/pcm_s24le @@ -1,4 +1,4 @@ -a85380fb79b0d4fff38e24ac1e34bb94 *./tests/data/acodec/pcm_s24le.wav +18ea73985dbdf59e23f5aba66145e6fe *./tests/data/acodec/pcm_s24le.wav 1587668 ./tests/data/acodec/pcm_s24le.wav 64151e4bcc2b717aa5a8454d424d6a1f *./tests/data/pcm_s24le.acodec.out.wav stddev: 0.00 PSNR:999.99 MAXDIFF: 0 bytes: 1058400/ 1058400 diff --git a/tests/ref/acodec/pcm_s32le b/tests/ref/acodec/pcm_s32le index 86777505f5..2b81c29e6a 100644 --- a/tests/ref/acodec/pcm_s32le +++ b/tests/ref/acodec/pcm_s32le @@ -1,4 +1,4 @@ -da6ed80f4f40f0082577dea80827e014 *./tests/data/acodec/pcm_s32le.wav +8d8849fa5c5d91b9cb74f5c74e937faf *./tests/data/acodec/pcm_s32le.wav 2116868 ./tests/data/acodec/pcm_s32le.wav 64151e4bcc2b717aa5a8454d424d6a1f *./tests/data/pcm_s32le.acodec.out.wav stddev: 0.00 PSNR:999.99 MAXDIFF: 0 bytes: 1058400/ 1058400 -- cgit v1.2.3 From 272b252c0110225188c7d7f31167941210aac197 Mon Sep 17 00:00:00 2001 From: Christophe GISQUET Date: Mon, 19 Mar 2012 22:46:28 +0100 Subject: rv40dsp: implement prescaled versions for biweight. Quite often, the original weights are multiple of 512. By prescaling them by 1/512 when they are computed (once per frame), no intermediate shifting is needed, and no prescaling on each call either. The x86 code already used that trick. Signed-off-by: Ronald S. 
Bultje --- libavcodec/arm/rv40dsp_init_neon.c | 4 +-- libavcodec/rv34.c | 58 ++++++++++++++++++------------- libavcodec/rv34.h | 2 ++ libavcodec/rv34dsp.h | 7 +++- libavcodec/rv40dsp.c | 20 +++++++++-- libavcodec/x86/rv40dsp.asm | 70 +++++++++++++++++--------------------- libavcodec/x86/rv40dsp_init.c | 30 ++++++++++------ 7 files changed, 112 insertions(+), 79 deletions(-) diff --git a/libavcodec/arm/rv40dsp_init_neon.c b/libavcodec/arm/rv40dsp_init_neon.c index 650ef61878..2ce50a2073 100644 --- a/libavcodec/arm/rv40dsp_init_neon.c +++ b/libavcodec/arm/rv40dsp_init_neon.c @@ -128,8 +128,8 @@ void ff_rv40dsp_init_neon(RV34DSPContext *c, DSPContext* dsp) c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_neon; c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_neon; - c->rv40_weight_pixels_tab[0] = ff_rv40_weight_func_16_neon; - c->rv40_weight_pixels_tab[1] = ff_rv40_weight_func_8_neon; + c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_16_neon; + c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_8_neon; c->rv40_loop_filter_strength[0] = ff_rv40_h_loop_filter_strength_neon; c->rv40_loop_filter_strength[1] = ff_rv40_v_loop_filter_strength_neon; diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c index 3ad1717d13..12475692c6 100644 --- a/libavcodec/rv34.c +++ b/libavcodec/rv34.c @@ -521,7 +521,7 @@ static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int */ static int calc_add_mv(RV34DecContext *r, int dir, int val) { - int mul = dir ? -r->weight2 : r->weight1; + int mul = dir ? -r->mv_weight2 : r->mv_weight1; return (val * mul + 0x2000) >> 14; } @@ -776,24 +776,24 @@ static void rv34_mc_1mv(RV34DecContext *r, const int block_type, static void rv4_weight(RV34DecContext *r) { - r->rdsp.rv40_weight_pixels_tab[0](r->s.dest[0], - r->tmp_b_block_y[0], - r->tmp_b_block_y[1], - r->weight1, - r->weight2, - r->s.linesize); - r->rdsp.rv40_weight_pixels_tab[1](r->s.dest[1], - r->tmp_b_block_uv[0], - r->tmp_b_block_uv[2], - r->weight1, - r->weight2, - r->s.uvlinesize); - r->rdsp.rv40_weight_pixels_tab[1](r->s.dest[2], - r->tmp_b_block_uv[1], - r->tmp_b_block_uv[3], - r->weight1, - r->weight2, - r->s.uvlinesize); + r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][0](r->s.dest[0], + r->tmp_b_block_y[0], + r->tmp_b_block_y[1], + r->weight1, + r->weight2, + r->s.linesize); + r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[1], + r->tmp_b_block_uv[0], + r->tmp_b_block_uv[2], + r->weight1, + r->weight2, + r->s.uvlinesize); + r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[2], + r->tmp_b_block_uv[1], + r->tmp_b_block_uv[3], + r->weight1, + r->weight2, + r->s.uvlinesize); } static void rv34_mc_2mv(RV34DecContext *r, const int block_type) @@ -1703,11 +1703,21 @@ int ff_rv34_decode_frame(AVCodecContext *avctx, int dist0 = GET_PTS_DIFF(r->cur_pts, r->last_pts); int dist1 = GET_PTS_DIFF(r->next_pts, r->cur_pts); - if (!refdist) { - r->weight1 = r->weight2 = 8192; - } else { - r->weight1 = (dist0 << 14) / refdist; - r->weight2 = (dist1 << 14) / refdist; + if(!refdist){ + r->mv_weight1 = r->mv_weight2 = r->weight1 = r->weight2 = 8192; + r->scaled_weight = 0; + }else{ + r->mv_weight1 = (dist0 << 14) / refdist; + r->mv_weight2 = (dist1 << 14) / refdist; + if((r->mv_weight1|r->mv_weight2) & 511){ + r->weight1 = r->mv_weight1; + r->weight2 = r->mv_weight2; + r->scaled_weight = 0; + }else{ + r->weight1 = r->mv_weight1 >> 9; + r->weight2 = r->mv_weight2 >> 9; + r->scaled_weight = 1; + } } } s->mb_x = s->mb_y = 0; diff --git a/libavcodec/rv34.h 
b/libavcodec/rv34.h index 76232145c5..e7a59c4bed 100644 --- a/libavcodec/rv34.h +++ b/libavcodec/rv34.h @@ -106,7 +106,9 @@ typedef struct RV34DecContext{ int rpr; ///< one field size in RV30 slice header int cur_pts, last_pts, next_pts; + int scaled_weight; int weight1, weight2; ///< B frame distance fractions (0.14) used in motion compensation + int mv_weight1, mv_weight2; uint16_t *cbp_luma; ///< CBP values for luma subblocks uint8_t *cbp_chroma; ///< CBP values for chroma subblocks diff --git a/libavcodec/rv34dsp.h b/libavcodec/rv34dsp.h index c70194cc20..58da59f038 100644 --- a/libavcodec/rv34dsp.h +++ b/libavcodec/rv34dsp.h @@ -58,7 +58,12 @@ typedef struct RV34DSPContext { qpel_mc_func avg_pixels_tab[4][16]; h264_chroma_mc_func put_chroma_pixels_tab[3]; h264_chroma_mc_func avg_chroma_pixels_tab[3]; - rv40_weight_func rv40_weight_pixels_tab[2]; + /** + * Biweight functions, first dimension is transform size (16/8), + * second is whether the weight is prescaled by 1/512 to skip + * the intermediate shifting. + */ + rv40_weight_func rv40_weight_pixels_tab[2][2]; rv34_inv_transform_func rv34_inv_transform; rv34_inv_transform_func rv34_inv_transform_dc; rv34_idct_add_func rv34_idct_add; diff --git a/libavcodec/rv40dsp.c b/libavcodec/rv40dsp.c index c12958a89c..19a18d37a5 100644 --- a/libavcodec/rv40dsp.c +++ b/libavcodec/rv40dsp.c @@ -278,7 +278,7 @@ RV40_CHROMA_MC(put_, op_put) RV40_CHROMA_MC(avg_, op_avg) #define RV40_WEIGHT_FUNC(size) \ -static void rv40_weight_func_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, ptrdiff_t stride)\ +static void rv40_weight_func_rnd_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, ptrdiff_t stride)\ {\ int i, j;\ \ @@ -289,6 +289,18 @@ static void rv40_weight_func_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src src2 += stride;\ dst += stride;\ }\ +}\ +static void rv40_weight_func_nornd_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, ptrdiff_t stride)\ +{\ + int i, j;\ +\ + for (j = 0; j < size; j++) {\ + for (i = 0; i < size; i++)\ + dst[i] = (w2 * src1[i] + w1 * src2[i] + 0x10) >> 5;\ + src1 += stride;\ + src2 += stride;\ + dst += stride;\ + }\ } RV40_WEIGHT_FUNC(16) @@ -578,8 +590,10 @@ av_cold void ff_rv40dsp_init(RV34DSPContext *c, DSPContext* dsp) { c->avg_chroma_pixels_tab[0] = avg_rv40_chroma_mc8_c; c->avg_chroma_pixels_tab[1] = avg_rv40_chroma_mc4_c; - c->rv40_weight_pixels_tab[0] = rv40_weight_func_16; - c->rv40_weight_pixels_tab[1] = rv40_weight_func_8; + c->rv40_weight_pixels_tab[0][0] = rv40_weight_func_rnd_16; + c->rv40_weight_pixels_tab[0][1] = rv40_weight_func_rnd_8; + c->rv40_weight_pixels_tab[1][0] = rv40_weight_func_nornd_16; + c->rv40_weight_pixels_tab[1][1] = rv40_weight_func_nornd_8; c->rv40_weak_loop_filter[0] = rv40_h_weak_loop_filter; c->rv40_weak_loop_filter[1] = rv40_v_weak_loop_filter; diff --git a/libavcodec/x86/rv40dsp.asm b/libavcodec/x86/rv40dsp.asm index bff3e7b96a..9028e74024 100644 --- a/libavcodec/x86/rv40dsp.asm +++ b/libavcodec/x86/rv40dsp.asm @@ -139,69 +139,61 @@ SECTION .text ; rv40_weight_func_%1(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, int stride) ; %1=size %2=num of xmm regs -%macro RV40_WEIGHT 2 -cglobal rv40_weight_func_%1, 6, 7, %2 +; The weights are FP0.14 notation of fractions depending on pts. +; For timebases without rounding error (i.e. PAL), the fractions +; can be simplified, and several operations can be avoided. +; Therefore, we check here whether they are multiples of 2^9 for +; those simplifications to occur. 
+%macro RV40_WEIGHT 3 +cglobal rv40_weight_func_%1_%2, 6, 7, %3 %if cpuflag(ssse3) mova m1, [shift_round] %else mova m1, [pw_16] %endif pxor m0, m0 - mov r6, r3 - or r6, r4 - ; The weights are FP0.14 notation of fractions depending on pts. - ; For timebases without rounding error (i.e. PAL), the fractions - ; can be simplified, and several operations can be avoided. - ; Therefore, we check here whether they are multiples of 2^9 for - ; those simplifications to occur. - and r6, 0x1FF ; Set loop counter and increments %if mmsize == 8 - mov r6, %1 + mov r6, %2 %else - mov r6, (%1 * %1) / mmsize + mov r6, (%2 * %2) / mmsize %endif - ; Use result of test now - jz .loop_512 movd m2, r3 movd m3, r4 +%ifidn %1,rnd +%define RND 0 SPLATW m2, m2 - SPLATW m3, m3 - -.loop: - MAIN_LOOP %1, 0 - jnz .loop - REP_RET - - ; Weights are multiple of 512, which allows some shortcuts -.loop_512: - sar r3, 9 - sar r4, 9 - movd m2, r3 - movd m3, r4 +%else +%define RND 1 %if cpuflag(ssse3) punpcklbw m3, m2 - SPLATW m3, m3 %else SPLATW m2, m2 - SPLATW m3, m3 %endif -.loop2: - MAIN_LOOP %1, 1 - jnz .loop2 - REP_RET +%endif + SPLATW m3, m3 +.loop: + MAIN_LOOP %2, RND + jnz .loop + REP_RET %endmacro INIT_MMX mmx -RV40_WEIGHT 8, 0 -RV40_WEIGHT 16, 0 +RV40_WEIGHT rnd, 8, 3 +RV40_WEIGHT rnd, 16, 4 +RV40_WEIGHT nornd, 8, 3 +RV40_WEIGHT nornd, 16, 4 INIT_XMM sse2 -RV40_WEIGHT 8, 8 -RV40_WEIGHT 16, 8 +RV40_WEIGHT rnd, 8, 3 +RV40_WEIGHT rnd, 16, 4 +RV40_WEIGHT nornd, 8, 3 +RV40_WEIGHT nornd, 16, 4 INIT_XMM ssse3 -RV40_WEIGHT 8, 8 -RV40_WEIGHT 16, 8 +RV40_WEIGHT rnd, 8, 3 +RV40_WEIGHT rnd, 16, 4 +RV40_WEIGHT nornd, 8, 3 +RV40_WEIGHT nornd, 16, 4 diff --git a/libavcodec/x86/rv40dsp_init.c b/libavcodec/x86/rv40dsp_init.c index 79c70f78c3..df468aa9e5 100644 --- a/libavcodec/x86/rv40dsp_init.c +++ b/libavcodec/x86/rv40dsp_init.c @@ -41,10 +41,14 @@ void ff_avg_rv40_chroma_mc4_3dnow(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y); #define DECLARE_WEIGHT(opt) \ -void ff_rv40_weight_func_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \ - int w1, int w2, ptrdiff_t stride); \ -void ff_rv40_weight_func_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \ - int w1, int w2, ptrdiff_t stride); +void ff_rv40_weight_func_rnd_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \ + int w1, int w2, ptrdiff_t stride); \ +void ff_rv40_weight_func_rnd_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \ + int w1, int w2, ptrdiff_t stride); \ +void ff_rv40_weight_func_nornd_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \ + int w1, int w2, ptrdiff_t stride); \ +void ff_rv40_weight_func_nornd_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \ + int w1, int w2, ptrdiff_t stride); DECLARE_WEIGHT(mmx) DECLARE_WEIGHT(sse2) DECLARE_WEIGHT(ssse3) @@ -57,8 +61,10 @@ void ff_rv40dsp_init_x86(RV34DSPContext *c, DSPContext *dsp) if (mm_flags & AV_CPU_FLAG_MMX) { c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_mmx; c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_mmx; - c->rv40_weight_pixels_tab[0] = ff_rv40_weight_func_16_mmx; - c->rv40_weight_pixels_tab[1] = ff_rv40_weight_func_8_mmx; + c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_mmx; + c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_mmx; + c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_mmx; + c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_mmx; } if (mm_flags & AV_CPU_FLAG_MMX2) { c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_mmx2; @@ -68,12 +74,16 @@ void ff_rv40dsp_init_x86(RV34DSPContext *c, 
DSPContext *dsp) c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_3dnow; } if (mm_flags & AV_CPU_FLAG_SSE2) { - c->rv40_weight_pixels_tab[0] = ff_rv40_weight_func_16_sse2; - c->rv40_weight_pixels_tab[1] = ff_rv40_weight_func_8_sse2; + c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_sse2; + c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_sse2; + c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_sse2; + c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_sse2; } if (mm_flags & AV_CPU_FLAG_SSSE3) { - c->rv40_weight_pixels_tab[0] = ff_rv40_weight_func_16_ssse3; - c->rv40_weight_pixels_tab[1] = ff_rv40_weight_func_8_ssse3; + c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_ssse3; + c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_ssse3; + c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_ssse3; + c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_ssse3; } #endif } -- cgit v1.2.3 From 2130bd8f5b6504ea14cd41e33f5d4f431eb724f3 Mon Sep 17 00:00:00 2001 From: Christophe GISQUET Date: Tue, 20 Mar 2012 16:13:55 +0100 Subject: rv40dsp x86: use only one register, for both increment and loop counter Around 10 cycles faster for luma. Signed-off-by: Ronald S. Bultje --- libavcodec/x86/rv40dsp.asm | 43 ++++++++++++++++++++----------------------- 1 file changed, 20 insertions(+), 23 deletions(-) diff --git a/libavcodec/x86/rv40dsp.asm b/libavcodec/x86/rv40dsp.asm index 9028e74024..721d3df094 100644 --- a/libavcodec/x86/rv40dsp.asm +++ b/libavcodec/x86/rv40dsp.asm @@ -32,13 +32,14 @@ SECTION .text ; %1=5bits weights?, %2=dst %3=src1 %4=src3 %5=stride if sse2 %macro RV40_WCORE 4-5 - movh m4, [%3 + 0] - movh m5, [%4 + 0] + movh m4, [%3 + r6 + 0] + movh m5, [%4 + r6 + 0] %if %0 == 4 -%define OFFSET mmsize / 2 +%define OFFSET r6 + mmsize / 2 %else ; 8x8 block and sse2, stride was provided -%define OFFSET %5 +%define OFFSET r6 + add r6, r5 %endif movh m6, [%3 + OFFSET] movh m7, [%4 + OFFSET] @@ -99,10 +100,12 @@ SECTION .text packuswb m4, m6 %if %0 == 5 ; Only called for 8x8 blocks and sse2 - movh [%2 + 0], m4 - movhps [%2 + %5], m4 + sub r6, r5 + movh [%2 + r6], m4 + add r6, r5 + movhps [%2 + r6], m4 %else - mova [%2], m4 + mova [%2 + r6], m4 %endif %endmacro @@ -115,26 +118,19 @@ SECTION .text %endif ; Prepare for next loop - add r0, r5 - add r1, r5 - add r2, r5 + add r6, r5 %else %ifidn %1, 8 RV40_WCORE %2, r0, r1, r2, r5 ; Prepare 2 next lines - lea r0, [r0 + 2 * r5] - lea r1, [r1 + 2 * r5] - lea r2, [r2 + 2 * r5] + add r6, r5 %else RV40_WCORE %2, r0, r1, r2 ; Prepare single next line - add r0, r5 - add r1, r5 - add r2, r5 + add r6, r5 %endif %endif - dec r6 %endmacro ; rv40_weight_func_%1(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, int stride) @@ -145,7 +141,7 @@ SECTION .text ; Therefore, we check here whether they are multiples of 2^9 for ; those simplifications to occur. %macro RV40_WEIGHT 3 -cglobal rv40_weight_func_%1_%2, 6, 7, %3 +cglobal rv40_weight_func_%1_%2, 6, 7, 8 %if cpuflag(ssse3) mova m1, [shift_round] %else @@ -153,11 +149,12 @@ cglobal rv40_weight_func_%1_%2, 6, 7, %3 %endif pxor m0, m0 ; Set loop counter and increments -%if mmsize == 8 - mov r6, %2 -%else - mov r6, (%2 * %2) / mmsize -%endif + mov r6, r5 + shl r6, %3 + add r0, r6 + add r1, r6 + add r2, r6 + neg r6 movd m2, r3 movd m3, r4 -- cgit v1.2.3
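
A closing note on the last patch: the single-register trick is the classic negative-offset loop. The data pointers are biased past the end of the block once, and a single signed offset then serves as both the address displacement and the loop counter, so no separate counter register is needed. A rough C analogue of the idiom, given only as an illustration (not code from the patch):

    #include <stddef.h>
    #include <stdint.h>

    /* Walk 'size' bytes using one variable for both indexing and termination:
     * bias the pointers by 'size' up front, start the offset at -size, and
     * stop when it reaches zero. */
    static void copy_biased(uint8_t *dst, const uint8_t *src, ptrdiff_t size)
    {
        dst += size;
        src += size;
        for (ptrdiff_t i = -size; i != 0; i++)
            dst[i] = src[i];
    }
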