path: root/libavcodec/ljpegenc.c
author     Anton Khirnov <anton@khirnov.net>  2013-12-02 19:22:54 +0100
committer  Anton Khirnov <anton@khirnov.net>  2013-12-05 13:35:55 +0100
commit     24abd806ea0cfb0d988d2f0044eac79cff12918c (patch)
tree       c9ad83f6a63c5a18018ef74068e483e662aef858 /libavcodec/ljpegenc.c
parent     86eb2eaac629909d6ee4067c6f1e485a4e70473d (diff)
ljpegenc: deMpegEncContextize
The encoder uses almost none of the mpegvideo infrastructure, only some fields from MpegEncContext. The FATE results change because an all-zero quant matrix is now written into the file. Since it is not used for anything in ljpeg, this should not be a problem.
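To illustrate the quant-matrix point (a sketch of mine, not part of the patch): libavcodec allocates priv_data with av_mallocz(), so the new LJpegEncContext starts out zeroed, ljpeg_encode_init() never fills matrix[], and ff_mjpeg_encode_picture_header() emits those zeros into the DQT segment; lossless JPEG never dequantizes, so only the FATE checksums move. A standalone analogue of that zero-allocation:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* calloc() stands in for av_mallocz(); the table is never written to,
     * just as s->matrix is never filled by the new init function. */
    uint16_t *matrix = calloc(64, sizeof(*matrix));
    if (!matrix)
        return 1;
    printf("first DQT entry would be emitted as %u\n", matrix[0]); /* prints 0 */
    free(matrix);
    return 0;
}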
Diffstat (limited to 'libavcodec/ljpegenc.c')
-rw-r--r--  libavcodec/ljpegenc.c  179
1 file changed, 132 insertions(+), 47 deletions(-)
diff --git a/libavcodec/ljpegenc.c b/libavcodec/ljpegenc.c
index 1ca2d37ad1..bb3c188b84 100644
--- a/libavcodec/ljpegenc.c
+++ b/libavcodec/ljpegenc.c
@@ -30,55 +30,68 @@
* lossless JPEG encoder.
*/
+#include "libavutil/frame.h"
+#include "libavutil/mem.h"
+#include "libavutil/pixdesc.h"
+
#include "avcodec.h"
+#include "dsputil.h"
#include "internal.h"
#include "mpegvideo.h"
#include "mjpeg.h"
#include "mjpegenc.h"
+typedef struct LJpegEncContext {
+ DSPContext dsp;
+ ScanTable scantable;
+ uint16_t matrix[64];
+
+ int vsample[3];
+ int hsample[3];
+
+ uint16_t huff_code_dc_luminance[12];
+ uint16_t huff_code_dc_chrominance[12];
+ uint8_t huff_size_dc_luminance[12];
+ uint8_t huff_size_dc_chrominance[12];
+
+ uint16_t (*scratch)[4];
+} LJpegEncContext;
static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
- MpegEncContext * const s = avctx->priv_data;
- MJpegContext * const m = s->mjpeg_ctx;
- const int width= s->width;
- const int height= s->height;
- AVFrame * const p = &s->current_picture.f;
+ LJpegEncContext *s = avctx->priv_data;
+ PutBitContext pb;
+ const int width = avctx->width;
+ const int height = avctx->height;
const int predictor= avctx->prediction_method+1;
- const int mb_width = (width + s->mjpeg_hsample[0] - 1) / s->mjpeg_hsample[0];
- const int mb_height = (height + s->mjpeg_vsample[0] - 1) / s->mjpeg_vsample[0];
- int ret, max_pkt_size = FF_MIN_BUFFER_SIZE;
+ const int mb_width = (width + s->hsample[0] - 1) / s->hsample[0];
+ const int mb_height = (height + s->vsample[0] - 1) / s->vsample[0];
+ int max_pkt_size = FF_MIN_BUFFER_SIZE;
+ int ret, header_bits;
if (avctx->pix_fmt == AV_PIX_FMT_BGRA)
max_pkt_size += width * height * 3 * 4;
else {
max_pkt_size += mb_width * mb_height * 3 * 4
- * s->mjpeg_hsample[0] * s->mjpeg_vsample[0];
+ * s->hsample[0] * s->vsample[0];
}
if ((ret = ff_alloc_packet(pkt, max_pkt_size)) < 0) {
av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", max_pkt_size);
return ret;
}
- init_put_bits(&s->pb, pkt->data, pkt->size);
+ init_put_bits(&pb, pkt->data, pkt->size);
- av_frame_unref(p);
- ret = av_frame_ref(p, pict);
- if (ret < 0)
- return ret;
- p->pict_type= AV_PICTURE_TYPE_I;
- p->key_frame= 1;
-
- ff_mjpeg_encode_picture_header(avctx, &s->pb, &s->intra_scantable,
- s->intra_matrix);
+ ff_mjpeg_encode_picture_header(avctx, &pb, &s->scantable,
+ s->matrix);
- s->header_bits= put_bits_count(&s->pb);
+ header_bits = put_bits_count(&pb);
if(avctx->pix_fmt == AV_PIX_FMT_BGRA){
int x, y, i;
- const int linesize= p->linesize[0];
- uint16_t (*buffer)[4]= (void *) s->rd_scratchpad;
+ const int linesize = pict->linesize[0];
+ uint16_t (*buffer)[4] = s->scratch;
int left[3], top[3], topleft[3];
for(i=0; i<3; i++){
@@ -87,10 +100,10 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
for(y = 0; y < height; y++) {
const int modified_predictor= y ? predictor : 1;
- uint8_t *ptr = p->data[0] + (linesize * y);
+ uint8_t *ptr = pict->data[0] + (linesize * y);
- if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < width*3*4){
- av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+ if(pb.buf_end - pb.buf - (put_bits_count(&pb) >> 3) < width * 3 * 4) {
+ av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
return -1;
}
@@ -115,9 +128,9 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
diff= ((left[i] - pred + 0x100)&0x1FF) - 0x100;
if(i==0)
- ff_mjpeg_encode_dc(&s->pb, diff, m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly
+ ff_mjpeg_encode_dc(&pb, diff, s->huff_size_dc_luminance, s->huff_code_dc_luminance); //FIXME ugly
else
- ff_mjpeg_encode_dc(&s->pb, diff, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance);
+ ff_mjpeg_encode_dc(&pb, diff, s->huff_size_dc_chrominance, s->huff_code_dc_chrominance);
}
}
}
@@ -125,8 +138,9 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
int mb_x, mb_y, i;
for(mb_y = 0; mb_y < mb_height; mb_y++) {
- if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < mb_width * 4 * 3 * s->mjpeg_hsample[0] * s->mjpeg_vsample[0]){
- av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+ if (pb.buf_end - pb.buf - (put_bits_count(&pb) >> 3) <
+ mb_width * 4 * 3 * s->hsample[0] * s->vsample[0]) {
+ av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
return -1;
}
for(mb_x = 0; mb_x < mb_width; mb_x++) {
@@ -134,15 +148,15 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
for(i=0;i<3;i++) {
uint8_t *ptr;
int x, y, h, v, linesize;
- h = s->mjpeg_hsample[i];
- v = s->mjpeg_vsample[i];
- linesize= p->linesize[i];
+ h = s->hsample[i];
+ v = s->vsample[i];
+ linesize = pict->linesize[i];
for(y=0; y<v; y++){
for(x=0; x<h; x++){
int pred;
- ptr = p->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
+ ptr = pict->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
if(y==0 && mb_y==0){
if(x==0 && mb_x==0){
pred= 128;
@@ -158,9 +172,9 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
}
if(i==0)
- ff_mjpeg_encode_dc(&s->pb, *ptr - pred, m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly
+ ff_mjpeg_encode_dc(&pb, *ptr - pred, s->huff_size_dc_luminance, s->huff_code_dc_luminance); //FIXME ugly
else
- ff_mjpeg_encode_dc(&s->pb, *ptr - pred, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance);
+ ff_mjpeg_encode_dc(&pb, *ptr - pred, s->huff_size_dc_chrominance, s->huff_code_dc_chrominance);
}
}
}
@@ -168,8 +182,8 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
for(i=0;i<3;i++) {
uint8_t *ptr;
int x, y, h, v, linesize;
- h = s->mjpeg_hsample[i];
- v = s->mjpeg_vsample[i];
+ h = s->hsample[i];
+ v = s->vsample[i];
linesize = pict->linesize[i];
for(y=0; y<v; y++){
@@ -180,9 +194,9 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
if(i==0)
- ff_mjpeg_encode_dc(&s->pb, *ptr - pred, m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly
+ ff_mjpeg_encode_dc(&pb, *ptr - pred, s->huff_size_dc_luminance, s->huff_code_dc_luminance); //FIXME ugly
else
- ff_mjpeg_encode_dc(&s->pb, *ptr - pred, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance);
+ ff_mjpeg_encode_dc(&pb, *ptr - pred, s->huff_size_dc_chrominance, s->huff_code_dc_chrominance);
}
}
}
@@ -193,11 +207,10 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
emms_c();
- ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
- s->picture_number++;
+ ff_mjpeg_encode_picture_trailer(&pb, header_bits);
- flush_put_bits(&s->pb);
- pkt->size = put_bits_ptr(&s->pb) - s->pb.buf;
+ flush_put_bits(&pb);
+ pkt->size = put_bits_ptr(&pb) - pb.buf;
pkt->flags |= AV_PKT_FLAG_KEY;
*got_packet = 1;
@@ -205,14 +218,86 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
// return (put_bits_count(&f->pb)+7)/8;
}
+static av_cold int ljpeg_encode_close(AVCodecContext *avctx)
+{
+ LJpegEncContext *s = avctx->priv_data;
+
+ av_frame_free(&avctx->coded_frame);
+ av_freep(&s->scratch);
+
+ return 0;
+}
+
+static av_cold int ljpeg_encode_init(AVCodecContext *avctx)
+{
+ LJpegEncContext *s = avctx->priv_data;
+ int chroma_v_shift, chroma_h_shift;
+
+ if ((avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
+ avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
+ avctx->pix_fmt == AV_PIX_FMT_YUV444P) &&
+ avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Limited range YUV is non-standard, set strict_std_compliance to "
+ "at least unofficial to use it.\n");
+ return AVERROR(EINVAL);
+ }
+
+ avctx->coded_frame = av_frame_alloc();
+ if (!avctx->coded_frame)
+ return AVERROR(ENOMEM);
+
+ avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+ avctx->coded_frame->key_frame = 1;
+
+ s->scratch = av_malloc_array(avctx->width + 1, sizeof(*s->scratch));
+
+ ff_dsputil_init(&s->dsp, avctx);
+ ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
+
+ av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
+ &chroma_v_shift);
+
+ if (avctx->pix_fmt == AV_PIX_FMT_BGRA) {
+ s->vsample[0] = s->hsample[0] =
+ s->vsample[1] = s->hsample[1] =
+ s->vsample[2] = s->hsample[2] = 1;
+ } else {
+ s->vsample[0] = 2;
+ s->vsample[1] = 2 >> chroma_v_shift;
+ s->vsample[2] = 2 >> chroma_v_shift;
+ s->hsample[0] = 2;
+ s->hsample[1] = 2 >> chroma_h_shift;
+ s->hsample[2] = 2 >> chroma_h_shift;
+ }
+
+ ff_mjpeg_build_huffman_codes(s->huff_size_dc_luminance,
+ s->huff_code_dc_luminance,
+ avpriv_mjpeg_bits_dc_luminance,
+ avpriv_mjpeg_val_dc);
+ ff_mjpeg_build_huffman_codes(s->huff_size_dc_chrominance,
+ s->huff_code_dc_chrominance,
+ avpriv_mjpeg_bits_dc_chrominance,
+ avpriv_mjpeg_val_dc);
+
+ return 0;
+}
-AVCodec ff_ljpeg_encoder = { //FIXME avoid MPV_* lossless JPEG should not need them
+AVCodec ff_ljpeg_encoder = {
.name = "ljpeg",
.long_name = NULL_IF_CONFIG_SMALL("Lossless JPEG"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_LJPEG,
- .priv_data_size = sizeof(MpegEncContext),
- .init = ff_MPV_encode_init,
+ .priv_data_size = sizeof(LJpegEncContext),
+ .init = ljpeg_encode_init,
.encode2 = encode_picture_lossless,
- .close = ff_MPV_encode_end,
+ .close = ljpeg_encode_close,
+ .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_NONE },
};
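For context, a minimal usage sketch (not part of the commit; error handling abbreviated, dimensions and pixel format arbitrary) of how an application reaches the reworked encoder through the public libavcodec API of the time: avcodec_open2() now allocates an LJpegEncContext for priv_data and calls ljpeg_encode_init() instead of ff_MPV_encode_init().

#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

AVCodecContext *open_ljpeg(int width, int height)
{
    AVCodec *codec;
    AVCodecContext *ctx;

    avcodec_register_all();                          /* still required with 2013-era libavcodec */

    codec = avcodec_find_encoder(AV_CODEC_ID_LJPEG);
    if (!codec)
        return NULL;

    ctx = avcodec_alloc_context3(codec);             /* priv_data is now an LJpegEncContext */
    if (!ctx)
        return NULL;

    ctx->width     = width;
    ctx->height    = height;
    ctx->pix_fmt   = AV_PIX_FMT_YUVJ420P;            /* full-range YUV, no -strict flag needed */
    ctx->time_base = (AVRational){ 1, 25 };

    if (avcodec_open2(ctx, codec, NULL) < 0) {       /* runs ljpeg_encode_init() */
        av_freep(&ctx);
        return NULL;
    }
    return ctx;
}

Any of the pix_fmts listed in the new ff_ljpeg_encoder table work here; the limited-range YUV formats additionally require strict_std_compliance to be lowered to unofficial, as enforced in ljpeg_encode_init().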