author     Kostya Shishkov <kostya.shishkov@gmail.com>   2012-05-07 19:43:52 +0200
committer  Kostya Shishkov <kostya.shishkov@gmail.com>   2012-05-08 07:24:31 +0200
commit     464e9ab011897a1220c2a1b8af336bdf4d0b1dd9 (patch)
tree       426d0dce25fe6710b490bff8ab1a4aeceda1a4a3 /libavcodec/lagarith.c
parent     58637a0b249cba44ed9aaa54d8379a1b70d875d0 (diff)
lagarith: add YUY2 decoding support
Unlike other variants, for YUY2 we need to use different prediction:
* on line 0 for luma we should left predict starting from the second pixel
* on line 1 we should left predict first 4 pixels for luma and 2 for chroma
* median prediction employed here is taken directly from HuffYUV
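For reference, the two predictors the message refers to work like this: left prediction adds each residual byte to the previously reconstructed byte on its left, and the HuffYUV median predictor guesses each byte as the median of left, top and (left + top - topleft) before the residual is added. The standalone sketch below only illustrates that idea; the helper names (mid3, left_reconstruct, median_reconstruct) and the seeding in main() are invented for the demo, while the patch itself goes through the existing add_hfyu_left_prediction / add_hfyu_median_prediction DSP routines shown in the diff.

/* Illustrative sketch of left and HuffYUV-style median prediction;
 * not the decoder's own code. */
#include <stdint.h>
#include <stdio.h>

/* Median of three values (same idea as FFmpeg's mid_pred). */
static int mid3(int a, int b, int c)
{
    int mn = a < b ? (a < c ? a : c) : (b < c ? b : c);
    int mx = a > b ? (a > c ? a : c) : (b > c ? b : c);
    return a + b + c - mn - mx;
}

/* Left prediction: each byte becomes residual + reconstructed left
 * neighbour (modulo 256). */
static void left_reconstruct(uint8_t *row, int width)
{
    int i;
    for (i = 1; i < width; i++)
        row[i] += row[i - 1];
}

/* Median prediction: the predictor is the median of left, top and
 * (left + top - topleft); the residual is then added to it (modulo 256). */
static void median_reconstruct(uint8_t *row, const uint8_t *above, int width,
                               int *left, int *topleft)
{
    int i;
    for (i = 0; i < width; i++) {
        int pred = mid3(*left, above[i], *left + above[i] - *topleft);
        *topleft = above[i];
        row[i]   = (uint8_t)(row[i] + pred);
        *left    = row[i];
    }
}

int main(void)
{
    /* Row 0 is left-predicted; row 1 is median-predicted against it. */
    uint8_t rows[2][8] = {
        { 16, 1, 1, 1, 1, 1, 1, 1 },
        {  2, 0, 0, 0, 0, 0, 0, 0 },
    };
    int left, topleft, i;

    left_reconstruct(rows[0], 8);

    /* Simplified seeding for the demo; the patch seeds L/TL from specific
     * neighbours depending on the line number. */
    left = topleft = rows[0][7];
    median_reconstruct(rows[1], rows[0], 8, &left, &topleft);

    for (i = 0; i < 8; i++)
        printf("%3d %3d\n", rows[0][i], rows[1][i]);
    return 0;
}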
Diffstat (limited to 'libavcodec/lagarith.c')
-rw-r--r--  libavcodec/lagarith.c  74
1 file changed, 71 insertions, 3 deletions
diff --git a/libavcodec/lagarith.c b/libavcodec/lagarith.c
index 41df353b3a..35f5a07e4c 100644
--- a/libavcodec/lagarith.c
+++ b/libavcodec/lagarith.c
@@ -269,6 +269,40 @@ static void lag_pred_line(LagarithContext *l, uint8_t *buf,
     }
 }
 
+static void lag_pred_line_yuy2(LagarithContext *l, uint8_t *buf,
+                               int width, int stride, int line,
+                               int is_luma)
+{
+    int L, TL;
+
+    if (!line) {
+        if (is_luma) {
+            buf++;
+            width--;
+        }
+        l->dsp.add_hfyu_left_prediction(buf + 1, buf + 1, width - 1, buf[0]);
+        return;
+    }
+    if (line == 1) {
+        const int HEAD = is_luma ? 4 : 2;
+        int i;
+
+        L  = buf[width - stride - 1];
+        TL = buf[HEAD - stride - 1];
+        for (i = 0; i < HEAD; i++) {
+            L += buf[i];
+            buf[i] = L;
+        }
+        buf   += HEAD;
+        width -= HEAD;
+    } else {
+        TL = buf[width - (2 * stride) - 1];
+        L  = buf[width - stride - 1];
+    }
+    l->dsp.add_hfyu_median_prediction(buf, buf - stride, buf, width,
+                                      &L, &TL);
+}
+
 static int lag_decode_line(LagarithContext *l, lag_rac *rac,
                            uint8_t *dst, int width, int stride,
                            int esc_count)
@@ -432,9 +466,17 @@ static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
         return -1;
     }
 
-    for (i = 0; i < height; i++) {
-        lag_pred_line(l, dst, width, stride, i);
-        dst += stride;
+    if (l->avctx->pix_fmt != PIX_FMT_YUV422P) {
+        for (i = 0; i < height; i++) {
+            lag_pred_line(l, dst, width, stride, i);
+            dst += stride;
+        }
+    } else {
+        for (i = 0; i < height; i++) {
+            lag_pred_line_yuy2(l, dst, width, stride, i,
+                               width == l->avctx->width);
+            dst += stride;
+        }
     }
 
     return 0;
@@ -557,6 +599,32 @@ static int lag_decode_frame(AVCodecContext *avctx,
                 srcs[i] += l->rgb_stride;
         }
         break;
+    case FRAME_ARITH_YUY2:
+        avctx->pix_fmt = PIX_FMT_YUV422P;
+
+        if (avctx->get_buffer(avctx, p) < 0) {
+            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+            return -1;
+        }
+
+        if (offset_ry >= buf_size ||
+            offset_gu >= buf_size ||
+            offset_bv >= buf_size) {
+            av_log(avctx, AV_LOG_ERROR,
+                   "Invalid frame offsets\n");
+            return AVERROR_INVALIDDATA;
+        }
+
+        lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
+                               p->linesize[0], buf + offset_ry,
+                               buf_size - offset_ry);
+        lag_decode_arith_plane(l, p->data[2], avctx->width / 2,
+                               avctx->height, p->linesize[2],
+                               buf + offset_gu, buf_size - offset_gu);
+        lag_decode_arith_plane(l, p->data[1], avctx->width / 2,
+                               avctx->height, p->linesize[1],
+                               buf + offset_bv, buf_size - offset_bv);
+        break;
     case FRAME_ARITH_YV12:
         avctx->pix_fmt = PIX_FMT_YUV420P;