From bc6e0b64a9100652c1ce52292408d8fd79930d53 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Sat, 23 Nov 2013 10:27:18 -0500 Subject: vp9: split last/cur_frame from the reference buffers. We need more information from last/cur_frame than from reference buffers, so we can use a simplified structure for reference buffers, and then store mvs and segmentation map information in last/cur. This prepares the decoder for frame threading support. Signed-off-by: Anton Khirnov --- libavcodec/vp9.c | 197 ++++++++++++++++++++++++++++++++++++-------------- libavcodec/vp9.h | 22 +++++- libavcodec/vp9block.c | 74 +++++++++++-------- libavcodec/vp9mvs.c | 12 +-- 4 files changed, 207 insertions(+), 98 deletions(-) diff --git a/libavcodec/vp9.c b/libavcodec/vp9.c index c11e9b8601..11ed00e3f9 100644 --- a/libavcodec/vp9.c +++ b/libavcodec/vp9.c @@ -34,13 +34,77 @@ #define VP9_SYNCCODE 0x498342 #define MAX_PROB 255 +static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f) +{ + ff_thread_release_buffer(avctx, &f->tf); + av_buffer_unref(&f->segmentation_map_buf); + av_buffer_unref(&f->mv_buf); + f->segmentation_map = NULL; + f->mv = NULL; +} + +static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f) +{ + VP9Context *s = avctx->priv_data; + int ret, sz; + + ret = ff_thread_get_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF); + if (ret < 0) + return ret; + + sz = 64 * s->sb_cols * s->sb_rows; + f->segmentation_map_buf = av_buffer_allocz(sz * sizeof(*f->segmentation_map)); + f->mv_buf = av_buffer_allocz(sz * sizeof(*f->mv)); + if (!f->segmentation_map_buf || !f->mv_buf) { + vp9_frame_unref(avctx, f); + return AVERROR(ENOMEM); + } + + f->segmentation_map = f->segmentation_map_buf->data; + f->mv = (VP9MVRefPair*)f->mv_buf->data; + + if (s->segmentation.enabled && !s->segmentation.update_map && + !s->keyframe && !s->intraonly) + memcpy(f->segmentation_map, s->frames[LAST_FRAME].segmentation_map, sz); + + return 0; +} + +static int vp9_frame_ref(VP9Frame *dst, VP9Frame *src) +{ + int ret; + + dst->segmentation_map_buf = av_buffer_ref(src->segmentation_map_buf); + dst->mv_buf = av_buffer_ref(src->mv_buf); + if (!dst->segmentation_map_buf || !dst->mv_buf) { + ret = AVERROR(ENOMEM); + goto fail; + } + + ret = ff_thread_ref_frame(&dst->tf, &src->tf); + if (ret < 0) + goto fail; + + dst->segmentation_map = src->segmentation_map; + dst->mv = src->mv; + + return 0; +fail: + av_buffer_unref(&dst->segmentation_map_buf); + av_buffer_unref(&dst->mv_buf); + return ret; +} + static void vp9_decode_flush(AVCodecContext *avctx) { VP9Context *s = avctx->priv_data; int i; + for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) + vp9_frame_unref(avctx, &s->frames[i]); + for (i = 0; i < FF_ARRAY_ELEMS(s->refs); i++) - av_frame_unref(s->refs[i]); + ff_thread_release_buffer(avctx, &s->refs[i]); } static int update_size(AVCodecContext *avctx, int w, int h) @@ -66,8 +130,7 @@ static int update_size(AVCodecContext *avctx, int w, int h) #define assign(var, type, n) var = (type)p; p += s->sb_cols * n * sizeof(*var) av_free(s->above_partition_ctx); p = av_malloc(s->sb_cols * - (240 + sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx) + - 64 * s->sb_rows * (1 + sizeof(*s->mv[0]) * 2))); + (240 + sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx))); if (!p) return AVERROR(ENOMEM); assign(s->above_partition_ctx, uint8_t *, 8); @@ -87,9 +150,6 @@ static int update_size(AVCodecContext *avctx, int w, int h) assign(s->above_filter_ctx, uint8_t *, 8); assign(s->lflvl, VP9Filter *, 1); assign(s->above_mv_ctx, 
VP56mv(*)[2], 16); - assign(s->segmentation_map, uint8_t *, 64 * s->sb_rows); - assign(s->mv[0], VP9MVRefPair *, 64 * s->sb_rows); - assign(s->mv[1], VP9MVRefPair *, 64 * s->sb_rows); #undef assign return 0; @@ -268,22 +328,22 @@ static int decode_frame_header(AVCodecContext *avctx, s->signbias[1] = get_bits1(&s->gb); s->refidx[2] = get_bits(&s->gb, 3); s->signbias[2] = get_bits1(&s->gb); - if (!s->refs[s->refidx[0]]->buf[0] || - !s->refs[s->refidx[1]]->buf[0] || - !s->refs[s->refidx[2]]->buf[0]) { + if (!s->refs[s->refidx[0]].f->buf[0] || + !s->refs[s->refidx[1]].f->buf[0] || + !s->refs[s->refidx[2]].f->buf[0]) { av_log(avctx, AV_LOG_ERROR, "Not all references are available\n"); return AVERROR_INVALIDDATA; } if (get_bits1(&s->gb)) { - w = s->refs[s->refidx[0]]->width; - h = s->refs[s->refidx[0]]->height; + w = s->refs[s->refidx[0]].f->width; + h = s->refs[s->refidx[0]].f->height; } else if (get_bits1(&s->gb)) { - w = s->refs[s->refidx[1]]->width; - h = s->refs[s->refidx[1]]->height; + w = s->refs[s->refidx[1]].f->width; + h = s->refs[s->refidx[1]].f->height; } else if (get_bits1(&s->gb)) { - w = s->refs[s->refidx[2]]->width; - h = s->refs[s->refidx[2]]->height; + w = s->refs[s->refidx[2]].f->width; + h = s->refs[s->refidx[2]].f->height; } else { w = get_bits(&s->gb, 16) + 1; h = get_bits(&s->gb, 16) + 1; @@ -679,6 +739,7 @@ static int decode_subblock(AVCodecContext *avctx, int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl) { VP9Context *s = avctx->priv_data; + AVFrame *f = s->frames[CUR_FRAME].tf.f; int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) | (((s->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1); int ret; @@ -702,8 +763,8 @@ static int decode_subblock(AVCodecContext *avctx, int row, int col, ret = ff_vp9_decode_block(avctx, row, col, lflvl, yoff, uvoff, bl, bp); if (!ret) { - yoff += hbs * 8 * s->cur_frame->linesize[0]; - uvoff += hbs * 4 * s->cur_frame->linesize[1]; + yoff += hbs * 8 * f->linesize[0]; + uvoff += hbs * 4 * f->linesize[1]; ret = ff_vp9_decode_block(avctx, row + hbs, col, lflvl, yoff, uvoff, bl, bp); } @@ -726,8 +787,8 @@ static int decode_subblock(AVCodecContext *avctx, int row, int col, yoff + 8 * hbs, uvoff + 4 * hbs, bl + 1); if (!ret) { - yoff += hbs * 8 * s->cur_frame->linesize[0]; - uvoff += hbs * 4 * s->cur_frame->linesize[1]; + yoff += hbs * 8 * f->linesize[0]; + uvoff += hbs * 4 * f->linesize[1]; ret = decode_subblock(avctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1); if (!ret) { @@ -758,8 +819,8 @@ static int decode_subblock(AVCodecContext *avctx, int row, int col, bp = PARTITION_SPLIT; ret = decode_subblock(avctx, row, col, lflvl, yoff, uvoff, bl + 1); if (!ret) { - yoff += hbs * 8 * s->cur_frame->linesize[0]; - uvoff += hbs * 4 * s->cur_frame->linesize[1]; + yoff += hbs * 8 * f->linesize[0]; + uvoff += hbs * 4 * f->linesize[1]; ret = decode_subblock(avctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1); } @@ -782,8 +843,10 @@ static void loopfilter_subblock(AVCodecContext *avctx, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff) { VP9Context *s = avctx->priv_data; - uint8_t *dst = s->cur_frame->data[0] + yoff, *lvl = lflvl->level; - ptrdiff_t ls_y = s->cur_frame->linesize[0], ls_uv = s->cur_frame->linesize[1]; + AVFrame *f = s->frames[CUR_FRAME].tf.f; + uint8_t *dst = f->data[0] + yoff; + ptrdiff_t ls_y = f->linesize[0], ls_uv = f->linesize[1]; + uint8_t *lvl = lflvl->level; int y, x, p; /* FIXME: In how far can we interleave the v/h loopfilter calls? E.g. 
@@ -860,7 +923,7 @@ static void loopfilter_subblock(AVCodecContext *avctx, VP9Filter *lflvl, // block1 // filter edges between rows, Y plane (e.g. ------) // block2 - dst = s->cur_frame->data[0] + yoff; + dst = f->data[0] + yoff; lvl = lflvl->level; for (y = 0; y < 8; y++, dst += 8 * ls_y, lvl += 8) { uint8_t *ptr = dst, *l = lvl, *vmask = lflvl->mask[0][1][y]; @@ -924,7 +987,7 @@ static void loopfilter_subblock(AVCodecContext *avctx, VP9Filter *lflvl, // same principle but for U/V planes for (p = 0; p < 2; p++) { lvl = lflvl->level; - dst = s->cur_frame->data[1 + p] + uvoff; + dst = f->data[1 + p] + uvoff; for (y = 0; y < 8; y += 4, dst += 16 * ls_uv, lvl += 32) { uint8_t *ptr = dst, *l = lvl, *hmask1 = lflvl->mask[1][0][y]; uint8_t *hmask2 = lflvl->mask[1][0][y + 2]; @@ -971,7 +1034,7 @@ static void loopfilter_subblock(AVCodecContext *avctx, VP9Filter *lflvl, } } lvl = lflvl->level; - dst = s->cur_frame->data[1 + p] + uvoff; + dst = f->data[1 + p] + uvoff; for (y = 0; y < 8; y++, dst += 4 * ls_uv) { uint8_t *ptr = dst, *l = lvl, *vmask = lflvl->mask[1][1][y]; unsigned vm = vmask[0] | vmask[1] | vmask[2]; @@ -1030,6 +1093,7 @@ static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const uint8_t *data, int size) { VP9Context *s = avctx->priv_data; + AVFrame *f; int ret, tile_row, tile_col, i, ref = -1, row, col; ptrdiff_t yoff = 0, uvoff = 0; @@ -1037,13 +1101,13 @@ static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame, if (ret < 0) { return ret; } else if (!ret) { - if (!s->refs[ref]->buf[0]) { + if (!s->refs[ref].f->buf[0]) { av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref); return AVERROR_INVALIDDATA; } - ret = av_frame_ref(frame, s->refs[ref]); + ret = av_frame_ref(frame, s->refs[ref].f); if (ret < 0) return ret; *got_frame = 1; @@ -1052,15 +1116,21 @@ static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame, data += ret; size -= ret; - s->cur_frame = frame; + vp9_frame_unref(avctx, &s->frames[LAST_FRAME]); + if (!s->keyframe && s->frames[CUR_FRAME].tf.f->buf[0]) { + ret = vp9_frame_ref(&s->frames[LAST_FRAME], &s->frames[CUR_FRAME]); + if (ret < 0) + return ret; + } - av_frame_unref(s->cur_frame); - if ((ret = ff_get_buffer(avctx, s->cur_frame, - s->refreshrefmask ? AV_GET_BUFFER_FLAG_REF : 0)) < 0) + vp9_frame_unref(avctx, &s->frames[CUR_FRAME]); + ret = vp9_frame_alloc(avctx, &s->frames[CUR_FRAME]); + if (ret < 0) return ret; - s->cur_frame->key_frame = s->keyframe; - s->cur_frame->pict_type = s->keyframe ? AV_PICTURE_TYPE_I - : AV_PICTURE_TYPE_P; + + f = s->frames[CUR_FRAME].tf.f; + f->key_frame = s->keyframe; + f->pict_type = s->keyframe ? 
AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; if (s->fullrange) avctx->color_range = AVCOL_RANGE_JPEG; @@ -1110,8 +1180,8 @@ static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame, for (row = s->tiling.tile_row_start; row < s->tiling.tile_row_end; - row += 8, yoff += s->cur_frame->linesize[0] * 64, - uvoff += s->cur_frame->linesize[1] * 32) { + row += 8, yoff += f->linesize[0] * 64, + uvoff += f->linesize[1] * 32) { VP9Filter *lflvl = s->lflvl; ptrdiff_t yoff2 = yoff, uvoff2 = uvoff; @@ -1149,16 +1219,16 @@ static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame, // prediction of next row of sb64s if (row + 8 < s->rows) { memcpy(s->intra_pred_data[0], - s->cur_frame->data[0] + yoff + - 63 * s->cur_frame->linesize[0], + f->data[0] + yoff + + 63 * f->linesize[0], 8 * s->cols); memcpy(s->intra_pred_data[1], - s->cur_frame->data[1] + uvoff + - 31 * s->cur_frame->linesize[1], + f->data[1] + uvoff + + 31 * f->linesize[1], 4 * s->cols); memcpy(s->intra_pred_data[2], - s->cur_frame->data[2] + uvoff + - 31 * s->cur_frame->linesize[2], + f->data[2] + uvoff + + 31 * f->linesize[2], 4 * s->cols); } @@ -1194,21 +1264,23 @@ static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame, ff_vp9_adapt_probs(s); } } - FFSWAP(VP9MVRefPair *, s->mv[0], s->mv[1]); // ref frame setup for (i = 0; i < 8; i++) if (s->refreshrefmask & (1 << i)) { - av_frame_unref(s->refs[i]); - ret = av_frame_ref(s->refs[i], s->cur_frame); + ff_thread_release_buffer(avctx, &s->refs[i]); + ret = ff_thread_ref_frame(&s->refs[i], &s->frames[CUR_FRAME].tf); if (ret < 0) return ret; } - if (s->invisible) - av_frame_unref(s->cur_frame); - else + if (!s->invisible) { + av_frame_unref(frame); + ret = av_frame_ref(frame, s->frames[CUR_FRAME].tf.f); + if (ret < 0) + return ret; *got_frame = 1; + } return 0; } @@ -1267,8 +1339,15 @@ static av_cold int vp9_decode_free(AVCodecContext *avctx) VP9Context *s = avctx->priv_data; int i; - for (i = 0; i < FF_ARRAY_ELEMS(s->refs); i++) - av_frame_free(&s->refs[i]); + for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) { + vp9_frame_unref(avctx, &s->frames[i]); + av_frame_free(&s->frames[i].tf.f); + } + + for (i = 0; i < FF_ARRAY_ELEMS(s->refs); i++) { + ff_thread_release_buffer(avctx, &s->refs[i]); + av_frame_free(&s->refs[i].f); + } av_freep(&s->c_b); av_freep(&s->above_partition_ctx); @@ -1286,17 +1365,23 @@ static av_cold int vp9_decode_init(AVCodecContext *avctx) ff_vp9dsp_init(&s->dsp); ff_videodsp_init(&s->vdsp, 8); + s->frames[0].tf.f = av_frame_alloc(); + s->frames[1].tf.f = av_frame_alloc(); + if (!s->frames[0].tf.f || !s->frames[1].tf.f) + goto fail; + for (i = 0; i < FF_ARRAY_ELEMS(s->refs); i++) { - s->refs[i] = av_frame_alloc(); - if (!s->refs[i]) { - vp9_decode_free(avctx); - return AVERROR(ENOMEM); - } + s->refs[i].f = av_frame_alloc(); + if (!s->refs[i].f) + goto fail; } s->filter.sharpness = -1; return 0; +fail: + vp9_decode_free(avctx); + return AVERROR(ENOMEM); } AVCodec ff_vp9_decoder = { diff --git a/libavcodec/vp9.h b/libavcodec/vp9.h index 31509bfbc5..8711987b8c 100644 --- a/libavcodec/vp9.h +++ b/libavcodec/vp9.h @@ -27,9 +27,11 @@ #include <stddef.h> #include <stdint.h> +#include "libavutil/buffer.h" #include "libavutil/internal.h" #include "avcodec.h" +#include "thread.h" #include "vp56.h" enum TxfmMode { @@ -225,6 +227,16 @@ typedef struct VP9Filter { [8 /* rows */][4 /* 0=16, 1=8, 2=4, 3=inner4 */]; } VP9Filter; +typedef struct VP9Frame { + ThreadFrame tf; + + uint8_t *segmentation_map; + VP9MVRefPair *mv; + + AVBufferRef *segmentation_map_buf; + AVBufferRef *mv_buf; +} VP9Frame; + 
enum BlockLevel { BL_64X64, BL_32X32, @@ -293,8 +305,12 @@ typedef struct VP9Context { uint8_t refidx[3]; uint8_t signbias[3]; uint8_t varcompref[2]; - AVFrame *refs[8]; - AVFrame *cur_frame; + + ThreadFrame refs[8]; + +#define CUR_FRAME 0 +#define LAST_FRAME 1 + VP9Frame frames[2]; struct { uint8_t level; @@ -392,8 +408,6 @@ typedef struct VP9Context { // whole-frame cache uint8_t *intra_pred_data[3]; - uint8_t *segmentation_map; - VP9MVRefPair *mv[2]; VP9Filter *lflvl; DECLARE_ALIGNED(32, uint8_t, edge_emu_buffer)[71 * 80]; diff --git a/libavcodec/vp9block.c b/libavcodec/vp9block.c index f5f7256424..c018fa0310 100644 --- a/libavcodec/vp9block.c +++ b/libavcodec/vp9block.c @@ -70,13 +70,14 @@ static void decode_mode(VP9Context *s, VP9Block *const b) vp56_rac_get_prob_branchy(&s->c, s->prob.segpred[s->above_segpred_ctx[col] + s->left_segpred_ctx[row7]]))) { + uint8_t *refsegmap = s->frames[LAST_FRAME].segmentation_map; int pred = MAX_SEGMENT - 1; int x; for (y = 0; y < h4; y++) for (x = 0; x < w4; x++) pred = FFMIN(pred, - s->segmentation_map[(y + row) * 8 * s->sb_cols + x + col]); + refsegmap[(y + row) * 8 * s->sb_cols + x + col]); b->seg_id = pred; memset(&s->above_segpred_ctx[col], 1, w4); @@ -89,8 +90,10 @@ static void decode_mode(VP9Context *s, VP9Block *const b) memset(&s->left_segpred_ctx[row7], 0, h4); } if ((s->segmentation.enabled && s->segmentation.update_map) || s->keyframe) { + uint8_t *segmap = s->frames[CUR_FRAME].segmentation_map; + for (y = 0; y < h4; y++) - memset(&s->segmentation_map[(y + row) * 8 * s->sb_cols + col], + memset(&segmap[(y + row) * 8 * s->sb_cols + col], b->seg_id, w4); } @@ -684,24 +687,25 @@ static void decode_mode(VP9Context *s, VP9Block *const b) // FIXME kinda ugly for (y = 0; y < h4; y++) { int x, o = (row + y) * s->sb_cols * 8 + col; + VP9MVRefPair *mv = &s->frames[CUR_FRAME].mv[o]; if (b->intra) { for (x = 0; x < w4; x++) { - s->mv[0][o + x].ref[0] = - s->mv[0][o + x].ref[1] = -1; + mv[x].ref[0] = + mv[x].ref[1] = -1; } } else if (b->comp) { for (x = 0; x < w4; x++) { - s->mv[0][o + x].ref[0] = b->ref[0]; - s->mv[0][o + x].ref[1] = b->ref[1]; - AV_COPY32(&s->mv[0][o + x].mv[0], &b->mv[3][0]); - AV_COPY32(&s->mv[0][o + x].mv[1], &b->mv[3][1]); + mv[x].ref[0] = b->ref[0]; + mv[x].ref[1] = b->ref[1]; + AV_COPY32(&mv[x].mv[0], &b->mv[3][0]); + AV_COPY32(&mv[x].mv[1], &b->mv[3][1]); } } else { for (x = 0; x < w4; x++) { - s->mv[0][o + x].ref[0] = b->ref[0]; - s->mv[0][o + x].ref[1] = -1; - AV_COPY32(&s->mv[0][o + x].mv[0], &b->mv[3][0]); + mv[x].ref[0] = b->ref[0]; + mv[x].ref[1] = -1; + AV_COPY32(&mv[x].mv[0], &b->mv[3][0]); } } } @@ -1071,6 +1075,7 @@ static void intra_recon(AVCodecContext *avctx, ptrdiff_t y_off, ptrdiff_t uv_off { VP9Context *s = avctx->priv_data; VP9Block *const b = &s->b; + AVFrame *f = s->frames[CUR_FRAME].tf.f; int row = b->row, col = b->col; int w4 = bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n; int h4 = bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2); @@ -1078,7 +1083,7 @@ static void intra_recon(AVCodecContext *avctx, ptrdiff_t y_off, ptrdiff_t uv_off int end_y = FFMIN(2 * (s->rows - row), h4); int tx = 4 * s->lossless + b->tx, uvtx = b->uvtx + 4 * s->lossless; int uvstep1d = 1 << b->uvtx, p; - uint8_t *dst = b->dst[0], *dst_r = s->cur_frame->data[0] + y_off; + uint8_t *dst = b->dst[0], *dst_r = f->data[0] + y_off; for (n = 0, y = 0; y < end_y; y += step1d) { uint8_t *ptr = dst, *ptr_r = dst_r; @@ -1092,7 +1097,7 @@ static void intra_recon(AVCodecContext *avctx, ptrdiff_t y_off, ptrdiff_t uv_off int eob = b->tx 
> TX_8X8 ? AV_RN16A(&s->eob[n]) : s->eob[n]; mode = check_intra_mode(s, mode, &a, ptr_r, - s->cur_frame->linesize[0], + f->linesize[0], ptr, b->y_stride, l, col, x, w4, row, y, b->tx, 0); s->dsp.intra_pred[b->tx][mode](ptr, b->y_stride, l, a); @@ -1100,7 +1105,7 @@ static void intra_recon(AVCodecContext *avctx, ptrdiff_t y_off, ptrdiff_t uv_off s->dsp.itxfm_add[tx][txtp](ptr, b->y_stride, s->block + 16 * n, eob); } - dst_r += 4 * s->cur_frame->linesize[0] * step1d; + dst_r += 4 * f->linesize[0] * step1d; dst += 4 * b->y_stride * step1d; } @@ -1112,7 +1117,7 @@ static void intra_recon(AVCodecContext *avctx, ptrdiff_t y_off, ptrdiff_t uv_off step = 1 << (b->uvtx * 2); for (p = 0; p < 2; p++) { dst = b->dst[1 + p]; - dst_r = s->cur_frame->data[1 + p] + uv_off; + dst_r = f->data[1 + p] + uv_off; for (n = 0, y = 0; y < end_y; y += uvstep1d) { uint8_t *ptr = dst, *ptr_r = dst_r; for (x = 0; x < end_x; @@ -1125,7 +1130,7 @@ static void intra_recon(AVCodecContext *avctx, ptrdiff_t y_off, ptrdiff_t uv_off : s->uveob[p][n]; mode = check_intra_mode(s, mode, &a, ptr_r, - s->cur_frame->linesize[1], + f->linesize[1], ptr, b->uv_stride, l, col, x, w4, row, y, b->uvtx, p + 1); s->dsp.intra_pred[b->uvtx][mode](ptr, b->uv_stride, l, a); @@ -1134,7 +1139,7 @@ static void intra_recon(AVCodecContext *avctx, ptrdiff_t y_off, ptrdiff_t uv_off s->uvblock[p] + 16 * n, eob); } - dst_r += 4 * uvstep1d * s->cur_frame->linesize[1]; + dst_r += 4 * uvstep1d * f->linesize[1]; dst += 4 * uvstep1d * b->uv_stride; } } @@ -1224,8 +1229,12 @@ static int inter_recon(AVCodecContext *avctx) VP9Context *s = avctx->priv_data; VP9Block *const b = &s->b; int row = b->row, col = b->col; - AVFrame *ref1 = s->refs[s->refidx[b->ref[0]]]; - AVFrame *ref2 = b->comp ? s->refs[s->refidx[b->ref[1]]] : NULL; + + ThreadFrame *tref1 = &s->refs[s->refidx[b->ref[0]]]; + ThreadFrame *tref2 = b->comp ? &s->refs[s->refidx[b->ref[1]]] : NULL; + AVFrame *ref1 = tref1->f; + AVFrame *ref2 = tref2 ? tref2->f : NULL; + int w = avctx->width, h = avctx->height; ptrdiff_t ls_y = b->y_stride, ls_uv = b->uv_stride; @@ -1547,6 +1556,7 @@ int ff_vp9_decode_block(AVCodecContext *avctx, int row, int col, { VP9Context *s = avctx->priv_data; VP9Block *const b = &s->b; + AVFrame *f = s->frames[CUR_FRAME].tf.f; enum BlockSize bs = bl * 3 + bp; int ret, y, w4 = bwh_tab[1][bs][0], h4 = bwh_tab[1][bs][1], lvl; int emu[2]; @@ -1582,25 +1592,25 @@ int ff_vp9_decode_block(AVCodecContext *avctx, int row, int col, /* Emulated overhangs if the stride of the target buffer can't hold. * This allows to support emu-edge and so on even if we have large * block overhangs. 
*/ - emu[0] = (col + w4) * 8 > s->cur_frame->linesize[0] || + emu[0] = (col + w4) * 8 > f->linesize[0] || (row + h4) > s->rows; - emu[1] = (col + w4) * 4 > s->cur_frame->linesize[1] || + emu[1] = (col + w4) * 4 > f->linesize[1] || (row + h4) > s->rows; if (emu[0]) { b->dst[0] = s->tmp_y; b->y_stride = 64; } else { - b->dst[0] = s->cur_frame->data[0] + yoff; - b->y_stride = s->cur_frame->linesize[0]; + b->dst[0] = f->data[0] + yoff; + b->y_stride = f->linesize[0]; } if (emu[1]) { b->dst[1] = s->tmp_uv[0]; b->dst[2] = s->tmp_uv[1]; b->uv_stride = 32; } else { - b->dst[1] = s->cur_frame->data[1] + uvoff; - b->dst[2] = s->cur_frame->data[2] + uvoff; - b->uv_stride = s->cur_frame->linesize[1]; + b->dst[1] = f->data[1] + uvoff; + b->dst[2] = f->data[2] + uvoff; + b->uv_stride = f->linesize[1]; } if (b->intra) { intra_recon(avctx, yoff, uvoff); @@ -1618,9 +1628,9 @@ int ff_vp9_decode_block(AVCodecContext *avctx, int row, int col, av_assert2(n <= 4); if (w & bw) { - s->dsp.mc[n][0][0][0][0](s->cur_frame->data[0] + yoff + o, + s->dsp.mc[n][0][0][0][0](f->data[0] + yoff + o, s->tmp_y + o, - s->cur_frame->linesize[0], + f->linesize[0], 64, h, 0, 0); o += bw; } @@ -1636,13 +1646,13 @@ int ff_vp9_decode_block(AVCodecContext *avctx, int row, int col, av_assert2(n <= 4); if (w & bw) { - s->dsp.mc[n][0][0][0][0](s->cur_frame->data[1] + uvoff + o, + s->dsp.mc[n][0][0][0][0](f->data[1] + uvoff + o, s->tmp_uv[0] + o, - s->cur_frame->linesize[1], + f->linesize[1], 32, h, 0, 0); - s->dsp.mc[n][0][0][0][0](s->cur_frame->data[2] + uvoff + o, + s->dsp.mc[n][0][0][0][0](f->data[2] + uvoff + o, s->tmp_uv[1] + o, - s->cur_frame->linesize[2], + f->linesize[2], 32, h, 0, 0); o += bw; } diff --git a/libavcodec/vp9mvs.c b/libavcodec/vp9mvs.c index 1f65aaac0a..a4ce84c5f3 100644 --- a/libavcodec/vp9mvs.c +++ b/libavcodec/vp9mvs.c @@ -125,7 +125,7 @@ static void find_ref_mvs(VP9Context *s, } while (0) if (row > 0) { - VP9MVRefPair *mv = &s->mv[0][(row - 1) * s->sb_cols * 8 + col]; + VP9MVRefPair *mv = &s->frames[CUR_FRAME].mv[(row - 1) * s->sb_cols * 8 + col]; if (mv->ref[0] == ref) RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][0]); @@ -133,7 +133,7 @@ static void find_ref_mvs(VP9Context *s, RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][1]); } if (col > s->tiling.tile_col_start) { - VP9MVRefPair *mv = &s->mv[0][row * s->sb_cols * 8 + col - 1]; + VP9MVRefPair *mv = &s->frames[CUR_FRAME].mv[row * s->sb_cols * 8 + col - 1]; if (mv->ref[0] == ref) RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][0]); @@ -151,7 +151,7 @@ static void find_ref_mvs(VP9Context *s, if (c >= s->tiling.tile_col_start && c < s->cols && r >= 0 && r < s->rows) { - VP9MVRefPair *mv = &s->mv[0][r * s->sb_cols * 8 + c]; + VP9MVRefPair *mv = &s->frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c]; if (mv->ref[0] == ref) RETURN_MV(mv->mv[0]); @@ -162,7 +162,7 @@ static void find_ref_mvs(VP9Context *s, // MV at this position in previous frame, using same reference frame if (s->use_last_frame_mvs) { - VP9MVRefPair *mv = &s->mv[1][row * s->sb_cols * 8 + col]; + VP9MVRefPair *mv = &s->frames[LAST_FRAME].mv[row * s->sb_cols * 8 + col]; if (mv->ref[0] == ref) RETURN_MV(mv->mv[0]); @@ -186,7 +186,7 @@ static void find_ref_mvs(VP9Context *s, if (c >= s->tiling.tile_col_start && c < s->cols && r >= 0 && r < s->rows) { - VP9MVRefPair *mv = &s->mv[0][r * s->sb_cols * 8 + c]; + VP9MVRefPair *mv = &s->frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c]; if (mv->ref[0] != ref && mv->ref[0] >= 0) RETURN_SCALE_MV(mv->mv[0], @@ -203,7 +203,7 @@ static void find_ref_mvs(VP9Context *s, // MV 
at this position in previous frame, using different reference frame if (s->use_last_frame_mvs) { - VP9MVRefPair *mv = &s->mv[1][row * s->sb_cols * 8 + col]; + VP9MVRefPair *mv = &s->frames[LAST_FRAME].mv[row * s->sb_cols * 8 + col]; if (mv->ref[0] != ref && mv->ref[0] >= 0) RETURN_SCALE_MV(mv->mv[0],
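The core of the change is the pattern visible in vp9_frame_alloc()/vp9_frame_ref()/vp9_frame_unref() above: the per-frame segmentation map and motion-vector array now live in refcounted AVBufferRef allocations, so the CUR_FRAME and LAST_FRAME slots can share one allocation instead of copying it or swapping raw pointers (the old FFSWAP of s->mv[0]/s->mv[1]). The standalone sketch below shows the same pattern with libavutil's public buffer API; it is a minimal illustration only, and the FrameSlot type and slot_*() helpers are hypothetical names, not part of the patch.

/* Minimal sketch (not from the patch) of the refcounted-buffer sharing
 * that vp9_frame_ref()/vp9_frame_unref() implement for the segmentation
 * map and mv arrays. "FrameSlot" and the slot_*() helpers are
 * hypothetical names for illustration. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#include <libavutil/buffer.h>
#include <libavutil/error.h>

typedef struct FrameSlot {
    AVBufferRef *seg_buf; /* owns one reference to the shared data */
    uint8_t     *seg;     /* convenience pointer into seg_buf->data */
} FrameSlot;

static int slot_alloc(FrameSlot *f, int size)
{
    f->seg_buf = av_buffer_allocz(size); /* zeroed, refcount == 1 */
    if (!f->seg_buf)
        return AVERROR(ENOMEM);
    f->seg = f->seg_buf->data;
    return 0;
}

static int slot_ref(FrameSlot *dst, FrameSlot *src)
{
    dst->seg_buf = av_buffer_ref(src->seg_buf); /* new reference, no copy */
    if (!dst->seg_buf)
        return AVERROR(ENOMEM);
    dst->seg = src->seg;
    return 0;
}

static void slot_unref(FrameSlot *f)
{
    av_buffer_unref(&f->seg_buf); /* frees the data only on the last unref */
    f->seg = NULL;
}

int main(void)
{
    FrameSlot cur = { 0 }, last = { 0 };

    if (slot_alloc(&cur, 64 * 64) < 0)
        return 1;
    /* "last := cur", as vp9_decode_frame() now does before decoding a
     * new frame: the map is shared, not duplicated. */
    if (slot_ref(&last, &cur) < 0)
        return 1;
    printf("refcount: %d\n", av_buffer_get_ref_count(cur.seg_buf)); /* 2 */

    slot_unref(&cur);  /* data stays alive through 'last' */
    slot_unref(&last); /* last reference gone, data freed */
    return 0;
}

Because dropping CUR_FRAME's reference no longer frees data that LAST_FRAME still points at, a separate frame thread can later keep reading last-frame MVs and segmentation while the main thread sets up the next frame, which is what the commit message means by preparing the decoder for frame threading.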
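Both per-frame arrays use the same geometry: one entry per 8x8 luma block, hence the sz = 64 * s->sb_cols * s->sb_rows allocation in vp9_frame_alloc() (a 64x64 superblock contributes an 8x8 group of entries) and the row * s->sb_cols * 8 + col indexing repeated through vp9block.c and vp9mvs.c. A small hypothetical helper, assuming only the names used in the patch, makes that layout explicit:

#include <stddef.h>

/* Hypothetical helper, not in the patch: index into the per-frame
 * mv[] / segmentation_map[] arrays. Both store one entry per 8x8 luma
 * block, so a frame of sb_cols x sb_rows 64x64 superblocks needs
 * 64 * sb_cols * sb_rows entries with a row stride of 8 * sb_cols. */
static inline size_t block8x8_index(int row, int col, int sb_cols)
{
    /* same expression the patch uses: row * s->sb_cols * 8 + col */
    return (size_t)row * sb_cols * 8 + col;
}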