path: root/libavcodec/vc1dec.c
author    Anton Khirnov <anton@khirnov.net>    2012-11-21 21:34:46 +0100
committer Anton Khirnov <anton@khirnov.net>    2013-03-08 07:38:30 +0100
commit    759001c534287a96dc96d1e274665feb7059145d (patch)
tree      6ace9560c20aa30db92067c5b45d7bd86e458d10 /libavcodec/vc1dec.c
parent    6e7b50b4270116ded8b874d76cb7c5b1a0341827 (diff)
lavc decoders: work with refcounted frames.
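The change is visible in the hunks below: per-picture metadata (motion_val, qscale_table, mb_type) is now read from the Picture itself rather than its embedded AVFrame, and frames are passed around by taking references (av_frame_ref/av_frame_unref) instead of struct-copying or calling release_buffer(). A minimal standalone sketch of that refcounting pattern, using only public libavutil API (the resolution, pixel format and alignment below are arbitrary illustrations, not values taken from the decoder):

/* Refcounted-frame sketch: share a frame by referencing, not copying. */
#include <stdio.h>
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>

int main(void)
{
    int ret = -1;
    AVFrame *decoded = av_frame_alloc();  /* stands in for the decoder's picture   */
    AVFrame *output  = av_frame_alloc();  /* stands in for the frame given to the caller */

    if (!decoded || !output)
        goto end;

    decoded->format = AV_PIX_FMT_YUV420P;
    decoded->width  = 320;
    decoded->height = 240;
    if ((ret = av_frame_get_buffer(decoded, 32)) < 0)  /* allocates refcounted buffers */
        goto end;

    /* Pre-refcounting code did the equivalent of "*output = *decoded;",
     * aliasing data pointers with no ownership tracking; the new code
     * takes an extra reference instead. */
    if ((ret = av_frame_ref(output, decoded)) < 0)
        goto end;

    /* Each side drops its reference independently; the underlying buffers
     * are freed only when the last reference goes away. */
    av_frame_unref(decoded);
    printf("output data still valid: %p\n", (void *)output->data[0]);

end:
    av_frame_free(&decoded);
    av_frame_free(&output);
    return ret < 0;
}

Build with e.g. gcc sketch.c $(pkg-config --cflags --libs libavutil). In the diff itself the same idea shows up as av_frame_unref() + ff_get_buffer(avctx, frame, 0) replacing the release_buffer()/buffer_hints dance, and av_frame_ref(pict, ...) replacing direct assignment of *pict.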
Diffstat (limited to 'libavcodec/vc1dec.c')
-rw-r--r--  libavcodec/vc1dec.c  |  362
1 file changed, 181 insertions, 181 deletions
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index adfd699ac8..44dd652283 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -348,8 +348,8 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
// store motion vectors for further use in B frames
if (s->pict_type == AV_PICTURE_TYPE_P) {
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;
}
uvmx = (mx + ((mx & 3) == 3)) >> 1;
@@ -604,8 +604,8 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
break;
}
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
for (k = 0; k < 4; k++)
v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
}
@@ -798,8 +798,8 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
chroma_ref_type = v->reffield;
if (!valid_count) {
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
return; //no need to do MC for intra blocks
}
@@ -813,8 +813,8 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
}
if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
return;
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
uvmx = (tx + ((tx & 3) == 3)) >> 1;
uvmy = (ty + ((ty & 3) == 3)) >> 1;
@@ -1376,30 +1376,30 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
xy = s->block_index[n];
if (s->mb_intra) {
- s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = 0;
- s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = 0;
- s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = 0;
- s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
+ s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
+ s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
+ s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
+ s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
if (mv1) { /* duplicate motion data for 1-MV block */
- s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
- s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
- s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
- s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
- s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
- s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
+ s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
+ s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
+ s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
+ s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
+ s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
+ s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
- s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
- s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
- s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
- s->current_picture.f.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
- s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
- s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
+ s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
+ s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
+ s->current_picture.motion_val[1][xy + wrap][0] = 0;
+ s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
+ s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
+ s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
}
return;
}
- C = s->current_picture.f.motion_val[dir][xy - 1 + v->blocks_off];
- A = s->current_picture.f.motion_val[dir][xy - wrap + v->blocks_off];
+ C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
+ A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
if (mv1) {
if (v->field_mode && mixedmv_pic)
off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
@@ -1421,7 +1421,7 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
off = -1;
}
}
- B = s->current_picture.f.motion_val[dir][xy - wrap + off + v->blocks_off];
+ B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
a_valid = !s->first_slice_line || (n == 2 || n == 3);
b_valid = a_valid && (s->mb_width > 1);
@@ -1584,15 +1584,15 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
y_bias = 1;
/* store MV using signed modulus of MV range defined in 4.11 */
- s->mv[dir][n][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
- s->mv[dir][n][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
+ s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
+ s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
if (mv1) { /* duplicate motion data for 1-MV block */
- s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
- s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
- s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
- s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
- s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
- s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
+ s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
+ s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
+ s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
+ s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
+ s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
+ s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
}
@@ -1616,24 +1616,24 @@ static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
xy = s->block_index[n];
if (s->mb_intra) {
- s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
- s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
- s->current_picture.f.motion_val[1][xy][0] = 0;
- s->current_picture.f.motion_val[1][xy][1] = 0;
+ s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
+ s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
+ s->current_picture.motion_val[1][xy][0] = 0;
+ s->current_picture.motion_val[1][xy][1] = 0;
if (mvn == 1) { /* duplicate motion data for 1-MV block */
- s->current_picture.f.motion_val[0][xy + 1][0] = 0;
- s->current_picture.f.motion_val[0][xy + 1][1] = 0;
- s->current_picture.f.motion_val[0][xy + wrap][0] = 0;
- s->current_picture.f.motion_val[0][xy + wrap][1] = 0;
- s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
- s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
+ s->current_picture.motion_val[0][xy + 1][0] = 0;
+ s->current_picture.motion_val[0][xy + 1][1] = 0;
+ s->current_picture.motion_val[0][xy + wrap][0] = 0;
+ s->current_picture.motion_val[0][xy + wrap][1] = 0;
+ s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
+ s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
- s->current_picture.f.motion_val[1][xy + 1][0] = 0;
- s->current_picture.f.motion_val[1][xy + 1][1] = 0;
- s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
- s->current_picture.f.motion_val[1][xy + wrap][1] = 0;
- s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
- s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
+ s->current_picture.motion_val[1][xy + 1][0] = 0;
+ s->current_picture.motion_val[1][xy + 1][1] = 0;
+ s->current_picture.motion_val[1][xy + wrap][0] = 0;
+ s->current_picture.motion_val[1][xy + wrap][1] = 0;
+ s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
+ s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
}
return;
}
@@ -1643,14 +1643,14 @@ static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
if (s->mb_x || (n == 1) || (n == 3)) {
if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
|| (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
- A[0] = s->current_picture.f.motion_val[0][xy - 1][0];
- A[1] = s->current_picture.f.motion_val[0][xy - 1][1];
+ A[0] = s->current_picture.motion_val[0][xy - 1][0];
+ A[1] = s->current_picture.motion_val[0][xy - 1][1];
a_valid = 1;
} else { // current block has frame mv and cand. has field MV (so average)
- A[0] = (s->current_picture.f.motion_val[0][xy - 1][0]
- + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
- A[1] = (s->current_picture.f.motion_val[0][xy - 1][1]
- + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
+ A[0] = (s->current_picture.motion_val[0][xy - 1][0]
+ + s->current_picture.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
+ A[1] = (s->current_picture.motion_val[0][xy - 1][1]
+ + s->current_picture.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
a_valid = 1;
}
if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
@@ -1670,11 +1670,11 @@ static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
n_adj = (n & 2) | (n & 1);
}
- B[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
- B[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
+ B[0] = s->current_picture.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
+ B[1] = s->current_picture.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
- B[0] = (B[0] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
- B[1] = (B[1] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
+ B[0] = (B[0] + s->current_picture.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
+ B[1] = (B[1] + s->current_picture.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
}
}
if (s->mb_width > 1) {
@@ -1685,11 +1685,11 @@ static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
n_adj = n & 2;
}
- C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
- C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
+ C[0] = s->current_picture.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
+ C[1] = s->current_picture.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
- C[0] = (1 + C[0] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
- C[1] = (1 + C[1] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
+ C[0] = (1 + C[0] + (s->current_picture.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
+ C[1] = (1 + C[1] + (s->current_picture.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
}
if (s->mb_x == s->mb_width - 1) {
if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
@@ -1699,11 +1699,11 @@ static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
n_adj = n | 1;
}
- C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
- C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
+ C[0] = s->current_picture.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
+ C[1] = s->current_picture.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
- C[0] = (1 + C[0] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
- C[1] = (1 + C[1] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
+ C[0] = (1 + C[0] + s->current_picture.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
+ C[1] = (1 + C[1] + s->current_picture.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
}
} else
c_valid = 0;
@@ -1714,12 +1714,12 @@ static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
} else {
pos_b = s->block_index[1];
b_valid = 1;
- B[0] = s->current_picture.f.motion_val[0][pos_b][0];
- B[1] = s->current_picture.f.motion_val[0][pos_b][1];
+ B[0] = s->current_picture.motion_val[0][pos_b][0];
+ B[1] = s->current_picture.motion_val[0][pos_b][1];
pos_c = s->block_index[0];
c_valid = 1;
- C[0] = s->current_picture.f.motion_val[0][pos_c][0];
- C[1] = s->current_picture.f.motion_val[0][pos_c][1];
+ C[0] = s->current_picture.motion_val[0][pos_c][0];
+ C[1] = s->current_picture.motion_val[0][pos_c][1];
}
total_valid = a_valid + b_valid + c_valid;
@@ -1807,18 +1807,18 @@ static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
}
/* store MV using signed modulus of MV range defined in 4.11 */
- s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
- s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
+ s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
+ s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
if (mvn == 1) { /* duplicate motion data for 1-MV block */
- s->current_picture.f.motion_val[0][xy + 1 ][0] = s->current_picture.f.motion_val[0][xy][0];
- s->current_picture.f.motion_val[0][xy + 1 ][1] = s->current_picture.f.motion_val[0][xy][1];
- s->current_picture.f.motion_val[0][xy + wrap ][0] = s->current_picture.f.motion_val[0][xy][0];
- s->current_picture.f.motion_val[0][xy + wrap ][1] = s->current_picture.f.motion_val[0][xy][1];
- s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
- s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
+ s->current_picture.motion_val[0][xy + 1 ][0] = s->current_picture.motion_val[0][xy][0];
+ s->current_picture.motion_val[0][xy + 1 ][1] = s->current_picture.motion_val[0][xy][1];
+ s->current_picture.motion_val[0][xy + wrap ][0] = s->current_picture.motion_val[0][xy][0];
+ s->current_picture.motion_val[0][xy + wrap ][1] = s->current_picture.motion_val[0][xy][1];
+ s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
+ s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
} else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
- s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
- s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
+ s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
+ s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
s->mv[0][n + 1][0] = s->mv[0][n][0];
s->mv[0][n + 1][1] = s->mv[0][n][1];
}
@@ -2041,17 +2041,17 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
xy = s->block_index[0];
if (s->mb_intra) {
- s->current_picture.f.motion_val[0][xy + v->blocks_off][0] =
- s->current_picture.f.motion_val[0][xy + v->blocks_off][1] =
- s->current_picture.f.motion_val[1][xy + v->blocks_off][0] =
- s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
+ s->current_picture.motion_val[0][xy + v->blocks_off][0] =
+ s->current_picture.motion_val[0][xy + v->blocks_off][1] =
+ s->current_picture.motion_val[1][xy + v->blocks_off][0] =
+ s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
return;
}
if (!v->field_mode) {
- s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
- s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
- s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
- s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
+ s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
+ s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
+ s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
+ s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
/* Pullback predicted motion vectors as specified in 8.4.5.4 */
s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
@@ -2060,18 +2060,18 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
}
if (direct) {
- s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
- s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
- s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
- s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
+ s->current_picture.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
+ s->current_picture.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
+ s->current_picture.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
+ s->current_picture.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
return;
}
if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
- C = s->current_picture.f.motion_val[0][xy - 2];
- A = s->current_picture.f.motion_val[0][xy - wrap * 2];
+ C = s->current_picture.motion_val[0][xy - 2];
+ A = s->current_picture.motion_val[0][xy - wrap * 2];
off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
- B = s->current_picture.f.motion_val[0][xy - wrap * 2 + off];
+ B = s->current_picture.motion_val[0][xy - wrap * 2 + off];
if (!s->mb_x) C[0] = C[1] = 0;
if (!s->first_slice_line) { // predictor A is not out of bounds
@@ -2146,10 +2146,10 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
}
if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
- C = s->current_picture.f.motion_val[1][xy - 2];
- A = s->current_picture.f.motion_val[1][xy - wrap * 2];
+ C = s->current_picture.motion_val[1][xy - 2];
+ A = s->current_picture.motion_val[1][xy - wrap * 2];
off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
- B = s->current_picture.f.motion_val[1][xy - wrap * 2 + off];
+ B = s->current_picture.motion_val[1][xy - wrap * 2 + off];
if (!s->mb_x)
C[0] = C[1] = 0;
@@ -2225,10 +2225,10 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
}
- s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
- s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
- s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
- s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
+ s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
+ s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
+ s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
+ s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
}
static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
@@ -2239,14 +2239,14 @@ static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dm
if (v->bmvtype == BMV_TYPE_DIRECT) {
int total_opp, k, f;
- if (s->next_picture.f.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
- s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
+ if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
+ s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
v->bfraction, 0, s->quarter_sample);
- s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
+ s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
v->bfraction, 0, s->quarter_sample);
- s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
+ s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
v->bfraction, 1, s->quarter_sample);
- s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
+ s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
v->bfraction, 1, s->quarter_sample);
total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
@@ -2261,10 +2261,10 @@ static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dm
}
v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
for (k = 0; k < 4; k++) {
- s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
- s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
- s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
- s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
+ s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
+ s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
+ s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
+ s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
}
@@ -2382,17 +2382,17 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
b = dc_val[ - 1 - wrap];
a = dc_val[ - wrap];
/* scale predictors if needed */
- q1 = s->current_picture.f.qscale_table[mb_pos];
+ q1 = s->current_picture.qscale_table[mb_pos];
dqscale_index = s->y_dc_scale_table[q1] - 1;
if (dqscale_index < 0)
return 0;
if (c_avail && (n != 1 && n != 3)) {
- q2 = s->current_picture.f.qscale_table[mb_pos - 1];
+ q2 = s->current_picture.qscale_table[mb_pos - 1];
if (q2 && q2 != q1)
c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
}
if (a_avail && (n != 2 && n != 3)) {
- q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
+ q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
if (q2 && q2 != q1)
a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
}
@@ -2402,7 +2402,7 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
off--;
if (n != 2)
off -= s->mb_stride;
- q2 = s->current_picture.f.qscale_table[off];
+ q2 = s->current_picture.qscale_table[off];
if (q2 && q2 != q1)
b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
}
@@ -2773,11 +2773,11 @@ static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
else // top
ac_val -= 16 * s->block_wrap[n];
- q1 = s->current_picture.f.qscale_table[mb_pos];
+ q1 = s->current_picture.qscale_table[mb_pos];
if ( dc_pred_dir && c_avail && mb_pos)
- q2 = s->current_picture.f.qscale_table[mb_pos - 1];
+ q2 = s->current_picture.qscale_table[mb_pos - 1];
if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
- q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
+ q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
if ( dc_pred_dir && n == 1)
q2 = q1;
if (!dc_pred_dir && n == 2)
@@ -2996,11 +2996,11 @@ static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
else //top
ac_val -= 16 * s->block_wrap[n];
- q1 = s->current_picture.f.qscale_table[mb_pos];
+ q1 = s->current_picture.qscale_table[mb_pos];
if (dc_pred_dir && c_avail && mb_pos)
- q2 = s->current_picture.f.qscale_table[mb_pos - 1];
+ q2 = s->current_picture.qscale_table[mb_pos - 1];
if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
- q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
+ q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
if ( dc_pred_dir && n == 1)
q2 = q1;
if (!dc_pred_dir && n == 2)
@@ -3318,7 +3318,7 @@ static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_
bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
: (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
mv_stride = s->b8_stride;
- mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
+ mv = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
}
if (bottom_is_intra & 1 || block_is_intra & 1 ||
@@ -3380,7 +3380,7 @@ static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_
: (mb_cbp >> ((block_num + 1) * 4));
right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
: (mb_is_intra >> ((block_num + 1) * 4));
- mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
+ mv = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
}
if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
@@ -3474,10 +3474,10 @@ static int vc1_decode_p_mb(VC1Context *v)
GET_MVDATA(dmv_x, dmv_y);
if (s->mb_intra) {
- s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
- s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
+ s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
}
- s->current_picture.f.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
+ s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
/* FIXME Set DC val for inter block ? */
@@ -3494,7 +3494,7 @@ static int vc1_decode_p_mb(VC1Context *v)
mquant = v->pq;
cbp = 0;
}
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
@@ -3548,8 +3548,8 @@ static int vc1_decode_p_mb(VC1Context *v)
v->mb_type[0][s->block_index[i]] = 0;
s->dc_val[0][s->block_index[i]] = 0;
}
- s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
- s->current_picture.f.qscale_table[mb_pos] = 0;
+ s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
+ s->current_picture.qscale_table[mb_pos] = 0;
vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
vc1_mc_1mv(v, 0);
}
@@ -3592,7 +3592,7 @@ static int vc1_decode_p_mb(VC1Context *v)
if (!intra_count && !coded_inter)
goto end;
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
/* test if block is intra and has pred */
{
int intrapred = 0;
@@ -3655,7 +3655,7 @@ static int vc1_decode_p_mb(VC1Context *v)
}
} else { // skipped MB
s->mb_intra = 0;
- s->current_picture.f.qscale_table[mb_pos] = 0;
+ s->current_picture.qscale_table[mb_pos] = 0;
for (i = 0; i < 6; i++) {
v->mb_type[0][s->block_index[i]] = 0;
s->dc_val[0][s->block_index[i]] = 0;
@@ -3665,7 +3665,7 @@ static int vc1_decode_p_mb(VC1Context *v)
vc1_mc_4mv_luma(v, i, 0);
}
vc1_mc_4mv_chroma(v, 0);
- s->current_picture.f.qscale_table[mb_pos] = 0;
+ s->current_picture.qscale_table[mb_pos] = 0;
}
}
end:
@@ -3740,9 +3740,9 @@ static int vc1_decode_p_mb_intfr(VC1Context *v)
break;
}
if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
- s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
- s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
- s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
+ s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
+ s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
s->mb_intra = v->is_intra[s->mb_x] = 1;
for (i = 0; i < 6; i++)
v->mb_type[0][s->block_index[i]] = 1;
@@ -3752,7 +3752,7 @@ static int vc1_decode_p_mb_intfr(VC1Context *v)
cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
/* Set DC scale - y and c use the same (not sure if necessary here) */
s->y_dc_scale = s->y_dc_scale_table[mquant];
s->c_dc_scale = s->c_dc_scale_table[mquant];
@@ -3844,7 +3844,7 @@ static int vc1_decode_p_mb_intfr(VC1Context *v)
}
if (cbp)
GET_MQUANT(); // p. 227
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && cbp)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
for (i = 0; i < 6; i++) {
@@ -3873,8 +3873,8 @@ static int vc1_decode_p_mb_intfr(VC1Context *v)
v->mb_type[0][s->block_index[i]] = 0;
s->dc_val[0][s->block_index[i]] = 0;
}
- s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
- s->current_picture.f.qscale_table[mb_pos] = 0;
+ s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
+ s->current_picture.qscale_table[mb_pos] = 0;
v->blk_mv_type[s->block_index[0]] = 0;
v->blk_mv_type[s->block_index[1]] = 0;
v->blk_mv_type[s->block_index[2]] = 0;
@@ -3911,11 +3911,11 @@ static int vc1_decode_p_mb_intfi(VC1Context *v)
idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
if (idx_mbmode <= 1) { // intra MB
s->mb_intra = v->is_intra[s->mb_x] = 1;
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
- s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
+ s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
/* Set DC scale - y and c use the same (not sure if necessary here) */
s->y_dc_scale = s->y_dc_scale_table[mquant];
s->c_dc_scale = s->c_dc_scale_table[mquant];
@@ -3947,7 +3947,7 @@ static int vc1_decode_p_mb_intfi(VC1Context *v)
}
} else {
s->mb_intra = v->is_intra[s->mb_x] = 0;
- s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
+ s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
if (idx_mbmode <= 5) { // 1-MV
dmv_x = dmv_y = pred_flag = 0;
@@ -3978,7 +3978,7 @@ static int vc1_decode_p_mb_intfi(VC1Context *v)
if (cbp) {
GET_MQUANT();
}
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && cbp) {
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
}
@@ -4044,7 +4044,7 @@ static void vc1_decode_b_mb(VC1Context *v)
v->mb_type[0][s->block_index[i]] = 0;
s->dc_val[0][s->block_index[i]] = 0;
}
- s->current_picture.f.qscale_table[mb_pos] = 0;
+ s->current_picture.qscale_table[mb_pos] = 0;
if (!direct) {
if (!skipped) {
@@ -4081,7 +4081,7 @@ static void vc1_decode_b_mb(VC1Context *v)
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
GET_MQUANT();
s->mb_intra = 0;
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
@@ -4096,7 +4096,7 @@ static void vc1_decode_b_mb(VC1Context *v)
}
if (s->mb_intra && !mb_has_coeffs) {
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
s->ac_pred = get_bits1(gb);
cbp = 0;
vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
@@ -4118,7 +4118,7 @@ static void vc1_decode_b_mb(VC1Context *v)
s->ac_pred = get_bits1(gb);
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
}
@@ -4185,11 +4185,11 @@ static void vc1_decode_b_mb_intfi(VC1Context *v)
idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
if (idx_mbmode <= 1) { // intra MB
s->mb_intra = v->is_intra[s->mb_x] = 1;
- s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
- s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
- s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
+ s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
+ s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
/* Set DC scale - y and c use the same (not sure if necessary here) */
s->y_dc_scale = s->y_dc_scale_table[mquant];
s->c_dc_scale = s->c_dc_scale_table[mquant];
@@ -4224,7 +4224,7 @@ static void vc1_decode_b_mb_intfi(VC1Context *v)
}
} else {
s->mb_intra = v->is_intra[s->mb_x] = 0;
- s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
+ s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
if (v->fmb_is_raw)
fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
@@ -4290,7 +4290,7 @@ static void vc1_decode_b_mb_intfi(VC1Context *v)
if (cbp) {
GET_MQUANT();
}
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && cbp) {
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
}
@@ -4372,10 +4372,10 @@ static void vc1_decode_i_blocks(VC1Context *v)
dst[5] = s->dest[2];
s->dsp.clear_blocks(s->block[0]);
mb_pos = s->mb_x + s->mb_y * s->mb_width;
- s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
- s->current_picture.f.qscale_table[mb_pos] = v->pq;
- s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
- s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
+ s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
+ s->current_picture.qscale_table[mb_pos] = v->pq;
+ s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
// do actual MB decoding and displaying
cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
@@ -4512,9 +4512,9 @@ static void vc1_decode_i_blocks_adv(VC1Context *v)
ff_update_block_index(s);
s->dsp.clear_blocks(block[0]);
mb_pos = s->mb_x + s->mb_y * s->mb_stride;
- s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
- s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
+ s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
// do actual MB decoding and displaying
if (v->fieldtx_is_raw)
@@ -4530,7 +4530,7 @@ static void vc1_decode_i_blocks_adv(VC1Context *v)
GET_MQUANT();
- s->current_picture.f.qscale_table[mb_pos] = mquant;
+ s->current_picture.qscale_table[mb_pos] = mquant;
/* Set DC scale - y and c use the same */
s->y_dc_scale = s->y_dc_scale_table[mquant];
s->c_dc_scale = s->c_dc_scale_table[mquant];
@@ -5024,12 +5024,8 @@ static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
v->two_sprites = 0;
}
- if (v->sprite_output_frame.data[0])
- avctx->release_buffer(avctx, &v->sprite_output_frame);
-
- v->sprite_output_frame.buffer_hints = FF_BUFFER_HINTS_VALID;
- v->sprite_output_frame.reference = 0;
- if (ff_get_buffer(avctx, &v->sprite_output_frame) < 0) {
+ av_frame_unref(&v->sprite_output_frame);
+ if (ff_get_buffer(avctx, &v->sprite_output_frame, 0) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
@@ -5279,9 +5275,8 @@ av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
VC1Context *v = avctx->priv_data;
int i;
- if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
- && v->sprite_output_frame.data[0])
- avctx->release_buffer(avctx, &v->sprite_output_frame);
+ av_frame_unref(&v->sprite_output_frame);
+
for (i = 0; i < 4; i++)
av_freep(&v->sr_rows[i >> 1][i & 1]);
av_freep(&v->hrd_rate);
@@ -5315,7 +5310,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
- int buf_size = avpkt->size, n_slices = 0, i;
+ int buf_size = avpkt->size, n_slices = 0, i, ret;
VC1Context *v = avctx->priv_data;
MpegEncContext *s = &v->s;
AVFrame *pict = data;
@@ -5332,7 +5327,8 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
/* special case for last picture */
if (s->low_delay == 0 && s->next_picture_ptr) {
- *pict = s->next_picture_ptr->f;
+ if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
+ return ret;
s->next_picture_ptr = NULL;
*got_frame = 1;
@@ -5650,17 +5646,21 @@ image:
if (vc1_decode_sprites(v, &s->gb))
goto err;
#endif
- *pict = v->sprite_output_frame;
+ if ((ret = av_frame_ref(pict, &v->sprite_output_frame)) < 0)
+ goto err;
*got_frame = 1;
} else {
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
- *pict = s->current_picture_ptr->f;
+ if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+ goto err;
+ ff_print_debug_info(s, s->current_picture_ptr);
} else if (s->last_picture_ptr != NULL) {
- *pict = s->last_picture_ptr->f;
+ if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
+ goto err;
+ ff_print_debug_info(s, s->last_picture_ptr);
}
if (s->last_picture_ptr || s->low_delay) {
*got_frame = 1;
- ff_print_debug_info(s, pict);
}
}