author     Michael Niedermayer <michaelni@gmx.at>    2011-07-12 01:42:32 +0200
committer  Michael Niedermayer <michaelni@gmx.at>    2011-07-12 01:42:32 +0200
commit     e10979ff5624248dc86780443df9ebd8cc48e246 (patch)
tree       9ee75c3d4d0dd42846cfb2a3cd439bece1f20479 /libavcodec/error_resilience.c
parent     a8c2ff2c3cc5f28c36692031c342692cac742280 (diff)
parent     b047941d7da470ba0dcedb1fd0aa828075265ffc (diff)
Merge remote-tracking branch 'qatar/master'
* qatar/master:
  changelog: misc typo and wording fixes
  H.264: add filter_mb_fast support for >8-bit decoding
  doc: Remove outdated comments about gcc 2.95 and gcc 3.3 support.
  lls: use av_lfg instead of rand() in test program
  build: remove unnecessary dependency on libs from 'all' target
  H.264: avoid redundant alpha/beta calculations in loopfilter
  H.264: optimize intra/inter loopfilter decision
  mpegts: fix Continuity Counter error detection
  build: remove unnecessary FFLDFLAGS variable
  vp8/mt: flush worker thread, not application thread context, on seek.
  mt: proper locking around release_buffer calls.
  DxVA2: unbreak build after [657ccb5ac75ce34e62bd67f228d9bd36db72189e]
  hwaccel: unbreak build
  Eliminate FF_COMMON_FRAME macro.

Conflicts:
  Changelog
  Makefile
  doc/developer.texi
  libavcodec/avcodec.h
  libavcodec/h264.c
  libavcodec/mpeg4videodec.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
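The last item in the merged list, "Eliminate FF_COMMON_FRAME macro.", is what drives every hunk below: fields that the macro used to paste directly into Picture (data[], motion_val[], mb_type, ref_index, ...) now live in an AVFrame embedded in Picture as "f", so every access gains a ".f". The following is only a minimal sketch of that pattern, using simplified stand-in struct definitions rather than the real libavcodec ones:

#include <stdint.h>

/* Simplified stand-ins for illustration only; the real AVFrame and Picture
 * in libavcodec have many more members. */
typedef struct FrameFields {
    uint8_t  *data[4];            /* plane pointers                */
    int16_t (*motion_val[2])[2];  /* per-block motion vectors      */
    uint32_t *mb_type;            /* per-macroblock type flags     */
    int8_t   *ref_index[2];       /* per-block reference indices   */
} FrameFields;

/* Before: FF_COMMON_FRAME expanded such members directly inside Picture,
 * so code wrote s->current_picture.data[0].
 * After:  the members sit in an embedded frame "f", so the same access
 * becomes s->current_picture.f.data[0], as in every hunk of this diff. */
typedef struct Picture {
    FrameFields f;                /* shared frame fields, accessed via .f */
    /* ... decoder-private members remain directly in Picture ...         */
} Picture;

Under that reading, the change is purely mechanical, which matches the symmetric diffstat below: 92 lines removed and 92 added, one pair per rewritten access.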
Diffstat (limited to 'libavcodec/error_resilience.c')
-rw-r--r--  libavcodec/error_resilience.c  184
1 file changed, 92 insertions(+), 92 deletions(-)
diff --git a/libavcodec/error_resilience.c b/libavcodec/error_resilience.c
index d6ac81f009..3fb95ead88 100644
--- a/libavcodec/error_resilience.c
+++ b/libavcodec/error_resilience.c
@@ -41,9 +41,9 @@
#undef mb_intra
static void decode_mb(MpegEncContext *s, int ref){
- s->dest[0] = s->current_picture.data[0] + (s->mb_y * 16* s->linesize ) + s->mb_x * 16;
- s->dest[1] = s->current_picture.data[1] + (s->mb_y * (16>>s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16>>s->chroma_x_shift);
- s->dest[2] = s->current_picture.data[2] + (s->mb_y * (16>>s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16>>s->chroma_x_shift);
+ s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
+ s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
+ s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
if(CONFIG_H264_DECODER && s->codec_id == CODEC_ID_H264){
H264Context *h= (void*)s;
@@ -52,7 +52,7 @@ static void decode_mb(MpegEncContext *s, int ref){
assert(ref>=0);
if(ref >= h->ref_count[0]) //FIXME it is posible albeit uncommon that slice references differ between slices, we take the easy approuch and ignore it for now. If this turns out to have any relevance in practice then correct remapping should be added
ref=0;
- fill_rectangle(&s->current_picture.ref_index[0][4*h->mb_xy], 2, 2, 2, ref, 1);
+ fill_rectangle(&s->current_picture.f.ref_index[0][4*h->mb_xy], 2, 2, 2, ref, 1);
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
fill_rectangle(h->mv_cache[0][ scan8[0] ], 4, 4, 8, pack16to32(s->mv[0][0][0],s->mv[0][0][1]), 4);
assert(!FRAME_MBAFF);
@@ -166,14 +166,14 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i
error= s->error_status_table[mb_index];
- if(IS_INTER(s->current_picture.mb_type[mb_index])) continue; //inter
+ if(IS_INTER(s->current_picture.f.mb_type[mb_index])) continue; //inter
if(!(error&DC_ERROR)) continue; //dc-ok
/* right block */
for(j=b_x+1; j<w; j++){
int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride;
int error_j= s->error_status_table[mb_index_j];
- int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+ int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
if(intra_j==0 || !(error_j&DC_ERROR)){
color[0]= dc[j + b_y*stride];
distance[0]= j-b_x;
@@ -185,7 +185,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i
for(j=b_x-1; j>=0; j--){
int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride;
int error_j= s->error_status_table[mb_index_j];
- int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+ int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
if(intra_j==0 || !(error_j&DC_ERROR)){
color[1]= dc[j + b_y*stride];
distance[1]= b_x-j;
@@ -197,7 +197,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i
for(j=b_y+1; j<h; j++){
int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride;
int error_j= s->error_status_table[mb_index_j];
- int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+ int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
if(intra_j==0 || !(error_j&DC_ERROR)){
color[2]= dc[b_x + j*stride];
distance[2]= j-b_y;
@@ -209,7 +209,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i
for(j=b_y-1; j>=0; j--){
int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride;
int error_j= s->error_status_table[mb_index_j];
- int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+ int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
if(intra_j==0 || !(error_j&DC_ERROR)){
color[3]= dc[b_x + j*stride];
distance[3]= b_y-j;
@@ -248,13 +248,13 @@ static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int st
int y;
int left_status = s->error_status_table[( b_x >>is_luma) + (b_y>>is_luma)*s->mb_stride];
int right_status= s->error_status_table[((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride];
- int left_intra= IS_INTRA(s->current_picture.mb_type [( b_x >>is_luma) + (b_y>>is_luma)*s->mb_stride]);
- int right_intra= IS_INTRA(s->current_picture.mb_type [((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride]);
+ int left_intra = IS_INTRA(s->current_picture.f.mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
+ int right_intra = IS_INTRA(s->current_picture.f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
int left_damage = left_status&(DC_ERROR|AC_ERROR|MV_ERROR);
int right_damage= right_status&(DC_ERROR|AC_ERROR|MV_ERROR);
int offset= b_x*8 + b_y*stride*8;
- int16_t *left_mv= s->current_picture.motion_val[0][mvy_stride*b_y + mvx_stride* b_x ];
- int16_t *right_mv= s->current_picture.motion_val[0][mvy_stride*b_y + mvx_stride*(b_x+1)];
+ int16_t *left_mv= s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride* b_x ];
+ int16_t *right_mv= s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride*(b_x+1)];
if(!(left_damage||right_damage)) continue; // both undamaged
@@ -311,13 +311,13 @@ static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int st
int x;
int top_status = s->error_status_table[(b_x>>is_luma) + ( b_y >>is_luma)*s->mb_stride];
int bottom_status= s->error_status_table[(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride];
- int top_intra= IS_INTRA(s->current_picture.mb_type [(b_x>>is_luma) + ( b_y >>is_luma)*s->mb_stride]);
- int bottom_intra= IS_INTRA(s->current_picture.mb_type [(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride]);
+ int top_intra = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]);
+ int bottom_intra = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
int top_damage = top_status&(DC_ERROR|AC_ERROR|MV_ERROR);
int bottom_damage= bottom_status&(DC_ERROR|AC_ERROR|MV_ERROR);
int offset= b_x*8 + b_y*stride*8;
- int16_t *top_mv= s->current_picture.motion_val[0][mvy_stride* b_y + mvx_stride*b_x];
- int16_t *bottom_mv= s->current_picture.motion_val[0][mvy_stride*(b_y+1) + mvx_stride*b_x];
+ int16_t *top_mv = s->current_picture.f.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
+ int16_t *bottom_mv = s->current_picture.f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
if(!(top_damage||bottom_damage)) continue; // both undamaged
@@ -376,19 +376,19 @@ static void guess_mv(MpegEncContext *s){
int f=0;
int error= s->error_status_table[mb_xy];
- if(IS_INTRA(s->current_picture.mb_type[mb_xy])) f=MV_FROZEN; //intra //FIXME check
+ if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) f=MV_FROZEN; //intra //FIXME check
if(!(error&MV_ERROR)) f=MV_FROZEN; //inter with undamaged MV
fixed[mb_xy]= f;
if(f==MV_FROZEN)
num_avail++;
- else if(s->last_picture.data[0] && s->last_picture.motion_val[0]){
+ else if(s->last_picture.f.data[0] && s->last_picture.f.motion_val[0]){
const int mb_y= mb_xy / s->mb_stride;
const int mb_x= mb_xy % s->mb_stride;
const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
- s->current_picture.motion_val[0][mot_index][0]= s->last_picture.motion_val[0][mot_index][0];
- s->current_picture.motion_val[0][mot_index][1]= s->last_picture.motion_val[0][mot_index][1];
- s->current_picture.ref_index[0][4*mb_xy] = s->last_picture.ref_index[0][4*mb_xy];
+ s->current_picture.f.motion_val[0][mot_index][0]= s->last_picture.f.motion_val[0][mot_index][0];
+ s->current_picture.f.motion_val[0][mot_index][1]= s->last_picture.f.motion_val[0][mot_index][1];
+ s->current_picture.f.ref_index[0][4*mb_xy] = s->last_picture.f.ref_index[0][4*mb_xy];
}
}
@@ -397,10 +397,10 @@ static void guess_mv(MpegEncContext *s){
for(mb_x=0; mb_x<s->mb_width; mb_x++){
const int mb_xy= mb_x + mb_y*s->mb_stride;
- if(IS_INTRA(s->current_picture.mb_type[mb_xy])) continue;
+ if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) continue;
if(!(s->error_status_table[mb_xy]&MV_ERROR)) continue;
- s->mv_dir = s->last_picture.data[0] ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
+ s->mv_dir = s->last_picture.f.data[0] ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
s->mb_intra=0;
s->mv_type = MV_TYPE_16X16;
s->mb_skipped=0;
@@ -442,8 +442,8 @@ int score_sum=0;
if((mb_x^mb_y^pass)&1) continue;
if(fixed[mb_xy]==MV_FROZEN) continue;
- assert(!IS_INTRA(s->current_picture.mb_type[mb_xy]));
- assert(s->last_picture_ptr && s->last_picture_ptr->data[0]);
+ assert(!IS_INTRA(s->current_picture.f.mb_type[mb_xy]));
+ assert(s->last_picture_ptr && s->last_picture_ptr->f.data[0]);
j=0;
if(mb_x>0 && fixed[mb_xy-1 ]==MV_FROZEN) j=1;
@@ -462,27 +462,27 @@ int score_sum=0;
none_left=0;
if(mb_x>0 && fixed[mb_xy-1]){
- mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index - mot_step][0];
- mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index - mot_step][1];
- ref [pred_count] = s->current_picture.ref_index[0][4*(mb_xy-1)];
+ mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_step][0];
+ mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_step][1];
+ ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy-1)];
pred_count++;
}
if(mb_x+1<mb_width && fixed[mb_xy+1]){
- mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index + mot_step][0];
- mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index + mot_step][1];
- ref [pred_count] = s->current_picture.ref_index[0][4*(mb_xy+1)];
+ mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_step][0];
+ mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_step][1];
+ ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy+1)];
pred_count++;
}
if(mb_y>0 && fixed[mb_xy-mb_stride]){
- mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index - mot_stride*mot_step][0];
- mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index - mot_stride*mot_step][1];
- ref [pred_count] = s->current_picture.ref_index[0][4*(mb_xy-s->mb_stride)];
+ mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][0];
+ mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][1];
+ ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy-s->mb_stride)];
pred_count++;
}
if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
- mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index + mot_stride*mot_step][0];
- mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index + mot_stride*mot_step][1];
- ref [pred_count] = s->current_picture.ref_index[0][4*(mb_xy+s->mb_stride)];
+ mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][0];
+ mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][1];
+ ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy+s->mb_stride)];
pred_count++;
}
if(pred_count==0) continue;
@@ -542,16 +542,16 @@ skip_mean_and_median:
ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
mb_y, 0);
}
- if (!s->last_picture.motion_val[0] ||
- !s->last_picture.ref_index[0])
+ if (!s->last_picture.f.motion_val[0] ||
+ !s->last_picture.f.ref_index[0])
goto skip_last_mv;
- prev_x = s->last_picture.motion_val[0][mot_index][0];
- prev_y = s->last_picture.motion_val[0][mot_index][1];
- prev_ref = s->last_picture.ref_index[0][4*mb_xy];
+ prev_x = s->last_picture.f.motion_val[0][mot_index][0];
+ prev_y = s->last_picture.f.motion_val[0][mot_index][1];
+ prev_ref = s->last_picture.f.ref_index[0][4*mb_xy];
} else {
- prev_x = s->current_picture.motion_val[0][mot_index][0];
- prev_y = s->current_picture.motion_val[0][mot_index][1];
- prev_ref = s->current_picture.ref_index[0][4*mb_xy];
+ prev_x = s->current_picture.f.motion_val[0][mot_index][0];
+ prev_y = s->current_picture.f.motion_val[0][mot_index][1];
+ prev_ref = s->current_picture.f.ref_index[0][4*mb_xy];
}
/* last MV */
@@ -573,10 +573,10 @@ skip_mean_and_median:
for(j=0; j<pred_count; j++){
int score=0;
- uint8_t *src= s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
+ uint8_t *src = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize;
- s->current_picture.motion_val[0][mot_index][0]= s->mv[0][0][0]= mv_predictor[j][0];
- s->current_picture.motion_val[0][mot_index][1]= s->mv[0][0][1]= mv_predictor[j][1];
+ s->current_picture.f.motion_val[0][mot_index][0] = s->mv[0][0][0] = mv_predictor[j][0];
+ s->current_picture.f.motion_val[0][mot_index][1] = s->mv[0][0][1] = mv_predictor[j][1];
if(ref[j]<0) //predictor intra or otherwise not available
continue;
@@ -615,8 +615,8 @@ score_sum+= best_score;
for(i=0; i<mot_step; i++)
for(j=0; j<mot_step; j++){
- s->current_picture.motion_val[0][mot_index+i+j*mot_stride][0]= s->mv[0][0][0];
- s->current_picture.motion_val[0][mot_index+i+j*mot_stride][1]= s->mv[0][0][1];
+ s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
+ s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
}
decode_mb(s, ref[best_pred]);
@@ -648,7 +648,7 @@ score_sum+= best_score;
static int is_intra_more_likely(MpegEncContext *s){
int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;
- if(!s->last_picture_ptr || !s->last_picture_ptr->data[0]) return 1; //no previous frame available -> use spatial prediction
+ if (!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) return 1; //no previous frame available -> use spatial prediction
undamaged_count=0;
for(i=0; i<s->mb_num; i++){
@@ -660,7 +660,7 @@ static int is_intra_more_likely(MpegEncContext *s){
if(s->codec_id == CODEC_ID_H264){
H264Context *h= (void*)s;
- if(h->ref_count[0] <= 0 || !h->ref_list[0][0].data[0])
+ if (h->ref_count[0] <= 0 || !h->ref_list[0][0].f.data[0])
return 1;
}
@@ -687,8 +687,8 @@ static int is_intra_more_likely(MpegEncContext *s){
if((j%skip_amount) != 0) continue; //skip a few to speed things up
if(s->pict_type==AV_PICTURE_TYPE_I){
- uint8_t *mb_ptr = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
- uint8_t *last_mb_ptr= s->last_picture.data [0] + mb_x*16 + mb_y*16*s->linesize;
+ uint8_t *mb_ptr = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize;
+ uint8_t *last_mb_ptr= s->last_picture.f.data [0] + mb_x*16 + mb_y*16*s->linesize;
if (s->avctx->codec_id == CODEC_ID_H264) {
// FIXME
@@ -700,7 +700,7 @@ static int is_intra_more_likely(MpegEncContext *s){
// FIXME need await_progress() here
is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize, 16);
}else{
- if(IS_INTRA(s->current_picture.mb_type[mb_xy]))
+ if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
is_intra_likely++;
else
is_intra_likely--;
@@ -802,15 +802,15 @@ void ff_er_frame_end(MpegEncContext *s){
s->picture_structure != PICT_FRAME || // we dont support ER of field pictures yet, though it should not crash if enabled
s->error_count==3*s->mb_width*(s->avctx->skip_top + s->avctx->skip_bottom)) return;
- if(s->current_picture.motion_val[0] == NULL){
+ if (s->current_picture.f.motion_val[0] == NULL) {
av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
for(i=0; i<2; i++){
- pic->ref_index[i]= av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
+ pic->f.ref_index[i] = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
pic->motion_val_base[i]= av_mallocz((size+4) * 2 * sizeof(uint16_t));
- pic->motion_val[i]= pic->motion_val_base[i]+4;
+ pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
}
- pic->motion_subsample_log2= 3;
+ pic->f.motion_subsample_log2 = 3;
s->current_picture= *s->current_picture_ptr;
}
@@ -965,25 +965,25 @@ void ff_er_frame_end(MpegEncContext *s){
continue;
if(is_intra_likely)
- s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA4x4;
+ s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
else
- s->current_picture.mb_type[mb_xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+ s->current_picture.f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
}
// change inter to intra blocks if no reference frames are available
- if (!s->last_picture.data[0] && !s->next_picture.data[0])
+ if (!s->last_picture.f.data[0] && !s->next_picture.f.data[0])
for(i=0; i<s->mb_num; i++){
const int mb_xy= s->mb_index2xy[i];
- if(!IS_INTRA(s->current_picture.mb_type[mb_xy]))
- s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA4x4;
+ if (!IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
+ s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
}
/* handle inter blocks with damaged AC */
for(mb_y=0; mb_y<s->mb_height; mb_y++){
for(mb_x=0; mb_x<s->mb_width; mb_x++){
const int mb_xy= mb_x + mb_y * s->mb_stride;
- const int mb_type= s->current_picture.mb_type[mb_xy];
- int dir = !s->last_picture.data[0];
+ const int mb_type= s->current_picture.f.mb_type[mb_xy];
+ int dir = !s->last_picture.f.data[0];
error= s->error_status_table[mb_xy];
if(IS_INTRA(mb_type)) continue; //intra
@@ -998,13 +998,13 @@ void ff_er_frame_end(MpegEncContext *s){
int j;
s->mv_type = MV_TYPE_8X8;
for(j=0; j<4; j++){
- s->mv[0][j][0] = s->current_picture.motion_val[dir][ mb_index + (j&1) + (j>>1)*s->b8_stride ][0];
- s->mv[0][j][1] = s->current_picture.motion_val[dir][ mb_index + (j&1) + (j>>1)*s->b8_stride ][1];
+ s->mv[0][j][0] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
+ s->mv[0][j][1] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
}
}else{
s->mv_type = MV_TYPE_16X16;
- s->mv[0][0][0] = s->current_picture.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][0];
- s->mv[0][0][1] = s->current_picture.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][1];
+ s->mv[0][0][0] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][0];
+ s->mv[0][0][1] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][1];
}
s->dsp.clear_blocks(s->block[0]);
@@ -1021,7 +1021,7 @@ void ff_er_frame_end(MpegEncContext *s){
for(mb_x=0; mb_x<s->mb_width; mb_x++){
int xy= mb_x*2 + mb_y*2*s->b8_stride;
const int mb_xy= mb_x + mb_y * s->mb_stride;
- const int mb_type= s->current_picture.mb_type[mb_xy];
+ const int mb_type= s->current_picture.f.mb_type[mb_xy];
error= s->error_status_table[mb_xy];
if(IS_INTRA(mb_type)) continue;
@@ -1029,8 +1029,8 @@ void ff_er_frame_end(MpegEncContext *s){
if(!(error&AC_ERROR)) continue; //undamaged inter
s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD;
- if(!s->last_picture.data[0]) s->mv_dir &= ~MV_DIR_FORWARD;
- if(!s->next_picture.data[0]) s->mv_dir &= ~MV_DIR_BACKWARD;
+ if(!s->last_picture.f.data[0]) s->mv_dir &= ~MV_DIR_FORWARD;
+ if(!s->next_picture.f.data[0]) s->mv_dir &= ~MV_DIR_BACKWARD;
s->mb_intra=0;
s->mv_type = MV_TYPE_16X16;
s->mb_skipped=0;
@@ -1045,10 +1045,10 @@ void ff_er_frame_end(MpegEncContext *s){
ff_thread_await_progress((AVFrame *) s->next_picture_ptr,
mb_y, 0);
}
- s->mv[0][0][0] = s->next_picture.motion_val[0][xy][0]*time_pb/time_pp;
- s->mv[0][0][1] = s->next_picture.motion_val[0][xy][1]*time_pb/time_pp;
- s->mv[1][0][0] = s->next_picture.motion_val[0][xy][0]*(time_pb - time_pp)/time_pp;
- s->mv[1][0][1] = s->next_picture.motion_val[0][xy][1]*(time_pb - time_pp)/time_pp;
+ s->mv[0][0][0] = s->next_picture.f.motion_val[0][xy][0] * time_pb / time_pp;
+ s->mv[0][0][1] = s->next_picture.f.motion_val[0][xy][1] * time_pb / time_pp;
+ s->mv[1][0][0] = s->next_picture.f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
+ s->mv[1][0][1] = s->next_picture.f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
}else{
s->mv[0][0][0]= 0;
s->mv[0][0][1]= 0;
@@ -1075,16 +1075,16 @@ void ff_er_frame_end(MpegEncContext *s){
int16_t *dc_ptr;
uint8_t *dest_y, *dest_cb, *dest_cr;
const int mb_xy= mb_x + mb_y * s->mb_stride;
- const int mb_type= s->current_picture.mb_type[mb_xy];
+ const int mb_type = s->current_picture.f.mb_type[mb_xy];
error= s->error_status_table[mb_xy];
if(IS_INTRA(mb_type) && s->partitioned_frame) continue;
// if(error&MV_ERROR) continue; //inter data damaged FIXME is this good?
- dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
- dest_cb= s->current_picture.data[1] + mb_x*8 + mb_y*8 *s->uvlinesize;
- dest_cr= s->current_picture.data[2] + mb_x*8 + mb_y*8 *s->uvlinesize;
+ dest_y = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
+ dest_cb = s->current_picture.f.data[1] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
+ dest_cr = s->current_picture.f.data[2] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
dc_ptr= &s->dc_val[0][mb_x*2 + mb_y*2*s->b8_stride];
for(n=0; n<4; n++){
@@ -1125,16 +1125,16 @@ void ff_er_frame_end(MpegEncContext *s){
for(mb_x=0; mb_x<s->mb_width; mb_x++){
uint8_t *dest_y, *dest_cb, *dest_cr;
const int mb_xy= mb_x + mb_y * s->mb_stride;
- const int mb_type= s->current_picture.mb_type[mb_xy];
+ const int mb_type = s->current_picture.f.mb_type[mb_xy];
error= s->error_status_table[mb_xy];
if(IS_INTER(mb_type)) continue;
if(!(error&AC_ERROR)) continue; //undamaged
- dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
- dest_cb= s->current_picture.data[1] + mb_x*8 + mb_y*8 *s->uvlinesize;
- dest_cr= s->current_picture.data[2] + mb_x*8 + mb_y*8 *s->uvlinesize;
+ dest_y = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
+ dest_cb = s->current_picture.f.data[1] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
+ dest_cr = s->current_picture.f.data[2] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
}
@@ -1143,14 +1143,14 @@ void ff_er_frame_end(MpegEncContext *s){
if(s->avctx->error_concealment&FF_EC_DEBLOCK){
/* filter horizontal block boundaries */
- h_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
- h_block_filter(s, s->current_picture.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
- h_block_filter(s, s->current_picture.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
+ h_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
+ h_block_filter(s, s->current_picture.f.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
+ h_block_filter(s, s->current_picture.f.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
/* filter vertical block boundaries */
- v_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
- v_block_filter(s, s->current_picture.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
- v_block_filter(s, s->current_picture.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
+ v_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
+ v_block_filter(s, s->current_picture.f.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
+ v_block_filter(s, s->current_picture.f.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
}
ec_clean: