author     Michael Niedermayer <michaelni@gmx.at>  2003-04-10 13:18:38 +0000
committer  Michael Niedermayer <michaelni@gmx.at>  2003-04-10 13:18:38 +0000
commit     7bc9090a4176de2bc834e2a7df131047b944f3b5 (patch)
tree       1d57ac078ac227d652202cc1077ac565b8e36122
parent     84876d36774b6633c2950291fbfb3db5922273fb (diff)
simplified addressing of most mb-based arrays (now mb_x + mb_y*s->mb_stride instead of a mixture of mb_x + mb_y*mb_width, 1+mb_x + (1+mb_y)*(mb_width+2), ...)
more direct use of the new mb_type stuff instead of codec-specific stuff
runtime mb_type debug output (h264/h263 variants/mpeg1/2/4)
error concealment/resilience for mpeg1/2
various minor optimizations
Originally committed as revision 1746 to svn://svn.ffmpeg.org/ffmpeg/trunk
-rw-r--r--  libavcodec/common.h                9
-rw-r--r--  libavcodec/error_resilience.c    304
-rw-r--r--  libavcodec/h263.c                252
-rw-r--r--  libavcodec/h263dec.c              29
-rw-r--r--  libavcodec/h264.c                 38
-rw-r--r--  libavcodec/h264data.h              7
-rw-r--r--  libavcodec/motion_est.c           72
-rw-r--r--  libavcodec/motion_est_template.c   8
-rw-r--r--  libavcodec/mpeg12.c              485
-rw-r--r--  libavcodec/mpeg12data.h           85
-rw-r--r--  libavcodec/mpeg4data.h            10
-rw-r--r--  libavcodec/mpegvideo.c           286
-rw-r--r--  libavcodec/mpegvideo.h            48
-rw-r--r--  libavcodec/msmpeg4.c              31
-rw-r--r--  libavcodec/wmv2.c                 21
15 files changed, 907 insertions, 778 deletions
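The heart of the commit is the indexing change summarised above: every per-macroblock table moves to a single padded row length, s->mb_stride. A minimal, self-contained sketch of what that means in practice (the struct and the mb_width + 1 padding below are illustrative stand-ins, not the real MpegEncContext layout):

```c
/* Illustrative sketch only: ToyContext and its field sizes are placeholders,
 * not the real MpegEncContext.  It shows the uniform indexing scheme this
 * commit standardises on. */
#include <stdlib.h>

typedef struct {
    int mb_width, mb_height;
    int mb_stride;                    /* padded row length shared by all mb tables */
    unsigned char *error_status_table;
} ToyContext;

static int toy_init(ToyContext *s, int mb_width, int mb_height)
{
    s->mb_width  = mb_width;
    s->mb_height = mb_height;
    s->mb_stride = mb_width + 1;      /* one padding column is an assumption here */
    s->error_status_table = calloc(s->mb_stride * mb_height, 1);
    return s->error_status_table ? 0 : -1;
}

/* New, uniform addressing used by all mb-based tables: */
static int mb_index(const ToyContext *s, int mb_x, int mb_y)
{
    return mb_x + mb_y * s->mb_stride;
}

/* Replaced schemes (per the commit message): some tables used
 *   mb_x + mb_y*mb_width
 * while the +2-padded ones used
 *   1 + mb_x + (1 + mb_y)*(mb_width + 2)
 */
```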
diff --git a/libavcodec/common.h b/libavcodec/common.h
index c229a3a1c9..b286186d76 100644
--- a/libavcodec/common.h
+++ b/libavcodec/common.h
@@ -999,6 +999,15 @@ if((y)<(x)){\
#define free please_use_av_free
#define realloc please_use_av_realloc
+#define CHECKED_ALLOCZ(p, size)\
+{\
+ p= av_mallocz(size);\
+ if(p==NULL){\
+ perror("malloc");\
+ goto fail;\
+ }\
+}
+
#endif /* HAVE_AV_CONFIG_H */
#endif /* COMMON_H */
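The CHECKED_ALLOCZ macro added to common.h above is used by mb-table allocations elsewhere in this diff (e.g. alloc_tables() in h264.c further down). A self-contained sketch of the pattern, with av_mallocz() stubbed by calloc() and a made-up table struct:

```c
/* Sketch of the CHECKED_ALLOCZ pattern.  The struct and the av_mallocz() stub
 * are placeholders so this compiles on its own; they are not libavcodec code. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define av_mallocz(size) calloc(1, (size))   /* stand-in for the real av_mallocz() */

#define CHECKED_ALLOCZ(p, size)\
{\
    p = av_mallocz(size);\
    if (p == NULL) {\
        perror("malloc");\
        goto fail;\
    }\
}

struct mb_tables {
    uint8_t *error_status;
    int16_t *dc_val;
};

static int alloc_mb_tables(struct mb_tables *t, int mb_count)
{
    t->error_status = NULL;
    t->dc_val       = NULL;

    CHECKED_ALLOCZ(t->error_status, mb_count * sizeof(uint8_t))
    CHECKED_ALLOCZ(t->dc_val,       mb_count * sizeof(int16_t))
    return 0;
fail:
    free(t->error_status);   /* free(NULL) is a no-op, so partial failure is safe */
    free(t->dc_val);
    return -1;
}
```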
diff --git a/libavcodec/error_resilience.c b/libavcodec/error_resilience.c
index 486f5670a2..6a940ed0aa 100644
--- a/libavcodec/error_resilience.c
+++ b/libavcodec/error_resilience.c
@@ -23,6 +23,8 @@
* Error resilience / concealment.
*/
+#include <limits.h>
+
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
@@ -111,18 +113,18 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i
int mb_index, error, j;
int64_t guess, weight_sum;
- mb_index= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_width;
+ mb_index= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
error= s->error_status_table[mb_index];
- if(!(s->mb_type[mb_index]&MB_TYPE_INTRA)) continue; //inter
+ if(IS_INTER(s->current_picture.mb_type[mb_index])) continue; //inter
if(!(error&DC_ERROR)) continue; //dc-ok
/* right block */
for(j=b_x+1; j<w; j++){
- int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_width;
+ int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride;
int error_j= s->error_status_table[mb_index_j];
- int intra_j= s->mb_type[mb_index_j]&MB_TYPE_INTRA;
+ int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
if(intra_j==0 || !(error_j&DC_ERROR)){
color[0]= dc[j + b_y*stride];
distance[0]= j-b_x;
@@ -132,9 +134,9 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i
/* left block */
for(j=b_x-1; j>=0; j--){
- int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_width;
+ int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride;
int error_j= s->error_status_table[mb_index_j];
- int intra_j= s->mb_type[mb_index_j]&MB_TYPE_INTRA;
+ int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
if(intra_j==0 || !(error_j&DC_ERROR)){
color[1]= dc[j + b_y*stride];
distance[1]= b_x-j;
@@ -144,9 +146,9 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i
/* bottom block */
for(j=b_y+1; j<h; j++){
- int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_width;
+ int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride;
int error_j= s->error_status_table[mb_index_j];
- int intra_j= s->mb_type[mb_index_j]&MB_TYPE_INTRA;
+ int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
if(intra_j==0 || !(error_j&DC_ERROR)){
color[2]= dc[b_x + j*stride];
distance[2]= j-b_y;
@@ -156,9 +158,9 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i
/* top block */
for(j=b_y-1; j>=0; j--){
- int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_width;
+ int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride;
int error_j= s->error_status_table[mb_index_j];
- int intra_j= s->mb_type[mb_index_j]&MB_TYPE_INTRA;
+ int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
if(intra_j==0 || !(error_j&DC_ERROR)){
color[3]= dc[b_x + j*stride];
distance[3]= b_y-j;
@@ -192,10 +194,10 @@ static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int st
for(b_y=0; b_y<h; b_y++){
for(b_x=0; b_x<w-1; b_x++){
int y;
- int left_status = s->error_status_table[( b_x >>is_luma) + (b_y>>is_luma)*s->mb_width];
- int right_status= s->error_status_table[((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_width];
- int left_intra= s->mb_type [( b_x >>is_luma) + (b_y>>is_luma)*s->mb_width]&MB_TYPE_INTRA;
- int right_intra= s->mb_type [((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_width]&MB_TYPE_INTRA;
+ int left_status = s->error_status_table[( b_x >>is_luma) + (b_y>>is_luma)*s->mb_stride];
+ int right_status= s->error_status_table[((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride];
+ int left_intra= IS_INTRA(s->current_picture.mb_type [( b_x >>is_luma) + (b_y>>is_luma)*s->mb_stride]);
+ int right_intra= IS_INTRA(s->current_picture.mb_type [((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride]);
int left_damage = left_status&(DC_ERROR|AC_ERROR|MV_ERROR);
int right_damage= right_status&(DC_ERROR|AC_ERROR|MV_ERROR);
int offset= b_x*8 + b_y*stride*8;
@@ -252,10 +254,10 @@ static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int st
for(b_y=0; b_y<h-1; b_y++){
for(b_x=0; b_x<w; b_x++){
int x;
- int top_status = s->error_status_table[(b_x>>is_luma) + ( b_y >>is_luma)*s->mb_width];
- int bottom_status= s->error_status_table[(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_width];
- int top_intra= s->mb_type [(b_x>>is_luma) + ( b_y >>is_luma)*s->mb_width]&MB_TYPE_INTRA;
- int bottom_intra= s->mb_type [(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_width]&MB_TYPE_INTRA;
+ int top_status = s->error_status_table[(b_x>>is_luma) + ( b_y >>is_luma)*s->mb_stride];
+ int bottom_status= s->error_status_table[(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride];
+ int top_intra= IS_INTRA(s->current_picture.mb_type [(b_x>>is_luma) + ( b_y >>is_luma)*s->mb_stride]);
+ int bottom_intra= IS_INTRA(s->current_picture.mb_type [(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride]);
int top_damage = top_status&(DC_ERROR|AC_ERROR|MV_ERROR);
int bottom_damage= bottom_status&(DC_ERROR|AC_ERROR|MV_ERROR);
int offset= b_x*8 + b_y*stride*8;
@@ -301,36 +303,37 @@ static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int st
}
static void guess_mv(MpegEncContext *s){
- uint8_t fixed[s->mb_num];
+ uint8_t fixed[s->mb_stride * s->mb_height];
#define MV_FROZEN 3
#define MV_CHANGED 2
#define MV_UNCHANGED 1
+ const int mb_stride = s->mb_stride;
const int mb_width = s->mb_width;
const int mb_height= s->mb_height;
int i, depth, num_avail;
+ int mb_x, mb_y;
num_avail=0;
for(i=0; i<s->mb_num; i++){
+ const int mb_xy= s->mb_index2xy[ i ];
int f=0;
- int error= s->error_status_table[i];
+ int error= s->error_status_table[mb_xy];
- if(s->mb_type[i]&MB_TYPE_INTRA) f=MV_FROZEN; //intra //FIXME check
+ if(IS_INTRA(s->current_picture.mb_type[mb_xy])) f=MV_FROZEN; //intra //FIXME check
if(!(error&MV_ERROR)) f=MV_FROZEN; //inter with undamaged MV
- fixed[i]= f;
+ fixed[mb_xy]= f;
if(f==MV_FROZEN)
num_avail++;
}
if((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) || num_avail <= mb_width/2){
- int mb_x, mb_y;
- i= -1;
for(mb_y=0; mb_y<s->mb_height; mb_y++){
for(mb_x=0; mb_x<s->mb_width; mb_x++){
- i++;
+ const int mb_xy= mb_x + mb_y*s->mb_stride;
- if(s->mb_type[i]&MB_TYPE_INTRA) continue;
- if(!(s->error_status_table[i]&MV_ERROR)) continue;
+ if(IS_INTRA(s->current_picture.mb_type[mb_xy])) continue;
+ if(!(s->error_status_table[mb_xy]&MV_ERROR)) continue;
s->mv_dir = MV_DIR_FORWARD;
s->mb_intra=0;
@@ -355,13 +358,13 @@ static void guess_mv(MpegEncContext *s){
none_left=1;
changed=1;
for(pass=0; (changed || pass<2) && pass<10; pass++){
- int i,mb_x, mb_y;
+ int mb_x, mb_y;
int score_sum=0;
changed=0;
- i= -1;
for(mb_y=0; mb_y<s->mb_height; mb_y++){
for(mb_x=0; mb_x<s->mb_width; mb_x++){
+ const int mb_xy= mb_x + mb_y*s->mb_stride;
int mv_predictor[8][2]={{0}};
int pred_count=0;
int j;
@@ -372,43 +375,44 @@ int score_sum=0;
int prev_x= s->motion_val[mot_index][0];
int prev_y= s->motion_val[mot_index][1];
- i++;
if((mb_x^mb_y^pass)&1) continue;
- if(fixed[i]==MV_FROZEN) continue;
+ if(fixed[mb_xy]==MV_FROZEN) continue;
+ assert(!IS_INTRA(s->current_picture.mb_type[mb_xy]));
+ assert(s->last_picture_ptr && s->last_picture_ptr->data[0]);
j=0;
- if(mb_x>0 && fixed[i-1 ]==MV_FROZEN) j=1;
- if(mb_x+1<mb_width && fixed[i+1 ]==MV_FROZEN) j=1;
- if(mb_y>0 && fixed[i-mb_width]==MV_FROZEN) j=1;
- if(mb_y+1<mb_height && fixed[i+mb_width]==MV_FROZEN) j=1;
+ if(mb_x>0 && fixed[mb_xy-1 ]==MV_FROZEN) j=1;
+ if(mb_x+1<mb_width && fixed[mb_xy+1 ]==MV_FROZEN) j=1;
+ if(mb_y>0 && fixed[mb_xy-mb_stride]==MV_FROZEN) j=1;
+ if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]==MV_FROZEN) j=1;
if(j==0) continue;
j=0;
- if(mb_x>0 && fixed[i-1 ]==MV_CHANGED) j=1;
- if(mb_x+1<mb_width && fixed[i+1 ]==MV_CHANGED) j=1;
- if(mb_y>0 && fixed[i-mb_width]==MV_CHANGED) j=1;
- if(mb_y+1<mb_height && fixed[i+mb_width]==MV_CHANGED) j=1;
+ if(mb_x>0 && fixed[mb_xy-1 ]==MV_CHANGED) j=1;
+ if(mb_x+1<mb_width && fixed[mb_xy+1 ]==MV_CHANGED) j=1;
+ if(mb_y>0 && fixed[mb_xy-mb_stride]==MV_CHANGED) j=1;
+ if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]==MV_CHANGED) j=1;
if(j==0 && pass>1) continue;
none_left=0;
- if(mb_x>0 && fixed[i-1]){
+ if(mb_x>0 && fixed[mb_xy-1]){
mv_predictor[pred_count][0]= s->motion_val[mot_index - 2][0];
mv_predictor[pred_count][1]= s->motion_val[mot_index - 2][1];
pred_count++;
}
- if(mb_x+1<mb_width && fixed[i+1]){
+ if(mb_x+1<mb_width && fixed[mb_xy+1]){
mv_predictor[pred_count][0]= s->motion_val[mot_index + 2][0];
mv_predictor[pred_count][1]= s->motion_val[mot_index + 2][1];
pred_count++;
}
- if(mb_y>0 && fixed[i-mb_width]){
+ if(mb_y>0 && fixed[mb_xy-mb_stride]){
mv_predictor[pred_count][0]= s->motion_val[mot_index - mot_stride*2][0];
mv_predictor[pred_count][1]= s->motion_val[mot_index - mot_stride*2][1];
pred_count++;
}
- if(mb_y+1<mb_height && fixed[i+mb_width]){
+ if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
mv_predictor[pred_count][0]= s->motion_val[mot_index + mot_stride*2][0];
mv_predictor[pred_count][1]= s->motion_val[mot_index + mot_stride*2][1];
pred_count++;
@@ -468,30 +472,32 @@ int score_sum=0;
s->mb_x= mb_x;
s->mb_y= mb_y;
+
for(j=0; j<pred_count; j++){
int score=0;
uint8_t *src= s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
s->motion_val[mot_index][0]= s->mv[0][0][0]= mv_predictor[j][0];
s->motion_val[mot_index][1]= s->mv[0][0][1]= mv_predictor[j][1];
+
MPV_decode_mb(s, s->block);
- if(mb_x>0 && fixed[i-1]){
+ if(mb_x>0 && fixed[mb_xy-1]){
int k;
for(k=0; k<16; k++)
score += ABS(src[k*s->linesize-1 ]-src[k*s->linesize ]);
}
- if(mb_x+1<mb_width && fixed[i+1]){
+ if(mb_x+1<mb_width && fixed[mb_xy+1]){
int k;
for(k=0; k<16; k++)
score += ABS(src[k*s->linesize+15]-src[k*s->linesize+16]);
}
- if(mb_y>0 && fixed[i-mb_width]){
+ if(mb_y>0 && fixed[mb_xy-mb_stride]){
int k;
for(k=0; k<16; k++)
score += ABS(src[k-s->linesize ]-src[k ]);
}
- if(mb_y+1<mb_height && fixed[i+mb_width]){
+ if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
int k;
for(k=0; k<16; k++)
score += ABS(src[k+s->linesize*15]-src[k+s->linesize*16]);
@@ -511,10 +517,10 @@ score_sum+= best_score;
if(s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y){
- fixed[i]=MV_CHANGED;
+ fixed[mb_xy]=MV_CHANGED;
changed++;
}else
- fixed[i]=MV_UNCHANGED;
+ fixed[mb_xy]=MV_UNCHANGED;
}
}
@@ -525,8 +531,9 @@ score_sum+= best_score;
return;
for(i=0; i<s->mb_num; i++){
- if(fixed[i])
- fixed[i]=MV_FROZEN;
+ int mb_xy= s->mb_index2xy[i];
+ if(fixed[mb_xy])
+ fixed[mb_xy]=MV_FROZEN;
}
// printf(":"); fflush(stdout);
}
@@ -539,7 +546,8 @@ static int is_intra_more_likely(MpegEncContext *s){
undamaged_count=0;
for(i=0; i<s->mb_num; i++){
- int error= s->error_status_table[i];
+ const int mb_xy= s->mb_index2xy[i];
+ const int error= s->error_status_table[mb_xy];
if(!((error&DC_ERROR) && (error&MV_ERROR)))
undamaged_count++;
}
@@ -550,13 +558,12 @@ static int is_intra_more_likely(MpegEncContext *s){
is_intra_likely=0;
j=0;
- i=-1;
for(mb_y= 0; mb_y<s->mb_height-1; mb_y++){
for(mb_x= 0; mb_x<s->mb_width; mb_x++){
int error;
+ const int mb_xy= mb_x + mb_y*s->mb_stride;
- i++;
- error= s->error_status_table[i];
+ error= s->error_status_table[mb_xy];
if((error&DC_ERROR) && (error&MV_ERROR))
continue; //skip damaged
@@ -570,7 +577,7 @@ static int is_intra_more_likely(MpegEncContext *s){
is_intra_likely += s->dsp.pix_abs16x16(last_mb_ptr, mb_ptr , s->linesize);
is_intra_likely -= s->dsp.pix_abs16x16(last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize);
}else{
- if(s->mbintra_table[i]) //HACK (this is allways inited but we should use mb_type[])
+ if(IS_INTRA(s->current_picture.mb_type[mb_xy]))
is_intra_likely++;
else
is_intra_likely--;
@@ -584,7 +591,8 @@ static int is_intra_more_likely(MpegEncContext *s){
void ff_er_frame_start(MpegEncContext *s){
if(!s->error_resilience) return;
- memset(s->error_status_table, MV_ERROR|AC_ERROR|DC_ERROR|VP_START|AC_END|DC_END|MV_END, s->mb_num*sizeof(uint8_t));
+ memset(s->error_status_table, MV_ERROR|AC_ERROR|DC_ERROR|VP_START|AC_END|DC_END|MV_END, s->mb_stride*s->mb_height*sizeof(uint8_t));
+ s->error_count= 3*s->mb_num;
}
/**
@@ -594,33 +602,54 @@ void ff_er_frame_start(MpegEncContext *s){
* error of the same type occured
*/
void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int endy, int status){
- const int start_xy= clip(startx + starty * s->mb_width, 0, s->mb_num-1);
- const int end_xy = clip(endx + endy * s->mb_width, 0, s->mb_num);
- const int mb_count= end_xy - start_xy;
+ const int start_i= clip(startx + starty * s->mb_width , 0, s->mb_num-1);
+ const int end_i = clip(endx + endy * s->mb_width , 0, s->mb_num);
+ const int start_xy= s->mb_index2xy[start_i];
+ const int end_xy = s->mb_index2xy[end_i];
int mask= -1;
if(!s->error_resilience) return;
mask &= ~VP_START;
- if(status & (AC_ERROR|AC_END)) mask &= ~(AC_ERROR|AC_END);
- if(status & (DC_ERROR|DC_END)) mask &= ~(DC_ERROR|DC_END);
- if(status & (MV_ERROR|MV_END)) mask &= ~(MV_ERROR|MV_END);
+ if(status & (AC_ERROR|AC_END)){
+ mask &= ~(AC_ERROR|AC_END);
+ s->error_count -= end_i - start_i + 1;
+ }
+ if(status & (DC_ERROR|DC_END)){
+ mask &= ~(DC_ERROR|DC_END);
+ s->error_count -= end_i - start_i + 1;
+ }
+ if(status & (MV_ERROR|MV_END)){
+ mask &= ~(MV_ERROR|MV_END);
+ s->error_count -= end_i - start_i + 1;
+ }
+
+ if(status & (AC_ERROR|DC_ERROR|MV_ERROR)) s->error_count= INT_MAX;
if(mask == ~0x7F){
- memset(&s->error_status_table[start_xy], 0, mb_count * sizeof(uint8_t));
+ memset(&s->error_status_table[start_xy], 0, (end_xy - start_xy) * sizeof(uint8_t));
}else{
int i;
for(i=start_xy; i<end_xy; i++){
- s->error_status_table[i] &= mask;
+ s->error_status_table[ i ] &= mask;
}
}
-
- if(end_xy < s->mb_num){
+
+ if(end_i == s->mb_num)
+ s->error_count= INT_MAX;
+ else{
s->error_status_table[end_xy] &= mask;
s->error_status_table[end_xy] |= status;
}
s->error_status_table[start_xy] |= VP_START;
+
+ if(start_xy > 0){
+ int prev_status= s->error_status_table[ s->mb_index2xy[start_i - 1] ];
+
+ prev_status &= ~ VP_START;
+ if(prev_status != (MV_END|DC_END|AC_END)) s->error_count= INT_MAX;
+ }
}
void ff_er_frame_end(MpegEncContext *s){
@@ -629,41 +658,27 @@ void ff_er_frame_end(MpegEncContext *s){
int threshold_part[4]= {100,100,100};
int threshold= 50;
int is_intra_likely;
- int num_end_markers=0;
- if(!s->error_resilience) return;
+ if(!s->error_resilience || s->error_count==0) return;
- error=0;
- for(i=0; i<s->mb_num; i++){
- int status= s->error_status_table[i];
+ fprintf(stderr, "concealing errors\n");
+
+ if(s->motion_val == NULL){
+ int size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
- if(status==0) continue;
-
- if(status&(DC_ERROR|AC_ERROR|MV_ERROR))
- error=1;
- if(status&VP_START){
- if(num_end_markers)
- error=1;
- num_end_markers=3;
- }
- if(status&AC_END)
- num_end_markers--;
- if(status&DC_END)
- num_end_markers--;
- if(status&MV_END)
- num_end_markers--;
+ fprintf(stderr, "Warning MVs not available\n");
+
+ s->motion_val= av_mallocz(size * 2 * sizeof(int16_t));
}
- if(num_end_markers==0 && error==0)
- return;
-
- fprintf(stderr, "concealing errors\n");
-
- if(s->avctx->debug&FF_DEBUG_ER){
- for(i=0; i<s->mb_num; i++){
- int status= s->error_status_table[i];
+
+ if(s->avctx->debug&FF_DEBUG_ER){
+ for(mb_y=0; mb_y<s->mb_height; mb_y++){
+ for(mb_x=0; mb_x<s->mb_width; mb_x++){
+ int status= s->error_status_table[mb_x + mb_y*s->mb_stride];
- if(i%s->mb_width == 0) printf("\n");
- printf("%2X ", status);
+ printf("%2X ", status);
+ }
+ printf("\n");
}
}
@@ -673,7 +688,8 @@ void ff_er_frame_end(MpegEncContext *s){
int end_ok=0;
for(i=s->mb_num-1; i>=0; i--){
- int error= s->error_status_table[i];
+ const int mb_xy= s->mb_index2xy[i];
+ int error= s->error_status_table[mb_xy];
if(error&(1<<error_type))
end_ok=1;
@@ -681,7 +697,7 @@ void ff_er_frame_end(MpegEncContext *s){
end_ok=1;
if(!end_ok)
- s->error_status_table[i]|= 1<<error_type;
+ s->error_status_table[mb_xy]|= 1<<error_type;
if(error&VP_START)
end_ok=0;
@@ -694,7 +710,8 @@ void ff_er_frame_end(MpegEncContext *s){
int end_ok=0;
for(i=s->mb_num-1; i>=0; i--){
- int error= s->error_status_table[i];
+ const int mb_xy= s->mb_index2xy[i];
+ int error= s->error_status_table[mb_xy];
if(error&AC_END)
end_ok=0;
@@ -702,7 +719,7 @@ void ff_er_frame_end(MpegEncContext *s){
end_ok=1;
if(!end_ok)
- s->error_status_table[i]|= AC_ERROR;
+ s->error_status_table[mb_xy]|= AC_ERROR;
if(error&VP_START)
end_ok=0;
@@ -714,8 +731,9 @@ void ff_er_frame_end(MpegEncContext *s){
int end_ok=1;
for(i=s->mb_num-2; i>=s->mb_width+100; i--){ //FIXME +100 hack
- int error1= s->error_status_table[i ];
- int error2= s->error_status_table[i+1];
+ const int mb_xy= s->mb_index2xy[i];
+ int error1= s->error_status_table[mb_xy ];
+ int error2= s->error_status_table[mb_xy+1];
if(error1&VP_START)
end_ok=1;
@@ -727,7 +745,7 @@ void ff_er_frame_end(MpegEncContext *s){
}
if(!end_ok)
- s->error_status_table[i]|= DC_ERROR|AC_ERROR|MV_ERROR;
+ s->error_status_table[mb_xy]|= DC_ERROR|AC_ERROR|MV_ERROR;
}
}
@@ -736,19 +754,20 @@ void ff_er_frame_end(MpegEncContext *s){
distance=9999999;
for(error_type=1; error_type<=3; error_type++){
for(i=s->mb_num-1; i>=0; i--){
- int error= s->error_status_table[i];
+ const int mb_xy= s->mb_index2xy[i];
+ int error= s->error_status_table[mb_xy];
- if(!s->mbskip_table[i]) //FIXME partition specific
+ if(!s->mbskip_table[mb_xy]) //FIXME partition specific
distance++;
if(error&(1<<error_type))
distance= 0;
if(s->partitioned_frame){
if(distance < threshold_part[error_type-1])
- s->error_status_table[i]|= 1<<error_type;
+ s->error_status_table[mb_xy]|= 1<<error_type;
}else{
if(distance < threshold)
- s->error_status_table[i]|= 1<<error_type;
+ s->error_status_table[mb_xy]|= 1<<error_type;
}
if(error&VP_START)
@@ -760,23 +779,25 @@ void ff_er_frame_end(MpegEncContext *s){
/* forward mark errors */
error=0;
for(i=0; i<s->mb_num; i++){
- int old_error= s->error_status_table[i];
+ const int mb_xy= s->mb_index2xy[i];
+ int old_error= s->error_status_table[mb_xy];
if(old_error&VP_START)
error= old_error& (DC_ERROR|AC_ERROR|MV_ERROR);
else{
error|= old_error& (DC_ERROR|AC_ERROR|MV_ERROR);
- s->error_status_table[i]|= error;
+ s->error_status_table[mb_xy]|= error;
}
}
#if 1
/* handle not partitioned case */
if(!s->partitioned_frame){
for(i=0; i<s->mb_num; i++){
- error= s->error_status_table[i];
+ const int mb_xy= s->mb_index2xy[i];
+ error= s->error_status_table[mb_xy];
if(error&(AC_ERROR|DC_ERROR|MV_ERROR))
error|= AC_ERROR|DC_ERROR|MV_ERROR;
- s->error_status_table[i]= error;
+ s->error_status_table[mb_xy]= error;
}
}
#endif
@@ -784,34 +805,32 @@ void ff_er_frame_end(MpegEncContext *s){
/* set unknown mb-type to most likely */
for(i=0; i<s->mb_num; i++){
- int intra;
- error= s->error_status_table[i];
- if((error&DC_ERROR) && (error&MV_ERROR))
- intra= is_intra_likely;
- else
- intra= s->mbintra_table[i];
+ const int mb_xy= s->mb_index2xy[i];
+ error= s->error_status_table[mb_xy];
+ if(!((error&DC_ERROR) && (error&MV_ERROR)))
+ continue;
- if(intra)
- s->mb_type[i]|= MB_TYPE_INTRA;
+ if(is_intra_likely)
+ s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA4x4;
else
- s->mb_type[i]&= ~MB_TYPE_INTRA;
+ s->current_picture.mb_type[mb_xy]= MB_TYPE_16x16 | MB_TYPE_L0;
}
/* handle inter blocks with damaged AC */
- i= -1;
for(mb_y=0; mb_y<s->mb_height; mb_y++){
for(mb_x=0; mb_x<s->mb_width; mb_x++){
- i++;
- error= s->error_status_table[i];
+ const int mb_xy= mb_x + mb_y * s->mb_stride;
+ const int mb_type= s->current_picture.mb_type[mb_xy];
+ error= s->error_status_table[mb_xy];
- if(s->mb_type[i]&MB_TYPE_INTRA) continue; //intra
+ if(IS_INTRA(mb_type)) continue; //intra
if(error&MV_ERROR) continue; //inter with damaged MV
if(!(error&AC_ERROR)) continue; //undamaged inter
s->mv_dir = MV_DIR_FORWARD;
s->mb_intra=0;
s->mb_skiped=0;
- if(s->mb_type[i]&MB_TYPE_INTER4V){
+ if(IS_8X8(mb_type)){
int mb_index= mb_x*2+1 + (mb_y*2+1)*s->block_wrap[0];
int j;
s->mv_type = MV_TYPE_8X8;
@@ -835,14 +854,14 @@ void ff_er_frame_end(MpegEncContext *s){
/* guess MVs */
if(s->pict_type==B_TYPE){
- i= -1;
for(mb_y=0; mb_y<s->mb_height; mb_y++){
for(mb_x=0; mb_x<s->mb_width; mb_x++){
int xy= mb_x*2+1 + (mb_y*2+1)*s->block_wrap[0];
- i++;
- error= s->error_status_table[i];
+ const int mb_xy= mb_x + mb_y * s->mb_stride;
+ const int mb_type= s->current_picture.mb_type[mb_xy];
+ error= s->error_status_table[mb_xy];
- if(s->mb_type[i]&MB_TYPE_INTRA) continue; //intra
+ if(IS_INTRA(mb_type)) continue;
if(!(error&MV_ERROR)) continue; //inter with undamaged MV
if(!(error&AC_ERROR)) continue; //undamaged inter
@@ -876,17 +895,17 @@ void ff_er_frame_end(MpegEncContext *s){
guess_mv(s);
/* fill DC for inter blocks */
- i= -1;
for(mb_y=0; mb_y<s->mb_height; mb_y++){
for(mb_x=0; mb_x<s->mb_width; mb_x++){
int dc, dcu, dcv, y, n;
int16_t *dc_ptr;
uint8_t *dest_y, *dest_cb, *dest_cr;
+ const int mb_xy= mb_x + mb_y * s->mb_stride;
+ const int mb_type= s->current_picture.mb_type[mb_xy];
- i++;
- error= s->error_status_table[i];
+ error= s->error_status_table[mb_xy];
- if(s->mb_type[i]&MB_TYPE_INTRA) continue; //intra
+ if(IS_INTRA(mb_type) && s->partitioned_frame) continue;
// if(error&MV_ERROR) continue; //inter data damaged FIXME is this good?
dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
@@ -928,15 +947,15 @@ void ff_er_frame_end(MpegEncContext *s){
#if 1
/* render DC only intra */
- i= -1;
for(mb_y=0; mb_y<s->mb_height; mb_y++){
for(mb_x=0; mb_x<s->mb_width; mb_x++){
uint8_t *dest_y, *dest_cb, *dest_cr;
-
- i++;
- error= s->error_status_table[i];
+ const int mb_xy= mb_x + mb_y * s->mb_stride;
+ const int mb_type= s->current_picture.mb_type[mb_xy];
+
+ error= s->error_status_table[mb_xy];
- if(!(s->mb_type[i]&MB_TYPE_INTRA)) continue; //inter
+ if(IS_INTER(mb_type)) continue;
if(!(error&AC_ERROR)) continue; //undamaged
dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
@@ -962,11 +981,12 @@ void ff_er_frame_end(MpegEncContext *s){
/* clean a few tables */
for(i=0; i<s->mb_num; i++){
- int error= s->error_status_table[i];
+ const int mb_xy= s->mb_index2xy[i];
+ int error= s->error_status_table[mb_xy];
if(s->pict_type!=B_TYPE && (error&(DC_ERROR|MV_ERROR|AC_ERROR))){
- s->mbskip_table[i]=0;
+ s->mbskip_table[mb_xy]=0;
}
- s->mbintra_table[i]=1;
+ s->mbintra_table[mb_xy]=1;
}
}
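Most of the error_resilience.c hunks above are mechanical re-indexing (mb_width to mb_stride, mb_index2xy), but the idea behind guess_mv() is worth a plain-language summary: damaged inter macroblocks get their motion vector guessed from already-trusted neighbours, and each candidate is scored by how smoothly the motion-compensated block joins its decoded surroundings. A deliberately simplified, self-contained sketch of that selection step follows; the helpers are hypothetical stand-ins, not the libavcodec code.

```c
/* Compact sketch of the MV-guessing idea in guess_mv(); simplified on purpose.
 * predict_block() and border_mismatch() are hypothetical stand-ins for
 * MPV_decode_mb() and the per-edge absolute-difference sums in the real code. */
#include <limits.h>
#include <stdint.h>

typedef struct { int x, y; } MV;

static void predict_block(int mb_x, int mb_y, MV mv)
{   /* would motion-compensate macroblock (mb_x, mb_y) with vector mv */
    (void)mb_x; (void)mb_y; (void)mv;
}

static int border_mismatch(int mb_x, int mb_y)
{   /* would sum |pixel differences| along edges shared with trusted MBs */
    (void)mb_x; (void)mb_y;
    return 0;
}

static void guess_one_mb(int mb_x, int mb_y, int mb_width, int mb_height,
                         int mb_stride, const uint8_t *fixed, const MV *mvs,
                         MV *out)
{
    MV cand[5];
    int n = 0, j, best = 0, best_score = INT_MAX;
    const int xy = mb_x + mb_y * mb_stride;

    /* Candidates: vectors of the four neighbours already marked trusted
     * ("fixed" in guess_mv()), plus the zero vector as a fallback. */
    if (mb_x > 0             && fixed[xy - 1])         cand[n++] = mvs[xy - 1];
    if (mb_x + 1 < mb_width  && fixed[xy + 1])         cand[n++] = mvs[xy + 1];
    if (mb_y > 0             && fixed[xy - mb_stride]) cand[n++] = mvs[xy - mb_stride];
    if (mb_y + 1 < mb_height && fixed[xy + mb_stride]) cand[n++] = mvs[xy + mb_stride];
    cand[n].x = cand[n].y = 0;
    n++;

    /* Try each candidate and keep the one whose prediction matches the
     * surrounding, already-decoded pixels best along the block borders. */
    for (j = 0; j < n; j++) {
        int score;
        predict_block(mb_x, mb_y, cand[j]);
        score = border_mismatch(mb_x, mb_y);
        if (score < best_score) {
            best_score = score;
            best       = j;
        }
    }
    *out = cand[best];
}
```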
diff --git a/libavcodec/h263.c b/libavcodec/h263.c
index 316751269a..ed4da642f0 100644
--- a/libavcodec/h263.c
+++ b/libavcodec/h263.c
@@ -39,12 +39,6 @@
//#undef NDEBUG
//#include <assert.h>
-#if 1
-#define PRINT_MB_TYPE(a) {}
-#else
-#define PRINT_MB_TYPE(a) printf(a)
-#endif
-
#define INTRA_MCBPC_VLC_BITS 6
#define INTER_MCBPC_VLC_BITS 6
#define CBPY_VLC_BITS 6
@@ -290,7 +284,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], int d
ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
ac_val1= ac_val;
if(dir[n]){
- const int xy= s->mb_x + s->mb_y*s->mb_width - s->mb_width;
+ const int xy= s->mb_x + s->mb_y*s->mb_stride - s->mb_stride;
/* top prediction */
ac_val-= s->block_wrap[n]*16;
if(s->mb_y==0 || s->qscale == qscale_table[xy] || n==2 || n==3){
@@ -313,7 +307,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], int d
}
}
}else{
- const int xy= s->mb_x-1 + s->mb_y*s->mb_width;
+ const int xy= s->mb_x-1 + s->mb_y*s->mb_stride;
/* left prediction */
ac_val-= 16;
if(s->mb_x==0 || s->qscale == qscale_table[xy] || n==1 || n==3){
@@ -349,12 +343,12 @@ void ff_clean_h263_qscales(MpegEncContext *s){
int8_t * const qscale_table= s->current_picture.qscale_table;
for(i=1; i<s->mb_num; i++){
- if(qscale_table[i] - qscale_table[i-1] >2)
- qscale_table[i]= qscale_table[i-1]+2;
+ if(qscale_table[ s->mb_index2xy[i] ] - qscale_table[ s->mb_index2xy[i-1] ] >2)
+ qscale_table[ s->mb_index2xy[i] ]= qscale_table[ s->mb_index2xy[i-1] ]+2;
}
for(i=s->mb_num-2; i>=0; i--){
- if(qscale_table[i] - qscale_table[i+1] >2)
- qscale_table[i]= qscale_table[i+1]+2;
+ if(qscale_table[ s->mb_index2xy[i] ] - qscale_table[ s->mb_index2xy[i+1] ] >2)
+ qscale_table[ s->mb_index2xy[i] ]= qscale_table[ s->mb_index2xy[i+1] ]+2;
}
}
@@ -368,9 +362,11 @@ void ff_clean_mpeg4_qscales(MpegEncContext *s){
ff_clean_h263_qscales(s);
for(i=1; i<s->mb_num; i++){
- if(qscale_table[i] != qscale_table[i-1] && (s->mb_type[i]&MB_TYPE_INTER4V)){
- s->mb_type[i]&= ~MB_TYPE_INTER4V;
- s->mb_type[i]|= MB_TYPE_INTER;
+ int mb_xy= s->mb_index2xy[i];
+
+ if(qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i-1]] && (s->mb_type[mb_xy]&MB_TYPE_INTER4V)){
+ s->mb_type[mb_xy]&= ~MB_TYPE_INTER4V;
+ s->mb_type[mb_xy]|= MB_TYPE_INTER;
}
}
@@ -380,52 +376,47 @@ void ff_clean_mpeg4_qscales(MpegEncContext *s){
for the actual adaptive quantization */
for(i=0; i<s->mb_num; i++){
- odd += qscale_table[i]&1;
+ int mb_xy= s->mb_index2xy[i];
+ odd += qscale_table[mb_xy]&1;
}
if(2*odd > s->mb_num) odd=1;
else odd=0;
for(i=0; i<s->mb_num; i++){
- if((qscale_table[i]&1) != odd)
- qscale_table[i]++;
- if(qscale_table[i] > 31)
- qscale_table[i]= 31;
+ int mb_xy= s->mb_index2xy[i];
+ if((qscale_table[mb_xy]&1) != odd)
+ qscale_table[mb_xy]++;
+ if(qscale_table[mb_xy] > 31)
+ qscale_table[mb_xy]= 31;
}
for(i=1; i<s->mb_num; i++){
- if(qscale_table[i] != qscale_table[i-1] && (s->mb_type[i]&MB_TYPE_DIRECT)){
- s->mb_type[i]&= ~MB_TYPE_DIRECT;
- s->mb_type[i]|= MB_TYPE_BIDIR;
+ int mb_xy= s->mb_index2xy[i];
+ if(qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i-1]] && (s->mb_type[mb_xy]&MB_TYPE_DIRECT)){
+ s->mb_type[mb_xy]&= ~MB_TYPE_DIRECT;
+ s->mb_type[mb_xy]|= MB_TYPE_BIDIR;
}
}
}
}
#endif //CONFIG_ENCODERS
-
-void ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
- const int mb_index= s->mb_x + s->mb_y*s->mb_width;
+/**
+ *
+ * @return the mb_type
+ */
+int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
+ const int mb_index= s->mb_x + s->mb_y*s->mb_stride;
+ const int colocated_mb_type= s->next_picture.mb_type[mb_index]; //FIXME or next?
int xy= s->block_index[0];
uint16_t time_pp= s->pp_time;
uint16_t time_pb= s->pb_time;
int i;
//FIXME avoid divides
- switch(s->co_located_type_table[mb_index]){
- case 0:
- s->mv[0][0][0] = s->mv[0][1][0] = s->mv[0][2][0] = s->mv[0][3][0] = s->motion_val[xy][0]*time_pb/time_pp + mx;
- s->mv[0][0][1] = s->mv[0][1][1] = s->mv[0][2][1] = s->mv[0][3][1] = s->motion_val[xy][1]*time_pb/time_pp + my;
- s->mv[1][0][0] = s->mv[1][1][0] = s->mv[1][2][0] = s->mv[1][3][0] = mx ? s->mv[0][0][0] - s->motion_val[xy][0]
- : s->motion_val[xy][0]*(time_pb - time_pp)/time_pp;
- s->mv[1][0][1] = s->mv[1][1][1] = s->mv[1][2][1] = s->mv[1][3][1] = my ? s->mv[0][0][1] - s->motion_val[xy][1]
- : s->motion_val[xy][1]*(time_pb - time_pp)/time_pp;
- if((s->avctx->workaround_bugs & FF_BUG_DIRECT_BLOCKSIZE) || !s->quarter_sample)
- s->mv_type= MV_TYPE_16X16;
- else
- s->mv_type= MV_TYPE_8X8;
- break;
- case CO_LOCATED_TYPE_4MV:
+
+ if(IS_8X8(colocated_mb_type)){
s->mv_type = MV_TYPE_8X8;
for(i=0; i<4; i++){
xy= s->block_index[i];
@@ -436,8 +427,8 @@ void ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
s->mv[1][i][1] = my ? s->mv[0][i][1] - s->motion_val[xy][1]
: s->motion_val[xy][1]*(time_pb - time_pp)/time_pp;
}
- break;
- case CO_LOCATED_TYPE_FIELDMV:
+ return MB_TYPE_DIRECT2 | MB_TYPE_8x8 | MB_TYPE_L0L1;
+ } else if(IS_INTERLACED(colocated_mb_type)){
s->mv_type = MV_TYPE_FIELD;
for(i=0; i<2; i++){
if(s->top_field_first){
@@ -454,7 +445,19 @@ void ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
s->mv[1][i][1] = my ? s->mv[0][i][1] - s->field_mv_table[mb_index][i][1]
: s->field_mv_table[mb_index][i][1]*(time_pb - time_pp)/time_pp;
}
- break;
+ return MB_TYPE_DIRECT2 | MB_TYPE_16x8 | MB_TYPE_L0L1 | MB_TYPE_INTERLACED;
+ }else{
+ s->mv[0][0][0] = s->mv[0][1][0] = s->mv[0][2][0] = s->mv[0][3][0] = s->motion_val[xy][0]*time_pb/time_pp + mx;
+ s->mv[0][0][1] = s->mv[0][1][1] = s->mv[0][2][1] = s->mv[0][3][1] = s->motion_val[xy][1]*time_pb/time_pp + my;
+ s->mv[1][0][0] = s->mv[1][1][0] = s->mv[1][2][0] = s->mv[1][3][0] = mx ? s->mv[0][0][0] - s->motion_val[xy][0]
+ : s->motion_val[xy][0]*(time_pb - time_pp)/time_pp;
+ s->mv[1][0][1] = s->mv[1][1][1] = s->mv[1][2][1] = s->mv[1][3][1] = my ? s->mv[0][0][1] - s->motion_val[xy][1]
+ : s->motion_val[xy][1]*(time_pb - time_pp)/time_pp;
+ if((s->avctx->workaround_bugs & FF_BUG_DIRECT_BLOCKSIZE) || !s->quarter_sample)
+ s->mv_type= MV_TYPE_16X16;
+ else
+ s->mv_type= MV_TYPE_8X8;
+ return MB_TYPE_DIRECT2 | MB_TYPE_16x16 | MB_TYPE_L0L1; //Note see prev line
}
}
@@ -496,7 +499,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
assert(mb_type>=0);
/* nothing to do if this MB was skiped in the next P Frame */
- if(s->next_picture.mbskip_table[s->mb_y * s->mb_width + s->mb_x]){ //FIXME avoid DCT & ...
+ if(s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]){ //FIXME avoid DCT & ...
s->skip_count++;
s->mv[0][0][0]=
s->mv[0][0][1]=
@@ -1890,7 +1893,7 @@ void mpeg4_pred_ac(MpegEncContext * s, DCTELEM *block, int n,
ac_val1 = ac_val;
if (s->ac_pred) {
if (dir == 0) {
- const int xy= s->mb_x-1 + s->mb_y*s->mb_width;
+ const int xy= s->mb_x-1 + s->mb_y*s->mb_stride;
/* left prediction */
ac_val -= 16;
@@ -1906,7 +1909,7 @@ void mpeg4_pred_ac(MpegEncContext * s, DCTELEM *block, int n,
}
}
} else {
- const int xy= s->mb_x + s->mb_y*s->mb_width - s->mb_width;
+ const int xy= s->mb_x + s->mb_y*s->mb_stride - s->mb_stride;
/* top prediction */
ac_val -= 16 * s->block_wrap[n];
@@ -1946,7 +1949,7 @@ static void mpeg4_inv_pred_ac(MpegEncContext * s, DCTELEM *block, int n,
ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
if (dir == 0) {
- const int xy= s->mb_x-1 + s->mb_y*s->mb_width;
+ const int xy= s->mb_x-1 + s->mb_y*s->mb_stride;
/* left prediction */
ac_val -= 16;
if(s->mb_x==0 || s->qscale == qscale_table[xy] || n==1 || n==3){
@@ -1961,7 +1964,7 @@ static void mpeg4_inv_pred_ac(MpegEncContext * s, DCTELEM *block, int n,
}
}
} else {
- const int xy= s->mb_x + s->mb_y*s->mb_width - s->mb_width;
+ const int xy= s->mb_x + s->mb_y*s->mb_stride - s->mb_stride;
/* top prediction */
ac_val -= 16 * s->block_wrap[n];
if(s->mb_y==0 || s->qscale == qscale_table[xy] || n==2 || n==3){
@@ -2500,7 +2503,7 @@ static int mpeg4_decode_video_packet_header(MpegEncContext *s)
return -1;
}
if(s->pict_type == B_TYPE){
- while(s->next_picture.mbskip_table[ mb_num ]) mb_num++;
+ while(s->next_picture.mbskip_table[ s->mb_index2xy[ mb_num ] ]) mb_num++;
if(mb_num >= s->mb_num) return -1; // slice contains just skiped MBs which where allready decoded
}
@@ -2693,7 +2696,7 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){
for(; s->mb_y<s->mb_height; s->mb_y++){
ff_init_block_index(s);
for(; s->mb_x<s->mb_width; s->mb_x++){
- const int xy= s->mb_x + s->mb_y*s->mb_width;
+ const int xy= s->mb_x + s->mb_y*s->mb_stride;
int cbpc;
int dir=0;
@@ -2702,8 +2705,6 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){
if(s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y+1)
s->first_slice_line=0;
- if(s->mb_x==0) PRINT_MB_TYPE("\n");
-
if(s->pict_type==I_TYPE){
int i;
@@ -2711,7 +2712,6 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){
return mb_num-1;
}
- PRINT_MB_TYPE("I");
cbpc = get_vlc2(&s->gb, intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 1);
if (cbpc < 0){
@@ -2719,7 +2719,7 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){
return -1;
}
s->cbp_table[xy]= cbpc & 3;
- s->mb_type[xy]= MB_TYPE_INTRA;
+ s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
s->mb_intra = 1;
if(cbpc & 4) {
@@ -2751,13 +2751,12 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){
skip_bits1(&s->gb);
if(bits&0x10000){
/* skip mb */
- s->mb_type[xy]= MB_TYPE_SKIPED;
if(s->pict_type==S_TYPE && s->vol_sprite_usage==GMC_SPRITE){
- PRINT_MB_TYPE("G");
+ s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
mx= get_amv(s, 0);
my= get_amv(s, 1);
}else{
- PRINT_MB_TYPE("S");
+ s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
mx=my=0;
}
mot_val[0 ]= mot_val[2 ]=
@@ -2783,9 +2782,8 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){
s->mb_intra = ((cbpc & 4) != 0);
if(s->mb_intra){
- PRINT_MB_TYPE("I");
+ s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
s->mbintra_table[xy]= 1;
- s->mb_type[xy]= MB_TYPE_INTRA;
mot_val[0 ]= mot_val[2 ]=
mot_val[0+stride]= mot_val[2+stride]= 0;
mot_val[1 ]= mot_val[3 ]=
@@ -2799,9 +2797,7 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){
else s->mcsel= 0;
if ((cbpc & 16) == 0) {
- PRINT_MB_TYPE("P");
/* 16x16 motion prediction */
- s->mb_type[xy]= MB_TYPE_INTER;
h263_pred_motion(s, 0, &pred_x, &pred_y);
if(!s->mcsel){
@@ -2812,9 +2808,11 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){
my = h263_decode_motion(s, pred_y, s->f_code);
if (my >= 0xffff)
return -1;
+ s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
} else {
mx = get_amv(s, 0);
my = get_amv(s, 1);
+ s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
}
mot_val[0 ]= mot_val[2 ] =
@@ -2823,8 +2821,7 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){
mot_val[1+stride]= mot_val[3+stride]= my;
} else {
int i;
- PRINT_MB_TYPE("4");
- s->mb_type[xy]= MB_TYPE_INTER4V;
+ s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
for(i=0;i<4;i++) {
int16_t *mot_val= h263_pred_motion(s, i, &pred_x, &pred_y);
mx = h263_decode_motion(s, pred_x, s->f_code);
@@ -2860,7 +2857,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
for(s->mb_y= s->resync_mb_y; mb_num < mb_count; s->mb_y++){
ff_init_block_index(s);
for(; mb_num < mb_count && s->mb_x<s->mb_width; s->mb_x++){
- const int xy= s->mb_x + s->mb_y*s->mb_width;
+ const int xy= s->mb_x + s->mb_y*s->mb_stride;
mb_num++;
ff_update_block_index(s);
@@ -2876,9 +2873,9 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
}
s->cbp_table[xy]|= cbpy<<2;
- s->pred_dir_table[xy]|= ac_pred<<7;
+ s->current_picture.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED;
}else{ /* P || S_TYPE */
- if(s->mb_type[xy]&MB_TYPE_INTRA){
+ if(IS_INTRA(s->current_picture.mb_type[xy])){
int dir=0,i;
int ac_pred = get_bits1(&s->gb);
int cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1);
@@ -2905,8 +2902,9 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
}
s->cbp_table[xy]&= 3; //remove dquant
s->cbp_table[xy]|= cbpy<<2;
- s->pred_dir_table[xy]= dir | (ac_pred<<7);
- }else if(s->mb_type[xy]&MB_TYPE_SKIPED){
+ s->current_picture.mb_type[xy] |= ac_pred*MB_TYPE_ACPRED;
+ s->pred_dir_table[xy]= dir;
+ }else if(IS_SKIP(s->current_picture.mb_type[xy])){
s->current_picture.qscale_table[xy]= s->qscale;
s->cbp_table[xy]= 0;
}else{
@@ -2989,9 +2987,9 @@ int ff_mpeg4_decode_partitions(MpegEncContext *s)
static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64])
{
int cbp, mb_type;
- const int xy= s->mb_x + s->mb_y*s->mb_width;
+ const int xy= s->mb_x + s->mb_y*s->mb_stride;
- mb_type= s->mb_type[xy];
+ mb_type= s->current_picture.mb_type[xy];
cbp = s->cbp_table[xy];
if(s->current_picture.qscale_table[xy] != s->qscale){
@@ -3006,9 +3004,9 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64])
s->mv[0][i][0] = s->motion_val[ s->block_index[i] ][0];
s->mv[0][i][1] = s->motion_val[ s->block_index[i] ][1];
}
- s->mb_intra = mb_type&MB_TYPE_INTRA;
+ s->mb_intra = IS_INTRA(mb_type);
- if (mb_type&MB_TYPE_SKIPED) {
+ if (IS_SKIP(mb_type)) {
/* skip mb */
for(i=0;i<6;i++)
s->block_last_index[i] = -1;
@@ -3022,12 +3020,12 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64])
s->mb_skiped = 1;
}
}else if(s->mb_intra){
- s->ac_pred = s->pred_dir_table[xy]>>7;
+ s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
}else if(!s->mb_intra){
// s->mcsel= 0; //FIXME do we need to init that
s->mv_dir = MV_DIR_FORWARD;
- if (mb_type&MB_TYPE_INTER4V) {
+ if (IS_8X8(mb_type)) {
s->mv_type = MV_TYPE_8X8;
} else {
s->mv_type = MV_TYPE_16X16;
@@ -3035,10 +3033,10 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64])
}
} else { /* I-Frame */
s->mb_intra = 1;
- s->ac_pred = s->pred_dir_table[xy]>>7;
+ s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
}
- if (!(mb_type&MB_TYPE_SKIPED)) {
+ if (!IS_SKIP(mb_type)) {
int i;
/* decode each block */
for (i = 0; i < 6; i++) {
@@ -3059,10 +3057,12 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64])
else
return SLICE_NOEND;
}else{
- if(s->cbp_table[xy+1] && mpeg4_is_resync(s))
- return SLICE_END;
- else
- return SLICE_OK;
+ if(mpeg4_is_resync(s)){
+ const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1;
+ if(s->cbp_table[xy+delta])
+ return SLICE_END;
+ }
+ return SLICE_OK;
}
}
@@ -3072,8 +3072,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant;
int16_t *mot_val;
static int8_t quant_tab[4] = { -1, -2, 1, 2 };
-
- if(s->mb_x==0) PRINT_MB_TYPE("\n");
+ const int xy= s->mb_x + s->mb_y * s->mb_stride;
if (s->pict_type == P_TYPE || s->pict_type==S_TYPE) {
if (get_bits1(&s->gb)) {
@@ -3084,14 +3083,14 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
if(s->pict_type==S_TYPE && s->vol_sprite_usage==GMC_SPRITE){
- PRINT_MB_TYPE("G");
+ s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
s->mcsel=1;
s->mv[0][0][0]= get_amv(s, 0);
s->mv[0][0][1]= get_amv(s, 1);
s->mb_skiped = 0;
}else{
- PRINT_MB_TYPE("S");
+ s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->mcsel=0;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
@@ -3126,7 +3125,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->mv_dir = MV_DIR_FORWARD;
if ((cbpc & 16) == 0) {
if(s->mcsel){
- PRINT_MB_TYPE("G");
+ s->current_picture.mb_type[xy]= MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 global motion prediction */
s->mv_type = MV_TYPE_16X16;
mx= get_amv(s, 0);
@@ -3134,7 +3133,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->mv[0][0][0] = mx;
s->mv[0][0][1] = my;
}else if((!s->progressive_sequence) && get_bits1(&s->gb)){
- PRINT_MB_TYPE("f");
+ s->current_picture.mb_type[xy]= MB_TYPE_16x8 | MB_TYPE_L0 | MB_TYPE_INTERLACED;
/* 16x8 field motion prediction */
s->mv_type= MV_TYPE_FIELD;
@@ -3156,7 +3155,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->mv[0][i][1] = my;
}
}else{
- PRINT_MB_TYPE("P");
+ s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
s->mv_type = MV_TYPE_16X16;
h263_pred_motion(s, 0, &pred_x, &pred_y);
@@ -3182,7 +3181,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
}
} else {
- PRINT_MB_TYPE("4");
+ s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
s->mv_type = MV_TYPE_8X8;
for(i=0;i<4;i++) {
mot_val = h263_pred_motion(s, i, &pred_x, &pred_y);
@@ -3225,7 +3224,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
}
/* if we skipped it in the future P Frame than skip it now too */
- s->mb_skiped= s->next_picture.mbskip_table[s->mb_y * s->mb_width + s->mb_x]; // Note, skiptab=0 if last was GMC
+ s->mb_skiped= s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC
if(s->mb_skiped){
/* skip mb */
@@ -3238,41 +3237,44 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->mv[0][0][1] = 0;
s->mv[1][0][0] = 0;
s->mv[1][0][1] = 0;
- PRINT_MB_TYPE("s");
+ s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
goto end;
}
modb1= get_bits1(&s->gb);
if(modb1){
- mb_type=4; //like MB_TYPE_B_DIRECT but no vectors coded
+ mb_type= MB_TYPE_DIRECT2 | MB_TYPE_SKIP | MB_TYPE_L0L1; //like MB_TYPE_B_DIRECT but no vectors coded
cbp=0;
}else{
- int field_mv;
-
modb2= get_bits1(&s->gb);
mb_type= get_vlc2(&s->gb, mb_type_b_vlc.table, MB_TYPE_B_VLC_BITS, 1);
+ if(mb_type<0){
+ printf("illegal MB_type\n");
+ return -1;
+ }
+ mb_type= mb_type_b_map[ mb_type ];
if(modb2) cbp= 0;
else cbp= get_bits(&s->gb, 6);
- if (mb_type!=MB_TYPE_B_DIRECT && cbp) {
+ if ((!IS_DIRECT(mb_type)) && cbp) {
if(get_bits1(&s->gb)){
change_qscale(s, get_bits1(&s->gb)*4 - 2);
}
}
- field_mv=0;
if(!s->progressive_sequence){
if(cbp)
s->interlaced_dct= get_bits1(&s->gb);
- if(mb_type!=MB_TYPE_B_DIRECT && get_bits1(&s->gb)){
- field_mv=1;
+ if(!IS_DIRECT(mb_type) && get_bits1(&s->gb)){
+ mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
+ mb_type &= ~MB_TYPE_16x16;
- if(mb_type!=MB_TYPE_B_BACKW){
+ if(USES_LIST(mb_type, 0)){
s->field_select[0][0]= get_bits1(&s->gb);
s->field_select[0][1]= get_bits1(&s->gb);
}
- if(mb_type!=MB_TYPE_B_FORW){
+ if(USES_LIST(mb_type, 1)){
s->field_select[1][0]= get_bits1(&s->gb);
s->field_select[1][1]= get_bits1(&s->gb);
}
@@ -3280,9 +3282,10 @@ int ff_h263_decode_mb(MpegEncContext *s,
}
s->mv_dir = 0;
- if(mb_type!=MB_TYPE_B_DIRECT && !field_mv){
+ if((mb_type & (MB_TYPE_DIRECT2|MB_TYPE_INTERLACED)) == 0){
s->mv_type= MV_TYPE_16X16;
- if(mb_type!=MB_TYPE_B_BACKW){
+
+ if(USES_LIST(mb_type, 0)){
s->mv_dir = MV_DIR_FORWARD;
mx = h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
@@ -3291,7 +3294,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->last_mv[0][1][1]= s->last_mv[0][0][1]= s->mv[0][0][1] = my;
}
- if(mb_type!=MB_TYPE_B_FORW){
+ if(USES_LIST(mb_type, 1)){
s->mv_dir |= MV_DIR_BACKWARD;
mx = h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
@@ -3299,12 +3302,10 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->last_mv[1][1][0]= s->last_mv[1][0][0]= s->mv[1][0][0] = mx;
s->last_mv[1][1][1]= s->last_mv[1][0][1]= s->mv[1][0][1] = my;
}
- if(mb_type!=MB_TYPE_B_DIRECT)
- PRINT_MB_TYPE(mb_type==MB_TYPE_B_FORW ? "F" : (mb_type==MB_TYPE_B_BACKW ? "B" : "T"));
- }else if(mb_type!=MB_TYPE_B_DIRECT){
+ }else if(!IS_DIRECT(mb_type)){
s->mv_type= MV_TYPE_FIELD;
- if(mb_type!=MB_TYPE_B_BACKW){
+ if(USES_LIST(mb_type, 0)){
s->mv_dir = MV_DIR_FORWARD;
for(i=0; i<2; i++){
@@ -3315,7 +3316,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
}
}
- if(mb_type!=MB_TYPE_B_FORW){
+ if(USES_LIST(mb_type, 1)){
s->mv_dir |= MV_DIR_BACKWARD;
for(i=0; i<2; i++){
@@ -3325,13 +3326,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->last_mv[1][i][1]= (s->mv[1][i][1] = my)*2;
}
}
- if(mb_type!=MB_TYPE_B_DIRECT)
- PRINT_MB_TYPE(mb_type==MB_TYPE_B_FORW ? "f" : (mb_type==MB_TYPE_B_BACKW ? "b" : "t"));
}
}
- if(mb_type==4 || mb_type==MB_TYPE_B_DIRECT){
- if(mb_type==4)
+ if(IS_DIRECT(mb_type)){
+ if(IS_SKIP(mb_type))
mx=my=0;
else{
mx = h263_decode_motion(s, 0, 1);
@@ -3339,13 +3338,9 @@ int ff_h263_decode_mb(MpegEncContext *s,
}
s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
- ff_mpeg4_set_direct_mv(s, mx, my);
- }
-
- if(mb_type<0 || mb_type>4){
- printf("illegal MB_type\n");
- return -1;
+ mb_type |= ff_mpeg4_set_direct_mv(s, mx, my);
}
+ s->current_picture.mb_type[xy]= mb_type;
} else { /* I-Frame */
cbpc = get_vlc2(&s->gb, intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 1);
if (cbpc < 0)
@@ -3353,13 +3348,17 @@ int ff_h263_decode_mb(MpegEncContext *s,
dquant = cbpc & 4;
s->mb_intra = 1;
intra:
- s->ac_pred = 0;
+ s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
if (s->h263_pred || s->h263_aic) {
s->ac_pred = get_bits1(&s->gb);
- if (s->ac_pred && s->h263_aic)
- s->h263_aic_dir = get_bits1(&s->gb);
- }
- PRINT_MB_TYPE(s->ac_pred ? "A" : "I");
+ if(s->ac_pred){
+ s->current_picture.mb_type[xy]= MB_TYPE_INTRA | MB_TYPE_ACPRED;
+
+ if (s->h263_aic)
+ s->h263_aic_dir = get_bits1(&s->gb);
+ }
+ }else
+ s->ac_pred = 0;
cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1);
if(cbpy<0) return -1;
@@ -3407,7 +3406,8 @@ end:
/* per-MB end of slice check */
if(s->codec_id==CODEC_ID_MPEG4){
if(mpeg4_is_resync(s)){
- if(s->pict_type==B_TYPE && s->next_picture.mbskip_table[s->mb_y * s->mb_width + s->mb_x+1])
+ const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1;
+ if(s->pict_type==B_TYPE && s->next_picture.mbskip_table[xy + delta])
return SLICE_OK;
return SLICE_END;
}
@@ -3429,11 +3429,11 @@ static int h263_decode_motion(MpegEncContext * s, int pred, int f_code)
{
int code, val, sign, shift, l;
code = get_vlc2(&s->gb, mv_vlc.table, MV_VLC_BITS, 2);
- if (code < 0)
- return 0xffff;
if (code == 0)
return pred;
+ if (code < 0)
+ return 0xffff;
sign = get_bits1(&s->gb);
shift = f_code - 1;
@@ -3676,7 +3676,7 @@ static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
level = s->dc_val[0][ s->block_index[n] ];
if(n<4) level= (level + (s->y_dc_scale>>1))/s->y_dc_scale; //FIXME optimizs
else level= (level + (s->c_dc_scale>>1))/s->c_dc_scale;
- dc_pred_dir= (s->pred_dir_table[s->mb_x + s->mb_y*s->mb_width]<<n)&32;
+ dc_pred_dir= (s->pred_dir_table[s->mb_x + s->mb_y*s->mb_stride]<<n)&32;
}else{
level = mpeg4_decode_dc(s, n, &dc_pred_dir);
if (level < 0)
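Independent of the re-indexing, ff_mpeg4_set_direct_mv() above now returns an mb_type and reads the co-located type from next_picture.mb_type, while the temporal scaling of the co-located vector itself is unchanged. A small self-contained sketch of that scaling (simplified names; this is not the libavcodec function):

```c
/* Sketch of the MPEG-4 direct-mode scaling used in ff_mpeg4_set_direct_mv();
 * names are simplified stand-ins for the real fields. */
typedef struct { int x, y; } MV;

static void direct_mode_mvs(MV col,           /* co-located MV from the next P picture */
                            int time_pp,      /* temporal distance between the two P pictures */
                            int time_pb,      /* temporal distance from the previous P to this B */
                            int dmx, int dmy, /* delta vector coded in the B macroblock */
                            MV *fwd, MV *bwd)
{
    fwd->x = col.x * time_pb / time_pp + dmx;
    fwd->y = col.y * time_pb / time_pp + dmy;

    /* Zero delta: scale the co-located vector directly for the backward MV;
     * otherwise the backward MV is forward minus co-located. */
    bwd->x = dmx ? fwd->x - col.x : col.x * (time_pb - time_pp) / time_pp;
    bwd->y = dmy ? fwd->y - col.y : col.y * (time_pb - time_pp) / time_pp;
}
```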
diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c
index c8a13a7542..75dbcb2d6c 100644
--- a/libavcodec/h263dec.c
+++ b/libavcodec/h263dec.c
@@ -209,7 +209,7 @@ static int decode_slice(MpegEncContext *s){
MPV_decode_mb(s, s->block);
if(ret<0){
- const int xy= s->mb_x + s->mb_y*s->mb_width;
+ const int xy= s->mb_x + s->mb_y*s->mb_stride;
if(ret==SLICE_END){
//printf("%d %d %d %06X\n", s->mb_x, s->mb_y, s->gb.size*8 - get_bits_count(&s->gb), show_bits(&s->gb, 24));
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
@@ -644,12 +644,6 @@ retry:
ff_er_frame_start(s);
/* decode each macroblock */
- s->block_wrap[0]=
- s->block_wrap[1]=
- s->block_wrap[2]=
- s->block_wrap[3]= s->mb_width*2 + 2;
- s->block_wrap[4]=
- s->block_wrap[5]= s->mb_width + 2;
s->mb_x=0;
s->mb_y=0;
@@ -708,8 +702,8 @@ retry:
for(mb_y=0; mb_y<s->mb_height; mb_y++){
int mb_x;
for(mb_x=0; mb_x<s->mb_width; mb_x++){
- const int mb_index= mb_x + mb_y*s->mb_width;
- if(s->co_located_type_table[mb_index] == MV_TYPE_8X8){
+ const int mb_index= mb_x + mb_y*s->mb_stride;
+ if(IS_8X8(s->current_picture.mb_type[mb_index])){
int i;
for(i=0; i<4; i++){
int sx= mb_x*16 + 4 + 8*(i&1);
@@ -732,25 +726,14 @@ retry:
}
}
-
if(s->pict_type==B_TYPE || s->low_delay){
*pict= *(AVFrame*)&s->current_picture;
+ ff_print_debug_info(s, s->current_picture_ptr);
} else {
*pict= *(AVFrame*)&s->last_picture;
+ ff_print_debug_info(s, s->last_picture_ptr);
}
-
- if(avctx->debug&FF_DEBUG_QP){
- int8_t *qtab= pict->qscale_table;
- int x,y;
-
- for(y=0; y<s->mb_height; y++){
- for(x=0; x<s->mb_width; x++){
- printf("%2d ", qtab[x + y*s->mb_width]);
- }
- printf("\n");
- }
- printf("\n");
- }
+
/* Return the Picture timestamp as the frame number */
/* we substract 1 because it is added on utils.c */
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 83f26792be..ca76c13d5f 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -149,8 +149,6 @@ typedef struct H264Context{
uint8_t *rbsp_buffer;
int rbsp_buffer_size;
- int mb_stride; ///< stride of some mb tables
-
int chroma_qp; //QPc
int prev_mb_skiped; //FIXME remove (IMHO not used)
@@ -355,7 +353,7 @@ static inline void fill_rectangle(void *p, int w, int h, int stride, uint32_t va
static inline void fill_caches(H264Context *h, int mb_type){
MpegEncContext * const s = &h->s;
- const int mb_xy= s->mb_x + s->mb_y*h->mb_stride;
+ const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
int topleft_xy, top_xy, topright_xy, left_xy[2];
int topleft_type, top_type, topright_type, left_type[2];
int left_block[4];
@@ -366,9 +364,9 @@ static inline void fill_caches(H264Context *h, int mb_type){
if(h->sps.mb_aff){
//FIXME
}else{
- topleft_xy = mb_xy-1 - h->mb_stride;
- top_xy = mb_xy - h->mb_stride;
- topright_xy= mb_xy+1 - h->mb_stride;
+ topleft_xy = mb_xy-1 - s->mb_stride;
+ top_xy = mb_xy - s->mb_stride;
+ topright_xy= mb_xy+1 - s->mb_stride;
left_xy[0] = mb_xy-1;
left_xy[1] = mb_xy-1;
left_block[0]= 0;
@@ -602,7 +600,7 @@ static inline void fill_caches(H264Context *h, int mb_type){
static inline void write_back_intra_pred_mode(H264Context *h){
MpegEncContext * const s = &h->s;
- const int mb_xy= s->mb_x + s->mb_y*h->mb_stride;
+ const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
h->intra4x4_pred_mode[mb_xy][0]= h->intra4x4_pred_mode_cache[7+8*1];
h->intra4x4_pred_mode[mb_xy][1]= h->intra4x4_pred_mode_cache[7+8*2];
@@ -693,7 +691,7 @@ static inline int pred_intra_mode(H264Context *h, int n){
static inline void write_back_non_zero_count(H264Context *h){
MpegEncContext * const s = &h->s;
- const int mb_xy= s->mb_x + s->mb_y*h->mb_stride;
+ const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
h->non_zero_count[mb_xy][0]= h->non_zero_count_cache[4+8*4];
h->non_zero_count[mb_xy][1]= h->non_zero_count_cache[5+8*4];
@@ -896,7 +894,7 @@ static inline void pred_pskip_motion(H264Context * const h, int * const mx, int
static inline void write_back_motion(H264Context *h, int mb_type){
MpegEncContext * const s = &h->s;
- const int mb_xy= s->mb_x + s->mb_y*h->mb_stride;
+ const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;
const int b8_xy= 2*s->mb_x + 2*s->mb_y*h->b8_stride;
int list;
@@ -1952,7 +1950,7 @@ static void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t *dest_cb, uint8_t
qpel_mc_func (*qpix_put)[16], h264_chroma_mc_func (*chroma_put),
qpel_mc_func (*qpix_avg)[16], h264_chroma_mc_func (*chroma_avg)){
MpegEncContext * const s = &h->s;
- const int mb_xy= s->mb_x + s->mb_y*h->mb_stride;
+ const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
const int mb_type= s->current_picture.mb_type[mb_xy];
assert(IS_INTER(mb_type));
@@ -2122,7 +2120,7 @@ static void free_tables(H264Context *h){
*/
static int alloc_tables(H264Context *h){
MpegEncContext * const s = &h->s;
- const int big_mb_num= h->mb_stride * (s->mb_height+1);
+ const int big_mb_num= s->mb_stride * (s->mb_height+1);
int x,y;
CHECKED_ALLOCZ(h->intra4x4_pred_mode, big_mb_num * 8 * sizeof(uint8_t))
@@ -2130,13 +2128,13 @@ static int alloc_tables(H264Context *h){
CHECKED_ALLOCZ(h->slice_table_base , big_mb_num * sizeof(uint8_t))
memset(h->slice_table_base, -1, big_mb_num * sizeof(uint8_t));
- h->slice_table= h->slice_table_base + h->mb_stride + 1;
+ h->slice_table= h->slice_table_base + s->mb_stride + 1;
CHECKED_ALLOCZ(h->mb2b_xy , big_mb_num * sizeof(uint16_t));
CHECKED_ALLOCZ(h->mb2b8_xy , big_mb_num * sizeof(uint16_t));
for(y=0; y<s->mb_height; y++){
for(x=0; x<s->mb_width; x++){
- const int mb_xy= x + y*h->mb_stride;
+ const int mb_xy= x + y*s->mb_stride;
const int b_xy = 4*x + 4*y*h->b_stride;
const int b8_xy= 2*x + 2*y*h->b8_stride;
@@ -2211,7 +2209,7 @@ static void hl_decode_mb(H264Context *h){
MpegEncContext * const s = &h->s;
const int mb_x= s->mb_x;
const int mb_y= s->mb_y;
- const int mb_xy= mb_x + mb_y*h->mb_stride;
+ const int mb_xy= mb_x + mb_y*s->mb_stride;
const int mb_type= s->current_picture.mb_type[mb_xy];
uint8_t *dest_y, *dest_cb, *dest_cr;
int linesize, uvlinesize /*dct_offset*/;
@@ -2839,7 +2837,6 @@ static int decode_slice_header(H264Context *h){
s->mb_width= h->sps.mb_width;
s->mb_height= h->sps.mb_height;
- h->mb_stride= s->mb_width + 1;
h->b_stride= s->mb_width*4;
h->b8_stride= s->mb_width*2;
@@ -3171,7 +3168,7 @@ static int decode_residual(H264Context *h, GetBitContext *gb, DCTELEM *block, in
*/
static int decode_mb(H264Context *h){
MpegEncContext * const s = &h->s;
- const int mb_xy= s->mb_x + s->mb_y*h->mb_stride;
+ const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
int mb_type, partition_count, cbp;
memset(h->mb, 0, sizeof(int16_t)*24*16); //FIXME avoid if allready clear (move after skip handlong?
@@ -4164,6 +4161,7 @@ static int decode_frame(AVCodecContext *avctx,
}
*pict= *(AVFrame*)&s->current_picture; //FIXME
+ ff_print_debug_info(s, s->current_picture_ptr);
assert(pict->data[0]);
//printf("out %d\n", (int)pict->data[0]);
#if 0 //?
@@ -4183,12 +4181,12 @@ static int decode_frame(AVCodecContext *avctx,
#if 0
static inline void fill_mb_avail(H264Context *h){
MpegEncContext * const s = &h->s;
- const int mb_xy= s->mb_x + s->mb_y*h->mb_stride;
+ const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
if(s->mb_y){
- h->mb_avail[0]= s->mb_x && h->slice_table[mb_xy - h->mb_stride - 1] == h->slice_num;
- h->mb_avail[1]= h->slice_table[mb_xy - h->mb_stride ] == h->slice_num;
- h->mb_avail[2]= s->mb_x+1 < s->mb_width && h->slice_table[mb_xy - h->mb_stride + 1] == h->slice_num;
+ h->mb_avail[0]= s->mb_x && h->slice_table[mb_xy - s->mb_stride - 1] == h->slice_num;
+ h->mb_avail[1]= h->slice_table[mb_xy - s->mb_stride ] == h->slice_num;
+ h->mb_avail[2]= s->mb_x+1 < s->mb_width && h->slice_table[mb_xy - s->mb_stride + 1] == h->slice_num;
}else{
h->mb_avail[0]=
h->mb_avail[1]=
diff --git a/libavcodec/h264data.h b/libavcodec/h264data.h
index b3737c8249..8fce4d240d 100644
--- a/libavcodec/h264data.h
+++ b/libavcodec/h264data.h
@@ -306,6 +306,9 @@ static const uint8_t chroma_dc_scan[4]={
(0+1*2)*16, (1+1*2)*16, //FIXME
};
+#define MB_TYPE_REF0 0x40000000
+#define IS_REF0(a) ((a)&MB_TYPE_REF0)
+
typedef struct IMbInfo{
uint16_t type;
uint8_t pred_mode;
@@ -362,7 +365,7 @@ static const PMbInfo p_sub_mb_type_info[4]={
};
static const PMbInfo b_mb_type_info[23]={
-{MB_TYPE_DIRECT , 1, },
+{MB_TYPE_DIRECT2 , 1, },
{MB_TYPE_16x16|MB_TYPE_P0L0 , 1, },
{MB_TYPE_16x16 |MB_TYPE_P0L1 , 1, },
{MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1 , 1, },
@@ -388,7 +391,7 @@ static const PMbInfo b_mb_type_info[23]={
};
static const PMbInfo b_sub_mb_type_info[13]={
-{MB_TYPE_DIRECT , 1, },
+{MB_TYPE_DIRECT2 , 1, },
{MB_TYPE_16x16|MB_TYPE_P0L0 , 1, },
{MB_TYPE_16x16 |MB_TYPE_P0L1 , 1, },
{MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1 , 1, },
diff --git a/libavcodec/motion_est.c b/libavcodec/motion_est.c
index e0c0cc9171..58b96d4897 100644
--- a/libavcodec/motion_est.c
+++ b/libavcodec/motion_est.c
@@ -775,7 +775,7 @@ static inline int sad_hpel_motion_search(MpegEncContext * s,
static inline void set_p_mv_tables(MpegEncContext * s, int mx, int my, int mv4)
{
- const int xy= s->mb_x + 1 + (s->mb_y + 1)*(s->mb_width + 2);
+ const int xy= s->mb_x + s->mb_y*s->mb_stride;
s->p_mv_table[xy][0] = mx;
s->p_mv_table[xy][1] = my;
@@ -1076,10 +1076,10 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
vard = (s->dsp.sse[0](NULL, pix, ppix, s->linesize)+128)>>8;
//printf("%d %d %d %X %X %X\n", s->mb_width, mb_x, mb_y,(int)s, (int)s->mb_var, (int)s->mc_mb_var); fflush(stdout);
- pic->mb_var [s->mb_width * mb_y + mb_x] = varc;
- pic->mc_mb_var[s->mb_width * mb_y + mb_x] = vard;
- pic->mb_mean [s->mb_width * mb_y + mb_x] = (sum+128)>>8;
-// pic->mb_cmp_score[s->mb_width * mb_y + mb_x] = dmin;
+ pic->mb_var [s->mb_stride * mb_y + mb_x] = varc;
+ pic->mc_mb_var[s->mb_stride * mb_y + mb_x] = vard;
+ pic->mb_mean [s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
+// pic->mb_cmp_score[s->mb_stride * mb_y + mb_x] = dmin;
pic->mb_var_sum += varc;
pic->mc_mb_var_sum += vard;
//printf("E%d %d %d %X %X %X\n", s->mb_width, mb_x, mb_y,(int)s, (int)s->mb_var, (int)s->mc_mb_var); fflush(stdout);
@@ -1129,7 +1129,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
dmin=dmin4;
}
}
- pic->mb_cmp_score[s->mb_width * mb_y + mb_x] = dmin;
+ pic->mb_cmp_score[s->mb_stride * mb_y + mb_x] = dmin;
set_p_mv_tables(s, mx, my, mb_type!=MB_TYPE_INTER4V);
if (vard <= 64 || vard < varc) {
@@ -1139,7 +1139,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
}
}
- s->mb_type[mb_y*s->mb_width + mb_x]= mb_type;
+ s->mb_type[mb_y*s->mb_stride + mb_x]= mb_type;
}
int ff_pre_estimate_p_frame_motion(MpegEncContext * s,
@@ -1152,8 +1152,7 @@ int ff_pre_estimate_p_frame_motion(MpegEncContext * s,
int P[10][2];
const int shift= 1+s->quarter_sample;
uint8_t * const mv_penalty= s->me.mv_penalty[s->f_code] + MAX_MV;
- const int mv_stride= s->mb_width + 2;
- const int xy= mb_x + 1 + (mb_y + 1)*mv_stride;
+ const int xy= mb_x + mb_y*s->mb_stride;
assert(s->quarter_sample==0 || s->quarter_sample==1);
@@ -1178,10 +1177,10 @@ int ff_pre_estimate_p_frame_motion(MpegEncContext * s,
P_TOP[0]= P_TOPRIGHT[0]= P_MEDIAN[0]=
P_TOP[1]= P_TOPRIGHT[1]= P_MEDIAN[1]= 0; //FIXME
} else {
- P_TOP[0] = s->p_mv_table[xy + mv_stride ][0];
- P_TOP[1] = s->p_mv_table[xy + mv_stride ][1];
- P_TOPRIGHT[0] = s->p_mv_table[xy + mv_stride - 1][0];
- P_TOPRIGHT[1] = s->p_mv_table[xy + mv_stride - 1][1];
+ P_TOP[0] = s->p_mv_table[xy + s->mb_stride ][0];
+ P_TOP[1] = s->p_mv_table[xy + s->mb_stride ][1];
+ P_TOPRIGHT[0] = s->p_mv_table[xy + s->mb_stride - 1][0];
+ P_TOPRIGHT[1] = s->p_mv_table[xy + s->mb_stride - 1][1];
if(P_TOP[1] < (rel_ymin<<shift)) P_TOP[1] = (rel_ymin<<shift);
if(P_TOPRIGHT[0] > (rel_xmax<<shift)) P_TOPRIGHT[0]= (rel_xmax<<shift);
if(P_TOPRIGHT[1] < (rel_ymin<<shift)) P_TOPRIGHT[1]= (rel_ymin<<shift);
@@ -1210,8 +1209,8 @@ static int ff_estimate_motion_b(MpegEncContext * s,
int pred_x=0, pred_y=0;
int P[10][2];
const int shift= 1+s->quarter_sample;
- const int mot_stride = s->mb_width + 2;
- const int mot_xy = (mb_y + 1)*mot_stride + mb_x + 1;
+ const int mot_stride = s->mb_stride;
+ const int mot_xy = mb_y*mot_stride + mb_x;
uint8_t * const ref_picture= picture->data[0];
uint8_t * const mv_penalty= s->me.mv_penalty[f_code] + MAX_MV;
int mv_scale;
@@ -1370,8 +1369,8 @@ static inline int check_bidir_mv(MpegEncContext * s,
static inline int bidir_refine(MpegEncContext * s,
int mb_x, int mb_y)
{
- const int mot_stride = s->mb_width + 2;
- const int xy = (mb_y + 1)*mot_stride + mb_x + 1;
+ const int mot_stride = s->mb_stride;
+ const int xy = mb_y *mot_stride + mb_x;
int fbmin;
int pred_fx= s->b_bidir_forw_mv_table[xy-1][0];
int pred_fy= s->b_bidir_forw_mv_table[xy-1][1];
@@ -1397,8 +1396,8 @@ static inline int direct_search(MpegEncContext * s,
int mb_x, int mb_y)
{
int P[10][2];
- const int mot_stride = s->mb_width + 2;
- const int mot_xy = (mb_y + 1)*mot_stride + mb_x + 1;
+ const int mot_stride = s->mb_stride;
+ const int mot_xy = mb_y*mot_stride + mb_x;
const int shift= 1+s->quarter_sample;
int dmin, i;
const int time_pp= s->pp_time;
@@ -1410,7 +1409,7 @@ static inline int direct_search(MpegEncContext * s,
ymin= xmin=(-32)>>shift;
ymax= xmax= 31>>shift;
- if(s->co_located_type_table[mb_x + mb_y*s->mb_width]==CO_LOCATED_TYPE_4MV){
+ if(IS_8X8(s->next_picture.mb_type[mot_xy])){
s->mv_type= MV_TYPE_8X8;
}else{
s->mv_type= MV_TYPE_16X16;
@@ -1526,7 +1525,7 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
score= ((unsigned)(score*score + 128*256))>>16;
s->current_picture.mc_mb_var_sum += score;
- s->current_picture.mc_mb_var[mb_y*s->mb_width + mb_x] = score; //FIXME use SSE
+ s->current_picture.mc_mb_var[mb_y*s->mb_stride + mb_x] = score; //FIXME use SSE
}
if(s->flags&CODEC_FLAG_HQ){
@@ -1534,7 +1533,7 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
if(dmin>256*256*16) type&= ~MB_TYPE_DIRECT; //dont try direct mode if its invalid for this MB
}
- s->mb_type[mb_y*s->mb_width + mb_x]= type;
+ s->mb_type[mb_y*s->mb_stride + mb_x]= type;
}
/* find best f_code for ME which do unlimited searches */
@@ -1551,20 +1550,18 @@ int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type)
for(y=0; y<s->mb_height; y++){
int x;
- int xy= (y+1)* (s->mb_width+2) + 1;
- i= y*s->mb_width;
+ int xy= y*s->mb_stride;
for(x=0; x<s->mb_width; x++){
- if(s->mb_type[i] & type){
+ if(s->mb_type[xy] & type){
int fcode= FFMAX(fcode_tab[mv_table[xy][0] + MAX_MV],
fcode_tab[mv_table[xy][1] + MAX_MV]);
int j;
for(j=0; j<fcode && j<8; j++){
- if(s->pict_type==B_TYPE || s->current_picture.mc_mb_var[i] < s->current_picture.mb_var[i])
+ if(s->pict_type==B_TYPE || s->current_picture.mc_mb_var[xy] < s->current_picture.mb_var[xy])
score[j]-= 170;
}
}
- i++;
xy++;
}
}
@@ -1602,23 +1599,18 @@ void ff_fix_long_p_mvs(MpegEncContext * s)
/* clip / convert to intra 16x16 type MVs */
for(y=0; y<s->mb_height; y++){
int x;
- int xy= (y+1)* (s->mb_width+2)+1;
- int i= y*s->mb_width;
+ int xy= y*s->mb_stride;
for(x=0; x<s->mb_width; x++){
- if(s->mb_type[i]&MB_TYPE_INTER){
+ if(s->mb_type[xy]&MB_TYPE_INTER){
if( s->p_mv_table[xy][0] >=range || s->p_mv_table[xy][0] <-range
|| s->p_mv_table[xy][1] >=range || s->p_mv_table[xy][1] <-range){
- s->mb_type[i] &= ~MB_TYPE_INTER;
- s->mb_type[i] |= MB_TYPE_INTRA;
+ s->mb_type[xy] &= ~MB_TYPE_INTER;
+ s->mb_type[xy] |= MB_TYPE_INTRA;
s->p_mv_table[xy][0] = 0;
s->p_mv_table[xy][1] = 0;
-//clip++;
}
-//else
-// noclip++;
}
xy++;
- i++;
}
}
//printf("%d no:%d %d//\n", clip, noclip, f_code);
@@ -1628,7 +1620,7 @@ void ff_fix_long_p_mvs(MpegEncContext * s)
/* clip / convert to intra 8x8 type MVs */
for(y=0; y<s->mb_height; y++){
int xy= (y*2 + 1)*wrap + 1;
- int i= y*s->mb_width;
+ int i= y*s->mb_stride;
int x;
for(x=0; x<s->mb_width; x++){
@@ -1665,10 +1657,9 @@ void ff_fix_long_b_mvs(MpegEncContext * s, int16_t (*mv_table)[2], int f_code, i
/* clip / convert to intra 16x16 type MVs */
for(y=0; y<s->mb_height; y++){
int x;
- int xy= (y+1)* (s->mb_width+2)+1;
- int i= y*s->mb_width;
+ int xy= y*s->mb_stride;
for(x=0; x<s->mb_width; x++){
- if (s->mb_type[i] & type){ // RAL: "type" test added...
+ if (s->mb_type[xy] & type){ // RAL: "type" test added...
if( mv_table[xy][0] >=range || mv_table[xy][0] <-range
|| mv_table[xy][1] >=range || mv_table[xy][1] <-range){
@@ -1682,7 +1673,6 @@ void ff_fix_long_b_mvs(MpegEncContext * s, int16_t (*mv_table)[2], int f_code, i
}
}
xy++;
- i++;
}
}
}
diff --git a/libavcodec/motion_est_template.c b/libavcodec/motion_est_template.c
index 31a42d607b..2d403993cd 100644
--- a/libavcodec/motion_est_template.c
+++ b/libavcodec/motion_est_template.c
@@ -897,8 +897,8 @@ static int RENAME(epzs_motion_search)(MpegEncContext * s, int block,
int map_generation;
const int penalty_factor= s->me.penalty_factor;
const int size=0;
- const int ref_mv_stride= s->mb_width+2;
- const int ref_mv_xy= 1 + s->mb_x + (s->mb_y + 1)*ref_mv_stride;
+ const int ref_mv_stride= s->mb_stride;
+ const int ref_mv_xy= s->mb_x + s->mb_y*ref_mv_stride;
me_cmp_func cmp, chroma_cmp;
LOAD_COMMON(s->mb_x*16, s->mb_y*16);
@@ -1009,8 +1009,8 @@ static int RENAME(epzs_motion_search4)(MpegEncContext * s, int block,
int map_generation;
const int penalty_factor= s->me.penalty_factor;
const int size=1;
- const int ref_mv_stride= s->mb_width+2;
- const int ref_mv_xy= 1 + s->mb_x + (s->mb_y + 1)*ref_mv_stride;
+ const int ref_mv_stride= s->mb_stride;
+ const int ref_mv_xy= s->mb_x + s->mb_y *ref_mv_stride;
me_cmp_func cmp, chroma_cmp;
LOAD_COMMON((s->mb_x*2 + (block&1))*8, (s->mb_y*2 + (block>>1))*8);
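
All of the motion-estimation hunks above drop the old (mb_width+2)-wide tables, which needed a +1 row/column bias, in favour of direct mb_x + mb_y*mb_stride indexing; the *_base pointers allocated later in mpegvideo.c are offset by mb_stride+1 so that left/top neighbour accesses at the picture border still land inside the allocation. A minimal sketch of the neighbour addressing, assuming mb_stride = mb_width + 1 as set up in MPV_common_init; the helper and its names are illustrative, not code from the patch:

    /* Sketch: locate an MB and its already-decoded neighbours in a per-MB
     * table that is mb_stride entries wide (one padding column), as used by
     * p_mv_table, mb_var, error_status_table and similar arrays. */
    void mb_neighbour_indices(int mb_x, int mb_y, int mb_stride,
                              int *cur, int *left, int *top, int *topright)
    {
        *cur      = mb_x + mb_y * mb_stride;
        *left     = *cur - 1;             /* valid when mb_x > 0 */
        *top      = *cur - mb_stride;     /* padded base keeps this in bounds */
        *topright = *cur - mb_stride + 1;
    }
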
diff --git a/libavcodec/mpeg12.c b/libavcodec/mpeg12.c
index 019e29370b..c77e19aeda 100644
--- a/libavcodec/mpeg12.c
+++ b/libavcodec/mpeg12.c
@@ -29,11 +29,6 @@
#include "mpeg12data.h"
-#if 1
-#define PRINT_QP(a, b) {}
-#else
-#define PRINT_QP(a, b) printf(a, b)
-#endif
/* Start codes. */
#define SEQ_END_CODE 0x000001b7
@@ -833,10 +828,10 @@ static void init_vlcs(MpegEncContext *s)
&mbPatTable[0][1], 2, 1,
&mbPatTable[0][0], 2, 1);
- init_vlc(&mb_ptype_vlc, MB_PTYPE_VLC_BITS, 32,
+ init_vlc(&mb_ptype_vlc, MB_PTYPE_VLC_BITS, 7,
&table_mb_ptype[0][1], 2, 1,
&table_mb_ptype[0][0], 2, 1);
- init_vlc(&mb_btype_vlc, MB_BTYPE_VLC_BITS, 32,
+ init_vlc(&mb_btype_vlc, MB_BTYPE_VLC_BITS, 11,
&table_mb_btype[0][1], 2, 1,
&table_mb_btype[0][0], 2, 1);
init_rl(&rl_mpeg1);
@@ -887,6 +882,11 @@ static int mpeg_decode_mb(MpegEncContext *s,
assert(s->mb_skiped==0);
if (s->mb_skip_run-- != 0) {
+ if(s->pict_type == I_TYPE){
+ fprintf(stderr, "skipped MB in I frame at %d %d\n", s->mb_x, s->mb_y);

+ return -1;
+ }
+
/* skip mb */
s->mb_intra = 0;
for(i=0;i<6;i++)
@@ -899,6 +899,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0;
s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0;
s->mb_skiped = 1;
+ s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
} else {
/* if B type, reuse previous vectors and directions */
s->mv[0][0][0] = s->last_mv[0][0][0];
@@ -906,6 +907,10 @@ static int mpeg_decode_mb(MpegEncContext *s,
s->mv[1][0][0] = s->last_mv[1][0][0];
s->mv[1][0][1] = s->last_mv[1][0][1];
+ s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]=
+ s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1] | MB_TYPE_SKIP;
+// assert(s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));
+
if((s->mv[0][0][0]|s->mv[0][0][1]|s->mv[1][0][0]|s->mv[1][0][1])==0)
s->mb_skiped = 1;
}
@@ -919,9 +924,9 @@ static int mpeg_decode_mb(MpegEncContext *s,
if (get_bits1(&s->gb) == 0) {
if (get_bits1(&s->gb) == 0)
return -1;
- mb_type = MB_QUANT | MB_INTRA;
+ mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA;
} else {
- mb_type = MB_INTRA;
+ mb_type = MB_TYPE_INTRA;
}
break;
case P_TYPE:
@@ -930,6 +935,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
fprintf(stderr, "invalid mb type in P Frame at %d %d\n", s->mb_x, s->mb_y);
return -1;
}
+ mb_type = ptype2mb_type[ mb_type ];
break;
case B_TYPE:
mb_type = get_vlc2(&s->gb, mb_btype_vlc.table, MB_BTYPE_VLC_BITS, 1);
@@ -937,216 +943,236 @@ static int mpeg_decode_mb(MpegEncContext *s,
fprintf(stderr, "invalid mb type in B Frame at %d %d\n", s->mb_x, s->mb_y);
return -1;
}
+ mb_type = btype2mb_type[ mb_type ];
break;
}
dprintf("mb_type=%x\n", mb_type);
- motion_type = 0; /* avoid warning */
- if (mb_type & (MB_FOR|MB_BACK)) {
- /* get additionnal motion vector type */
- if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct)
- motion_type = MT_FRAME;
- else
- motion_type = get_bits(&s->gb, 2);
- }
- /* compute dct type */
- if (s->picture_structure == PICT_FRAME &&
- !s->frame_pred_frame_dct &&
- (mb_type & (MB_PAT | MB_INTRA))) {
- s->interlaced_dct = get_bits1(&s->gb);
-#ifdef DEBUG
- if (s->interlaced_dct)
- printf("interlaced_dct\n");
-#endif
- } else {
- s->interlaced_dct = 0; /* frame based */
- }
+// motion_type = 0; /* avoid warning */
+ if (IS_INTRA(mb_type)) {
+ /* compute dct type */
+ if (s->picture_structure == PICT_FRAME && //FIXME add an interlaced_dct coded var?
+ !s->frame_pred_frame_dct) {
+ s->interlaced_dct = get_bits1(&s->gb);
+ }
- if (mb_type & MB_QUANT) {
- s->qscale = get_qscale(s);
- }
- if (mb_type & MB_INTRA) {
+ if (IS_QUANT(mb_type))
+ s->qscale = get_qscale(s);
+
if (s->concealment_motion_vectors) {
/* just parse them */
if (s->picture_structure != PICT_FRAME)
skip_bits1(&s->gb); /* field select */
mpeg_decode_motion(s, s->mpeg_f_code[0][0], 0);
mpeg_decode_motion(s, s->mpeg_f_code[0][1], 0);
+ skip_bits1(&s->gb); /* marker */
}
s->mb_intra = 1;
- cbp = 0x3f;
memset(s->last_mv, 0, sizeof(s->last_mv)); /* reset mv prediction */
+
+ if (s->mpeg2) {
+ for(i=0;i<6;i++) {
+ if (mpeg2_decode_block_intra(s, block[i], i) < 0)
+ return -1;
+ }
+ } else {
+ for(i=0;i<6;i++) {
+ if (mpeg1_decode_block_intra(s, block[i], i) < 0)
+ return -1;
+ }
+ }
} else {
- s->mb_intra = 0;
- cbp = 0;
- }
- /* special case of implicit zero motion vector */
- if (s->pict_type == P_TYPE && !(mb_type & MB_FOR)) {
- s->mv_dir = MV_DIR_FORWARD;
- s->mv_type = MV_TYPE_16X16;
- s->last_mv[0][0][0] = 0;
- s->last_mv[0][0][1] = 0;
- s->last_mv[0][1][0] = 0;
- s->last_mv[0][1][1] = 0;
- s->mv[0][0][0] = 0;
- s->mv[0][0][1] = 0;
- } else if (mb_type & (MB_FOR | MB_BACK)) {
- /* motion vectors */
- s->mv_dir = 0;
- for(i=0;i<2;i++) {
- if (mb_type & (MB_FOR >> i)) {
- s->mv_dir |= (MV_DIR_FORWARD >> i);
- dprintf("motion_type=%d\n", motion_type);
- switch(motion_type) {
- case MT_FRAME: /* or MT_16X8 */
- if (s->picture_structure == PICT_FRAME) {
- /* MT_FRAME */
- s->mv_type = MV_TYPE_16X16;
- for(k=0;k<2;k++) {
- val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
- s->last_mv[i][0][k]);
- s->last_mv[i][0][k] = val;
- s->last_mv[i][1][k] = val;
+ if (mb_type & MB_TYPE_ZERO_MV){
+ assert(mb_type & MB_TYPE_PAT);
+
+ /* compute dct type */
+ if (s->picture_structure == PICT_FRAME && //FIXME add an interlaced_dct coded var?
+ !s->frame_pred_frame_dct) {
+ s->interlaced_dct = get_bits1(&s->gb);
+ }
+
+ if (IS_QUANT(mb_type))
+ s->qscale = get_qscale(s);
+
+ s->mv_dir = MV_DIR_FORWARD;
+ s->mv_type = MV_TYPE_16X16;
+ s->last_mv[0][0][0] = 0;
+ s->last_mv[0][0][1] = 0;
+ s->last_mv[0][1][0] = 0;
+ s->last_mv[0][1][1] = 0;
+ s->mv[0][0][0] = 0;
+ s->mv[0][0][1] = 0;
+ }else{
+ assert(mb_type & MB_TYPE_L0L1);
+//FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
+ /* get additional motion vector type */
+ if (s->frame_pred_frame_dct)
+ motion_type = MT_FRAME;
+ else{
+ motion_type = get_bits(&s->gb, 2);
+ }
+
+ /* compute dct type */
+ if (s->picture_structure == PICT_FRAME && //FIXME add an interlaced_dct coded var?
+ !s->frame_pred_frame_dct && IS_PAT(mb_type)) {
+ s->interlaced_dct = get_bits1(&s->gb);
+ }
+
+ if (IS_QUANT(mb_type))
+ s->qscale = get_qscale(s);
+
+ /* motion vectors */
+ s->mv_dir = 0;
+ for(i=0;i<2;i++) {
+ if (USES_LIST(mb_type, i)) {
+ s->mv_dir |= (MV_DIR_FORWARD >> i);
+ dprintf("motion_type=%d\n", motion_type);
+ switch(motion_type) {
+ case MT_FRAME: /* or MT_16X8 */
+ if (s->picture_structure == PICT_FRAME) {
+ /* MT_FRAME */
+ mb_type |= MB_TYPE_16x16;
+ s->mv_type = MV_TYPE_16X16;
+ s->mv[i][0][0]= s->last_mv[i][0][0]= s->last_mv[i][1][0] =
+ mpeg_decode_motion(s, s->mpeg_f_code[i][0], s->last_mv[i][0][0]);
+ s->mv[i][0][1]= s->last_mv[i][0][1]= s->last_mv[i][1][1] =
+ mpeg_decode_motion(s, s->mpeg_f_code[i][1], s->last_mv[i][0][1]);
/* full_pel: only for mpeg1 */
- if (s->full_pel[i])
- val = val << 1;
- s->mv[i][0][k] = val;
- dprintf("mv%d: %d\n", k, val);
+ if (s->full_pel[i]){
+ s->mv[i][0][0] <<= 1;
+ s->mv[i][0][1] <<= 1;
+ }
+ } else {
+ /* MT_16X8 */
+ mb_type |= MB_TYPE_16x8;
+ s->mv_type = MV_TYPE_16X8;
+ for(j=0;j<2;j++) {
+ s->field_select[i][j] = get_bits1(&s->gb);
+ for(k=0;k<2;k++) {
+ val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
+ s->last_mv[i][j][k]);
+ s->last_mv[i][j][k] = val;
+ s->mv[i][j][k] = val;
+ }
+ }
}
- } else {
- /* MT_16X8 */
- s->mv_type = MV_TYPE_16X8;
- for(j=0;j<2;j++) {
- s->field_select[i][j] = get_bits1(&s->gb);
+ break;
+ case MT_FIELD:
+ s->mv_type = MV_TYPE_FIELD;
+ if (s->picture_structure == PICT_FRAME) {
+ mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
+ for(j=0;j<2;j++) {
+ s->field_select[i][j] = get_bits1(&s->gb);
+ val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
+ s->last_mv[i][j][0]);
+ s->last_mv[i][j][0] = val;
+ s->mv[i][j][0] = val;
+ dprintf("fmx=%d\n", val);
+ val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
+ s->last_mv[i][j][1] >> 1);
+ s->last_mv[i][j][1] = val << 1;
+ s->mv[i][j][1] = val;
+ dprintf("fmy=%d\n", val);
+ }
+ } else {
+ mb_type |= MB_TYPE_16x16;
+ s->field_select[i][0] = get_bits1(&s->gb);
for(k=0;k<2;k++) {
val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
- s->last_mv[i][j][k]);
- s->last_mv[i][j][k] = val;
- s->mv[i][j][k] = val;
+ s->last_mv[i][0][k]);
+ s->last_mv[i][0][k] = val;
+ s->last_mv[i][1][k] = val;
+ s->mv[i][0][k] = val;
}
}
- }
- break;
- case MT_FIELD:
- s->mv_type = MV_TYPE_FIELD;
- if (s->picture_structure == PICT_FRAME) {
- for(j=0;j<2;j++) {
- s->field_select[i][j] = get_bits1(&s->gb);
- val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
- s->last_mv[i][j][0]);
- s->last_mv[i][j][0] = val;
- s->mv[i][j][0] = val;
- dprintf("fmx=%d\n", val);
- val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
- s->last_mv[i][j][1] >> 1);
- s->last_mv[i][j][1] = val << 1;
- s->mv[i][j][1] = val;
- dprintf("fmy=%d\n", val);
- }
- } else {
- s->field_select[i][0] = get_bits1(&s->gb);
- for(k=0;k<2;k++) {
- val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
- s->last_mv[i][0][k]);
- s->last_mv[i][0][k] = val;
- s->last_mv[i][1][k] = val;
- s->mv[i][0][k] = val;
- }
- }
- break;
- case MT_DMV:
- {
- int dmx, dmy, mx, my, m;
-
- mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
- s->last_mv[i][0][0]);
- s->last_mv[i][0][0] = mx;
- s->last_mv[i][1][0] = mx;
- dmx = get_dmv(s);
- my = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
- s->last_mv[i][0][1] >> 1);
- dmy = get_dmv(s);
- s->mv_type = MV_TYPE_DMV;
- /* XXX: totally broken */
- if (s->picture_structure == PICT_FRAME) {
- s->last_mv[i][0][1] = my << 1;
- s->last_mv[i][1][1] = my << 1;
-
- m = s->top_field_first ? 1 : 3;
- /* top -> top pred */
- s->mv[i][0][0] = mx;
- s->mv[i][0][1] = my << 1;
- s->mv[i][1][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
- s->mv[i][1][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
- m = 4 - m;
- s->mv[i][2][0] = mx;
- s->mv[i][2][1] = my << 1;
- s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
- s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
- } else {
- s->last_mv[i][0][1] = my;
- s->last_mv[i][1][1] = my;
- s->mv[i][0][0] = mx;
- s->mv[i][0][1] = my;
- s->mv[i][1][0] = ((mx + (mx > 0)) >> 1) + dmx;
- s->mv[i][1][1] = ((my + (my > 0)) >> 1) + dmy - 1
- /* + 2 * cur_field */;
+ break;
+ case MT_DMV:
+ {
+ int dmx, dmy, mx, my, m;
+
+ mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
+ s->last_mv[i][0][0]);
+ s->last_mv[i][0][0] = mx;
+ s->last_mv[i][1][0] = mx;
+ dmx = get_dmv(s);
+ my = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
+ s->last_mv[i][0][1] >> 1);
+ dmy = get_dmv(s);
+ s->mv_type = MV_TYPE_DMV;
+ /* XXX: totally broken */
+ if (s->picture_structure == PICT_FRAME) {
+ mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
+
+ s->last_mv[i][0][1] = my << 1;
+ s->last_mv[i][1][1] = my << 1;
+
+ m = s->top_field_first ? 1 : 3;
+ /* top -> top pred */
+ s->mv[i][0][0] = mx;
+ s->mv[i][0][1] = my << 1;
+ s->mv[i][1][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
+ s->mv[i][1][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
+ m = 4 - m;
+ s->mv[i][2][0] = mx;
+ s->mv[i][2][1] = my << 1;
+ s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
+ s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
+ } else {
+ mb_type |= MB_TYPE_16x16;
+
+ s->last_mv[i][0][1] = my;
+ s->last_mv[i][1][1] = my;
+ s->mv[i][0][0] = mx;
+ s->mv[i][0][1] = my;
+ s->mv[i][1][0] = ((mx + (mx > 0)) >> 1) + dmx;
+ s->mv[i][1][1] = ((my + (my > 0)) >> 1) + dmy - 1
+ /* + 2 * cur_field */;
+ }
}
+ break;
}
- break;
}
}
}
- }
-
- if ((mb_type & MB_INTRA) && s->concealment_motion_vectors) {
- skip_bits1(&s->gb); /* marker */
- }
-
- if (mb_type & MB_PAT) {
- cbp = get_vlc2(&s->gb, mb_pat_vlc.table, MB_PAT_VLC_BITS, 1);
- if (cbp < 0){
- fprintf(stderr, "invalid cbp at %d %d\n", s->mb_x, s->mb_y);
- return -1;
- }
- cbp++;
- }
- dprintf("cbp=%x\n", cbp);
+
+ s->mb_intra = 0;
- if (s->mpeg2) {
- if (s->mb_intra) {
- for(i=0;i<6;i++) {
- if (mpeg2_decode_block_intra(s, block[i], i) < 0)
- return -1;
+ if (IS_PAT(mb_type)) {
+ cbp = get_vlc2(&s->gb, mb_pat_vlc.table, MB_PAT_VLC_BITS, 1);
+ if (cbp < 0){
+ fprintf(stderr, "invalid cbp at %d %d\n", s->mb_x, s->mb_y);
+ return -1;
}
- } else {
- for(i=0;i<6;i++) {
- if (cbp & 32) {
- if (mpeg2_decode_block_non_intra(s, block[i], i) < 0)
- return -1;
- } else {
- s->block_last_index[i] = -1;
+ cbp++;
+
+ if (s->mpeg2) {
+ for(i=0;i<6;i++) {
+ if (cbp & 32) {
+ if (mpeg2_decode_block_non_intra(s, block[i], i) < 0)
+ return -1;
+ } else {
+ s->block_last_index[i] = -1;
+ }
+ cbp+=cbp;
}
- cbp+=cbp;
- }
- }
- } else {
- if (s->mb_intra) {
- for(i=0;i<6;i++) {
- if (mpeg1_decode_block_intra(s, block[i], i) < 0)
- return -1;
- }
- }else{
- for(i=0;i<6;i++) {
- if (cbp & 32) {
- if (mpeg1_decode_block_inter(s, block[i], i) < 0)
- return -1;
- } else {
- s->block_last_index[i] = -1;
+ } else {
+ for(i=0;i<6;i++) {
+ if (cbp & 32) {
+ if (mpeg1_decode_block_inter(s, block[i], i) < 0)
+ return -1;
+ } else {
+ s->block_last_index[i] = -1;
+ }
+ cbp+=cbp;
}
- cbp+=cbp;
}
+ }else{
+ for(i=0;i<6;i++)
+ s->block_last_index[i] = -1;
}
}
+
+ s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= mb_type;
+
return 0;
}
@@ -1156,12 +1182,13 @@ static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
int code, sign, val, m, l, shift;
code = get_vlc2(&s->gb, mv_vlc.table, MV_VLC_BITS, 2);
- if (code < 0) {
- return 0xffff;
- }
if (code == 0) {
return pred;
}
+ if (code < 0) {
+ return 0xffff;
+ }
+
sign = get_bits1(&s->gb);
shift = fcode - 1;
val = (code - 1) << shift;
@@ -1173,7 +1200,7 @@ static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
val += pred;
/* modulo decoding */
- l = (1 << shift) * 16;
+ l = 1 << (shift+4);
m = 2 * l;
if (val < -l) {
val += m;
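
The only change in this hunk is a rewrite of the wrap constant: (1 << shift) * 16 and 1 << (shift+4) are the same value, so the modulo decoding itself is unchanged. A small self-contained sketch of that wrap, with hypothetical names, assuming shift = fcode - 1 as in the function above:

    /* Sketch: fold a decoded motion-vector value back into the legal range
     * [-l, l) with l = 16 << (fcode - 1), mirroring the modulo decoding in
     * mpeg_decode_motion(). Illustrative only, not code from the patch. */
    int wrap_mv(int val, int fcode)
    {
        const int shift = fcode - 1;
        const int l = 1 << (shift + 4);   /* == (1 << shift) * 16 */
        const int m = 2 * l;

        if (val < -l)
            val += m;
        else if (val >= l)
            val -= m;
        return val;
    }
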
@@ -1713,7 +1740,7 @@ static void mpeg_decode_picture_coding_extension(MpegEncContext *s)
s->first_field=0;
else{
s->first_field ^= 1;
- memset(s->mbskip_table, 0, s->mb_width*s->mb_height);
+ memset(s->mbskip_table, 0, s->mb_stride*s->mb_height);
}
if(s->alternate_scan){
@@ -1766,6 +1793,7 @@ static void mpeg_decode_extension(AVCodecContext *avctx,
}
}
+#define DECODE_SLICE_MB_ADDR_ERROR -3 //we failed to decode the mb_x/y info
#define DECODE_SLICE_FATAL_ERROR -2
#define DECODE_SLICE_ERROR -1
#define DECODE_SLICE_OK 0
@@ -1791,16 +1819,20 @@ static int mpeg_decode_slice(AVCodecContext *avctx,
start_code = (start_code - 1) & 0xff;
if (start_code >= s->mb_height){
fprintf(stderr, "slice below image (%d >= %d)\n", start_code, s->mb_height);
- return DECODE_SLICE_ERROR;
+ return DECODE_SLICE_MB_ADDR_ERROR;
}
ff_mpeg1_clean_buffers(s);
+ s->interlaced_dct = 0;
/* start frame decoding */
if (s->first_slice) {
if(s->first_field || s->picture_structure==PICT_FRAME){
if(MPV_frame_start(s, avctx) < 0)
return DECODE_SLICE_FATAL_ERROR;
+
+ ff_er_frame_start(s);
+
/* first check if we must repeat the frame */
s->current_picture.repeat_pict = 0;
@@ -1839,16 +1871,24 @@ static int mpeg_decode_slice(AVCodecContext *avctx,
init_get_bits(&s->gb, buf, buf_size*8);
s->qscale = get_qscale(s);
+ if(s->qscale == 0){
+ fprintf(stderr, "qscale == 0\n");
+ return DECODE_SLICE_MB_ADDR_ERROR;
+ }
+
/* extra slice info */
while (get_bits1(&s->gb) != 0) {
skip_bits(&s->gb, 8);
}
-
+
s->mb_x=0;
+
for(;;) {
int code = get_vlc2(&s->gb, mbincr_vlc.table, MBINCR_VLC_BITS, 2);
- if (code < 0)
- return -1; /* error = end of slice, but empty slice is bad or?*/
+ if (code < 0){
+ fprintf(stderr, "first mb_incr damaged\n");
+ return DECODE_SLICE_MB_ADDR_ERROR;
+ }
if (code >= 33) {
if (code == 33) {
s->mb_x += 33;
@@ -1860,16 +1900,42 @@ static int mpeg_decode_slice(AVCodecContext *avctx,
}
}
- s->mb_y = start_code;
+ s->resync_mb_x= s->mb_x;
+ s->resync_mb_y= s->mb_y = start_code;
s->mb_skip_run= 0;
for(;;) {
s->dsp.clear_blocks(s->block[0]);
-
+
ret = mpeg_decode_mb(s, s->block);
+
dprintf("ret=%d\n", ret);
if (ret < 0)
return -1;
+
+ if(s->motion_val && s->pict_type != B_TYPE){ //note motion_val is normally NULL unless we want to extract the MVs
+ const int wrap = s->block_wrap[0];
+ const int xy = s->mb_x*2 + 1 + (s->mb_y*2 +1)*wrap;
+ int motion_x, motion_y;
+
+ if (s->mb_intra || s->mv_type == MV_TYPE_16X16) {
+ motion_x = s->mv[0][0][0];
+ motion_y = s->mv[0][0][1];
+ } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
+ int i;
+ motion_x = s->mv[0][0][0] + s->mv[0][1][0];
+ motion_y = s->mv[0][0][1] + s->mv[0][1][1];
+ motion_x = (motion_x>>1) | (motion_x&1);
+ }
+ s->motion_val[xy][0] = motion_x;
+ s->motion_val[xy][1] = motion_y;
+ s->motion_val[xy + 1][0] = motion_x;
+ s->motion_val[xy + 1][1] = motion_y;
+ s->motion_val[xy + wrap][0] = motion_x;
+ s->motion_val[xy + wrap][1] = motion_y;
+ s->motion_val[xy + 1 + wrap][0] = motion_x;
+ s->motion_val[xy + 1 + wrap][1] = motion_y;
+ }
MPV_decode_mb(s, s->block);
@@ -1884,9 +1950,7 @@ static int mpeg_decode_slice(AVCodecContext *avctx,
s->mb_x = 0;
s->mb_y++;
- PRINT_QP("%s", "\n");
}
- PRINT_QP("%2d", s->qscale);
/* skip mb handling */
if (s->mb_skip_run == -1) {
@@ -1894,8 +1958,14 @@ static int mpeg_decode_slice(AVCodecContext *avctx,
s->mb_skip_run = 0;
for(;;) {
int code = get_vlc2(&s->gb, mbincr_vlc.table, MBINCR_VLC_BITS, 2);
- if (code < 0)
+ if (code < 0){
+ align_get_bits(&s->gb);
+ if(s->mb_skip_run != 0 || show_bits(&s->gb, 24) != 1){
+ fprintf(stderr, "slice end mismatch\n");
+ return -1;
+ }
goto eos; /* error = end of slice */
+ }
if (code >= 33) {
if (code == 33) {
s->mb_skip_run += 33;
@@ -1913,8 +1983,11 @@ static int mpeg_decode_slice(AVCodecContext *avctx,
}
}
eos: //end of slice
-
+//printf("y %d %d %d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
+ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END);
+
emms_c();
+
//intf("%d %d %d %d\n", s->mb_y, s->mb_height, s->pict_type, s->picture_number);
/* end of slice reached */
if (s->mb_y<<field_pic == s->mb_height && !s->first_field) {
@@ -1922,11 +1995,13 @@ eos: //end of slice
if(s->mpeg2)
s->qscale >>=1;
+ ff_er_frame_end(s);
MPV_frame_end(s);
if (s->pict_type == B_TYPE || s->low_delay) {
*pict= *(AVFrame*)&s->current_picture;
+ ff_print_debug_info(s, s->current_picture_ptr);
} else {
s->picture_number++;
/* latency of 1 frame for I and P frames */
@@ -1935,6 +2010,7 @@ eos: //end of slice
return DECODE_SLICE_OK;
} else {
*pict= *(AVFrame*)&s->last_picture;
+ ff_print_debug_info(s, s->last_picture_ptr);
}
}
return DECODE_SLICE_EOP;
@@ -2194,7 +2270,10 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
if (ret == DECODE_SLICE_EOP) {
*data_size = sizeof(AVPicture);
goto the_end;
- }else if(ret<0){
+ }else if(ret < 0){
+ if(ret == DECODE_SLICE_ERROR)
+ ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x, s2->mb_y, AC_ERROR|DC_ERROR|MV_ERROR);
+
fprintf(stderr,"Error while decoding slice\n");
if(ret==DECODE_SLICE_FATAL_ERROR) return -1;
}
diff --git a/libavcodec/mpeg12data.h b/libavcodec/mpeg12data.h
index 16d607067f..2e7e58de5a 100644
--- a/libavcodec/mpeg12data.h
+++ b/libavcodec/mpeg12data.h
@@ -289,80 +289,57 @@ static const uint8_t mbPatTable[63][2] = {
{0xc, 6}
};
-#define MB_INTRA 0x01
-#define MB_PAT 0x02
-#define MB_BACK 0x04
-#define MB_FOR 0x08
-#define MB_QUANT 0x10
-
-static const uint8_t table_mb_ptype[32][2] = {
- { 0, 0 }, // 0x00
+#define MB_TYPE_PAT 0x40000000
+#define MB_TYPE_ZERO_MV 0x20000000
+#define IS_ZERO_MV(a) ((a)&MB_TYPE_ZERO_MV)
+#define IS_PAT(a) ((a)&MB_TYPE_PAT)
+
+static const uint8_t table_mb_ptype[7][2] = {
{ 3, 5 }, // 0x01 MB_INTRA
{ 1, 2 }, // 0x02 MB_PAT
- { 0, 0 }, // 0x03
- { 0, 0 }, // 0x04
- { 0, 0 }, // 0x05
- { 0, 0 }, // 0x06
- { 0, 0 }, // 0x07
{ 1, 3 }, // 0x08 MB_FOR
- { 0, 0 }, // 0x09
{ 1, 1 }, // 0x0A MB_FOR|MB_PAT
- { 0, 0 }, // 0x0B
- { 0, 0 }, // 0x0C
- { 0, 0 }, // 0x0D
- { 0, 0 }, // 0x0E
- { 0, 0 }, // 0x0F
- { 0, 0 }, // 0x10
{ 1, 6 }, // 0x11 MB_QUANT|MB_INTRA
{ 1, 5 }, // 0x12 MB_QUANT|MB_PAT
- { 0, 0 }, // 0x13
- { 0, 0 }, // 0x14
- { 0, 0 }, // 0x15
- { 0, 0 }, // 0x16
- { 0, 0 }, // 0x17
- { 0, 0 }, // 0x18
- { 0, 0 }, // 0x19
{ 2, 5 }, // 0x1A MB_QUANT|MB_FOR|MB_PAT
- { 0, 0 }, // 0x1B
- { 0, 0 }, // 0x1C
- { 0, 0 }, // 0x1D
- { 0, 0 }, // 0x1E
- { 0, 0 }, // 0x1F
};
-static const uint8_t table_mb_btype[32][2] = {
- { 0, 0 }, // 0x00
+static const uint32_t ptype2mb_type[7] = {
+ MB_TYPE_INTRA,
+ MB_TYPE_L0 | MB_TYPE_PAT | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
+ MB_TYPE_L0,
+ MB_TYPE_L0 | MB_TYPE_PAT,
+ MB_TYPE_QUANT | MB_TYPE_INTRA,
+ MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_PAT | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
+ MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_PAT,
+};
+
+static const uint8_t table_mb_btype[11][2] = {
{ 3, 5 }, // 0x01 MB_INTRA
- { 0, 0 }, // 0x02
- { 0, 0 }, // 0x03
{ 2, 3 }, // 0x04 MB_BACK
- { 0, 0 }, // 0x05
{ 3, 3 }, // 0x06 MB_BACK|MB_PAT
- { 0, 0 }, // 0x07
{ 2, 4 }, // 0x08 MB_FOR
- { 0, 0 }, // 0x09
{ 3, 4 }, // 0x0A MB_FOR|MB_PAT
- { 0, 0 }, // 0x0B
{ 2, 2 }, // 0x0C MB_FOR|MB_BACK
- { 0, 0 }, // 0x0D
{ 3, 2 }, // 0x0E MB_FOR|MB_BACK|MB_PAT
- { 0, 0 }, // 0x0F
- { 0, 0 }, // 0x10
{ 1, 6 }, // 0x11 MB_QUANT|MB_INTRA
- { 0, 0 }, // 0x12
- { 0, 0 }, // 0x13
- { 0, 0 }, // 0x14
- { 0, 0 }, // 0x15
{ 2, 6 }, // 0x16 MB_QUANT|MB_BACK|MB_PAT
- { 0, 0 }, // 0x17
- { 0, 0 }, // 0x18
- { 0, 0 }, // 0x19
{ 3, 6 }, // 0x1A MB_QUANT|MB_FOR|MB_PAT
- { 0, 0 }, // 0x1B
- { 0, 0 }, // 0x1C
- { 0, 0 }, // 0x1D
{ 2, 5 }, // 0x1E MB_QUANT|MB_FOR|MB_BACK|MB_PAT
- { 0, 0 }, // 0x1F
+};
+
+static const uint32_t btype2mb_type[11] = {
+ MB_TYPE_INTRA,
+ MB_TYPE_L1,
+ MB_TYPE_L1 | MB_TYPE_PAT,
+ MB_TYPE_L0,
+ MB_TYPE_L0 | MB_TYPE_PAT,
+ MB_TYPE_L0L1,
+ MB_TYPE_L0L1 | MB_TYPE_PAT,
+ MB_TYPE_QUANT | MB_TYPE_INTRA,
+ MB_TYPE_QUANT | MB_TYPE_L1 | MB_TYPE_PAT,
+ MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_PAT,
+ MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_PAT,
};
static const uint8_t mbMotionVectorTable[17][2] = {
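
With the sparse 32-entry tables gone, the VLC now yields a small index (0..6 for P frames, 0..10 for B frames) that the decoder translates through ptype2mb_type[] / btype2mb_type[] into the shared mb_type flag word, as done by `mb_type = ptype2mb_type[ mb_type ];` in mpeg_decode_mb() above. A minimal sketch of that second step, with the flag values copied from this header and from mpegvideo.h; the surrounding program is illustrative:

    #include <stdint.h>

    /* Sketch only: flag values as defined in this patch. */
    #define MB_TYPE_16x16   0x0008
    #define MB_TYPE_P0L0    0x1000
    #define MB_TYPE_P1L0    0x2000
    #define MB_TYPE_L0      (MB_TYPE_P0L0 | MB_TYPE_P1L0)
    #define MB_TYPE_PAT     0x40000000
    #define MB_TYPE_ZERO_MV 0x20000000
    #define IS_PAT(a)     ((a)&MB_TYPE_PAT)
    #define IS_ZERO_MV(a) ((a)&MB_TYPE_ZERO_MV)

    int main(void)
    {
        /* VLC index 1 is the old MB_PAT case (coded blocks, no coded MV);
         * after translation the decoder branches on generic flag tests. */
        uint32_t mb_type = MB_TYPE_L0 | MB_TYPE_PAT | MB_TYPE_ZERO_MV | MB_TYPE_16x16; /* ptype2mb_type[1] */

        if (IS_PAT(mb_type) && IS_ZERO_MV(mb_type)) {
            /* zero-motion, coded-block-pattern path of mpeg_decode_mb() */
        }
        return 0;
    }
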
diff --git a/libavcodec/mpeg4data.h b/libavcodec/mpeg4data.h
index fbaca8d5e2..bc0d4530e1 100644
--- a/libavcodec/mpeg4data.h
+++ b/libavcodec/mpeg4data.h
@@ -27,10 +27,12 @@
#define MOTION_MARKER 0x1F001
#define DC_MARKER 0x6B001
-#define MB_TYPE_B_DIRECT 0
-#define MB_TYPE_B_BIDIR 1
-#define MB_TYPE_B_BACKW 2
-#define MB_TYPE_B_FORW 3
+const static int mb_type_b_map[4]= {
+ MB_TYPE_DIRECT2 | MB_TYPE_L0L1,
+ MB_TYPE_L0L1 | MB_TYPE_16x16,
+ MB_TYPE_L1 | MB_TYPE_16x16,
+ MB_TYPE_L0 | MB_TYPE_16x16,
+};
#define VOS_STARTCODE 0x1B0
#define USER_DATA_STARTCODE 0x1B2
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index c915995c4e..6d7b35fbcc 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -136,16 +136,6 @@ static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16
}
#endif //CONFIG_ENCODERS
-// move into common.c perhaps
-#define CHECKED_ALLOCZ(p, size)\
-{\
- p= av_mallocz(size);\
- if(p==NULL){\
- perror("malloc");\
- goto fail;\
- }\
-}
-
void ff_init_scantable(MpegEncContext *s, ScanTable *st, const uint8_t *src_scantable){
int i;
int end;
@@ -227,9 +217,10 @@ int DCT_common_init(MpegEncContext *s)
* The pixels are allocated/set by calling get_buffer() if shared=0
*/
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
- const int big_mb_num= (s->mb_width+1)*(s->mb_height+1);
+ const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesn't sig11
+ const int mb_array_size= s->mb_stride*s->mb_height;
int i;
-
+
if(shared){
assert(pic->data[0]);
assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
@@ -262,23 +253,23 @@ static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
if(pic->qscale_table==NULL){
if (s->encoding) {
- CHECKED_ALLOCZ(pic->mb_var , s->mb_num * sizeof(int16_t))
- CHECKED_ALLOCZ(pic->mc_mb_var, s->mb_num * sizeof(int16_t))
- CHECKED_ALLOCZ(pic->mb_mean , s->mb_num * sizeof(int8_t))
- CHECKED_ALLOCZ(pic->mb_cmp_score, s->mb_num * sizeof(int32_t))
+ CHECKED_ALLOCZ(pic->mb_var , mb_array_size * sizeof(int16_t))
+ CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
+ CHECKED_ALLOCZ(pic->mb_mean , mb_array_size * sizeof(int8_t))
+ CHECKED_ALLOCZ(pic->mb_cmp_score, mb_array_size * sizeof(int32_t))
}
- CHECKED_ALLOCZ(pic->mbskip_table , s->mb_num * sizeof(uint8_t)+1) //the +1 is for the slice end check
- CHECKED_ALLOCZ(pic->qscale_table , s->mb_num * sizeof(uint8_t))
+ CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
+ CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
+ CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num * sizeof(int))
+ pic->mb_type= pic->mb_type_base + s->mb_stride+1;
if(s->out_format == FMT_H264){
- CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num * sizeof(uint16_t))
- pic->mb_type= pic->mb_type_base + s->mb_width+2;
for(i=0; i<2; i++){
CHECKED_ALLOCZ(pic->motion_val[i], 2 * 16 * s->mb_num * sizeof(uint16_t))
CHECKED_ALLOCZ(pic->ref_index[i] , 4 * s->mb_num * sizeof(uint8_t))
}
}
- pic->qstride= s->mb_width;
+ pic->qstride= s->mb_stride;
}
//it might be nicer if the application would keep track of these but it would require a API change
@@ -334,7 +325,7 @@ static void free_picture(MpegEncContext *s, Picture *pic){
/* init common structure for both encoder and decoder */
int MPV_common_init(MpegEncContext *s)
{
- int y_size, c_size, yc_size, i;
+ int y_size, c_size, yc_size, i, mb_array_size, x, y;
dsputil_init(&s->dsp, s->avctx);
DCT_common_init(s);
@@ -343,12 +334,21 @@ int MPV_common_init(MpegEncContext *s)
s->mb_width = (s->width + 15) / 16;
s->mb_height = (s->height + 15) / 16;
+ s->mb_stride = s->mb_width + 1;
+ mb_array_size= s->mb_height * s->mb_stride;
/* set default edge pos, will be overriden in decode_header if needed */
s->h_edge_pos= s->mb_width*16;
s->v_edge_pos= s->mb_height*16;
s->mb_num = s->mb_width * s->mb_height;
+
+ s->block_wrap[0]=
+ s->block_wrap[1]=
+ s->block_wrap[2]=
+ s->block_wrap[3]= s->mb_width*2 + 2;
+ s->block_wrap[4]=
+ s->block_wrap[5]= s->mb_width + 2;
y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
c_size = (s->mb_width + 2) * (s->mb_height + 2);
@@ -365,16 +365,30 @@ int MPV_common_init(MpegEncContext *s)
s->avctx->coded_frame= (AVFrame*)&s->current_picture;
+ CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error resilience code looks cleaner with this
+ for(y=0; y<s->mb_height; y++){
+ for(x=0; x<s->mb_width; x++){
+ s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
+ }
+ }
+ s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
+
if (s->encoding) {
- int mv_table_size= (s->mb_width+2)*(s->mb_height+2);
+ int mv_table_size= s->mb_stride * (s->mb_height+2) + 1;
/* Allocate MV tables */
- CHECKED_ALLOCZ(s->p_mv_table , mv_table_size * 2 * sizeof(int16_t))
- CHECKED_ALLOCZ(s->b_forw_mv_table , mv_table_size * 2 * sizeof(int16_t))
- CHECKED_ALLOCZ(s->b_back_mv_table , mv_table_size * 2 * sizeof(int16_t))
- CHECKED_ALLOCZ(s->b_bidir_forw_mv_table , mv_table_size * 2 * sizeof(int16_t))
- CHECKED_ALLOCZ(s->b_bidir_back_mv_table , mv_table_size * 2 * sizeof(int16_t))
- CHECKED_ALLOCZ(s->b_direct_mv_table , mv_table_size * 2 * sizeof(int16_t))
+ CHECKED_ALLOCZ(s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
+ CHECKED_ALLOCZ(s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
+ CHECKED_ALLOCZ(s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
+ CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
+ CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
+ CHECKED_ALLOCZ(s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
+ s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
+ s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
+ s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
+ s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
+ s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
+ s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
//FIXME should be linesize instead of s->width*2 but that isnt known before get_buffer()
CHECKED_ALLOCZ(s->me.scratchpad, s->width*2*16*3*sizeof(uint8_t))
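
The mb_index2xy table added above lets code that walks macroblocks in coding order (0..mb_num-1) jump straight to the matching slot in the stride-padded per-MB arrays. A minimal sketch of the mapping with illustrative names; the fill loop mirrors the one added to MPV_common_init():

    /* Sketch: build index -> (x + y*stride) for an mb_width x mb_height grid
     * stored with one extra padding column (stride = mb_width + 1). */
    void fill_mb_index2xy(int *mb_index2xy, int mb_width, int mb_height)
    {
        const int mb_stride = mb_width + 1;
        int x, y;

        for (y = 0; y < mb_height; y++)
            for (x = 0; x < mb_width; x++)
                mb_index2xy[x + y * mb_width] = x + y * mb_stride;
    }
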
@@ -391,14 +405,15 @@ int MPV_common_init(MpegEncContext *s)
CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
}
CHECKED_ALLOCZ(s->avctx->stats_out, 256);
+
+ /* Allocate MB type table */
+ CHECKED_ALLOCZ(s->mb_type , mb_array_size * sizeof(uint8_t)) //needed for encoding
}
- CHECKED_ALLOCZ(s->error_status_table, s->mb_num*sizeof(uint8_t))
+ CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))
if (s->out_format == FMT_H263 || s->encoding) {
int size;
- /* Allocate MB type table */
- CHECKED_ALLOCZ(s->mb_type , s->mb_num * sizeof(uint8_t))
/* MV prediction */
size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
@@ -407,12 +422,9 @@ int MPV_common_init(MpegEncContext *s)
if(s->codec_id==CODEC_ID_MPEG4){
/* interlaced direct mode decoding tables */
- CHECKED_ALLOCZ(s->field_mv_table, s->mb_num*2*2 * sizeof(int16_t))
- CHECKED_ALLOCZ(s->field_select_table, s->mb_num*2* sizeof(int8_t))
+ CHECKED_ALLOCZ(s->field_mv_table, mb_array_size*2*2 * sizeof(int16_t))
+ CHECKED_ALLOCZ(s->field_select_table, mb_array_size*2* sizeof(int8_t))
}
- /* 4mv b frame decoding table */
- //note this is needed for h263 without b frames too (segfault on damaged streams otherwise)
- CHECKED_ALLOCZ(s->co_located_type_table, s->mb_num * sizeof(uint8_t))
if (s->out_format == FMT_H263) {
/* ac values */
CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(int16_t) * 16);
@@ -426,8 +438,8 @@ int MPV_common_init(MpegEncContext *s)
CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
/* cbp, ac_pred, pred_dir */
- CHECKED_ALLOCZ(s->cbp_table , s->mb_num * sizeof(uint8_t))
- CHECKED_ALLOCZ(s->pred_dir_table, s->mb_num * sizeof(uint8_t))
+ CHECKED_ALLOCZ(s->cbp_table , mb_array_size * sizeof(uint8_t))
+ CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
}
if (s->h263_pred || s->h263_plus || !s->encoding) {
@@ -441,14 +453,14 @@ int MPV_common_init(MpegEncContext *s)
}
/* which mb is a intra block */
- CHECKED_ALLOCZ(s->mbintra_table, s->mb_num);
- memset(s->mbintra_table, 1, s->mb_num);
+ CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
+ memset(s->mbintra_table, 1, mb_array_size);
/* default structure is frame */
s->picture_structure = PICT_FRAME;
/* init macroblock skip table */
- CHECKED_ALLOCZ(s->mbskip_table, s->mb_num+1);
+ CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
//Note the +1 is for a quicker mpeg4 slice_end detection
CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
@@ -472,12 +484,19 @@ void MPV_common_end(MpegEncContext *s)
int i;
av_freep(&s->mb_type);
- av_freep(&s->p_mv_table);
- av_freep(&s->b_forw_mv_table);
- av_freep(&s->b_back_mv_table);
- av_freep(&s->b_bidir_forw_mv_table);
- av_freep(&s->b_bidir_back_mv_table);
- av_freep(&s->b_direct_mv_table);
+ av_freep(&s->p_mv_table_base);
+ av_freep(&s->b_forw_mv_table_base);
+ av_freep(&s->b_back_mv_table_base);
+ av_freep(&s->b_bidir_forw_mv_table_base);
+ av_freep(&s->b_bidir_back_mv_table_base);
+ av_freep(&s->b_direct_mv_table_base);
+ s->p_mv_table= NULL;
+ s->b_forw_mv_table= NULL;
+ s->b_back_mv_table= NULL;
+ s->b_bidir_forw_mv_table= NULL;
+ s->b_bidir_back_mv_table= NULL;
+ s->b_direct_mv_table= NULL;
+
av_freep(&s->motion_val);
av_freep(&s->dc_val[0]);
av_freep(&s->ac_val[0]);
@@ -495,12 +514,12 @@ void MPV_common_end(MpegEncContext *s)
av_freep(&s->tex_pb_buffer);
av_freep(&s->pb2_buffer);
av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
- av_freep(&s->co_located_type_table);
av_freep(&s->field_mv_table);
av_freep(&s->field_select_table);
av_freep(&s->avctx->stats_out);
av_freep(&s->ac_stats);
av_freep(&s->error_status_table);
+ av_freep(&s->mb_index2xy);
for(i=0; i<MAX_PICTURE_COUNT; i++){
free_picture(s, &s->picture[i]);
@@ -925,6 +944,7 @@ alloc:
s->current_picture_ptr->pict_type= s->pict_type;
s->current_picture_ptr->quality= s->qscale;
+ s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;
s->current_picture= *s->current_picture_ptr;
@@ -1000,28 +1020,13 @@ void MPV_frame_end(MpegEncContext *s)
}
assert(i<MAX_PICTURE_COUNT);
#endif
- s->current_picture_ptr->quality= s->qscale; //FIXME get average of qscale_table
- s->current_picture_ptr->pict_type= s->pict_type;
- s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;
/* release non refernce frames */
for(i=0; i<MAX_PICTURE_COUNT; i++){
if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/)
s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
}
- if(s->avctx->debug&FF_DEBUG_SKIP){
- int x,y;
- for(y=0; y<s->mb_height; y++){
- for(x=0; x<s->mb_width; x++){
- int count= s->mbskip_table[x + y*s->mb_width];
- if(count>9) count=9;
- printf(" %1d", count);
- }
- printf("\n");
- }
- printf("pict type: %d\n", s->pict_type);
- }
-
+
// clear copies, to avoid confusion
#if 0
memset(&s->last_picture, 0, sizeof(Picture));
@@ -1030,6 +1035,82 @@ void MPV_frame_end(MpegEncContext *s)
#endif
}
+/**
+ * prints debugging info for the given picture.
+ */
+void ff_print_debug_info(MpegEncContext *s, Picture *pict){
+
+ if(!pict || !pict->mb_type) return;
+
+ if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
+ int x,y;
+
+ for(y=0; y<s->mb_height; y++){
+ for(x=0; x<s->mb_width; x++){
+ if(s->avctx->debug&FF_DEBUG_SKIP){
+ int count= s->mbskip_table[x + y*s->mb_stride];
+ if(count>9) count=9;
+ printf("%1d", count);
+ }
+ if(s->avctx->debug&FF_DEBUG_QP){
+ printf("%2d", pict->qscale_table[x + y*s->mb_stride]);
+ }
+ if(s->avctx->debug&FF_DEBUG_MB_TYPE){
+ int mb_type= pict->mb_type[x + y*s->mb_stride];
+
+ //Type & MV direction
+ if(IS_PCM(mb_type))
+ printf("P");
+ else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
+ printf("A");
+ else if(IS_INTRA4x4(mb_type))
+ printf("i");
+ else if(IS_INTRA16x16(mb_type))
+ printf("I");
+ else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
+ printf("d");
+ else if(IS_DIRECT(mb_type))
+ printf("D");
+ else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
+ printf("g");
+ else if(IS_GMC(mb_type))
+ printf("G");
+ else if(IS_SKIP(mb_type))
+ printf("S");
+ else if(!USES_LIST(mb_type, 1))
+ printf(">");
+ else if(!USES_LIST(mb_type, 0))
+ printf("<");
+ else{
+ assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
+ printf("X");
+ }
+
+ //segmentation
+ if(IS_8X8(mb_type))
+ printf("+");
+ else if(IS_16X8(mb_type))
+ printf("-");
+ else if(IS_8X16(mb_type))
+ printf("¦");
+ else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
+ printf(" ");
+ else
+ printf("?");
+
+
+ if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
+ printf("=");
+ else
+ printf(" ");
+ }
+// printf(" ");
+ }
+ printf("\n");
+ }
+ }
+}
+
#ifdef CONFIG_ENCODERS
static int get_sae(uint8_t *src, int ref, int stride){
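
The per-MB dump above is driven by the avctx->debug bit mask: a caller that wants the skip counts, quantiser map and macroblock-type map printed for every output picture only needs to set the corresponding flags before decoding. A hedged usage sketch; the flag names are the ones tested by ff_print_debug_info() above, while the wrapper function and the assumption that they are declared in avcodec.h are illustrative:

    #include "avcodec.h"

    /* Sketch: request the per-macroblock debug maps printed by
     * ff_print_debug_info(). */
    void enable_mb_debug(AVCodecContext *avctx)
    {
        avctx->debug |= FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE;
    }
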
@@ -2007,7 +2088,7 @@ void ff_clean_intra_table_entries(MpegEncContext *s)
memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
- s->mbintra_table[s->mb_x + s->mb_y*s->mb_width]= 0;
+ s->mbintra_table[s->mb_x + s->mb_y*s->mb_stride]= 0;
}
/* generic function called after a macroblock has been parsed by the
@@ -2023,7 +2104,7 @@ void ff_clean_intra_table_entries(MpegEncContext *s)
void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
{
int mb_x, mb_y;
- const int mb_xy = s->mb_y * s->mb_width + s->mb_x;
+ const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
mb_x = s->mb_x;
mb_y = s->mb_y;
@@ -2049,33 +2130,26 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
//FIXME a lot of thet is only needed for !low_delay
const int wrap = s->block_wrap[0];
const int xy = s->block_index[0];
- const int mb_index= s->mb_x + s->mb_y*s->mb_width;
- if(s->mv_type == MV_TYPE_8X8){
- s->co_located_type_table[mb_index]= CO_LOCATED_TYPE_4MV;
- } else {
+ if(s->mv_type != MV_TYPE_8X8){
int motion_x, motion_y;
if (s->mb_intra) {
motion_x = 0;
motion_y = 0;
- if(s->co_located_type_table)
- s->co_located_type_table[mb_index]= 0;
} else if (s->mv_type == MV_TYPE_16X16) {
motion_x = s->mv[0][0][0];
motion_y = s->mv[0][0][1];
- if(s->co_located_type_table)
- s->co_located_type_table[mb_index]= 0;
} else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
int i;
motion_x = s->mv[0][0][0] + s->mv[0][1][0];
motion_y = s->mv[0][0][1] + s->mv[0][1][1];
motion_x = (motion_x>>1) | (motion_x&1);
for(i=0; i<2; i++){
- s->field_mv_table[mb_index][i][0]= s->mv[0][i][0];
- s->field_mv_table[mb_index][i][1]= s->mv[0][i][1];
- s->field_select_table[mb_index][i]= s->field_select[0][i];
+ s->field_mv_table[mb_xy][i][0]= s->mv[0][i][0];
+ s->field_mv_table[mb_xy][i][1]= s->mv[0][i][1];
+ s->field_select_table[mb_xy][i]= s->field_select[0][i];
}
- s->co_located_type_table[mb_index]= CO_LOCATED_TYPE_FIELDMV;
}
+
/* no update if 8X8 because it has been done during parsing */
s->motion_val[xy][0] = motion_x;
s->motion_val[xy][1] = motion_y;
@@ -2086,6 +2160,13 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
s->motion_val[xy + 1 + wrap][0] = motion_x;
s->motion_val[xy + 1 + wrap][1] = motion_y;
}
+
+ if(s->encoding){ //FIXME encoding MUST be cleaned up
+ if (s->mv_type == MV_TYPE_8X8)
+ s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8;
+ else
+ s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16;
+ }
}
if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
@@ -2411,7 +2492,7 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
for(i=0; i<6; i++) skip_dct[i]=0;
if(s->adaptive_quant){
- s->dquant= s->current_picture.qscale_table[mb_x + mb_y*s->mb_width] - s->qscale;
+ s->dquant= s->current_picture.qscale_table[mb_x + mb_y*s->mb_stride] - s->qscale;
if(s->out_format==FMT_H263){
if (s->dquant> 2) s->dquant= 2;
@@ -2562,7 +2643,7 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
}
/* pre quantization */
- if(s->current_picture.mc_mb_var[s->mb_width*mb_y+ mb_x]<2*s->qscale*s->qscale){
+ if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){
//FIXME optimize
if(s->dsp.pix_abs8x8(ptr_y , dest_y , wrap_y) < 20*s->qscale) skip_dct[0]= 1;
if(s->dsp.pix_abs8x8(ptr_y + 8, dest_y + 8, wrap_y) < 20*s->qscale) skip_dct[1]= 1;
@@ -2593,13 +2674,13 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
{
float adap_parm;
- adap_parm = ((s->avg_mb_var << 1) + s->mb_var[s->mb_width*mb_y+mb_x] + 1.0) /
- ((s->mb_var[s->mb_width*mb_y+mb_x] << 1) + s->avg_mb_var + 1.0);
+ adap_parm = ((s->avg_mb_var << 1) + s->mb_var[s->mb_stride*mb_y+mb_x] + 1.0) /
+ ((s->mb_var[s->mb_stride*mb_y+mb_x] << 1) + s->avg_mb_var + 1.0);
printf("\ntype=%c qscale=%2d adap=%0.2f dquant=%4.2f var=%4d avgvar=%4d",
- (s->mb_type[s->mb_width*mb_y+mb_x] > 0) ? 'I' : 'P',
+ (s->mb_type[s->mb_stride*mb_y+mb_x] > 0) ? 'I' : 'P',
s->qscale, adap_parm, s->qscale*adap_parm,
- s->mb_var[s->mb_width*mb_y+mb_x], s->avg_mb_var);
+ s->mb_var[s->mb_stride*mb_y+mb_x], s->avg_mb_var);
}
#endif
/* DCT & quantize */
@@ -2837,13 +2918,6 @@ static void encode_picture(MpegEncContext *s, int picture_number)
}
s->picture_number = picture_number;
-
- s->block_wrap[0]=
- s->block_wrap[1]=
- s->block_wrap[2]=
- s->block_wrap[3]= s->mb_width*2 + 2;
- s->block_wrap[4]=
- s->block_wrap[5]= s->mb_width + 2;
/* Reset the average MB variance */
s->current_picture.mb_var_sum = 0;
@@ -2912,8 +2986,8 @@ static void encode_picture(MpegEncContext *s, int picture_number)
/* I-Frame */
//FIXME do we need to zero them?
memset(s->motion_val[0], 0, sizeof(int16_t)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2);
- memset(s->p_mv_table , 0, sizeof(int16_t)*(s->mb_width+2)*(s->mb_height+2)*2);
- memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_width*s->mb_height);
+ memset(s->p_mv_table , 0, sizeof(int16_t)*(s->mb_stride)*s->mb_height*2);
+ memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);
if(!s->fixed_qscale){
/* finding spatial complexity for I-frame rate control */
@@ -2927,8 +3001,8 @@ static void encode_picture(MpegEncContext *s, int picture_number)
varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
- s->current_picture.mb_var [s->mb_width * mb_y + mb_x] = varc;
- s->current_picture.mb_mean[s->mb_width * mb_y + mb_x] = (sum+128)>>8;
+ s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
+ s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
s->current_picture.mb_var_sum += varc;
}
}
@@ -2938,7 +3012,7 @@ static void encode_picture(MpegEncContext *s, int picture_number)
if(s->scene_change_score > 0 && s->pict_type == P_TYPE){
s->pict_type= I_TYPE;
- memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_width*s->mb_height);
+ memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);
//printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
}
@@ -3081,8 +3155,8 @@ static void encode_picture(MpegEncContext *s, int picture_number)
s->block_index[4]= s->block_wrap[4]*(mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2);
s->block_index[5]= s->block_wrap[4]*(mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2);
for(mb_x=0; mb_x < s->mb_width; mb_x++) {
- int mb_type= s->mb_type[mb_y * s->mb_width + mb_x];
- const int xy= (mb_y+1) * (s->mb_width+2) + mb_x + 1;
+ const int xy= mb_y*s->mb_stride + mb_x;
+ int mb_type= s->mb_type[xy];
// int d;
int dmin=10000000;
@@ -3235,7 +3309,7 @@ static void encode_picture(MpegEncContext *s, int picture_number)
&dmin, &next_block, 0, 0);
/* force cleaning of ac/dc pred stuff if needed ... */
if(s->h263_pred || s->h263_aic)
- s->mbintra_table[mb_x + mb_y*s->mb_width]=1;
+ s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
}
copy_context_after_encode(s, &best_s, -1);
@@ -3259,16 +3333,16 @@ static void encode_picture(MpegEncContext *s, int picture_number)
} else {
int motion_x, motion_y;
int intra_score;
- int inter_score= s->current_picture.mb_cmp_score[mb_x + mb_y*s->mb_width];
+ int inter_score= s->current_picture.mb_cmp_score[mb_x + mb_y*s->mb_stride];
if(!(s->flags&CODEC_FLAG_HQ) && s->pict_type==P_TYPE){
/* get luma score */
if((s->avctx->mb_cmp&0xFF)==FF_CMP_SSE){
- intra_score= (s->current_picture.mb_var[mb_x + mb_y*s->mb_width]<<8) - 500; //FIXME dont scale it down so we dont have to fix it
+ intra_score= (s->current_picture.mb_var[mb_x + mb_y*s->mb_stride]<<8) - 500; //FIXME dont scale it down so we dont have to fix it
}else{
uint8_t *dest_y;
- int mean= s->current_picture.mb_mean[mb_x + mb_y*s->mb_width]; //FIXME
+ int mean= s->current_picture.mb_mean[mb_x + mb_y*s->mb_stride]; //FIXME
mean*= 0x01010101;
dest_y = s->new_picture.data[0] + (mb_y * 16 * s->linesize ) + mb_x * 16;
@@ -3284,8 +3358,8 @@ static void encode_picture(MpegEncContext *s, int picture_number)
intra_score= s->dsp.mb_cmp[0](s, s->me.scratchpad, dest_y, s->linesize);
/* printf("intra:%7d inter:%7d var:%7d mc_var.%7d\n", intra_score>>8, inter_score>>8,
- s->current_picture.mb_var[mb_x + mb_y*s->mb_width],
- s->current_picture.mc_mb_var[mb_x + mb_y*s->mb_width]);*/
+ s->current_picture.mb_var[mb_x + mb_y*s->mb_stride],
+ s->current_picture.mc_mb_var[mb_x + mb_y*s->mb_stride]);*/
}
/* get chroma score */
@@ -3442,7 +3516,7 @@ static void encode_picture(MpegEncContext *s, int picture_number)
s->current_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
w>>1, h>>1, s->uvlinesize);
}
-//printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_width, get_bit_count(&s->pb));
+//printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, get_bit_count(&s->pb));
}
}
emms_c();
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index 9b0df2856b..dc15a51f89 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -137,31 +137,40 @@ typedef struct Picture{
int16_t (*motion_val[2])[2];
int8_t *ref_index[2];
- uint16_t *mb_type_base;
- uint16_t *mb_type; ///< mb_type_base + mb_width + 2
+ uint32_t *mb_type_base;
+ uint32_t *mb_type; ///< mb_type_base + mb_stride + 1, note: only used for decoding currently
#define MB_TYPE_INTRA4x4 0x0001
-#define MB_TYPE_INTRA16x16 0x0002
-#define MB_TYPE_INTRA_PCM 0x0004
+#define MB_TYPE_INTRA16x16 0x0002 //FIXME h264 specific
+#define MB_TYPE_INTRA_PCM 0x0004 //FIXME h264 specific
#define MB_TYPE_16x16 0x0008
#define MB_TYPE_16x8 0x0010
#define MB_TYPE_8x16 0x0020
#define MB_TYPE_8x8 0x0040
#define MB_TYPE_INTERLACED 0x0080
#define MB_TYPE_DIRECT2 0x0100 //FIXME
-#define MB_TYPE_REF0 0x0200
-#define MB_TYPE_GMC2 0x0400 //FIXME
+#define MB_TYPE_ACPRED 0x0200
+#define MB_TYPE_GMC 0x0400 //FIXME mpeg4 specific
+#define MB_TYPE_SKIP 0x0800
#define MB_TYPE_P0L0 0x1000
#define MB_TYPE_P1L0 0x2000
#define MB_TYPE_P0L1 0x4000
#define MB_TYPE_P1L1 0x8000
+#define MB_TYPE_L0 (MB_TYPE_P0L0 | MB_TYPE_P1L0)
+#define MB_TYPE_L1 (MB_TYPE_P0L1 | MB_TYPE_P1L1)
+#define MB_TYPE_L0L1 (MB_TYPE_L0 | MB_TYPE_L1)
+#define MB_TYPE_QUANT 0x00010000
+//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 pat, ...)
#define IS_INTRA4x4(a) ((a)&MB_TYPE_INTRA4x4)
#define IS_INTRA16x16(a) ((a)&MB_TYPE_INTRA16x16)
-#define IS_INTRA(a) ((a)&3)
+#define IS_PCM(a) ((a)&MB_TYPE_INTRA_PCM)
+#define IS_INTRA(a) ((a)&7)
#define IS_INTER(a) ((a)&(MB_TYPE_16x16|MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8))
+#define IS_SKIP(a) ((a)&MB_TYPE_SKIP)
#define IS_INTRA_PCM(a) ((a)&MB_TYPE_INTRA_PCM)
#define IS_INTERLACED(a) ((a)&MB_TYPE_INTERLACED)
#define IS_DIRECT(a) ((a)&MB_TYPE_DIRECT2)
+#define IS_GMC(a) ((a)&MB_TYPE_GMC)
#define IS_16X16(a) ((a)&MB_TYPE_16x16)
#define IS_16X8(a) ((a)&MB_TYPE_16x8)
#define IS_8X16(a) ((a)&MB_TYPE_8x16)
@@ -170,7 +179,8 @@ typedef struct Picture{
#define IS_SUB_8X4(a) ((a)&MB_TYPE_16x8) //note reused
#define IS_SUB_4X8(a) ((a)&MB_TYPE_8x16) //note reused
#define IS_SUB_4X4(a) ((a)&MB_TYPE_8x8) //note reused
-#define IS_REF0(a) ((a)&MB_TYPE_REF0)
+#define IS_ACPRED(a) ((a)&MB_TYPE_ACPRED)
+#define IS_QUANT(a) ((a)&MB_TYPE_QUANT)
#define IS_DIR(a, part, list) ((a) & (MB_TYPE_P0L0<<((part)+2*(list))))
#define USES_LIST(a, list) ((a) & ((MB_TYPE_P0L0|MB_TYPE_P1L0)<<(2*(list)))) ///< does this mb use listX, note doesnt work if subMBs
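
The composite L0/L1 masks and the USES_LIST() test are what the error concealment and the debug printer key off when they need to know which reference list(s) a macroblock predicts from, independent of the codec. A small self-contained sketch, with the flag values copied from the block above and a hypothetical driver around them:

    #include <stdint.h>

    /* Sketch only: values copied from the mb_type flag block above. */
    #define MB_TYPE_16x16 0x0008
    #define MB_TYPE_P0L0  0x1000
    #define MB_TYPE_P1L0  0x2000
    #define MB_TYPE_P0L1  0x4000
    #define MB_TYPE_P1L1  0x8000
    #define MB_TYPE_L0    (MB_TYPE_P0L0 | MB_TYPE_P1L0)
    #define MB_TYPE_L1    (MB_TYPE_P0L1 | MB_TYPE_P1L1)
    #define MB_TYPE_L0L1  (MB_TYPE_L0 | MB_TYPE_L1)
    #define USES_LIST(a, list) ((a) & ((MB_TYPE_P0L0|MB_TYPE_P1L0)<<(2*(list))))

    int main(void)
    {
        uint32_t fwd   = MB_TYPE_16x16 | MB_TYPE_L0;   /* forward-predicted MB */
        uint32_t bidir = MB_TYPE_16x16 | MB_TYPE_L0L1; /* bi-predicted MB */

        /* forward MB uses list 0 only, bi-predicted MB uses both lists */
        return !( USES_LIST(fwd, 0) && !USES_LIST(fwd, 1) &&
                  USES_LIST(bidir, 0) && USES_LIST(bidir, 1) );
    }
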
@@ -283,6 +293,7 @@ typedef struct MpegEncContext {
int picture_in_gop_number; ///< 0-> first pic in gop, ...
int b_frames_since_non_b; ///< used for encoding, relative to not yet reordered input
int mb_width, mb_height; ///< number of MBs horizontally & vertically
+ int mb_stride; ///< mb_width+1, used for some arrays to allow simple addressing of left & top MBs without sig11
int h_edge_pos, v_edge_pos;///< horizontal / vertical position of the right/bottom edge (pixel replicateion)
int mb_num; ///< number of MBs of a picture
int linesize; ///< line size, in bytes, may be different from width
@@ -355,7 +366,13 @@ typedef struct MpegEncContext {
DSPContext dsp; ///< pointers for accelerated dsp fucntions
int f_code; ///< forward MV resolution
int b_code; ///< backward MV resolution for B Frames (mpeg4)
- int16_t (*motion_val)[2]; ///< used for MV prediction (4MV per MB)
+ int16_t (*motion_val)[2];
+ int16_t (*p_mv_table_base)[2];
+ int16_t (*b_forw_mv_table_base)[2];
+ int16_t (*b_back_mv_table_base)[2];
+ int16_t (*b_bidir_forw_mv_table_base)[2];
+ int16_t (*b_bidir_back_mv_table_base)[2];
+ int16_t (*b_direct_mv_table_base)[2];
int16_t (*p_mv_table)[2]; ///< MV table (1MV per MB) p-frame encoding
int16_t (*b_forw_mv_table)[2]; ///< MV table (1MV per MB) forward mode b-frame encoding
int16_t (*b_back_mv_table)[2]; ///< MV table (1MV per MB) backward mode b-frame encoding
@@ -396,12 +413,12 @@ typedef struct MpegEncContext {
int mb_x, mb_y;
int mb_skip_run;
int mb_intra;
- uint8_t *mb_type; ///< Table for MB type
+ uint8_t *mb_type; ///< Table for MB type FIXME remove and use picture->mb_type
#define MB_TYPE_INTRA 0x01
#define MB_TYPE_INTER 0x02
#define MB_TYPE_INTER4V 0x04
#define MB_TYPE_SKIPED 0x08
-#define MB_TYPE_GMC 0x10
+//#define MB_TYPE_GMC 0x10
#define MB_TYPE_DIRECT 0x10
#define MB_TYPE_FORWARD 0x20
@@ -410,6 +427,8 @@ typedef struct MpegEncContext {
int block_index[6]; ///< index to current MB in block based arrays with edges
int block_wrap[6];
+
+ int *mb_index2xy; ///< mb_index -> mb_x + mb_y*mb_stride
/** matrix transmitted in the bitstream */
uint16_t intra_matrix[64];
@@ -467,6 +486,7 @@ typedef struct MpegEncContext {
int last_bits; ///< temp var used for calculating the above vars
/* error concealment / resync */
+ int error_count;
uint8_t *error_status_table; ///< table of the error status of each MB
#define VP_START 1 ///< current MB is the first after a resync marker
#define AC_ERROR 2
@@ -544,9 +564,6 @@ typedef struct MpegEncContext {
uint8_t *tex_pb_buffer;
uint8_t *pb2_buffer;
int mpeg_quant;
-#define CO_LOCATED_TYPE_4MV 1
-#define CO_LOCATED_TYPE_FIELDMV 2
- int8_t *co_located_type_table; ///< 4mv & field_mv info for next b frame
int16_t (*field_mv_table)[2][2]; ///< used for interlaced b frame decoding
int8_t (*field_select_table)[2]; ///< wtf, no really another table for interlaced b frames
int t_frame; ///< time distance of first I -> B, used for interlaced b frames
@@ -693,6 +710,7 @@ void ff_emulated_edge_mc(MpegEncContext *s, uint8_t *src, int linesize, int bloc
int src_x, int src_y, int w, int h);
char ff_get_pict_type_char(int pict_type);
int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size);
+void ff_print_debug_info(MpegEncContext *s, Picture *pict);
void ff_er_frame_start(MpegEncContext *s);
void ff_er_frame_end(MpegEncContext *s);
@@ -827,7 +845,7 @@ int ff_mpeg4_decode_partitions(MpegEncContext *s);
int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s);
int ff_h263_resync(MpegEncContext *s);
int ff_h263_get_gob_height(MpegEncContext *s);
-void ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my);
+int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my);
inline int ff_h263_round_chroma(int x);
diff --git a/libavcodec/msmpeg4.c b/libavcodec/msmpeg4.c
index 9560aaf85d..817fdeaf3d 100644
--- a/libavcodec/msmpeg4.c
+++ b/libavcodec/msmpeg4.c
@@ -27,7 +27,6 @@
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
-//#define PRINT_MB
/*
* You can also call this codec : MPEG4 with a twist !
@@ -1585,13 +1584,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
{
int cbp, code, i;
uint8_t *coded_val;
-
-#ifdef PRINT_MB
-if(s->mb_x==0){
- printf("\n");
- if(s->mb_y==0) printf("\n");
-}
-#endif
+ uint32_t * const mb_type_ptr= &s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ];
if (s->pict_type == P_TYPE) {
set_stat(ST_INTER_MB);
@@ -1606,9 +1599,8 @@ if(s->mb_x==0){
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
s->mb_skiped = 1;
-#ifdef PRINT_MB
-printf("S ");
-#endif
+ *mb_type_ptr = MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
+
return 0;
}
}
@@ -1654,16 +1646,12 @@ printf("S ");
s->mv_type = MV_TYPE_16X16;
s->mv[0][0][0] = mx;
s->mv[0][0][1] = my;
-#ifdef PRINT_MB
-printf("P ");
-#endif
+ *mb_type_ptr = MB_TYPE_L0 | MB_TYPE_16x16;
} else {
//printf("I at %d %d %d %06X\n", s->mb_x, s->mb_y, ((cbp&3)? 1 : 0) +((cbp&0x3C)? 2 : 0), show_bits(&s->gb, 24));
set_stat(ST_INTRA_MB);
s->ac_pred = get_bits1(&s->gb);
-#ifdef PRINT_MB
-printf("%c", s->ac_pred ? 'A' : 'I');
-#endif
+ *mb_type_ptr = MB_TYPE_INTRA;
if(s->inter_intra_pred){
s->h263_aic_dir= get_vlc2(&s->gb, inter_intra_vlc.table, INTER_INTRA_VLC_BITS, 1);
// printf("%d%d %d %d/", s->ac_pred, s->h263_aic_dir, s->mb_x, s->mb_y);
@@ -1701,14 +1689,7 @@ static inline int msmpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
/* DC coef */
set_stat(ST_DC);
level = msmpeg4_decode_dc(s, n, &dc_pred_dir);
-#ifdef PRINT_MB
-{
- static int c;
- if(n==0) c=0;
- if(n==4) printf("%X", c);
- c+= c +dc_pred_dir;
-}
-#endif
+
if (level < 0){
fprintf(stderr, "dc overflow- block: %d qscale: %d//\n", n, s->qscale);
if(s->inter_intra_pred) level=0;
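With the PRINT_MB blocks gone, the decoder now records each macroblock's type in current_picture.mb_type using the generic MB_TYPE_* flags, which is what the new runtime debug output (ff_print_debug_info) and the error concealment consume. Below is a rough sketch of such a consumer, assuming the IS_INTRA()/IS_SKIP() test macros used elsewhere in this patch; the character mapping is purely illustrative.

#include <stdio.h>
#include "avcodec.h"
#include "mpegvideo.h"

/* Sketch only: reading back the per-picture mb_type table that
 * msmpeg4v34_decode_mb() now fills in, e.g. for debug output. */
static void dump_mb_types(MpegEncContext *s)
{
    int mb_x, mb_y;
    const uint32_t *mb_type = s->current_picture.mb_type;

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            /* note: addressing uses mb_stride, not mb_width */
            uint32_t t = mb_type[mb_x + mb_y * s->mb_stride];

            printf("%c", IS_INTRA(t) ? 'I' : IS_SKIP(t) ? 'S' : 'P');
        }
        printf("\n");
    }
}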
diff --git a/libavcodec/wmv2.c b/libavcodec/wmv2.c
index 2c15694212..1097b7c29c 100644
--- a/libavcodec/wmv2.c
+++ b/libavcodec/wmv2.c
@@ -267,20 +267,21 @@ void ff_wmv2_encode_mb(MpegEncContext * s,
static void parse_mb_skip(Wmv2Context * w){
int mb_x, mb_y;
MpegEncContext * const s= &w->s;
+ uint32_t * const mb_type= s->current_picture_ptr->mb_type;
w->skip_type= get_bits(&s->gb, 2);
switch(w->skip_type){
case SKIP_TYPE_NONE:
for(mb_y=0; mb_y<s->mb_height; mb_y++){
for(mb_x=0; mb_x<s->mb_width; mb_x++){
- s->mb_type[mb_y*s->mb_width + mb_x]= 0;
+ mb_type[mb_y*s->mb_stride + mb_x]= MB_TYPE_16x16 | MB_TYPE_L0;
}
}
break;
case SKIP_TYPE_MPEG:
for(mb_y=0; mb_y<s->mb_height; mb_y++){
for(mb_x=0; mb_x<s->mb_width; mb_x++){
- s->mb_type[mb_y*s->mb_width + mb_x]= get_bits1(&s->gb) ? MB_TYPE_SKIPED : 0;
+ mb_type[mb_y*s->mb_stride + mb_x]= (get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0;
}
}
break;
@@ -288,11 +289,11 @@ static void parse_mb_skip(Wmv2Context * w){
for(mb_y=0; mb_y<s->mb_height; mb_y++){
if(get_bits1(&s->gb)){
for(mb_x=0; mb_x<s->mb_width; mb_x++){
- s->mb_type[mb_y*s->mb_width + mb_x]= MB_TYPE_SKIPED;
+ mb_type[mb_y*s->mb_stride + mb_x]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
}
}else{
for(mb_x=0; mb_x<s->mb_width; mb_x++){
- s->mb_type[mb_y*s->mb_width + mb_x]= get_bits1(&s->gb) ? MB_TYPE_SKIPED : 0;
+ mb_type[mb_y*s->mb_stride + mb_x]= (get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0;
}
}
}
@@ -301,11 +302,11 @@ static void parse_mb_skip(Wmv2Context * w){
for(mb_x=0; mb_x<s->mb_width; mb_x++){
if(get_bits1(&s->gb)){
for(mb_y=0; mb_y<s->mb_height; mb_y++){
- s->mb_type[mb_y*s->mb_width + mb_x]= MB_TYPE_SKIPED;
+ mb_type[mb_y*s->mb_stride + mb_x]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
}
}else{
for(mb_y=0; mb_y<s->mb_height; mb_y++){
- s->mb_type[mb_y*s->mb_width + mb_x]= get_bits1(&s->gb) ? MB_TYPE_SKIPED : 0;
+ mb_type[mb_y*s->mb_stride + mb_x]= (get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0;
}
}
}
@@ -455,12 +456,6 @@ return -1;
s->esc3_level_length= 0;
s->esc3_run_length= 0;
- if(s->avctx->debug&FF_DEBUG_SKIP){
- for(i=0; i<s->mb_num; i++){
- if(i%s->mb_width==0) printf("\n");
- printf("%d", s->mb_type[i]);
- }
- }
s->picture_number++; //FIXME ?
@@ -712,7 +707,7 @@ static int wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
if(w->j_type) return 0;
if (s->pict_type == P_TYPE) {
- if(s->mb_type[s->mb_y * s->mb_width + s->mb_x]&MB_TYPE_SKIPED){
+ if(IS_SKIP(s->current_picture.mb_type[s->mb_y * s->mb_stride + s->mb_x])){
/* skip mb */
s->mb_intra = 0;
for(i=0;i<6;i++)
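parse_mb_skip() above distinguishes four layouts for the skip information (none, one flag per MB, per-row, per-column) and tags every entry with MB_TYPE_16x16 | MB_TYPE_L0, so the generic code treats skipped MBs as zero-residual forward-predicted blocks. For orientation, here is a sketch of a matching writer for the per-row layout; the skip_type code value and the use of put_bits() are assumptions for illustration, not the actual WMV2 encoder.

#include "avcodec.h"
#include "mpegvideo.h"

/* Sketch only: emitting the row-skip layout that parse_mb_skip() reads.
 * skip[] is a hypothetical per-MB flag array (1 = skipped); the skip_type
 * value 2 for the row variant is an assumption. */
static void write_row_skip(MpegEncContext *s, PutBitContext *pb,
                           const uint8_t *skip)
{
    int mb_x, mb_y;

    put_bits(pb, 2, 2);                       /* skip_type: row variant (assumed) */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        int all_skipped = 1;

        for (mb_x = 0; mb_x < s->mb_width; mb_x++)
            all_skipped &= skip[mb_y * s->mb_width + mb_x];

        put_bits(pb, 1, all_skipped);         /* 1 -> whole row skipped */
        if (!all_skipped)
            for (mb_x = 0; mb_x < s->mb_width; mb_x++)
                put_bits(pb, 1, skip[mb_y * s->mb_width + mb_x]);
    }
}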