author     Anton Khirnov <anton@khirnov.net>    2016-03-22 13:31:21 +0100
committer  Anton Khirnov <anton@khirnov.net>    2016-04-24 10:06:23 +0200
commit     3176217c60ca7828712985092d9102d331ea4f3d (patch)
tree       1124709788c4b1b3ec4da9cd8e204cc63039cc8f /libavcodec/h264_slice.c
parent     44d16df413878588659dd8901bba016b5a869fd1 (diff)
h264: decouple h264_ps from the h264 decoder
Make the SPS/PPS parsing independent of the H264Context, to allow decoupling the parser from the decoder. The change is modelled after the one done earlier for HEVC.

Move the dequant buffers to the PPS to avoid complex checks whether they changed and an expensive copy for frame threads.
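The refcounting pattern the diff introduces can be summarized in a short, hedged sketch (not the committed code; share_parameter_sets() is a hypothetical helper, and only av_buffer_ref()/av_buffer_unref() are real libavutil calls): a frame-thread context takes new references to the source context's SPS/PPS buffers instead of deep-copying them, which is what makes moving the dequant tables into the PPS cheap for frame threading.

#include "libavutil/buffer.h"
#include "libavutil/error.h"

/* Hedged sketch of the sharing step done per parameter-set slot in
 * ff_h264_update_thread_context(): drop the old reference, then take a
 * new one on whatever the source thread currently holds. */
static int share_parameter_sets(AVBufferRef **dst, AVBufferRef **src, int count)
{
    int i;

    for (i = 0; i < count; i++) {
        av_buffer_unref(&dst[i]);        /* release the stale entry, if any */
        if (!src[i])
            continue;                    /* slot unused in the source context */
        dst[i] = av_buffer_ref(src[i]);  /* share the parsed SPS/PPS, no copy */
        if (!dst[i])
            return AVERROR(ENOMEM);
    }
    return 0;
}

The hunk against ff_h264_update_thread_context() below performs this loop inline over h->ps.sps_list and h->ps.pps_list, after which h->ps.sps simply points into the shared, refcounted data.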
Diffstat (limited to 'libavcodec/h264_slice.c')
-rw-r--r--   libavcodec/h264_slice.c   325
1 file changed, 116 insertions, 209 deletions
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
index 4b9c00c76b..e2617e2ad4 100644
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -282,76 +282,6 @@ static int initialize_cur_frame(H264Context *h)
return 0;
}
-static void init_dequant8_coeff_table(H264Context *h)
-{
- int i, j, q, x;
- const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);
-
- for (i = 0; i < 6; i++) {
- h->dequant8_coeff[i] = h->dequant8_buffer[i];
- for (j = 0; j < i; j++)
- if (!memcmp(h->pps.scaling_matrix8[j], h->pps.scaling_matrix8[i],
- 64 * sizeof(uint8_t))) {
- h->dequant8_coeff[i] = h->dequant8_buffer[j];
- break;
- }
- if (j < i)
- continue;
-
- for (q = 0; q < max_qp + 1; q++) {
- int shift = ff_h264_quant_div6[q];
- int idx = ff_h264_quant_rem6[q];
- for (x = 0; x < 64; x++)
- h->dequant8_coeff[i][q][(x >> 3) | ((x & 7) << 3)] =
- ((uint32_t)ff_h264_dequant8_coeff_init[idx][ff_h264_dequant8_coeff_init_scan[((x >> 1) & 12) | (x & 3)]] *
- h->pps.scaling_matrix8[i][x]) << shift;
- }
- }
-}
-
-static void init_dequant4_coeff_table(H264Context *h)
-{
- int i, j, q, x;
- const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);
- for (i = 0; i < 6; i++) {
- h->dequant4_coeff[i] = h->dequant4_buffer[i];
- for (j = 0; j < i; j++)
- if (!memcmp(h->pps.scaling_matrix4[j], h->pps.scaling_matrix4[i],
- 16 * sizeof(uint8_t))) {
- h->dequant4_coeff[i] = h->dequant4_buffer[j];
- break;
- }
- if (j < i)
- continue;
-
- for (q = 0; q < max_qp + 1; q++) {
- int shift = ff_h264_quant_div6[q] + 2;
- int idx = ff_h264_quant_rem6[q];
- for (x = 0; x < 16; x++)
- h->dequant4_coeff[i][q][(x >> 2) | ((x << 2) & 0xF)] =
- ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] *
- h->pps.scaling_matrix4[i][x]) << shift;
- }
- }
-}
-
-void ff_h264_init_dequant_tables(H264Context *h)
-{
- int i, x;
- init_dequant4_coeff_table(h);
- if (h->pps.transform_8x8_mode)
- init_dequant8_coeff_table(h);
- if (h->sps.transform_bypass) {
- for (i = 0; i < 6; i++)
- for (x = 0; x < 16; x++)
- h->dequant4_coeff[i][0][x] = 1 << 6;
- if (h->pps.transform_8x8_mode)
- for (i = 0; i < 6; i++)
- for (x = 0; x < 64; x++)
- h->dequant8_coeff[i][0][x] = 1 << 6;
- }
-}
-
#define IN_RANGE(a, b, size) (((a) >= (b)) && ((a) < ((b) + (size))))
#define REBASE_PICTURE(pic, new_ctx, old_ctx) \
@@ -374,26 +304,6 @@ static void copy_picture_range(H264Picture **to, H264Picture **from, int count,
}
}
-static int copy_parameter_set(void **to, void **from, int count, int size)
-{
- int i;
-
- for (i = 0; i < count; i++) {
- if (to[i] && !from[i]) {
- av_freep(&to[i]);
- } else if (from[i] && !to[i]) {
- to[i] = av_malloc(size);
- if (!to[i])
- return AVERROR(ENOMEM);
- }
-
- if (from[i])
- memcpy(to[i], from[i], size);
- }
-
- return 0;
-}
-
#define copy_fields(to, from, start_field, end_field) \
memcpy(&to->start_field, &from->start_field, \
(char *)&to->end_field - (char *)&to->start_field)
@@ -411,28 +321,40 @@ int ff_h264_update_thread_context(AVCodecContext *dst,
if (dst == src || !h1->context_initialized)
return 0;
+ if (!h1->ps.sps)
+ return AVERROR_INVALIDDATA;
+
if (inited &&
(h->width != h1->width ||
h->height != h1->height ||
h->mb_width != h1->mb_width ||
h->mb_height != h1->mb_height ||
- h->sps.bit_depth_luma != h1->sps.bit_depth_luma ||
- h->sps.chroma_format_idc != h1->sps.chroma_format_idc ||
- h->sps.colorspace != h1->sps.colorspace)) {
+ !h->ps.sps ||
+ h->ps.sps->bit_depth_luma != h1->ps.sps->bit_depth_luma ||
+ h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc ||
+ h->ps.sps->colorspace != h1->ps.sps->colorspace)) {
need_reinit = 1;
}
// SPS/PPS
- if ((ret = copy_parameter_set((void **)h->sps_buffers,
- (void **)h1->sps_buffers,
- MAX_SPS_COUNT, sizeof(SPS))) < 0)
- return ret;
- h->sps = h1->sps;
- if ((ret = copy_parameter_set((void **)h->pps_buffers,
- (void **)h1->pps_buffers,
- MAX_PPS_COUNT, sizeof(PPS))) < 0)
- return ret;
- h->pps = h1->pps;
+ for (i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++) {
+ av_buffer_unref(&h->ps.sps_list[i]);
+ if (h1->ps.sps_list[i]) {
+ h->ps.sps_list[i] = av_buffer_ref(h1->ps.sps_list[i]);
+ if (!h->ps.sps_list[i])
+ return AVERROR(ENOMEM);
+ }
+ }
+ for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) {
+ av_buffer_unref(&h->ps.pps_list[i]);
+ if (h1->ps.pps_list[i]) {
+ h->ps.pps_list[i] = av_buffer_ref(h1->ps.pps_list[i]);
+ if (!h->ps.pps_list[i])
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ h->ps.sps = h1->ps.sps;
if (need_reinit || !inited) {
h->width = h1->width;
@@ -486,20 +408,6 @@ int ff_h264_update_thread_context(AVCodecContext *dst,
h->is_avc = h1->is_avc;
h->nal_length_size = h1->nal_length_size;
- // Dequantization matrices
- // FIXME these are big - can they be only copied when PPS changes?
- copy_fields(h, h1, dequant4_buffer, dequant4_coeff);
-
- for (i = 0; i < 6; i++)
- h->dequant4_coeff[i] = h->dequant4_buffer[0] +
- (h1->dequant4_coeff[i] - h1->dequant4_buffer[0]);
-
- for (i = 0; i < 6; i++)
- h->dequant8_coeff[i] = h->dequant8_buffer[0] +
- (h1->dequant8_coeff[i] - h1->dequant8_buffer[0]);
-
- h->dequant_coeff_pps = h1->dequant_coeff_pps;
-
// POC timing
copy_fields(h, h1, poc_lsb, current_slice);
@@ -766,7 +674,7 @@ static void init_scan_tables(H264Context *h)
h->field_scan8x8_cavlc[i] = TRANSPOSE(field_scan8x8_cavlc[i]);
#undef TRANSPOSE
}
- if (h->sps.transform_bypass) { // FIXME same ugly
+ if (h->ps.sps->transform_bypass) { // FIXME same ugly
h->zigzag_scan_q0 = ff_zigzag_scan;
h->zigzag_scan8x8_q0 = ff_zigzag_direct;
h->zigzag_scan8x8_cavlc_q0 = zigzag_scan8x8_cavlc;
@@ -793,7 +701,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h)
enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
const enum AVPixelFormat *choices = pix_fmts;
- switch (h->sps.bit_depth_luma) {
+ switch (h->ps.sps->bit_depth_luma) {
case 9:
if (CHROMA444(h)) {
if (h->avctx->colorspace == AVCOL_SPC_RGB) {
@@ -856,7 +764,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h)
break;
default:
av_log(h->avctx, AV_LOG_ERROR,
- "Unsupported bit depth %d\n", h->sps.bit_depth_luma);
+ "Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma);
return AVERROR_INVALIDDATA;
}
@@ -868,8 +776,9 @@ static enum AVPixelFormat get_pixel_format(H264Context *h)
/* export coded and cropped frame dimensions to AVCodecContext */
static int init_dimensions(H264Context *h)
{
- int width = h->width - (h->sps.crop_right + h->sps.crop_left);
- int height = h->height - (h->sps.crop_top + h->sps.crop_bottom);
+ SPS *sps = h->ps.sps;
+ int width = h->width - (sps->crop_right + sps->crop_left);
+ int height = h->height - (sps->crop_top + sps->crop_bottom);
/* handle container cropping */
if (FFALIGN(h->avctx->width, 16) == FFALIGN(width, 16) &&
@@ -885,11 +794,11 @@ static int init_dimensions(H264Context *h)
return AVERROR_INVALIDDATA;
av_log(h->avctx, AV_LOG_WARNING, "Ignoring cropping information.\n");
- h->sps.crop_bottom =
- h->sps.crop_top =
- h->sps.crop_right =
- h->sps.crop_left =
- h->sps.crop = 0;
+ sps->crop_bottom =
+ sps->crop_top =
+ sps->crop_right =
+ sps->crop_left =
+ sps->crop = 0;
width = h->width;
height = h->height;
@@ -905,21 +814,22 @@ static int init_dimensions(H264Context *h)
static int h264_slice_header_init(H264Context *h)
{
+ const SPS *sps = h->ps.sps;
int nb_slices = (HAVE_THREADS &&
h->avctx->active_thread_type & FF_THREAD_SLICE) ?
h->avctx->thread_count : 1;
int i, ret;
- ff_set_sar(h->avctx, h->sps.sar);
+ ff_set_sar(h->avctx, sps->sar);
av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt,
&h->chroma_x_shift, &h->chroma_y_shift);
- if (h->sps.timing_info_present_flag) {
- int64_t den = h->sps.time_scale;
+ if (sps->timing_info_present_flag) {
+ int64_t den = sps->time_scale;
if (h->x264_build < 44U)
den *= 2;
av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num,
- h->sps.num_units_in_tick, den, 1 << 30);
+ sps->num_units_in_tick, den, 1 << 30);
}
ff_h264_free_tables(h);
@@ -934,24 +844,24 @@ static int h264_slice_header_init(H264Context *h)
return ret;
}
- if (h->sps.bit_depth_luma < 8 || h->sps.bit_depth_luma > 10) {
+ if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 10) {
av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
- h->sps.bit_depth_luma);
+ sps->bit_depth_luma);
return AVERROR_INVALIDDATA;
}
- h->avctx->bits_per_raw_sample = h->sps.bit_depth_luma;
- h->pixel_shift = h->sps.bit_depth_luma > 8;
- h->chroma_format_idc = h->sps.chroma_format_idc;
- h->bit_depth_luma = h->sps.bit_depth_luma;
+ h->avctx->bits_per_raw_sample = sps->bit_depth_luma;
+ h->pixel_shift = sps->bit_depth_luma > 8;
+ h->chroma_format_idc = sps->chroma_format_idc;
+ h->bit_depth_luma = sps->bit_depth_luma;
- ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma,
- h->sps.chroma_format_idc);
- ff_h264chroma_init(&h->h264chroma, h->sps.bit_depth_chroma);
- ff_h264qpel_init(&h->h264qpel, h->sps.bit_depth_luma);
- ff_h264_pred_init(&h->hpc, h->avctx->codec_id, h->sps.bit_depth_luma,
- h->sps.chroma_format_idc);
- ff_videodsp_init(&h->vdsp, h->sps.bit_depth_luma);
+ ff_h264dsp_init(&h->h264dsp, sps->bit_depth_luma,
+ sps->chroma_format_idc);
+ ff_h264chroma_init(&h->h264chroma, sps->bit_depth_chroma);
+ ff_h264qpel_init(&h->h264qpel, sps->bit_depth_luma);
+ ff_h264_pred_init(&h->hpc, h->avctx->codec_id, sps->bit_depth_luma,
+ sps->chroma_format_idc);
+ ff_videodsp_init(&h->vdsp, sps->bit_depth_luma);
if (nb_slices > H264_MAX_THREADS || (nb_slices > h->mb_height && h->mb_height)) {
int max_slices;
@@ -1002,6 +912,8 @@ static int h264_slice_header_init(H264Context *h)
*/
int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
{
+ const SPS *sps;
+ const PPS *pps;
unsigned int first_mb_in_slice;
unsigned int pps_id;
int ret;
@@ -1064,39 +976,36 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id);
return AVERROR_INVALIDDATA;
}
- if (!h->pps_buffers[pps_id]) {
+ if (!h->ps.pps_list[pps_id]) {
av_log(h->avctx, AV_LOG_ERROR,
"non-existing PPS %u referenced\n",
pps_id);
return AVERROR_INVALIDDATA;
}
if (!h->setup_finished) {
- h->pps = *h->pps_buffers[pps_id];
- } else if (h->dequant_coeff_pps != pps_id) {
+ h->ps.pps = (const PPS*)h->ps.pps_list[pps_id]->data;
+ } else if (h->ps.pps != (const PPS*)h->ps.pps_list[pps_id]->data) {
av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
return AVERROR_INVALIDDATA;
}
- if (!h->sps_buffers[h->pps.sps_id]) {
+ if (!h->ps.sps_list[h->ps.pps->sps_id]) {
av_log(h->avctx, AV_LOG_ERROR,
"non-existing SPS %u referenced\n",
- h->pps.sps_id);
+ h->ps.pps->sps_id);
return AVERROR_INVALIDDATA;
}
- if (h->pps.sps_id != h->sps.sps_id ||
- h->sps_buffers[h->pps.sps_id]->new) {
- h->sps_buffers[h->pps.sps_id]->new = 0;
-
- h->sps = *h->sps_buffers[h->pps.sps_id];
+ if (h->ps.sps != (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data) {
+ h->ps.sps = (SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data;
- if (h->bit_depth_luma != h->sps.bit_depth_luma ||
- h->chroma_format_idc != h->sps.chroma_format_idc)
+ if (h->bit_depth_luma != h->ps.sps->bit_depth_luma ||
+ h->chroma_format_idc != h->ps.sps->chroma_format_idc)
needs_reinit = 1;
if (h->flags & AV_CODEC_FLAG_LOW_DELAY ||
- (h->sps.bitstream_restriction_flag &&
- !h->sps.num_reorder_frames)) {
+ (h->ps.sps->bitstream_restriction_flag &&
+ !h->ps.sps->num_reorder_frames)) {
if (h->avctx->has_b_frames > 1 || h->delayed_pic[0])
av_log(h->avctx, AV_LOG_WARNING, "Delayed frames seen. "
"Reenabling low delay requires a codec flush.\n");
@@ -1109,23 +1018,26 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
}
+ pps = h->ps.pps;
+ sps = h->ps.sps;
+
if (!h->setup_finished) {
- h->avctx->profile = ff_h264_get_profile(&h->sps);
- h->avctx->level = h->sps.level_idc;
- h->avctx->refs = h->sps.ref_frame_count;
+ h->avctx->profile = ff_h264_get_profile(sps);
+ h->avctx->level = sps->level_idc;
+ h->avctx->refs = sps->ref_frame_count;
- if (h->mb_width != h->sps.mb_width ||
- h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag))
+ if (h->mb_width != sps->mb_width ||
+ h->mb_height != sps->mb_height * (2 - sps->frame_mbs_only_flag))
needs_reinit = 1;
- h->mb_width = h->sps.mb_width;
- h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
+ h->mb_width = sps->mb_width;
+ h->mb_height = sps->mb_height * (2 - sps->frame_mbs_only_flag);
h->mb_num = h->mb_width * h->mb_height;
h->mb_stride = h->mb_width + 1;
h->b_stride = h->mb_width * 4;
- h->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p
+ h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p
h->width = 16 * h->mb_width;
h->height = 16 * h->mb_height;
@@ -1134,15 +1046,15 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
if (ret < 0)
return ret;
- if (h->sps.video_signal_type_present_flag) {
- h->avctx->color_range = h->sps.full_range ? AVCOL_RANGE_JPEG
+ if (sps->video_signal_type_present_flag) {
+ h->avctx->color_range = sps->full_range ? AVCOL_RANGE_JPEG
: AVCOL_RANGE_MPEG;
- if (h->sps.colour_description_present_flag) {
- if (h->avctx->colorspace != h->sps.colorspace)
+ if (sps->colour_description_present_flag) {
+ if (h->avctx->colorspace != sps->colorspace)
needs_reinit = 1;
- h->avctx->color_primaries = h->sps.color_primaries;
- h->avctx->color_trc = h->sps.color_trc;
- h->avctx->colorspace = h->sps.colorspace;
+ h->avctx->color_primaries = sps->color_primaries;
+ h->avctx->color_trc = sps->color_trc;
+ h->avctx->colorspace = sps->colorspace;
}
}
}
@@ -1192,12 +1104,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
}
}
- if (sl == h->slice_ctx && h->dequant_coeff_pps != pps_id) {
- h->dequant_coeff_pps = pps_id;
- ff_h264_init_dequant_tables(h);
- }
-
- frame_num = get_bits(&sl->gb, h->sps.log2_max_frame_num);
+ frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);
if (!h->setup_finished)
h->frame_num = frame_num;
@@ -1207,7 +1114,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
last_pic_droppable = h->droppable;
droppable = h->nal_ref_idc == 0;
- if (h->sps.frame_mbs_only_flag) {
+ if (sps->frame_mbs_only_flag) {
picture_structure = PICT_FRAME;
} else {
field_pic_flag = get_bits1(&sl->gb);
@@ -1216,7 +1123,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
picture_structure = PICT_TOP_FIELD + bottom_field_flag;
} else {
picture_structure = PICT_FRAME;
- mb_aff_frame = h->sps.mb_aff;
+ mb_aff_frame = sps->mb_aff;
}
}
if (!h->setup_finished) {
@@ -1244,13 +1151,13 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
* frames just to throw them away */
if (h->frame_num != h->prev_frame_num) {
int unwrap_prev_frame_num = h->prev_frame_num;
- int max_frame_num = 1 << h->sps.log2_max_frame_num;
+ int max_frame_num = 1 << sps->log2_max_frame_num;
if (unwrap_prev_frame_num > h->frame_num)
unwrap_prev_frame_num -= max_frame_num;
- if ((h->frame_num - unwrap_prev_frame_num) > h->sps.ref_frame_count) {
- unwrap_prev_frame_num = (h->frame_num - h->sps.ref_frame_count) - 1;
+ if ((h->frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
+ unwrap_prev_frame_num = (h->frame_num - sps->ref_frame_count) - 1;
if (unwrap_prev_frame_num < 0)
unwrap_prev_frame_num += max_frame_num;
@@ -1310,7 +1217,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
}
while (h->frame_num != h->prev_frame_num &&
- h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
+ h->frame_num != (h->prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
h->frame_num, h->prev_frame_num);
@@ -1321,7 +1228,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
}
h->prev_frame_num++;
- h->prev_frame_num %= 1 << h->sps.log2_max_frame_num;
+ h->prev_frame_num %= 1 << sps->log2_max_frame_num;
h->cur_pic_ptr->frame_num = h->prev_frame_num;
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
@@ -1412,35 +1319,35 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
if (h->picture_structure == PICT_FRAME) {
h->curr_pic_num = h->frame_num;
- h->max_pic_num = 1 << h->sps.log2_max_frame_num;
+ h->max_pic_num = 1 << sps->log2_max_frame_num;
} else {
h->curr_pic_num = 2 * h->frame_num + 1;
- h->max_pic_num = 1 << (h->sps.log2_max_frame_num + 1);
+ h->max_pic_num = 1 << (sps->log2_max_frame_num + 1);
}
if (h->nal_unit_type == NAL_IDR_SLICE)
get_ue_golomb(&sl->gb); /* idr_pic_id */
- if (h->sps.poc_type == 0) {
- int poc_lsb = get_bits(&sl->gb, h->sps.log2_max_poc_lsb);
+ if (sps->poc_type == 0) {
+ int poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
if (!h->setup_finished)
h->poc_lsb = poc_lsb;
- if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) {
+ if (pps->pic_order_present == 1 && h->picture_structure == PICT_FRAME) {
int delta_poc_bottom = get_se_golomb(&sl->gb);
if (!h->setup_finished)
h->delta_poc_bottom = delta_poc_bottom;
}
}
- if (h->sps.poc_type == 1 && !h->sps.delta_pic_order_always_zero_flag) {
+ if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) {
int delta_poc = get_se_golomb(&sl->gb);
if (!h->setup_finished)
h->delta_poc[0] = delta_poc;
- if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) {
+ if (pps->pic_order_present == 1 && h->picture_structure == PICT_FRAME) {
delta_poc = get_se_golomb(&sl->gb);
if (!h->setup_finished)
@@ -1451,14 +1358,14 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
if (!h->setup_finished)
ff_init_poc(h, h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc);
- if (h->pps.redundant_pic_cnt_present)
+ if (pps->redundant_pic_cnt_present)
sl->redundant_pic_count = get_ue_golomb(&sl->gb);
if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
ret = ff_h264_parse_ref_count(&sl->list_count, sl->ref_count,
- &sl->gb, &h->pps, sl->slice_type_nos,
+ &sl->gb, pps, sl->slice_type_nos,
h->picture_structure);
if (ret < 0)
return ret;
@@ -1471,12 +1378,12 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
}
}
- if ((h->pps.weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
- (h->pps.weighted_bipred_idc == 1 &&
+ if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
+ (pps->weighted_bipred_idc == 1 &&
sl->slice_type_nos == AV_PICTURE_TYPE_B))
- ff_h264_pred_weight_table(&sl->gb, &h->sps, sl->ref_count,
+ ff_h264_pred_weight_table(&sl->gb, sps, sl->ref_count,
sl->slice_type_nos, &sl->pwt);
- else if (h->pps.weighted_bipred_idc == 2 &&
+ else if (pps->weighted_bipred_idc == 2 &&
sl->slice_type_nos == AV_PICTURE_TYPE_B) {
implicit_weight_table(h, sl, -1);
} else {
@@ -1503,7 +1410,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
if (FRAME_MBAFF(h)) {
ff_h264_fill_mbaff_ref_list(h, sl);
- if (h->pps.weighted_bipred_idc == 2 && sl->slice_type_nos == AV_PICTURE_TYPE_B) {
+ if (pps->weighted_bipred_idc == 2 && sl->slice_type_nos == AV_PICTURE_TYPE_B) {
implicit_weight_table(h, sl, 0);
implicit_weight_table(h, sl, 1);
}
@@ -1513,7 +1420,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
ff_h264_direct_dist_scale_factor(h, sl);
ff_h264_direct_ref_list_init(h, sl);
- if (sl->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) {
+ if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) {
tmp = get_ue_golomb_31(&sl->gb);
if (tmp > 2) {
av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
@@ -1523,8 +1430,8 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
}
sl->last_qscale_diff = 0;
- tmp = h->pps.init_qp + get_se_golomb(&sl->gb);
- if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) {
+ tmp = pps->init_qp + get_se_golomb(&sl->gb);
+ if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) {
av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
return AVERROR_INVALIDDATA;
}
@@ -1541,7 +1448,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
sl->deblocking_filter = 1;
sl->slice_alpha_c0_offset = 0;
sl->slice_beta_offset = 0;
- if (h->pps.deblocking_filter_parameters_present) {
+ if (pps->deblocking_filter_parameters_present) {
tmp = get_ue_golomb_31(&sl->gb);
if (tmp > 2) {
av_log(h->avctx, AV_LOG_ERROR,
@@ -1598,9 +1505,9 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
sl->qp_thresh = 15 -
FFMIN(sl->slice_alpha_c0_offset, sl->slice_beta_offset) -
FFMAX3(0,
- h->pps.chroma_qp_index_offset[0],
- h->pps.chroma_qp_index_offset[1]) +
- 6 * (h->sps.bit_depth_luma - 8);
+ pps->chroma_qp_index_offset[0],
+ pps->chroma_qp_index_offset[1]) +
+ 6 * (sps->bit_depth_luma - 8);
sl->slice_num = ++h->current_slice;
if (sl->slice_num >= MAX_SLICES) {
@@ -1870,7 +1777,7 @@ static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb
/* CAVLC 8x8dct requires NNZ values for residual decoding that differ
* from what the loop filter needs */
- if (!CABAC(h) && h->pps.transform_8x8_mode) {
+ if (!CABAC(h) && h->ps.pps->transform_8x8_mode) {
if (IS_8x8DCT(top_type)) {
nnz_cache[4 + 8 * 0] =
nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
@@ -2060,7 +1967,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
avctx->codec_id != AV_CODEC_ID_H264 ||
(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
- if (h->pps.cabac) {
+ if (h->ps.pps->cabac) {
/* realign */
align_get_bits(&sl->gb);