path: root/libavcodec/hapdec.c
author    Tom Butterworth <bangnoise@gmail.com>    2015-07-23 14:39:29 -0400
committer Vittorio Giovara <vittorio.giovara@gmail.com>    2015-07-27 16:00:22 +0100
commit    3ee217853a6741b829a2683f49c590618891b1ab (patch)
tree      2a3ef630b8bf1bede5cf73a30e64f29b22a5555b /libavcodec/hapdec.c
parent    43dd004747fa697396b47d034a80e069facbea09 (diff)
Support the Hap chunked frame format
Signed-off-by: Vittorio Giovara <vittorio.giovara@gmail.com>
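The chunked format splits a frame's texture data into several chunks, each stored raw or Snappy-compressed, described by compressor, size and (optionally) offset tables inside a Decode Instructions section so chunks can be decompressed in parallel. For orientation, here is a minimal standalone sketch of the section-header layout that the reworked parse_section_header() below handles; the helper name is illustrative and not part of the patch:

    #include <stddef.h>
    #include <stdint.h>

    /* Read one Hap section header: a 24-bit little-endian size plus a
     * one-byte type, where a size of zero means the real size follows as
     * a 32-bit little-endian word. Returns the number of header bytes
     * consumed, or -1 on truncated input. (Sketch only.) */
    static int read_hap_section_header(const uint8_t *buf, size_t len,
                                       uint32_t *size, uint8_t *type)
    {
        if (len < 4)
            return -1;
        *size = buf[0] | (buf[1] << 8) | ((uint32_t)buf[2] << 16); /* 24-bit size */
        *type = buf[3];                                            /* section type */
        if (*size != 0)
            return 4;
        if (len < 8)                                               /* extended 32-bit size */
            return -1;
        *size = buf[4] | (buf[5] << 8) | ((uint32_t)buf[6] << 16) |
                ((uint32_t)buf[7] << 24);
        return 8;
    }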
Diffstat (limited to 'libavcodec/hapdec.c')
-rw-r--r--  libavcodec/hapdec.c   273
1 file changed, 214 insertions, 59 deletions
diff --git a/libavcodec/hapdec.c b/libavcodec/hapdec.c
index 46dda0400d..8f5365b269 100644
--- a/libavcodec/hapdec.c
+++ b/libavcodec/hapdec.c
@@ -1,6 +1,7 @@
/*
* Vidvox Hap decoder
* Copyright (C) 2015 Vittorio Giovara <vittorio.giovara@gmail.com>
+ * Copyright (C) 2015 Tom Butterworth <bangnoise@gmail.com>
*
* This file is part of Libav.
*
@@ -36,6 +37,7 @@
#include "bytestream.h"
#include "hap.h"
#include "internal.h"
+#include "memory.h"
#include "snappy.h"
#include "texturedsp.h"
#include "thread.h"
@@ -43,85 +45,224 @@
/* The first three bytes are the size of the section past the header, or zero
* if the length is stored in the next long word. The fourth byte in the first
* long word indicates the type of the current section. */
-static int parse_section_header(AVCodecContext *avctx)
+static int parse_section_header(GetByteContext *gbc, int *section_size,
+ enum HapSectionType *section_type)
{
- HapContext *ctx = avctx->priv_data;
- GetByteContext *gbc = &ctx->gbc;
- int length;
-
if (bytestream2_get_bytes_left(gbc) < 4)
return AVERROR_INVALIDDATA;
- length = bytestream2_get_le24(gbc);
+ *section_size = bytestream2_get_le24(gbc);
+ *section_type = bytestream2_get_byte(gbc);
- ctx->section_type = bytestream2_get_byte(gbc);
-
- if (length == 0) {
+ if (*section_size == 0) {
if (bytestream2_get_bytes_left(gbc) < 4)
return AVERROR_INVALIDDATA;
- length = bytestream2_get_le32(gbc);
+
+ *section_size = bytestream2_get_le32(gbc);
}
- if (length > bytestream2_get_bytes_left(gbc) || length == 0)
+ if (*section_size > bytestream2_get_bytes_left(gbc))
return AVERROR_INVALIDDATA;
+ else
+ return 0;
+}
- return length;
+static int hap_parse_decode_instructions(HapContext *ctx, int size)
+{
+ GetByteContext *gbc = &ctx->gbc;
+ int section_size;
+ enum HapSectionType section_type;
+ int is_first_table = 1, had_offsets = 0, had_compressors = 0, had_sizes = 0;
+ int i, ret;
+
+ while (size > 0) {
+ int stream_remaining = bytestream2_get_bytes_left(gbc);
+ ret = parse_section_header(gbc, &section_size, &section_type);
+ if (ret != 0)
+ return ret;
+
+ size -= stream_remaining - bytestream2_get_bytes_left(gbc);
+
+ switch (section_type) {
+ case HAP_ST_COMPRESSOR_TABLE:
+ ret = ff_hap_set_chunk_count(ctx, section_size, is_first_table);
+ if (ret != 0)
+ return ret;
+ for (i = 0; i < section_size; i++) {
+ ctx->chunks[i].compressor = bytestream2_get_byte(gbc) << 4;
+ }
+ had_compressors = 1;
+ is_first_table = 0;
+ break;
+ case HAP_ST_SIZE_TABLE:
+ ret = ff_hap_set_chunk_count(ctx, section_size / 4, is_first_table);
+ if (ret != 0)
+ return ret;
+ for (i = 0; i < section_size / 4; i++) {
+ ctx->chunks[i].compressed_size = bytestream2_get_le32(gbc);
+ }
+ had_sizes = 1;
+ is_first_table = 0;
+ break;
+ case HAP_ST_OFFSET_TABLE:
+ ret = ff_hap_set_chunk_count(ctx, section_size / 4, is_first_table);
+ if (ret != 0)
+ return ret;
+ for (i = 0; i < section_size / 4; i++) {
+ ctx->chunks[i].compressed_offset = bytestream2_get_le32(gbc);
+ }
+ had_offsets = 1;
+ is_first_table = 0;
+ break;
+ default:
+ break;
+ }
+ size -= section_size;
+ }
+
+ if (!had_sizes || !had_compressors)
+ return AVERROR_INVALIDDATA;
+
+ /* The offsets table is optional. If not present then calculate offsets by
+ * summing the sizes of preceding chunks. */
+ if (!had_offsets) {
+ size_t running_size = 0;
+ for (i = 0; i < ctx->chunk_count; i++) {
+ ctx->chunks[i].compressed_offset = running_size;
+ running_size += ctx->chunks[i].compressed_size;
+ }
+ }
+
+ return 0;
+}
+
+static int hap_can_use_tex_in_place(HapContext *ctx)
+{
+ int i;
+ size_t running_offset = 0;
+ for (i = 0; i < ctx->chunk_count; i++) {
+ if (ctx->chunks[i].compressed_offset != running_offset
+ || ctx->chunks[i].compressor != HAP_COMP_NONE)
+ return 0;
+ running_offset += ctx->chunks[i].compressed_size;
+ }
+ return 1;
}
-/* Prepare the texture to be decompressed */
-static int setup_texture(AVCodecContext *avctx, size_t length)
+static int hap_parse_frame_header(AVCodecContext *avctx)
{
HapContext *ctx = avctx->priv_data;
GetByteContext *gbc = &ctx->gbc;
- int64_t snappy_size;
+ int section_size;
+ enum HapSectionType section_type;
const char *compressorstr;
- int ret;
+ int i, ret;
+
+ ret = parse_section_header(gbc, &section_size, &section_type);
+ if (ret != 0)
+ return ret;
- if ((avctx->codec_tag == MKTAG('H','a','p','1') && (ctx->section_type & 0x0F) != HAP_FMT_RGBDXT1) ||
- (avctx->codec_tag == MKTAG('H','a','p','5') && (ctx->section_type & 0x0F) != HAP_FMT_RGBADXT5) ||
- (avctx->codec_tag == MKTAG('H','a','p','Y') && (ctx->section_type & 0x0F) != HAP_FMT_YCOCGDXT5)) {
+ if ((avctx->codec_tag == MKTAG('H','a','p','1') && (section_type & 0x0F) != HAP_FMT_RGBDXT1) ||
+ (avctx->codec_tag == MKTAG('H','a','p','5') && (section_type & 0x0F) != HAP_FMT_RGBADXT5) ||
+ (avctx->codec_tag == MKTAG('H','a','p','Y') && (section_type & 0x0F) != HAP_FMT_YCOCGDXT5)) {
av_log(avctx, AV_LOG_ERROR,
- "Invalid texture format %#04x.\n", ctx->section_type & 0x0F);
+ "Invalid texture format %#04x.\n", section_type & 0x0F);
return AVERROR_INVALIDDATA;
}
- switch (ctx->section_type & 0xF0) {
- case HAP_COMP_NONE:
- /* Only DXTC texture compression */
- ctx->tex_data = gbc->buffer;
- ctx->tex_size = length;
- compressorstr = "none";
- break;
- case HAP_COMP_SNAPPY:
- snappy_size = ff_snappy_peek_uncompressed_length(gbc);
- ret = av_reallocp(&ctx->snappied, snappy_size);
- if (ret < 0) {
- return ret;
+ switch (section_type & 0xF0) {
+ case HAP_COMP_NONE:
+ case HAP_COMP_SNAPPY:
+ ret = ff_hap_set_chunk_count(ctx, 1, 1);
+ if (ret == 0) {
+ ctx->chunks[0].compressor = section_type & 0xF0;
+ ctx->chunks[0].compressed_offset = 0;
+ ctx->chunks[0].compressed_size = section_size;
+ }
+ if (ctx->chunks[0].compressor == HAP_COMP_NONE) {
+ compressorstr = "none";
+ } else {
+ compressorstr = "snappy";
+ }
+ break;
+ case HAP_COMP_COMPLEX:
+ ret = parse_section_header(gbc, &section_size, &section_type);
+ if (ret == 0 && section_type != HAP_ST_DECODE_INSTRUCTIONS)
+ ret = AVERROR_INVALIDDATA;
+ if (ret == 0)
+ ret = hap_parse_decode_instructions(ctx, section_size);
+ compressorstr = "complex";
+ break;
+ default:
+ ret = AVERROR_INVALIDDATA;
+ break;
+ }
+
+ if (ret != 0)
+ return ret;
+
+ /* Check the frame is valid and read the uncompressed chunk sizes */
+ ctx->tex_size = 0;
+ for (i = 0; i < ctx->chunk_count; i++) {
+ HapChunk *chunk = &ctx->chunks[i];
+
+ /* Check the compressed buffer is valid */
+ if (chunk->compressed_offset + chunk->compressed_size > bytestream2_get_bytes_left(gbc))
+ return AVERROR_INVALIDDATA;
+
+ /* Chunks are unpacked sequentially, ctx->tex_size is the uncompressed
+ * size thus far */
+ chunk->uncompressed_offset = ctx->tex_size;
+
+ /* Fill out uncompressed size */
+ if (chunk->compressor == HAP_COMP_SNAPPY) {
+ GetByteContext gbc_tmp;
+ int64_t uncompressed_size;
+ bytestream2_init(&gbc_tmp, gbc->buffer + chunk->compressed_offset,
+ chunk->compressed_size);
+ uncompressed_size = ff_snappy_peek_uncompressed_length(&gbc_tmp);
+ if (uncompressed_size < 0) {
+ return uncompressed_size;
+ }
+ chunk->uncompressed_size = uncompressed_size;
+ } else if (chunk->compressor == HAP_COMP_NONE) {
+ chunk->uncompressed_size = chunk->compressed_size;
+ } else {
+ return AVERROR_INVALIDDATA;
}
+ ctx->tex_size += chunk->uncompressed_size;
+ }
+
+ av_log(avctx, AV_LOG_DEBUG, "%s compressor\n", compressorstr);
+
+ return ret;
+}
+
+static int decompress_chunks_thread(AVCodecContext *avctx, void *arg,
+ int chunk_nb, int thread_nb)
+{
+ HapContext *ctx = avctx->priv_data;
+
+ HapChunk *chunk = &ctx->chunks[chunk_nb];
+ GetByteContext gbc;
+ uint8_t *dst = ctx->tex_buf + chunk->uncompressed_offset;
+
+ bytestream2_init(&gbc, ctx->gbc.buffer + chunk->compressed_offset, chunk->compressed_size);
+
+ if (chunk->compressor == HAP_COMP_SNAPPY) {
+ int ret;
+ int64_t uncompressed_size = ctx->tex_size;
+
/* Uncompress the frame */
- ret = ff_snappy_uncompress(gbc, ctx->snappied, &snappy_size);
+ ret = ff_snappy_uncompress(&gbc, dst, &uncompressed_size);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Snappy uncompress error\n");
return ret;
}
-
- ctx->tex_data = ctx->snappied;
- ctx->tex_size = snappy_size;
- compressorstr = "snappy";
- break;
- case HAP_COMP_COMPLEX:
- compressorstr = "complex";
- avpriv_request_sample(avctx, "Complex Hap compressor");
- return AVERROR_PATCHWELCOME;
- break;
- default:
- av_log(avctx, AV_LOG_ERROR,
- "Invalid compressor mode %02X.\n", ctx->section_type);
- return AVERROR_INVALIDDATA;
+ } else if (chunk->compressor == HAP_COMP_NONE) {
+ bytestream2_get_buffer(&gbc, dst, chunk->compressed_size);
}
- av_log(avctx, AV_LOG_DEBUG, "%s compressor\n", compressorstr);
-
return 0;
}
@@ -167,19 +308,12 @@ static int hap_decode(AVCodecContext *avctx, void *data,
{
HapContext *ctx = avctx->priv_data;
ThreadFrame tframe;
- int ret, length;
+ int ret, i;
bytestream2_init(&ctx->gbc, avpkt->data, avpkt->size);
/* Check for section header */
- length = parse_section_header(avctx);
- if (length < 0) {
- av_log(avctx, AV_LOG_ERROR, "Frame is too small.\n");
- return length;
- }
-
- /* Prepare the texture buffer and decompress function */
- ret = setup_texture(avctx, length);
+ ret = hap_parse_frame_header(avctx);
if (ret < 0)
return ret;
@@ -190,6 +324,27 @@ static int hap_decode(AVCodecContext *avctx, void *data,
return ret;
ff_thread_finish_setup(avctx);
+ /* Unpack the DXT texture */
+ if (hap_can_use_tex_in_place(ctx)) {
+ /* Only DXTC texture compression in a contiguous block */
+ ctx->tex_data = ctx->gbc.buffer;
+ } else {
+ /* Perform the second-stage decompression */
+ ret = av_reallocp(&ctx->tex_buf, ctx->tex_size);
+ if (ret < 0)
+ return ret;
+
+ avctx->execute2(avctx, decompress_chunks_thread, NULL,
+ ctx->chunk_results, ctx->chunk_count);
+
+ for (i = 0; i < ctx->chunk_count; i++) {
+ if (ctx->chunk_results[i] < 0)
+ return ctx->chunk_results[i];
+ }
+
+ ctx->tex_data = ctx->tex_buf;
+ }
+
/* Use the decompress function on the texture, one block per thread */
avctx->execute2(avctx, decompress_texture_thread, tframe.f, NULL, ctx->slice_count);
@@ -254,7 +409,7 @@ static av_cold int hap_close(AVCodecContext *avctx)
{
HapContext *ctx = avctx->priv_data;
- av_freep(&ctx->snappied);
+ ff_hap_free_context(ctx);
return 0;
}
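As the new hap_parse_decode_instructions() and hap_can_use_tex_in_place() above show, a missing offset table is reconstructed by summing chunk sizes, and the packet buffer is only reused in place when every chunk is uncompressed and laid out back to back. A standalone sketch of that bookkeeping, using hypothetical ex_* names rather than the decoder's own types:

    #include <stddef.h>

    /* Illustrative compressor tags; not the values from hap.h. */
    enum { EX_COMP_NONE = 1, EX_COMP_SNAPPY = 2 };

    struct ex_chunk {
        int    compressor;
        size_t compressed_offset;
        size_t compressed_size;
    };

    /* No offset table: offsets are a running sum of preceding chunk sizes. */
    static void ex_fill_offsets(struct ex_chunk *chunks, int count)
    {
        size_t running = 0;
        for (int i = 0; i < count; i++) {
            chunks[i].compressed_offset = running;
            running += chunks[i].compressed_size;
        }
    }

    /* In-place use is possible only if every chunk is uncompressed and
     * stored contiguously, i.e. each offset equals the sum of the sizes
     * of the chunks before it. */
    static int ex_can_use_in_place(const struct ex_chunk *chunks, int count)
    {
        size_t expect = 0;
        for (int i = 0; i < count; i++) {
            if (chunks[i].compressed_offset != expect ||
                chunks[i].compressor != EX_COMP_NONE)
                return 0;
            expect += chunks[i].compressed_size;
        }
        return 1;
    }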