summaryrefslogtreecommitdiff
path: root/libavfilter/dnn/dnn_backend_native.c
diff options
context:
space:
mode:
authorGuo, Yejun <yejun.guo@intel.com>2019-10-09 22:08:18 +0800
committerPedro Arthur <bygrandao@gmail.com>2019-10-15 18:56:54 -0300
commit2558e62713ebc5f3ea22c1a28d8e9cf3249badaf (patch)
treec8b4dfe51ec30faa41f7327f15000d49c3f05c72 /libavfilter/dnn/dnn_backend_native.c
parent3fd5ac7e92049b4f31026acdb53a762289f71448 (diff)
avfilter/dnn: unify the layer load function in native mode
Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
Signed-off-by: Pedro Arthur <bygrandao@gmail.com>
Diffstat (limited to 'libavfilter/dnn/dnn_backend_native.c')
-rw-r--r--libavfilter/dnn/dnn_backend_native.c114
1 file changed, 12 insertions, 102 deletions
diff --git a/libavfilter/dnn/dnn_backend_native.c b/libavfilter/dnn/dnn_backend_native.c
index c8fb956dec..06b010d90e 100644
--- a/libavfilter/dnn/dnn_backend_native.c
+++ b/libavfilter/dnn/dnn_backend_native.c
@@ -25,10 +25,7 @@
#include "dnn_backend_native.h"
#include "libavutil/avassert.h"
-#include "dnn_backend_native_layer_pad.h"
#include "dnn_backend_native_layer_conv2d.h"
-#include "dnn_backend_native_layer_depth2space.h"
-#include "dnn_backend_native_layer_maximum.h"
#include "dnn_backend_native_layers.h"
static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
@@ -104,13 +101,9 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
int version, header_size, major_version_expected = 0;
ConvolutionalNetwork *network = NULL;
AVIOContext *model_file_context;
- int file_size, dnn_size, kernel_size, i;
+ int file_size, dnn_size, parsed_size;
int32_t layer;
DNNLayerType layer_type;
- ConvolutionalParams *conv_params;
- DepthToSpaceParams *depth_to_space_params;
- LayerPadParams *pad_params;
- DnnLayerMaximumParams *maximum_params;
model = av_malloc(sizeof(DNNModel));
if (!model){
@@ -189,104 +182,21 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
for (layer = 0; layer < network->layers_num; ++layer){
layer_type = (int32_t)avio_rl32(model_file_context);
dnn_size += 4;
+
+ if (layer_type >= DLT_COUNT) {
+ avio_closep(&model_file_context);
+ ff_dnn_free_model_native(&model);
+ return NULL;
+ }
+
network->layers[layer].type = layer_type;
- switch (layer_type){
- case DLT_CONV2D:
- conv_params = av_malloc(sizeof(ConvolutionalParams));
- if (!conv_params){
- avio_closep(&model_file_context);
- ff_dnn_free_model_native(&model);
- return NULL;
- }
- conv_params->dilation = (int32_t)avio_rl32(model_file_context);
- conv_params->padding_method = (int32_t)avio_rl32(model_file_context);
- conv_params->activation = (int32_t)avio_rl32(model_file_context);
- conv_params->input_num = (int32_t)avio_rl32(model_file_context);
- conv_params->output_num = (int32_t)avio_rl32(model_file_context);
- conv_params->kernel_size = (int32_t)avio_rl32(model_file_context);
- kernel_size = conv_params->input_num * conv_params->output_num *
- conv_params->kernel_size * conv_params->kernel_size;
- dnn_size += 24 + (kernel_size + conv_params->output_num << 2);
- if (dnn_size > file_size || conv_params->input_num <= 0 ||
- conv_params->output_num <= 0 || conv_params->kernel_size <= 0){
- avio_closep(&model_file_context);
- av_freep(&conv_params);
- ff_dnn_free_model_native(&model);
- return NULL;
- }
- conv_params->kernel = av_malloc(kernel_size * sizeof(float));
- conv_params->biases = av_malloc(conv_params->output_num * sizeof(float));
- if (!conv_params->kernel || !conv_params->biases){
- avio_closep(&model_file_context);
- av_freep(&conv_params->kernel);
- av_freep(&conv_params->biases);
- av_freep(&conv_params);
- ff_dnn_free_model_native(&model);
- return NULL;
- }
- for (i = 0; i < kernel_size; ++i){
- conv_params->kernel[i] = av_int2float(avio_rl32(model_file_context));
- }
- for (i = 0; i < conv_params->output_num; ++i){
- conv_params->biases[i] = av_int2float(avio_rl32(model_file_context));
- }
- network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
- network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
- dnn_size += 8;
- network->layers[layer].params = conv_params;
- break;
- case DLT_DEPTH_TO_SPACE:
- depth_to_space_params = av_malloc(sizeof(DepthToSpaceParams));
- if (!depth_to_space_params){
- avio_closep(&model_file_context);
- ff_dnn_free_model_native(&model);
- return NULL;
- }
- depth_to_space_params->block_size = (int32_t)avio_rl32(model_file_context);
- dnn_size += 4;
- network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
- network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
- dnn_size += 8;
- network->layers[layer].params = depth_to_space_params;
- break;
- case DLT_MIRROR_PAD:
- pad_params = av_malloc(sizeof(LayerPadParams));
- if (!pad_params){
- avio_closep(&model_file_context);
- ff_dnn_free_model_native(&model);
- return NULL;
- }
- pad_params->mode = (int32_t)avio_rl32(model_file_context);
- dnn_size += 4;
- for (i = 0; i < 4; ++i) {
- pad_params->paddings[i][0] = avio_rl32(model_file_context);
- pad_params->paddings[i][1] = avio_rl32(model_file_context);
- dnn_size += 8;
- }
- network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
- network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
- dnn_size += 8;
- network->layers[layer].params = pad_params;
- break;
- case DLT_MAXIMUM:
- maximum_params = av_malloc(sizeof(*maximum_params));
- if (!maximum_params){
- avio_closep(&model_file_context);
- ff_dnn_free_model_native(&model);
- return NULL;
- }
- maximum_params->val.u32 = avio_rl32(model_file_context);
- dnn_size += 4;
- network->layers[layer].params = maximum_params;
- network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
- network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
- dnn_size += 8;
- break;
- default:
+ parsed_size = layer_funcs[layer_type].pf_load(&network->layers[layer], model_file_context, file_size);
+ if (!parsed_size) {
avio_closep(&model_file_context);
ff_dnn_free_model_native(&model);
return NULL;
}
+ dnn_size += parsed_size;
}
for (int32_t i = 0; i < network->operands_num; ++i){
@@ -341,7 +251,7 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
for (layer = 0; layer < network->layers_num; ++layer){
DNNLayerType layer_type = network->layers[layer].type;
- layer_funcs[layer_type](network->operands,
+ layer_funcs[layer_type].pf_exec(network->operands,
network->layers[layer].input_operand_indexes,
network->layers[layer].output_operand_index,
network->layers[layer].params);