diff options
author | Guo, Yejun <yejun.guo@intel.com> | 2019-10-09 22:08:11 +0800 |
---|---|---|
committer | Pedro Arthur <bygrandao@gmail.com> | 2019-10-15 18:56:25 -0300 |
commit | 3fd5ac7e92049b4f31026acdb53a762289f71448 (patch) | |
tree | 49916c1eca3e45d745c85a10da232c0d981fb3dd /libavfilter/dnn/dnn_backend_native.c | |
parent | b78dc27bba2cc612643df7e9c84addc142273e71 (diff) |
avfilter/dnn: unify the layer execution function in native mode
Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
Signed-off-by: Pedro Arthur <bygrandao@gmail.com>
Diffstat (limited to 'libavfilter/dnn/dnn_backend_native.c')
-rw-r--r-- | libavfilter/dnn/dnn_backend_native.c | 34 |
1 file changed, 6 insertions, 28 deletions
diff --git a/libavfilter/dnn/dnn_backend_native.c b/libavfilter/dnn/dnn_backend_native.c index 97549d3077..c8fb956dec 100644 --- a/libavfilter/dnn/dnn_backend_native.c +++ b/libavfilter/dnn/dnn_backend_native.c @@ -29,6 +29,7 @@ #include "dnn_backend_native_layer_conv2d.h" #include "dnn_backend_native_layer_depth2space.h" #include "dnn_backend_native_layer_maximum.h" +#include "dnn_backend_native_layers.h" static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output) { @@ -331,10 +332,6 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output { ConvolutionalNetwork *network = (ConvolutionalNetwork *)model->model; int32_t layer; - ConvolutionalParams *conv_params; - DepthToSpaceParams *depth_to_space_params; - LayerPadParams *pad_params; - DnnLayerMaximumParams *maximum_params; uint32_t nb = FFMIN(nb_output, network->nb_output); if (network->layers_num <= 0 || network->operands_num <= 0) @@ -343,30 +340,11 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output return DNN_ERROR; for (layer = 0; layer < network->layers_num; ++layer){ - switch (network->layers[layer].type){ - case DLT_CONV2D: - conv_params = (ConvolutionalParams *)network->layers[layer].params; - convolve(network->operands, network->layers[layer].input_operand_indexes, - network->layers[layer].output_operand_index, conv_params); - break; - case DLT_DEPTH_TO_SPACE: - depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params; - depth_to_space(network->operands, network->layers[layer].input_operand_indexes, - network->layers[layer].output_operand_index, depth_to_space_params->block_size); - break; - case DLT_MIRROR_PAD: - pad_params = (LayerPadParams *)network->layers[layer].params; - dnn_execute_layer_pad(network->operands, network->layers[layer].input_operand_indexes, - network->layers[layer].output_operand_index, pad_params); - break; - case DLT_MAXIMUM: - maximum_params = (DnnLayerMaximumParams *)network->layers[layer].params; - dnn_execute_layer_maximum(network->operands, network->layers[layer].input_operand_indexes, - network->layers[layer].output_operand_index, maximum_params); - break; - case DLT_INPUT: - return DNN_ERROR; - } + DNNLayerType layer_type = network->layers[layer].type; + layer_funcs[layer_type](network->operands, + network->layers[layer].input_operand_indexes, + network->layers[layer].output_operand_index, + network->layers[layer].params); } for (uint32_t i = 0; i < nb; ++i) { |