summaryrefslogtreecommitdiff
path: root/libavfilter/dnn
diff options
context:
space:
mode:
authorGuo, Yejun <yejun.guo@intel.com>2019-09-20 11:55:48 +0800
committerPedro Arthur <bygrandao@gmail.com>2019-09-20 10:57:18 -0300
commitb2683c66b215ee3b67628880b93f7371d21bc946 (patch)
tree09eb095217f31f5ff83ba1b6d07472e9f88a4423 /libavfilter/dnn
parentea673a0edb4b32cab54344faedb41bc3473730eb (diff)
libavfilter/dnn: add layer maximum for native mode.
The reason to add this layer is that it is used by srcnn in vf_sr. This layer is currently ignored in native mode. After this patch, we can add multiple outputs support for native mode. Signed-off-by: Guo, Yejun <yejun.guo@intel.com> Signed-off-by: Pedro Arthur <bygrandao@gmail.com>
Diffstat (limited to 'libavfilter/dnn')
-rw-r--r--libavfilter/dnn/Makefile1
-rw-r--r--libavfilter/dnn/dnn_backend_native.c36
-rw-r--r--libavfilter/dnn/dnn_backend_native.h6
-rw-r--r--libavfilter/dnn/dnn_backend_native_layer_maximum.c54
-rw-r--r--libavfilter/dnn/dnn_backend_native_layer_maximum.h42
-rw-r--r--libavfilter/dnn/dnn_backend_tf.c47
6 files changed, 181 insertions, 5 deletions
diff --git a/libavfilter/dnn/Makefile b/libavfilter/dnn/Makefile
index 63a35e7dd5..721094ddc8 100644
--- a/libavfilter/dnn/Makefile
+++ b/libavfilter/dnn/Makefile
@@ -3,6 +3,7 @@ OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_pad.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_conv2d.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_depth2space.o
+OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_maximum.o
DNN-OBJS-$(CONFIG_LIBTENSORFLOW) += dnn/dnn_backend_tf.o
diff --git a/libavfilter/dnn/dnn_backend_native.c b/libavfilter/dnn/dnn_backend_native.c
index be548c6d46..22a9a33a34 100644
--- a/libavfilter/dnn/dnn_backend_native.c
+++ b/libavfilter/dnn/dnn_backend_native.c
@@ -28,6 +28,7 @@
#include "dnn_backend_native_layer_pad.h"
#include "dnn_backend_native_layer_conv2d.h"
#include "dnn_backend_native_layer_depth2space.h"
+#include "dnn_backend_native_layer_maximum.h"
static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
{
@@ -78,6 +79,7 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
ConvolutionalParams *conv_params;
DepthToSpaceParams *depth_to_space_params;
LayerPadParams *pad_params;
+ DnnLayerMaximumParams *maximum_params;
model = av_malloc(sizeof(DNNModel));
if (!model){
@@ -237,6 +239,21 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
network->layers[layer].type = MIRROR_PAD;
network->layers[layer].params = pad_params;
break;
+ case MAXIMUM:
+ maximum_params = av_malloc(sizeof(*maximum_params));
+ if (!maximum_params){
+ avio_closep(&model_file_context);
+ ff_dnn_free_model_native(&model);
+ return NULL;
+ }
+ maximum_params->val.u32 = avio_rl32(model_file_context);
+ dnn_size += 4;
+ network->layers[layer].type = MAXIMUM;
+ network->layers[layer].params = maximum_params;
+ network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
+ network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
+ dnn_size += 8;
+ break;
default:
avio_closep(&model_file_context);
ff_dnn_free_model_native(&model);
@@ -290,6 +307,7 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
ConvolutionalParams *conv_params;
DepthToSpaceParams *depth_to_space_params;
LayerPadParams *pad_params;
+ DnnLayerMaximumParams *maximum_params;
if (network->layers_num <= 0 || network->operands_num <= 0)
return DNN_ERROR;
@@ -313,6 +331,11 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
dnn_execute_layer_pad(network->operands, network->layers[layer].input_operand_indexes,
network->layers[layer].output_operand_index, pad_params);
break;
+ case MAXIMUM:
+ maximum_params = (DnnLayerMaximumParams *)network->layers[layer].params;
+ dnn_execute_layer_maximum(network->operands, network->layers[layer].input_operand_indexes,
+ network->layers[layer].output_operand_index, maximum_params);
+ break;
case INPUT:
return DNN_ERROR;
}
@@ -333,10 +356,19 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
return DNN_SUCCESS;
}
-int32_t calculate_operand_data_length(DnnOperand* operand)
+int32_t calculate_operand_dims_count(const DnnOperand *oprd)
+{
+ int32_t result = 1;
+ for (int i = 0; i < 4; ++i)
+ result *= oprd->dims[i];
+
+ return result;
+}
+
+int32_t calculate_operand_data_length(const DnnOperand* oprd)
{
// currently, we just support DNN_FLOAT
- return operand->dims[0] * operand->dims[1] * operand->dims[2] * operand->dims[3] * sizeof(float);
+ return oprd->dims[0] * oprd->dims[1] * oprd->dims[2] * oprd->dims[3] * sizeof(float);
}
void ff_dnn_free_model_native(DNNModel **model)
diff --git a/libavfilter/dnn/dnn_backend_native.h b/libavfilter/dnn/dnn_backend_native.h
index a74d1381c2..b238d18dd8 100644
--- a/libavfilter/dnn/dnn_backend_native.h
+++ b/libavfilter/dnn/dnn_backend_native.h
@@ -30,7 +30,7 @@
#include "../dnn_interface.h"
#include "libavformat/avio.h"
-typedef enum {INPUT, CONV, DEPTH_TO_SPACE, MIRROR_PAD} DNNLayerType;
+typedef enum {INPUT = 0, CONV = 1, DEPTH_TO_SPACE = 2, MIRROR_PAD = 3, MAXIMUM = 4} DNNLayerType;
typedef enum {DOT_INPUT = 1, DOT_OUTPUT = 2, DOT_INTERMEDIATE = DOT_INPUT | DOT_INPUT} DNNOperandType;
@@ -104,6 +104,6 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
void ff_dnn_free_model_native(DNNModel **model);
-int32_t calculate_operand_data_length(DnnOperand *operand);
-
+int32_t calculate_operand_data_length(const DnnOperand *oprd);
+int32_t calculate_operand_dims_count(const DnnOperand *oprd);
#endif
diff --git a/libavfilter/dnn/dnn_backend_native_layer_maximum.c b/libavfilter/dnn/dnn_backend_native_layer_maximum.c
new file mode 100644
index 0000000000..a2669af794
--- /dev/null
+++ b/libavfilter/dnn/dnn_backend_native_layer_maximum.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2019 Guo Yejun
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * DNN native backend implementation.
+ */
+
+#include "dnn_backend_native.h"
+#include "libavutil/avassert.h"
+#include "dnn_backend_native_layer_maximum.h"
+
+int dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const DnnLayerMaximumParams *params)
+{
+ const DnnOperand *input = &operands[input_operand_indexes[0]];
+ DnnOperand *output = &operands[output_operand_index];
+ int dims_count;
+ const float *src;
+ float *dst;
+
+ for (int i = 0; i < 4; ++i)
+ output->dims[i] = input->dims[i];
+
+ output->data_type = input->data_type;
+ output->length = calculate_operand_data_length(output);
+ output->data = av_realloc(output->data, output->length);
+ if (!output->data)
+ return DNN_ERROR;
+
+ dims_count = calculate_operand_dims_count(output);
+ src = input->data;
+ dst = output->data;
+ for (int i = 0; i < dims_count; ++i)
+ dst[i] = FFMAX(src[i], params->val.y);
+
+ return 0;
+}
diff --git a/libavfilter/dnn/dnn_backend_native_layer_maximum.h b/libavfilter/dnn/dnn_backend_native_layer_maximum.h
new file mode 100644
index 0000000000..6396e5818c
--- /dev/null
+++ b/libavfilter/dnn/dnn_backend_native_layer_maximum.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2019 Guo Yejun
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * DNN inference functions interface for native backend.
+ */
+
+
+#ifndef AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYER_MAXIMUM_H
+#define AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYER_MAXIMUM_H
+
+#include "libavformat/avio.h"
+#include "dnn_backend_native.h"
+
typedef struct DnnLayerMaximumParams {
    // Constant operand of the Maximum op. The model file stores the raw
    // 32-bit pattern (u32); execution reads it as a float (y).
    union {
        uint32_t u32;
        float y;
    } val;
} DnnLayerMaximumParams;
+
+int dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const DnnLayerMaximumParams *params);
+
+#endif
diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 8a3e40a751..612d2e0982 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -30,6 +30,7 @@
#include "libavformat/avio.h"
#include "libavutil/avassert.h"
#include "dnn_backend_native_layer_pad.h"
+#include "dnn_backend_native_layer_maximum.h"
#include <tensorflow/c/c_api.h>
@@ -401,6 +402,48 @@ static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
return DNN_SUCCESS;
}
+static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
+ DnnLayerMaximumParams *params, const int layer)
+{
+ TF_Operation *op;
+ TF_Tensor *tensor;
+ TF_OperationDescription *op_desc;
+ TF_Output input;
+ float *y;
+
+ char name_buffer[NAME_BUFFER_SIZE];
+ snprintf(name_buffer, NAME_BUFFER_SIZE, "maximum/y%d", layer);
+
+ op_desc = TF_NewOperation(tf_model->graph, "Const", name_buffer);
+ TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
+ tensor = TF_AllocateTensor(TF_FLOAT, NULL, 0, TF_DataTypeSize(TF_FLOAT));
+ y = (float *)TF_TensorData(tensor);
+ *y = params->val.y;
+ TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
+ if (TF_GetCode(tf_model->status) != TF_OK){
+ return DNN_ERROR;
+ }
+ op = TF_FinishOperation(op_desc, tf_model->status);
+ if (TF_GetCode(tf_model->status) != TF_OK){
+ return DNN_ERROR;
+ }
+
+ snprintf(name_buffer, NAME_BUFFER_SIZE, "maximum%d", layer);
+ op_desc = TF_NewOperation(tf_model->graph, "Maximum", name_buffer);
+ input.oper = *cur_op;
+ input.index = 0;
+ TF_AddInput(op_desc, input);
+ input.oper = op;
+ TF_AddInput(op_desc, input);
+ TF_SetAttrType(op_desc, "T", TF_FLOAT);
+ *cur_op = TF_FinishOperation(op_desc, tf_model->status);
+ if (TF_GetCode(tf_model->status) != TF_OK){
+ return DNN_ERROR;
+ }
+
+ return DNN_SUCCESS;
+}
+
static DNNReturnType load_native_model(TFModel *tf_model, const char *model_filename)
{
int32_t layer;
@@ -471,6 +514,10 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
layer_add_res = add_pad_layer(tf_model, &op,
(LayerPadParams *)conv_network->layers[layer].params, layer);
break;
+ case MAXIMUM:
+ layer_add_res = add_maximum_layer(tf_model, &op,
+ (DnnLayerMaximumParams *)conv_network->layers[layer].params, layer);
+ break;
default:
CLEANUP_ON_ERROR(tf_model);
}