summaryrefslogtreecommitdiff
path: root/libavfilter/dnn
diff options
context:
space:
mode:
authorGuo, Yejun <yejun.guo@intel.com>2019-09-05 14:00:28 +0800
committerPedro Arthur <bygrandao@gmail.com>2019-09-19 11:09:25 -0300
commit5f058dd693c4bebcd6a293da4630441f3540902f (patch)
tree4cf12f2fac688758369e55690afb434afc3c947d /libavfilter/dnn
parentc2ab998ff38fa11092ccb1c51ab0a1fe9c24ab09 (diff)
libavfilter/dnn: separate conv2d layer from dnn_backend_native.c to a new file
The logic is that each layer lives in its own separate source file, keeping the source files simple to maintain. Signed-off-by: Guo, Yejun <yejun.guo@intel.com> Signed-off-by: Pedro Arthur <bygrandao@gmail.com>
Diffstat (limited to 'libavfilter/dnn')
-rw-r--r--libavfilter/dnn/Makefile1
-rw-r--r--libavfilter/dnn/dnn_backend_native.c80
-rw-r--r--libavfilter/dnn/dnn_backend_native.h13
-rw-r--r--libavfilter/dnn/dnn_backend_native_layer_conv2d.c101
-rw-r--r--libavfilter/dnn/dnn_backend_native_layer_conv2d.h39
-rw-r--r--libavfilter/dnn/dnn_backend_tf.c1
6 files changed, 143 insertions, 92 deletions
diff --git a/libavfilter/dnn/Makefile b/libavfilter/dnn/Makefile
index 83938e5693..40b848b442 100644
--- a/libavfilter/dnn/Makefile
+++ b/libavfilter/dnn/Makefile
@@ -1,6 +1,7 @@
OBJS-$(CONFIG_DNN) += dnn/dnn_interface.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_pad.o
+OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_conv2d.o
DNN-OBJS-$(CONFIG_LIBTENSORFLOW) += dnn/dnn_backend_tf.o
diff --git a/libavfilter/dnn/dnn_backend_native.c b/libavfilter/dnn/dnn_backend_native.c
index f56cd81187..5dabd151c0 100644
--- a/libavfilter/dnn/dnn_backend_native.c
+++ b/libavfilter/dnn/dnn_backend_native.c
@@ -26,6 +26,7 @@
#include "dnn_backend_native.h"
#include "libavutil/avassert.h"
#include "dnn_backend_native_layer_pad.h"
+#include "dnn_backend_native_layer_conv2d.h"
static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
{
@@ -281,85 +282,6 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
return model;
}
-#define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? (w - 1) : (x)))
-
-static int convolve(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const ConvolutionalParams *conv_params)
-{
- float *output;
- int32_t input_operand_index = input_operand_indexes[0];
- int number = operands[input_operand_index].dims[0];
- int height = operands[input_operand_index].dims[1];
- int width = operands[input_operand_index].dims[2];
- int channel = operands[input_operand_index].dims[3];
- const float *input = operands[input_operand_index].data;
-
- int radius = conv_params->kernel_size >> 1;
- int src_linesize = width * conv_params->input_num;
- int filter_linesize = conv_params->kernel_size * conv_params->input_num;
- int filter_size = conv_params->kernel_size * filter_linesize;
- int pad_size = (conv_params->padding_method == VALID) ? (conv_params->kernel_size - 1) / 2 * conv_params->dilation : 0;
-
- DnnOperand *output_operand = &operands[output_operand_index];
- output_operand->dims[0] = number;
- output_operand->dims[1] = height - pad_size * 2;
- output_operand->dims[2] = width - pad_size * 2;
- output_operand->dims[3] = conv_params->output_num;
- output_operand->length = calculate_operand_data_length(output_operand);
- output_operand->data = av_realloc(output_operand->data, output_operand->length);
- if (!output_operand->data)
- return -1;
- output = output_operand->data;
-
- av_assert0(channel == conv_params->input_num);
-
- for (int y = pad_size; y < height - pad_size; ++y) {
- for (int x = pad_size; x < width - pad_size; ++x) {
- for (int n_filter = 0; n_filter < conv_params->output_num; ++n_filter) {
- output[n_filter] = conv_params->biases[n_filter];
-
- for (int ch = 0; ch < conv_params->input_num; ++ch) {
- for (int kernel_y = 0; kernel_y < conv_params->kernel_size; ++kernel_y) {
- for (int kernel_x = 0; kernel_x < conv_params->kernel_size; ++kernel_x) {
- float input_pel;
- if (conv_params->padding_method == SAME_CLAMP_TO_EDGE) {
- int y_pos = CLAMP_TO_EDGE(y + (kernel_y - radius) * conv_params->dilation, height);
- int x_pos = CLAMP_TO_EDGE(x + (kernel_x - radius) * conv_params->dilation, width);
- input_pel = input[y_pos * src_linesize + x_pos * conv_params->input_num + ch];
- } else {
- int y_pos = y + (kernel_y - radius) * conv_params->dilation;
- int x_pos = x + (kernel_x - radius) * conv_params->dilation;
- input_pel = (x_pos < 0 || x_pos >= width || y_pos < 0 || y_pos >= height) ? 0.0 :
- input[y_pos * src_linesize + x_pos * conv_params->input_num + ch];
- }
-
-
- output[n_filter] += input_pel * conv_params->kernel[n_filter * filter_size + kernel_y * filter_linesize +
- kernel_x * conv_params->input_num + ch];
- }
- }
- }
- switch (conv_params->activation){
- case RELU:
- output[n_filter] = FFMAX(output[n_filter], 0.0);
- break;
- case TANH:
- output[n_filter] = 2.0f / (1.0f + exp(-2.0f * output[n_filter])) - 1.0f;
- break;
- case SIGMOID:
- output[n_filter] = 1.0f / (1.0f + exp(-output[n_filter]));
- break;
- case NONE:
- break;
- case LEAKY_RELU:
- output[n_filter] = FFMAX(output[n_filter], 0.0) + 0.2 * FFMIN(output[n_filter], 0.0);
- }
- }
- output += conv_params->output_num;
- }
- }
- return 0;
-}
-
static int depth_to_space(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, int block_size)
{
float *output;
diff --git a/libavfilter/dnn/dnn_backend_native.h b/libavfilter/dnn/dnn_backend_native.h
index 08e7d15331..aa52222c77 100644
--- a/libavfilter/dnn/dnn_backend_native.h
+++ b/libavfilter/dnn/dnn_backend_native.h
@@ -32,10 +32,6 @@
typedef enum {INPUT, CONV, DEPTH_TO_SPACE, MIRROR_PAD} DNNLayerType;
-typedef enum {RELU, TANH, SIGMOID, NONE, LEAKY_RELU} DNNActivationFunc;
-
-typedef enum {VALID, SAME, SAME_CLAMP_TO_EDGE} DNNConvPaddingParam;
-
typedef enum {DOT_INPUT = 1, DOT_OUTPUT = 2, DOT_INTERMEDIATE = DOT_INPUT | DOT_INPUT} DNNOperandType;
typedef struct Layer{
@@ -90,15 +86,6 @@ typedef struct DnnOperand{
int32_t usedNumbersLeft;
}DnnOperand;
-typedef struct ConvolutionalParams{
- int32_t input_num, output_num, kernel_size;
- DNNActivationFunc activation;
- DNNConvPaddingParam padding_method;
- int32_t dilation;
- float *kernel;
- float *biases;
-} ConvolutionalParams;
-
typedef struct InputParams{
int height, width, channels;
} InputParams;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
new file mode 100644
index 0000000000..b13b4314ec
--- /dev/null
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2018 Sergey Lavrushkin
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "dnn_backend_native_layer_conv2d.h"
+
+#define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? (w - 1) : (x)))
+
+int convolve(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const ConvolutionalParams *conv_params)
+{
+ float *output;
+ int32_t input_operand_index = input_operand_indexes[0];
+ int number = operands[input_operand_index].dims[0];
+ int height = operands[input_operand_index].dims[1];
+ int width = operands[input_operand_index].dims[2];
+ int channel = operands[input_operand_index].dims[3];
+ const float *input = operands[input_operand_index].data;
+
+ int radius = conv_params->kernel_size >> 1;
+ int src_linesize = width * conv_params->input_num;
+ int filter_linesize = conv_params->kernel_size * conv_params->input_num;
+ int filter_size = conv_params->kernel_size * filter_linesize;
+ int pad_size = (conv_params->padding_method == VALID) ? (conv_params->kernel_size - 1) / 2 * conv_params->dilation : 0;
+
+ DnnOperand *output_operand = &operands[output_operand_index];
+ output_operand->dims[0] = number;
+ output_operand->dims[1] = height - pad_size * 2;
+ output_operand->dims[2] = width - pad_size * 2;
+ output_operand->dims[3] = conv_params->output_num;
+ output_operand->length = calculate_operand_data_length(output_operand);
+ output_operand->data = av_realloc(output_operand->data, output_operand->length);
+ if (!output_operand->data)
+ return -1;
+ output = output_operand->data;
+
+ av_assert0(channel == conv_params->input_num);
+
+ for (int y = pad_size; y < height - pad_size; ++y) {
+ for (int x = pad_size; x < width - pad_size; ++x) {
+ for (int n_filter = 0; n_filter < conv_params->output_num; ++n_filter) {
+ output[n_filter] = conv_params->biases[n_filter];
+
+ for (int ch = 0; ch < conv_params->input_num; ++ch) {
+ for (int kernel_y = 0; kernel_y < conv_params->kernel_size; ++kernel_y) {
+ for (int kernel_x = 0; kernel_x < conv_params->kernel_size; ++kernel_x) {
+ float input_pel;
+ if (conv_params->padding_method == SAME_CLAMP_TO_EDGE) {
+ int y_pos = CLAMP_TO_EDGE(y + (kernel_y - radius) * conv_params->dilation, height);
+ int x_pos = CLAMP_TO_EDGE(x + (kernel_x - radius) * conv_params->dilation, width);
+ input_pel = input[y_pos * src_linesize + x_pos * conv_params->input_num + ch];
+ } else {
+ int y_pos = y + (kernel_y - radius) * conv_params->dilation;
+ int x_pos = x + (kernel_x - radius) * conv_params->dilation;
+ input_pel = (x_pos < 0 || x_pos >= width || y_pos < 0 || y_pos >= height) ? 0.0 :
+ input[y_pos * src_linesize + x_pos * conv_params->input_num + ch];
+ }
+
+
+ output[n_filter] += input_pel * conv_params->kernel[n_filter * filter_size + kernel_y * filter_linesize +
+ kernel_x * conv_params->input_num + ch];
+ }
+ }
+ }
+ switch (conv_params->activation){
+ case RELU:
+ output[n_filter] = FFMAX(output[n_filter], 0.0);
+ break;
+ case TANH:
+ output[n_filter] = 2.0f / (1.0f + exp(-2.0f * output[n_filter])) - 1.0f;
+ break;
+ case SIGMOID:
+ output[n_filter] = 1.0f / (1.0f + exp(-output[n_filter]));
+ break;
+ case NONE:
+ break;
+ case LEAKY_RELU:
+ output[n_filter] = FFMAX(output[n_filter], 0.0) + 0.2 * FFMIN(output[n_filter], 0.0);
+ }
+ }
+ output += conv_params->output_num;
+ }
+ }
+ return 0;
+}
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.h b/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
new file mode 100644
index 0000000000..7ddfff38ba
--- /dev/null
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018 Sergey Lavrushkin
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYER_CONV2D_H
+#define AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYER_CONV2D_H
+
+#include "dnn_backend_native.h"
+
+typedef enum {RELU, TANH, SIGMOID, NONE, LEAKY_RELU} DNNActivationFunc;
+typedef enum {VALID, SAME, SAME_CLAMP_TO_EDGE} DNNConvPaddingParam;
+
+typedef struct ConvolutionalParams{
+ int32_t input_num, output_num, kernel_size;
+ DNNActivationFunc activation;
+ DNNConvPaddingParam padding_method;
+ int32_t dilation;
+ float *kernel;
+ float *biases;
+} ConvolutionalParams;
+
+int convolve(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const ConvolutionalParams *conv_params);
+#endif
diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 626fba9903..46dfa009cd 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -25,6 +25,7 @@
#include "dnn_backend_tf.h"
#include "dnn_backend_native.h"
+#include "dnn_backend_native_layer_conv2d.h"
#include "libavformat/avio.h"
#include "libavutil/avassert.h"
#include "dnn_backend_native_layer_pad.h"