summaryrefslogtreecommitdiff
path: root/libavfilter
diff options
context:
space:
mode:
authorMingyu Yin <mingyu.yin@intel.com>2020-09-22 15:11:09 +0800
committerGuo, Yejun <yejun.guo@intel.com>2020-09-29 14:19:55 +0800
commitad2546e3b33eabeeeeed7d1b1f5e804181e819b7 (patch)
treeda1715f0fb27398f8290b50ef70ef4985da14ad0 /libavfilter
parentadcdf0bc6057a99989a28bb3d1ba65e8b66eff3d (diff)
dnn/native: add native support for dense
Signed-off-by: Mingyu Yin <mingyu.yin@intel.com>
Diffstat (limited to 'libavfilter')
-rw-r--r--libavfilter/dnn/Makefile1
-rw-r--r--libavfilter/dnn/dnn_backend_native.h2
-rw-r--r--libavfilter/dnn/dnn_backend_native_layer_conv2d.h1
-rw-r--r--libavfilter/dnn/dnn_backend_native_layer_dense.c151
-rw-r--r--libavfilter/dnn/dnn_backend_native_layer_dense.h37
-rw-r--r--libavfilter/dnn/dnn_backend_native_layers.c2
6 files changed, 193 insertions, 1 deletion
diff --git a/libavfilter/dnn/Makefile b/libavfilter/dnn/Makefile
index ee08cc5243..b0b76301ec 100644
--- a/libavfilter/dnn/Makefile
+++ b/libavfilter/dnn/Makefile
@@ -3,6 +3,7 @@ OBJS-$(CONFIG_DNN) += dnn/dnn_io_proc.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layers.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_avgpool.o
+OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_dense.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_pad.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_conv2d.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_depth2space.o
diff --git a/libavfilter/dnn/dnn_backend_native.h b/libavfilter/dnn/dnn_backend_native.h
index 2f8d73fcf6..2d02c063d4 100644
--- a/libavfilter/dnn/dnn_backend_native.h
+++ b/libavfilter/dnn/dnn_backend_native.h
@@ -45,11 +45,13 @@ typedef enum {
DLT_MATH_BINARY = 5,
DLT_MATH_UNARY = 6,
DLT_AVG_POOL = 7,
+ DLT_DENSE = 8,
DLT_COUNT
} DNNLayerType;
typedef enum {DOT_INPUT = 1, DOT_OUTPUT = 2, DOT_INTERMEDIATE = DOT_INPUT | DOT_OUTPUT} DNNOperandType;
typedef enum {VALID, SAME, SAME_CLAMP_TO_EDGE} DNNPaddingParam;
+typedef enum {RELU, TANH, SIGMOID, NONE, LEAKY_RELU} DNNActivationFunc;
typedef struct Layer{
DNNLayerType type;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.h b/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
index 72319f2ebe..1295028c46 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
@@ -23,7 +23,6 @@
#include "dnn_backend_native.h"
-typedef enum {RELU, TANH, SIGMOID, NONE, LEAKY_RELU} DNNActivationFunc;
typedef struct ConvolutionalParams{
int32_t input_num, output_num, kernel_size;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_dense.c b/libavfilter/dnn/dnn_backend_native_layer_dense.c
new file mode 100644
index 0000000000..1029137792
--- /dev/null
+++ b/libavfilter/dnn/dnn_backend_native_layer_dense.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2020
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <limits.h>
+
+#include "libavutil/avassert.h"
+#include "dnn_backend_native_layer_dense.h"
+
+/**
+ * Parse a dense (fully connected) layer from a native model file.
+ *
+ * Reads activation, input_num, output_num and has_bias, then the kernel
+ * weights and optional biases, then the input/output operand indexes.
+ *
+ * @return number of bytes consumed from the model file, 0 on failure
+ */
+int dnn_load_layer_dense(Layer *layer, AVIOContext *model_file_context, int file_size, int operands_num)
+{
+    DenseParams *dense_params;
+    int kernel_size;
+    int dnn_size = 0;
+    dense_params = av_malloc(sizeof(*dense_params));
+    if (!dense_params)
+        return 0;
+
+    dense_params->activation = (int32_t)avio_rl32(model_file_context);
+    dense_params->input_num = (int32_t)avio_rl32(model_file_context);
+    dense_params->output_num = (int32_t)avio_rl32(model_file_context);
+    dense_params->has_bias = (int32_t)avio_rl32(model_file_context);
+    dnn_size += 16;
+
+    /* Validate the dimensions BEFORE multiplying them: they come from an
+     * untrusted file, and input_num * output_num on unchecked values is
+     * signed-overflow UB. The INT_MAX/16 bound also keeps the dnn_size
+     * byte accounting below overflow-free. */
+    if (dense_params->input_num <= 0 || dense_params->output_num <= 0 ||
+        dense_params->input_num > INT_MAX / 16 / dense_params->output_num) {
+        av_freep(&dense_params);
+        return 0;
+    }
+
+    kernel_size = dense_params->input_num * dense_params->output_num;
+    dnn_size += kernel_size * 4;
+    if (dense_params->has_bias)
+        dnn_size += dense_params->output_num * 4;
+
+    if (dnn_size > file_size) {
+        av_freep(&dense_params);
+        return 0;
+    }
+
+    dense_params->kernel = av_malloc(kernel_size * sizeof(float));
+    if (!dense_params->kernel) {
+        av_freep(&dense_params);
+        return 0;
+    }
+    for (int i = 0; i < kernel_size; ++i) {
+        dense_params->kernel[i] = av_int2float(avio_rl32(model_file_context));
+    }
+
+    dense_params->biases = NULL;
+    if (dense_params->has_bias) {
+        dense_params->biases = av_malloc(dense_params->output_num * sizeof(float));
+        if (!dense_params->biases){
+            av_freep(&dense_params->kernel);
+            av_freep(&dense_params);
+            return 0;
+        }
+        for (int i = 0; i < dense_params->output_num; ++i){
+            dense_params->biases[i] = av_int2float(avio_rl32(model_file_context));
+        }
+    }
+
+    /* NOTE(review): from here on dense_params is owned by the layer; on the
+     * operand-index failure below it is assumed the model's free path
+     * releases layer->params — verify against the caller. */
+    layer->params = dense_params;
+
+    layer->input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
+    layer->output_operand_index = (int32_t)avio_rl32(model_file_context);
+    dnn_size += 8;
+
+    if (layer->input_operand_indexes[0] >= operands_num || layer->output_operand_index >= operands_num) {
+        return 0;
+    }
+
+    return dnn_size;
+}
+
+/* Apply a dense (fully connected) layer to every pixel of the input operand,
+ * mapping input_num channels to output_num channels, then run the configured
+ * activation. Returns 0 on success, DNN_ERROR on allocation/length failure. */
+int dnn_execute_layer_dense(DnnOperand *operands, const int32_t *input_operand_indexes,
+                            int32_t output_operand_index, const void *parameters, NativeContext *ctx)
+{
+    const DenseParams *params = parameters;
+    int32_t in_index = input_operand_indexes[0];
+    const float *src = operands[in_index].data;
+    int batch = operands[in_index].dims[0];
+    int rows  = operands[in_index].dims[1];
+    int cols  = operands[in_index].dims[2];
+    int depth = operands[in_index].dims[3];
+    int src_linesize = cols * depth;
+    DnnOperand *out = &operands[output_operand_index];
+    float *dst;
+
+    /* The output keeps the input's spatial layout; only the channel
+     * dimension changes, to the layer's output_num. */
+    out->dims[0] = batch;
+    out->dims[1] = rows;
+    out->dims[2] = cols;
+    out->dims[3] = params->output_num;
+    out->data_type = operands[in_index].data_type;
+    out->length = calculate_operand_data_length(out);
+    if (out->length <= 0) {
+        av_log(ctx, AV_LOG_ERROR, "The output data length overflow\n");
+        return DNN_ERROR;
+    }
+    out->data = av_realloc(out->data, out->length);
+    if (!out->data) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to reallocate memory for output\n");
+        return DNN_ERROR;
+    }
+    dst = out->data;
+
+    av_assert0(depth == params->input_num);
+
+    for (int row = 0; row < rows; ++row) {
+        for (int col = 0; col < cols; ++col) {
+            const float *pel = src + row * src_linesize + col * params->input_num;
+            for (int filter = 0; filter < params->output_num; ++filter) {
+                const float *weights = params->kernel + filter * params->input_num;
+                /* Bias (when present) seeds the accumulator, as in conv2d. */
+                float sum = params->has_bias ? params->biases[filter] : 0.f;
+
+                for (int ch = 0; ch < params->input_num; ++ch)
+                    sum += pel[ch] * weights[ch];
+
+                switch (params->activation){
+                case RELU:
+                    sum = FFMAX(sum, 0.0);
+                    break;
+                case TANH:
+                    sum = 2.0f / (1.0f + exp(-2.0f * sum)) - 1.0f;
+                    break;
+                case SIGMOID:
+                    sum = 1.0f / (1.0f + exp(-sum));
+                    break;
+                case NONE:
+                    break;
+                case LEAKY_RELU:
+                    sum = FFMAX(sum, 0.0) + 0.2 * FFMIN(sum, 0.0);
+                }
+                *dst++ = sum;
+            }
+        }
+    }
+    return 0;
+}
diff --git a/libavfilter/dnn/dnn_backend_native_layer_dense.h b/libavfilter/dnn/dnn_backend_native_layer_dense.h
new file mode 100644
index 0000000000..f98284b154
--- /dev/null
+++ b/libavfilter/dnn/dnn_backend_native_layer_dense.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYER_DENSE_H
+#define AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYER_DENSE_H
+
+#include "dnn_backend_native.h"
+
+/**
+ * Parameters of a dense (fully connected) layer, filled in by
+ * dnn_load_layer_dense() from a native model file.
+ */
+typedef struct DenseParams{
+    int32_t input_num, output_num;  /* per-pixel input/output channel counts */
+    DNNActivationFunc activation;   /* activation applied to every output value */
+    int32_t has_bias;               /* non-zero when the model supplies biases */
+    float *kernel;                  /* weights: output_num rows of input_num floats */
+    float *biases;                  /* output_num bias values; NULL when has_bias is 0 */
+} DenseParams;
+
+/* Parses one dense layer; returns bytes consumed from the file, 0 on error. */
+int dnn_load_layer_dense(Layer *layer, AVIOContext *model_file_context, int file_size, int operands_num);
+/* Runs the dense layer on operands[input_operand_indexes[0]], writing the
+ * result to operands[output_operand_index]; returns 0 on success. */
+int dnn_execute_layer_dense(DnnOperand *operands, const int32_t *input_operand_indexes,
+    int32_t output_operand_index, const void *parameters, NativeContext *ctx);
+#endif
diff --git a/libavfilter/dnn/dnn_backend_native_layers.c b/libavfilter/dnn/dnn_backend_native_layers.c
index 4f42f62abb..638a94e9a3 100644
--- a/libavfilter/dnn/dnn_backend_native_layers.c
+++ b/libavfilter/dnn/dnn_backend_native_layers.c
@@ -27,6 +27,7 @@
#include "dnn_backend_native_layer_mathbinary.h"
#include "dnn_backend_native_layer_mathunary.h"
#include "dnn_backend_native_layer_avgpool.h"
+#include "dnn_backend_native_layer_dense.h"
LayerFunc layer_funcs[DLT_COUNT] = {
{NULL, NULL},
@@ -37,4 +38,5 @@ LayerFunc layer_funcs[DLT_COUNT] = {
{dnn_execute_layer_math_binary, dnn_load_layer_math_binary},
{dnn_execute_layer_math_unary, dnn_load_layer_math_unary},
{dnn_execute_layer_avg_pool, dnn_load_layer_avg_pool},
+ {dnn_execute_layer_dense, dnn_load_layer_dense},
};