summaryrefslogtreecommitdiff
path: root/libavfilter
diff options
context:
space:
mode:
authorGuo, Yejun <yejun.guo@intel.com>2019-10-21 20:38:03 +0800
committerPedro Arthur <bygrandao@gmail.com>2019-10-30 10:31:55 -0300
commitdff39ea9f0154ec52b7548b122a4a5332df3c2c6 (patch)
treea435c8d24d80bc9c38ecd4b93c51113c47acad05 /libavfilter
parenta269fa044b1364af1654456c33b7d45407822876 (diff)
dnn: add tf.nn.conv2d support for native model
Unlike other tf.*.conv2d layers, tf.nn.conv2d does not create many nodes (within a scope) in the graph; it just acts like other layers. tf.nn.conv2d only creates one node in the graph, and no internal nodes such as 'kernel' are created. The format of the native model file is also changed: a flag named has_bias is added, so the version number is changed. Signed-off-by: Guo, Yejun <yejun.guo@intel.com> Signed-off-by: Pedro Arthur <bygrandao@gmail.com>
Diffstat (limited to 'libavfilter')
-rw-r--r--libavfilter/dnn/dnn_backend_native.c2
-rw-r--r--libavfilter/dnn/dnn_backend_native_layer_conv2d.c37
-rw-r--r--libavfilter/dnn/dnn_backend_native_layer_conv2d.h1
3 files changed, 29 insertions, 11 deletions
diff --git a/libavfilter/dnn/dnn_backend_native.c b/libavfilter/dnn/dnn_backend_native.c
index 06b010d90e..ff280b5506 100644
--- a/libavfilter/dnn/dnn_backend_native.c
+++ b/libavfilter/dnn/dnn_backend_native.c
@@ -98,7 +98,7 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
char header_expected[] = "FFMPEGDNNNATIVE";
char *buf;
size_t size;
- int version, header_size, major_version_expected = 0;
+ int version, header_size, major_version_expected = 1;
ConvolutionalNetwork *network = NULL;
AVIOContext *model_file_context;
int file_size, dnn_size, parsed_size;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
index 0de890217d..6ec0fa7a99 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
@@ -38,27 +38,41 @@ int dnn_load_layer_conv2d(Layer *layer, AVIOContext *model_file_context, int fil
conv_params->input_num = (int32_t)avio_rl32(model_file_context);
conv_params->output_num = (int32_t)avio_rl32(model_file_context);
conv_params->kernel_size = (int32_t)avio_rl32(model_file_context);
+ conv_params->has_bias = (int32_t)avio_rl32(model_file_context);
+ dnn_size += 28;
+
kernel_size = conv_params->input_num * conv_params->output_num *
- conv_params->kernel_size * conv_params->kernel_size;
- dnn_size += 24 + (kernel_size + conv_params->output_num << 2);
+ conv_params->kernel_size * conv_params->kernel_size;
+ dnn_size += kernel_size * 4;
+ if (conv_params->has_bias)
+ dnn_size += conv_params->output_num * 4;
+
if (dnn_size > file_size || conv_params->input_num <= 0 ||
conv_params->output_num <= 0 || conv_params->kernel_size <= 0){
av_freep(&conv_params);
return 0;
}
+
conv_params->kernel = av_malloc(kernel_size * sizeof(float));
- conv_params->biases = av_malloc(conv_params->output_num * sizeof(float));
- if (!conv_params->kernel || !conv_params->biases){
- av_freep(&conv_params->kernel);
- av_freep(&conv_params->biases);
+ if (!conv_params->kernel) {
av_freep(&conv_params);
return 0;
}
- for (int i = 0; i < kernel_size; ++i){
+ for (int i = 0; i < kernel_size; ++i) {
conv_params->kernel[i] = av_int2float(avio_rl32(model_file_context));
}
- for (int i = 0; i < conv_params->output_num; ++i){
- conv_params->biases[i] = av_int2float(avio_rl32(model_file_context));
+
+ conv_params->biases = NULL;
+ if (conv_params->has_bias) {
+ conv_params->biases = av_malloc(conv_params->output_num * sizeof(float));
+ if (!conv_params->biases){
+ av_freep(&conv_params->kernel);
+ av_freep(&conv_params);
+ return 0;
+ }
+ for (int i = 0; i < conv_params->output_num; ++i){
+ conv_params->biases[i] = av_int2float(avio_rl32(model_file_context));
+ }
}
layer->params = conv_params;
@@ -103,7 +117,10 @@ int dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_operand_
for (int y = pad_size; y < height - pad_size; ++y) {
for (int x = pad_size; x < width - pad_size; ++x) {
for (int n_filter = 0; n_filter < conv_params->output_num; ++n_filter) {
- output[n_filter] = conv_params->biases[n_filter];
+ if (conv_params->has_bias)
+ output[n_filter] = conv_params->biases[n_filter];
+ else
+ output[n_filter] = 0.f;
for (int ch = 0; ch < conv_params->input_num; ++ch) {
for (int kernel_y = 0; kernel_y < conv_params->kernel_size; ++kernel_y) {
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.h b/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
index db90b2b6f6..bf872642dd 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
@@ -31,6 +31,7 @@ typedef struct ConvolutionalParams{
DNNActivationFunc activation;
DNNConvPaddingParam padding_method;
int32_t dilation;
+ int32_t has_bias;
float *kernel;
float *biases;
} ConvolutionalParams;