summaryrefslogtreecommitdiff
path: root/tests/dnn
diff options
context:
space:
mode:
authorXu Jun <xujunzz@sjtu.edu.cn>2020-09-06 20:28:53 +0800
committerGuo, Yejun <yejun.guo@intel.com>2020-09-09 14:24:36 +0800
commit3c7cad69f233252e5178f7732baa0da950d74bbd (patch)
tree139b5e492fbb0af4699d24e4c15acaaba30f11c0 /tests/dnn
parent235e01f5a0b6218590eff2377574046c684143e8 (diff)
dnn_backend_native_layer_conv2d.c: Add multithread function
Use pthread to multithread dnn_execute_layer_conv2d. Can be tested with command "./ffmpeg_g -i input.png -vf \ format=yuvj420p,dnn_processing=dnn_backend=native:model= \ espcn.model:input=x:output=y:options=conv2d_threads=23 \ -y sr_native.jpg -benchmark" before patch: utime=11.238s stime=0.005s rtime=11.248s after patch: utime=20.817s stime=0.047s rtime=1.051s on my 3900X 12c24t @4.2GHz About the increase of utime: CPU HyperThreading makes the number of logical cores twice that of physical cores, while the CPU's computing performance improves by less than double, and utime sums the runtime across all logical cores. As a result, using a thread count close to the number of logical cores roughly doubles utime, while reducing rtime to less than half on HyperThreading CPUs. Signed-off-by: Xu Jun <xujunzz@sjtu.edu.cn> Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
Diffstat (limited to 'tests/dnn')
-rw-r--r--tests/dnn/dnn-layer-conv2d-test.c14
1 file changed, 12 insertions, 2 deletions
diff --git a/tests/dnn/dnn-layer-conv2d-test.c b/tests/dnn/dnn-layer-conv2d-test.c
index 836839cc64..378a05eafc 100644
--- a/tests/dnn/dnn-layer-conv2d-test.c
+++ b/tests/dnn/dnn-layer-conv2d-test.c
@@ -25,6 +25,8 @@
#define EPSON 0.00001
+extern const AVClass dnn_native_class;
+
static int test_with_same_dilate(void)
{
// the input data and expected data are generated with below python code.
@@ -96,6 +98,10 @@ static int test_with_same_dilate(void)
};
float bias[2] = { -1.6574852, -0.72915393 };
+ NativeContext ctx;
+ ctx.class = &dnn_native_class;
+ ctx.options.conv2d_threads = 1;
+
params.activation = TANH;
params.has_bias = 1;
params.biases = bias;
@@ -114,7 +120,7 @@ static int test_with_same_dilate(void)
operands[1].data = NULL;
input_indexes[0] = 0;
- dnn_execute_layer_conv2d(operands, input_indexes, 1, &params, NULL);
+ dnn_execute_layer_conv2d(operands, input_indexes, 1, &params, &ctx);
output = operands[1].data;
for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {
@@ -196,6 +202,10 @@ static int test_with_valid(void)
};
float bias[2] = { -0.4773722, -0.19620377 };
+ NativeContext ctx;
+ ctx.class = &dnn_native_class;
+ ctx.options.conv2d_threads = 1;
+
params.activation = TANH;
params.has_bias = 1;
params.biases = bias;
@@ -214,7 +224,7 @@ static int test_with_valid(void)
operands[1].data = NULL;
input_indexes[0] = 0;
- dnn_execute_layer_conv2d(operands, input_indexes, 1, &params, NULL);
+ dnn_execute_layer_conv2d(operands, input_indexes, 1, &params, &ctx);
output = operands[1].data;
for (int i = 0; i < sizeof(expected_output) / sizeof(float); i++) {