diff options
author | Guo, Yejun <yejun.guo@intel.com> | 2019-04-25 10:14:17 +0800 |
---|---|---|
committer | Pedro Arthur <bygrandao@gmail.com> | 2019-05-08 12:33:00 -0300 |
commit | e2b92896c4ca609c851ea8c1a1bfd5d0918a5269 (patch) | |
tree | 098a919f164031cbc2011d5167a82cf8281fe70e /libavfilter/vf_sr.c | |
parent | 05f86f05bb5060492dd3ff22c23628e4e4334a1e (diff) |
libavfilter/dnn: determine dnn output during execute_model instead of set_input_output
Currently, within the set_input_output interface, the dims/memory of the TensorFlow
dnn model output are determined by executing the model with zero input.
However, the output dims may vary with different input data for networks
such as the object detection models faster-rcnn, ssd and yolo.
This patch moves the logic from set_input_output to execute_model, which
is suitable for all cases. Since the interface changed, dnn_backend_native
changes accordingly.
In vf_sr.c, the filter determines whether the model is srcnn or espcn by executing
the model with zero input, so execute_model now has to be called in config_props.
Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
Signed-off-by: Pedro Arthur <bygrandao@gmail.com>
Diffstat (limited to 'libavfilter/vf_sr.c')
-rw-r--r-- | libavfilter/vf_sr.c | 20 |
1 file changed, 17 insertions, 3 deletions
diff --git a/libavfilter/vf_sr.c b/libavfilter/vf_sr.c index 0c048e03a5..577b4fcb75 100644 --- a/libavfilter/vf_sr.c +++ b/libavfilter/vf_sr.c @@ -121,20 +121,31 @@ static int config_props(AVFilterLink *inlink) sr_context->input.height = inlink->h * sr_context->scale_factor; sr_context->input.channels = 1; - result = (sr_context->model->set_input_output)(sr_context->model->model, &sr_context->input, "x", &sr_context->output, "y"); + result = (sr_context->model->set_input_output)(sr_context->model->model, &sr_context->input, "x", "y"); if (result != DNN_SUCCESS){ av_log(context, AV_LOG_ERROR, "could not set input and output for the model\n"); return AVERROR(EIO); } + result = (sr_context->dnn_module->execute_model)(sr_context->model, &sr_context->output); + if (result != DNN_SUCCESS){ + av_log(context, AV_LOG_ERROR, "failed to execute loaded model\n"); + return AVERROR(EIO); + } + if (sr_context->input.height != sr_context->output.height || sr_context->input.width != sr_context->output.width){ sr_context->input.width = inlink->w; sr_context->input.height = inlink->h; - result = (sr_context->model->set_input_output)(sr_context->model->model, &sr_context->input, "x", &sr_context->output, "y"); + result = (sr_context->model->set_input_output)(sr_context->model->model, &sr_context->input, "x", "y"); if (result != DNN_SUCCESS){ av_log(context, AV_LOG_ERROR, "could not set input and output for the model\n"); return AVERROR(EIO); } + result = (sr_context->dnn_module->execute_model)(sr_context->model, &sr_context->output); + if (result != DNN_SUCCESS){ + av_log(context, AV_LOG_ERROR, "failed to execute loaded model\n"); + return AVERROR(EIO); + } sr_context->scale_factor = 0; } outlink->h = sr_context->output.height; @@ -245,7 +256,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) } av_frame_free(&in); - dnn_result = (sr_context->dnn_module->execute_model)(sr_context->model); + dnn_result = (sr_context->dnn_module->execute_model)(sr_context->model, 
&sr_context->output); if (dnn_result != DNN_SUCCESS){ av_log(context, AV_LOG_ERROR, "failed to execute loaded model\n"); return AVERROR(EIO); @@ -263,6 +274,9 @@ static av_cold void uninit(AVFilterContext *context) int i; SRContext *sr_context = context->priv; + if (sr_context->backend_type == DNN_TF) + av_freep(&sr_context->output.data); + if (sr_context->dnn_module){ (sr_context->dnn_module->free_model)(&sr_context->model); av_freep(&sr_context->dnn_module); |