summaryrefslogtreecommitdiff
path: root/libavfilter/dnn
diff options
context:
space:
mode:
authorShubhanshu Saxena <shubhanshu.e01@gmail.com>2021-07-05 16:00:54 +0530
committerGuo Yejun <yejun.guo@intel.com>2021-07-11 20:12:27 +0800
commita4de605110cb19ea6cf9fc244028f0f37fb40fc0 (patch)
tree59ea70c4466451d5855ee4277f93569e6bd8963d /libavfilter/dnn
parent68cf14d2b1c0d9bad4da78058172d079136fbddc (diff)
lavfi/dnn_backend_tf: Add TFInferRequest and TFRequestItem
This commit introduces a typedef TFInferRequest to store execution parameters for a single call to the TensorFlow C API. This typedef is used in the TFRequestItem. Signed-off-by: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
Diffstat (limited to 'libavfilter/dnn')
-rw-r--r--libavfilter/dnn/dnn_backend_tf.c49
1 file changed, 49 insertions, 0 deletions
diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 8762211ebc..578748eb35 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -56,6 +56,23 @@ typedef struct TFModel{
Queue *inference_queue;
} TFModel;
/**
 * Stores execution parameters for a single
 * call to the TensorFlow C API.
 *
 * All members are owned by the request; tf_free_request() releases the
 * tensors with TF_DeleteTensor() and the arrays with av_freep().
 */
typedef struct TFInferRequest {
    TF_Output *tf_outputs;       // array of graph outputs; presumably passed to TF_SessionRun — confirm
    TF_Tensor **output_tensors;  // array of output tensors produced by the run
    TF_Output *tf_input;         // graph input fed for this request
    TF_Tensor *input_tensor;     // tensor holding the input data
} TFInferRequest;
+
/**
 * Pairs a TFInferRequest with the InferenceItem it executes.
 */
typedef struct TFRequestItem {
    TFInferRequest *infer_request;  // execution parameters for the TF C API call
    InferenceItem *inference;       // inference this request carries out
    // further properties will be added later for async
} TFRequestItem;
+
#define OFFSET(x) offsetof(TFContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
static const AVOption dnn_tensorflow_options[] = {
@@ -72,6 +89,38 @@ static void free_buffer(void *data, size_t length)
av_freep(&data);
}
+static void tf_free_request(TFInferRequest *request)
+{
+ if (!request)
+ return;
+ if (request->input_tensor) {
+ TF_DeleteTensor(request->input_tensor);
+ request->input_tensor = NULL;
+ }
+ av_freep(&request->tf_input);
+ av_freep(&request->tf_outputs);
+ if (request->output_tensors) {
+ int nb_output = sizeof(*request->output_tensors)/sizeof(request->output_tensors[0]);
+ for (uint32_t i = 0; i < nb_output; ++i) {
+ if (request->output_tensors[i]) {
+ TF_DeleteTensor(request->output_tensors[i]);
+ request->output_tensors[i] = NULL;
+ }
+ }
+ av_freep(&request->output_tensors);
+ }
+}
+
+static TFInferRequest *tf_create_inference_request(void)
+{
+ TFInferRequest *infer_request = av_malloc(sizeof(TFInferRequest));
+ infer_request->tf_outputs = NULL;
+ infer_request->tf_input = NULL;
+ infer_request->input_tensor = NULL;
+ infer_request->output_tensors = NULL;
+ return infer_request;
+}
+
static DNNReturnType extract_inference_from_task(TaskItem *task, Queue *inference_queue)
{
InferenceItem *inference = av_malloc(sizeof(*inference));