libavfilter/dnn: add layer maximum for native mode.
authorGuo, Yejun <yejun.guo@intel.com>
Fri, 20 Sep 2019 03:55:48 +0000 (11:55 +0800)
committerPedro Arthur <bygrandao@gmail.com>
Fri, 20 Sep 2019 13:57:18 +0000 (10:57 -0300)
The reason to add this layer is that it is used by srcnn in vf_sr.
This layer is currently ignored in native mode. After this patch,
we can add support for multiple outputs in native mode.

Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
Signed-off-by: Pedro Arthur <bygrandao@gmail.com>
libavfilter/dnn/Makefile
libavfilter/dnn/dnn_backend_native.c
libavfilter/dnn/dnn_backend_native.h
libavfilter/dnn/dnn_backend_native_layer_maximum.c [new file with mode: 0644]
libavfilter/dnn/dnn_backend_native_layer_maximum.h [new file with mode: 0644]
libavfilter/dnn/dnn_backend_tf.c
tools/python/convert_from_tensorflow.py
tools/python/convert_header.py

index 63a35e7dd53a9600e9c318b9943991f39ebcd577..721094ddc83c194260a48bab34eb55c12726bcfb 100644 (file)
@@ -3,6 +3,7 @@ OBJS-$(CONFIG_DNN)                           += dnn/dnn_backend_native.o
 OBJS-$(CONFIG_DNN)                           += dnn/dnn_backend_native_layer_pad.o
 OBJS-$(CONFIG_DNN)                           += dnn/dnn_backend_native_layer_conv2d.o
 OBJS-$(CONFIG_DNN)                           += dnn/dnn_backend_native_layer_depth2space.o
+OBJS-$(CONFIG_DNN)                           += dnn/dnn_backend_native_layer_maximum.o
 
 DNN-OBJS-$(CONFIG_LIBTENSORFLOW)             += dnn/dnn_backend_tf.o
 
index be548c6d467dbc4df063e0d9256f7819c87f3f72..22a9a33a34fbadb7a66c1aed3b593f941f05208d 100644 (file)
@@ -28,6 +28,7 @@
 #include "dnn_backend_native_layer_pad.h"
 #include "dnn_backend_native_layer_conv2d.h"
 #include "dnn_backend_native_layer_depth2space.h"
+#include "dnn_backend_native_layer_maximum.h"
 
 static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
 {
@@ -78,6 +79,7 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
     ConvolutionalParams *conv_params;
     DepthToSpaceParams *depth_to_space_params;
     LayerPadParams *pad_params;
+    DnnLayerMaximumParams *maximum_params;
 
     model = av_malloc(sizeof(DNNModel));
     if (!model){
@@ -237,6 +239,21 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
             network->layers[layer].type = MIRROR_PAD;
             network->layers[layer].params = pad_params;
             break;
+        case MAXIMUM:
+            maximum_params = av_malloc(sizeof(*maximum_params));
+            if (!maximum_params){
+                avio_closep(&model_file_context);
+                ff_dnn_free_model_native(&model);
+                return NULL;
+            }
+            maximum_params->val.u32 = avio_rl32(model_file_context);
+            dnn_size += 4;
+            network->layers[layer].type = MAXIMUM;
+            network->layers[layer].params = maximum_params;
+            network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
+            network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
+            dnn_size += 8;
+            break;
         default:
             avio_closep(&model_file_context);
             ff_dnn_free_model_native(&model);
@@ -290,6 +307,7 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
     ConvolutionalParams *conv_params;
     DepthToSpaceParams *depth_to_space_params;
     LayerPadParams *pad_params;
+    DnnLayerMaximumParams *maximum_params;
 
     if (network->layers_num <= 0 || network->operands_num <= 0)
         return DNN_ERROR;
@@ -313,6 +331,11 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
             dnn_execute_layer_pad(network->operands, network->layers[layer].input_operand_indexes,
                                   network->layers[layer].output_operand_index, pad_params);
             break;
+        case MAXIMUM:
+            maximum_params = (DnnLayerMaximumParams *)network->layers[layer].params;
+            dnn_execute_layer_maximum(network->operands, network->layers[layer].input_operand_indexes,
+                                  network->layers[layer].output_operand_index, maximum_params);
+            break;
         case INPUT:
             return DNN_ERROR;
         }
@@ -333,10 +356,19 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
     return DNN_SUCCESS;
 }
 
-int32_t calculate_operand_data_length(DnnOperand* operand)
+int32_t calculate_operand_dims_count(const DnnOperand *oprd)
+{
+    int32_t result = 1;
+    for (int i = 0; i < 4; ++i)
+        result *= oprd->dims[i];
+
+    return result;
+}
+
+int32_t calculate_operand_data_length(const DnnOperand* oprd)
 {
     // currently, we just support DNN_FLOAT
-    return operand->dims[0] * operand->dims[1] * operand->dims[2] * operand->dims[3] * sizeof(float);
+    return oprd->dims[0] * oprd->dims[1] * oprd->dims[2] * oprd->dims[3] * sizeof(float);
 }
 
 void ff_dnn_free_model_native(DNNModel **model)
index a74d1381c22e6f9c66b7aeeee4e98cc854aaae93..b238d18dd8aa99517a1281c1a996ec254a30668f 100644 (file)
@@ -30,7 +30,7 @@
 #include "../dnn_interface.h"
 #include "libavformat/avio.h"
 
-typedef enum {INPUT, CONV, DEPTH_TO_SPACE, MIRROR_PAD} DNNLayerType;
+typedef enum {INPUT = 0, CONV = 1, DEPTH_TO_SPACE = 2, MIRROR_PAD = 3, MAXIMUM = 4} DNNLayerType;
 
 typedef enum {DOT_INPUT = 1, DOT_OUTPUT = 2, DOT_INTERMEDIATE = DOT_INPUT | DOT_INPUT} DNNOperandType;
 
@@ -104,6 +104,6 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
 
 void ff_dnn_free_model_native(DNNModel **model);
 
-int32_t calculate_operand_data_length(DnnOperand *operand);
-
+int32_t calculate_operand_data_length(const DnnOperand *oprd);
+int32_t calculate_operand_dims_count(const DnnOperand *oprd);
 #endif
diff --git a/libavfilter/dnn/dnn_backend_native_layer_maximum.c b/libavfilter/dnn/dnn_backend_native_layer_maximum.c
new file mode 100644 (file)
index 0000000..a2669af
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2019 Guo Yejun
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * DNN native backend implementation of the maximum layer.
+ */
+
+#include "dnn_backend_native.h"
+#include "libavutil/avassert.h"
+#include "dnn_backend_native_layer_maximum.h"
+
+int dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const DnnLayerMaximumParams *params)
+{
+    const DnnOperand *input = &operands[input_operand_indexes[0]];
+    DnnOperand *output = &operands[output_operand_index];
+    int dims_count;
+    const float *src;
+    float *dst;
+
+    for (int i = 0; i < 4; ++i)
+        output->dims[i] = input->dims[i];
+
+    output->data_type = input->data_type;
+    output->length = calculate_operand_data_length(output);
+    output->data = av_realloc(output->data, output->length);
+    if (!output->data)
+        return DNN_ERROR;
+
+    dims_count = calculate_operand_dims_count(output);
+    src = input->data;
+    dst = output->data;
+    for (int i = 0; i < dims_count; ++i)
+        dst[i] = FFMAX(src[i], params->val.y);
+
+    return 0;
+}
diff --git a/libavfilter/dnn/dnn_backend_native_layer_maximum.h b/libavfilter/dnn/dnn_backend_native_layer_maximum.h
new file mode 100644 (file)
index 0000000..6396e58
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2019 Guo Yejun
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * DNN inference functions interface for the maximum layer of the native backend.
+ */
+
+
+#ifndef AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYER_MAXIMUM_H
+#define AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYER_MAXIMUM_H
+
+#include "libavformat/avio.h"
+#include "dnn_backend_native.h"
+
+typedef struct DnnLayerMaximumParams{
+    union {
+        uint32_t u32;
+        float y;
+    }val;
+} DnnLayerMaximumParams;
+
+int dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const DnnLayerMaximumParams *params);
+
+#endif
index 8a3e40a751d78e2deb98cf71bf4c1896cbcd1ae0..612d2e0982889c44d40b5fb59109b8056743f614 100644 (file)
@@ -30,6 +30,7 @@
 #include "libavformat/avio.h"
 #include "libavutil/avassert.h"
 #include "dnn_backend_native_layer_pad.h"
+#include "dnn_backend_native_layer_maximum.h"
 
 #include <tensorflow/c/c_api.h>
 
@@ -401,6 +402,48 @@ static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
     return DNN_SUCCESS;
 }
 
+static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
+                                       DnnLayerMaximumParams *params, const int layer)
+{
+    TF_Operation *op;
+    TF_Tensor *tensor;
+    TF_OperationDescription *op_desc;
+    TF_Output input;
+    float *y;
+
+    char name_buffer[NAME_BUFFER_SIZE];
+    snprintf(name_buffer, NAME_BUFFER_SIZE, "maximum/y%d", layer);
+
+    op_desc = TF_NewOperation(tf_model->graph, "Const", name_buffer);
+    TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
+    tensor = TF_AllocateTensor(TF_FLOAT, NULL, 0, TF_DataTypeSize(TF_FLOAT));
+    y = (float *)TF_TensorData(tensor);
+    *y = params->val.y;
+    TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
+    if (TF_GetCode(tf_model->status) != TF_OK){
+        return DNN_ERROR;
+    }
+    op = TF_FinishOperation(op_desc, tf_model->status);
+    if (TF_GetCode(tf_model->status) != TF_OK){
+        return DNN_ERROR;
+    }
+
+    snprintf(name_buffer, NAME_BUFFER_SIZE, "maximum%d", layer);
+    op_desc = TF_NewOperation(tf_model->graph, "Maximum", name_buffer);
+    input.oper = *cur_op;
+    input.index = 0;
+    TF_AddInput(op_desc, input);
+    input.oper = op;
+    TF_AddInput(op_desc, input);
+    TF_SetAttrType(op_desc, "T", TF_FLOAT);
+    *cur_op = TF_FinishOperation(op_desc, tf_model->status);
+    if (TF_GetCode(tf_model->status) != TF_OK){
+        return DNN_ERROR;
+    }
+
+    return DNN_SUCCESS;
+}
+
 static DNNReturnType load_native_model(TFModel *tf_model, const char *model_filename)
 {
     int32_t layer;
@@ -471,6 +514,10 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
             layer_add_res = add_pad_layer(tf_model, &op,
                                           (LayerPadParams *)conv_network->layers[layer].params, layer);
             break;
+        case MAXIMUM:
+            layer_add_res = add_maximum_layer(tf_model, &op,
+                                          (DnnLayerMaximumParams *)conv_network->layers[layer].params, layer);
+            break;
         default:
             CLEANUP_ON_ERROR(tf_model);
         }
index 1437ad358d87f85e08ef1cd672f1850312cc6126..a663b3400400e606c45c6994fabf6581e455dc30 100644 (file)
@@ -70,7 +70,7 @@ class TFConverter:
         self.converted_nodes = set()
         self.conv2d_scope_names = set()
         self.conv2d_scopename_inputname_dict = {}
-        self.op2code = {'Conv2D':1, 'DepthToSpace':2, 'MirrorPad':3}
+        self.op2code = {'Conv2D':1, 'DepthToSpace':2, 'MirrorPad':3, 'Maximum':4}
         self.mirrorpad_mode = {'CONSTANT':0, 'REFLECT':1, 'SYMMETRIC':2}
         self.name_operand_dict = {}
 
@@ -200,6 +200,19 @@ class TFConverter:
         np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
 
 
+    def dump_maximum_to_file(self, node, f):
+        assert(node.op == 'Maximum')
+        self.layer_number = self.layer_number + 1
+        ynode = self.name_node_dict[node.input[1]]
+        y = ynode.attr['value'].tensor.float_val[0]
+        np.array([self.op2code[node.op]], dtype=np.uint32).tofile(f)
+        np.array([y], dtype=np.float32).tofile(f)
+        self.converted_nodes.add(node.name)
+        input_operand_index = self.add_operand(node.input[0], Operand.IOTYPE_INPUT)
+        output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
+        np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
+
+
     def dump_layers_to_file(self, f):
         for node in self.nodes:
             if node.name in self.converted_nodes:
@@ -216,6 +229,8 @@ class TFConverter:
                 self.dump_depth2space_to_file(node, f)
             elif node.op == 'MirrorPad':
                 self.dump_mirrorpad_to_file(node, f)
+            elif node.op == 'Maximum':
+                self.dump_maximum_to_file(node, f)
 
 
     def dump_operands_to_file(self, f):
index 6a7e4af0c9b3f1f976176505530b90ffc96c311b..3c2acd5b1599034d555f2964406fea4467aed8ee 100644 (file)
@@ -23,4 +23,4 @@ str = 'FFMPEGDNNNATIVE'
 major = 0
 
 # increase minor when we don't have to re-convert the model file
-minor = 1
+minor = 2