It can be tested with the model generated by the Python script below:

import tensorflow as tf
import numpy as np
import imageio

# read the input image and normalize it to [0, 1]
in_img = imageio.imread('input.jpeg')
in_img = in_img.astype(np.float32)/255.0
in_data = in_img[np.newaxis, :]

# asin of [0, 1] lies in [0, pi/2]; divide by pi/2 to map back into [0, 1]
x = tf.placeholder(tf.float32, shape=[1, None, None, 3], name='dnn_in')
x1 = tf.asin(x)
x2 = tf.divide(x1, 3.1416/2) # pi/2
y = tf.identity(x2, name='dnn_out')

sess = tf.Session()
sess.run(tf.global_variables_initializer())

graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ['dnn_out'])
tf.train.write_graph(graph_def, '.', 'image_process.pb', as_text=False)

print("image_process.pb generated, please use \
path_to_ffmpeg/tools/python/convert.py to generate image_process.model\n")

output = sess.run(y, feed_dict={x: in_data})
imageio.imsave("out.jpg", np.squeeze(output))
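
As a quick sanity check of the math the new DMUO_ASIN case is expected to
perform (element-wise asin, here followed by the same pi/2 scaling the test
model uses), a minimal NumPy-only sketch; the input shape and tolerance are
arbitrary choices for illustration:

import numpy as np

src = np.random.uniform(0.0, 1.0, size=(1, 8, 8, 3)).astype(np.float32)
# element-wise asin, matching the per-element loop added to the native backend
expected = np.arcsin(src)
# scale [0, pi/2] back into [0, 1], as tf.divide(x1, 3.1416/2) does above
normalized = expected / (np.pi / 2)
assert 0.0 <= normalized.min() and normalized.max() <= 1.0 + 1e-6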

Signed-off-by: Ting Fu <ting...@intel.com>
---
 libavfilter/dnn/dnn_backend_native_layer_mathunary.c | 4 ++++
 libavfilter/dnn/dnn_backend_native_layer_mathunary.h | 1 +
 tools/python/convert_from_tensorflow.py              | 2 +-
 tools/python/convert_header.py                       | 2 +-
 4 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_native_layer_mathunary.c b/libavfilter/dnn/dnn_backend_native_layer_mathunary.c
index 90fac6aa67..3a147c2b3c 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_mathunary.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_mathunary.c
@@ -92,6 +92,10 @@ int dnn_execute_layer_math_unary(DnnOperand *operands, const int32_t *input_oper
         for (int i = 0; i < dims_count; ++i)
             dst[i] = tan(src[i]);
         return 0;
+    case DMUO_ASIN:
+        for (int i = 0; i < dims_count; ++i)
+            dst[i] = asin(src[i]);
+        return 0;
     default:
         return -1;
     }
diff --git a/libavfilter/dnn/dnn_backend_native_layer_mathunary.h b/libavfilter/dnn/dnn_backend_native_layer_mathunary.h
index 40a9bb5fb8..1c25db5a42 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_mathunary.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_mathunary.h
@@ -34,6 +34,7 @@ typedef enum {
     DMUO_SIN = 1,
     DMUO_COS = 2,
     DMUO_TAN = 3,
+    DMUO_ASIN = 4,
     DMUO_COUNT
 } DNNMathUnaryOperation;
 
diff --git a/tools/python/convert_from_tensorflow.py b/tools/python/convert_from_tensorflow.py
index 9da6a43612..5e526e31ce 100644
--- a/tools/python/convert_from_tensorflow.py
+++ b/tools/python/convert_from_tensorflow.py
@@ -72,7 +72,7 @@ class TFConverter:
         self.conv2d_scopename_inputname_dict = {}
         self.op2code = {'Conv2D':1, 'DepthToSpace':2, 'MirrorPad':3, 'Maximum':4, 'MathBinary':5, 'MathUnary':6}
         self.mathbin2code = {'Sub':0, 'Add':1, 'Mul':2, 'RealDiv':3, 'Minimum':4}
-        self.mathun2code  = {'Abs':0, 'Sin':1, 'Cos':2, 'Tan':3}
+        self.mathun2code  = {'Abs':0, 'Sin':1, 'Cos':2, 'Tan':3, 'Asin':4}
         self.mirrorpad_mode = {'CONSTANT':0, 'REFLECT':1, 'SYMMETRIC':2}
         self.name_operand_dict = {}
 
diff --git a/tools/python/convert_header.py b/tools/python/convert_header.py
index b7fb0f797a..2b6afe8d13 100644
--- a/tools/python/convert_header.py
+++ b/tools/python/convert_header.py
@@ -23,4 +23,4 @@ str = 'FFMPEGDNNNATIVE'
 major = 1
 
 # increase minor when we don't have to re-convert the model file
-minor = 9
+minor = 10
-- 
2.17.1
