Re: [FFmpeg-devel] [PATCH 2/2] avformat/hls: use av_strncasecmp()

2021-05-06 Thread Steven Liu


> On May 7, 2021, at 9:23 AM, lance.lmw...@gmail.com wrote:
> 
> From: Limin Wang 
> 
> Signed-off-by: Limin Wang 
> ---
> libavformat/hls.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/libavformat/hls.c b/libavformat/hls.c
> index c7f9f06..9610b83 100644
> --- a/libavformat/hls.c
> +++ b/libavformat/hls.c
> @@ -799,7 +799,7 @@ static int parse_playlist(HLSContext *c, const char *url,
> key_type = KEY_AES_128;
> if (!strcmp(info.method, "SAMPLE-AES"))
> key_type = KEY_SAMPLE_AES;
> -if (!strncmp(info.iv, "0x", 2) || !strncmp(info.iv, "0X", 2)) {
> +if (!av_strncasecmp(info.iv, "0x", 2)) {
> ff_hex_to_data(iv, sizeof(iv), info.iv + 2);
> has_iv = 1;
> }
> -- 
> 1.8.3.1
> 
LGTM

Thanks

Steven Liu





[FFmpeg-devel] [PATCH 2/2] avcodec/ttmlenc: Mark encoder as init-threadsafe

2021-05-06 Thread Andreas Rheinhardt
Signed-off-by: Andreas Rheinhardt 
---
This of course supersedes
https://ffmpeg.org/pipermail/ffmpeg-devel/2021-May/279974.html

 libavcodec/ttmlenc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libavcodec/ttmlenc.c b/libavcodec/ttmlenc.c
index e274a92e04..5cab33cc60 100644
--- a/libavcodec/ttmlenc.c
+++ b/libavcodec/ttmlenc.c
@@ -392,5 +392,5 @@ const AVCodec ff_ttml_encoder = {
 .init   = ttml_encode_init,
 .encode_sub = ttml_encode_frame,
 .close  = ttml_encode_close,
-.caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
+.caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
 };
-- 
2.27.0



[FFmpeg-devel] [PATCH 1/2] avcodec/ttmlenc: Don't confuse capabilities and caps_internal

2021-05-06 Thread Andreas Rheinhardt
Signed-off-by: Andreas Rheinhardt 
---
 libavcodec/ttmlenc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libavcodec/ttmlenc.c b/libavcodec/ttmlenc.c
index 09f2657cd6..e274a92e04 100644
--- a/libavcodec/ttmlenc.c
+++ b/libavcodec/ttmlenc.c
@@ -392,5 +392,5 @@ const AVCodec ff_ttml_encoder = {
 .init   = ttml_encode_init,
 .encode_sub = ttml_encode_frame,
 .close  = ttml_encode_close,
-.capabilities   = FF_CODEC_CAP_INIT_CLEANUP,
+.caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
 };
-- 
2.27.0
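
For background, .capabilities is the public field and carries AV_CODEC_CAP_*
flags, while .caps_internal carries the internal FF_CODEC_CAP_* flags, which is
why the cleanup flag belongs in .caps_internal. A rough, illustrative fragment
(hypothetical encoder entry, not code from the tree):

    const AVCodec ff_example_encoder = {
        /* ... name, type, id, callbacks ... */
        .capabilities   = AV_CODEC_CAP_DELAY,        /* public AV_CODEC_CAP_* flags   */
        .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP, /* internal FF_CODEC_CAP_* flags */
    };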



Re: [FFmpeg-devel] [PATCH 1/2] avformat: add data_size for ff_hex_to_data()

2021-05-06 Thread lance . lmwang
On Thu, May 06, 2021 at 10:25:47PM -0300, James Almer wrote:
> On 5/6/2021 10:23 PM, lance.lmw...@gmail.com wrote:
> > From: Limin Wang 
> > 
> > This prevents OOM in case the data buffer size is insufficient.
> > 
> > Signed-off-by: Limin Wang 
> > ---
> >   libavfilter/dnn/dnn_backend_tf.c | 4 ++--
> >   libavformat/hls.c| 2 +-
> >   libavformat/internal.h   | 6 --
> >   libavformat/rtpdec_latm.c| 4 ++--
> >   libavformat/rtpdec_mpeg4.c   | 4 ++--
> >   libavformat/utils.c  | 7 +--
> >   6 files changed, 16 insertions(+), 11 deletions(-)
> > 
> > diff --git a/libavfilter/dnn/dnn_backend_tf.c 
> > b/libavfilter/dnn/dnn_backend_tf.c
> > index 03fe310..4eb5bec 100644
> > --- a/libavfilter/dnn/dnn_backend_tf.c
> > +++ b/libavfilter/dnn/dnn_backend_tf.c
> > @@ -219,14 +219,14 @@ static DNNReturnType load_tf_model(TFModel *tf_model, 
> > const char *model_filename
> >   return DNN_ERROR;
> >   }
> >   config = tf_model->ctx.options.sess_config + 2;
> > -sess_config_length = ff_hex_to_data(NULL, config);
> > +sess_config_length = ff_hex_to_data(NULL, 0, config);
> >   sess_config = av_mallocz(sess_config_length + 
> > AV_INPUT_BUFFER_PADDING_SIZE);
> >   if (!sess_config) {
> >   av_log(ctx, AV_LOG_ERROR, "failed to allocate memory\n");
> >   return DNN_ERROR;
> >   }
> > -ff_hex_to_data(sess_config, config);
> > +ff_hex_to_data(sess_config, sess_config_length, config);
> 
> When did this function start being used in lavfi? It's internal to lavf, it
> can't be accessed here.
> 
> Was this not tested with a shared build of the libraries?

Sorry, I hadn't tested that case, as I always build with static libraries.
I'll copy the function as a static helper into this file instead.
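
For illustration, such a local helper only needs the basic hex parsing and can
mirror the two-pass ff_hex_to_data() interface (NULL to query the length, then
a buffer plus its size to fill it). A minimal sketch, assuming
libavutil/avstring.h and libavutil/error.h are available; hypothetical code,
not the actual follow-up patch:

    #include <stdint.h>
    #include "libavutil/avstring.h"
    #include "libavutil/error.h"

    static int hex_to_data(uint8_t *data, int data_size, const char *p)
    {
        int len = 0;

        while (*p) {
            int c, v;

            /* skip anything that is not a hex digit */
            while (*p && !av_isxdigit(*p))
                p++;
            /* a full byte needs two hex digits */
            if (!av_isxdigit(p[0]) || !av_isxdigit(p[1]))
                break;
            c = av_toupper(p[0]);
            v = (av_isdigit(c) ? c - '0' : c - 'A' + 10) << 4;
            c = av_toupper(p[1]);
            v |= av_isdigit(c) ? c - '0' : c - 'A' + 10;
            p += 2;
            if (data) {
                if (len >= data_size)
                    return AVERROR(EINVAL); /* destination buffer too small */
                data[len] = v;
            }
            len++;
        }
        return len;
    }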


> 
> >   }
> >   graph_def = read_graph(model_filename);
> > diff --git a/libavformat/hls.c b/libavformat/hls.c
> > index 584f658..c7f9f06 100644
> > --- a/libavformat/hls.c
> > +++ b/libavformat/hls.c
> > @@ -800,7 +800,7 @@ static int parse_playlist(HLSContext *c, const char 
> > *url,
> >   if (!strcmp(info.method, "SAMPLE-AES"))
> >   key_type = KEY_SAMPLE_AES;
> >   if (!strncmp(info.iv, "0x", 2) || !strncmp(info.iv, "0X", 2)) 
> > {
> > -ff_hex_to_data(iv, info.iv + 2);
> > +ff_hex_to_data(iv, sizeof(iv), info.iv + 2);
> >   has_iv = 1;
> >   }
> >   av_strlcpy(key, info.uri, sizeof(key));
> > diff --git a/libavformat/internal.h b/libavformat/internal.h
> > index 7d0eab4..e0e625f 100644
> > --- a/libavformat/internal.h
> > +++ b/libavformat/internal.h
> > @@ -397,10 +397,12 @@ char *ff_data_to_hex(char *buf, const uint8_t *src, 
> > int size, int lowercase);
> >* digits is ignored.
> >*
> >* @param data if non-null, the parsed data is written to this pointer
> > + * @param data_size the data buffer size
> >* @param p the string to parse
> > - * @return the number of bytes written (or to be written, if data is null)
> > + * @return the number of bytes written (or to be written, if data is null),
> > + * or a negative value in case data buffer size is insufficient.
> >*/
> > -int ff_hex_to_data(uint8_t *data, const char *p);
> > +int ff_hex_to_data(uint8_t *data, int data_size, const char *p);
> >   /**
> >* Add packet to an AVFormatContext's packet_buffer list, determining its
> > diff --git a/libavformat/rtpdec_latm.c b/libavformat/rtpdec_latm.c
> > index 104a00a..c348cc8 100644
> > --- a/libavformat/rtpdec_latm.c
> > +++ b/libavformat/rtpdec_latm.c
> > @@ -91,7 +91,7 @@ static int latm_parse_packet(AVFormatContext *ctx, 
> > PayloadContext *data,
> >   static int parse_fmtp_config(AVStream *st, const char *value)
> >   {
> > -int len = ff_hex_to_data(NULL, value), i, ret = 0;
> > +int len = ff_hex_to_data(NULL, 0, value), i, ret = 0;
> >   GetBitContext gb;
> >   uint8_t *config;
> >   int audio_mux_version, same_time_framing, num_programs, num_layers;
> > @@ -100,7 +100,7 @@ static int parse_fmtp_config(AVStream *st, const char 
> > *value)
> >   config = av_mallocz(len + AV_INPUT_BUFFER_PADDING_SIZE);
> >   if (!config)
> >   return AVERROR(ENOMEM);
> > -ff_hex_to_data(config, value);
> > +ff_hex_to_data(config, len, value);
> >   init_get_bits(&gb, config, len*8);
> >   audio_mux_version = get_bits(&gb, 1);
> >   same_time_framing = get_bits(&gb, 1);
> > diff --git a/libavformat/rtpdec_mpeg4.c b/libavformat/rtpdec_mpeg4.c
> > index 34c7950..540192c 100644
> > --- a/libavformat/rtpdec_mpeg4.c
> > +++ b/libavformat/rtpdec_mpeg4.c
> > @@ -112,11 +112,11 @@ static void close_context(PayloadContext *data)
> >   static int parse_fmtp_config(AVCodecParameters *par, const char *value)
> >   {
> >   /* decode the hexa encoded parameter */
> > -int len = ff_hex_to_data(NULL, 

[FFmpeg-devel] [PATCH 2/2] avformat/hls: use av_strncasecmp()

2021-05-06 Thread lance . lmwang
From: Limin Wang 

Signed-off-by: Limin Wang 
---
 libavformat/hls.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libavformat/hls.c b/libavformat/hls.c
index c7f9f06..9610b83 100644
--- a/libavformat/hls.c
+++ b/libavformat/hls.c
@@ -799,7 +799,7 @@ static int parse_playlist(HLSContext *c, const char *url,
 key_type = KEY_AES_128;
 if (!strcmp(info.method, "SAMPLE-AES"))
 key_type = KEY_SAMPLE_AES;
-if (!strncmp(info.iv, "0x", 2) || !strncmp(info.iv, "0X", 2)) {
+if (!av_strncasecmp(info.iv, "0x", 2)) {
 ff_hex_to_data(iv, sizeof(iv), info.iv + 2);
 has_iv = 1;
 }
-- 
1.8.3.1
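
For reference, av_strncasecmp() is declared in libavutil/avstring.h and
compares at most n characters ignoring ASCII case, so the single call is
equivalent to the pair of strncmp() checks it replaces. A small illustrative
helper (hypothetical name, not part of the patch):

    #include <string.h>
    #include "libavutil/avstring.h"

    static int has_hex_prefix(const char *iv)
    {
        /* equivalent to: !strncmp(iv, "0x", 2) || !strncmp(iv, "0X", 2) */
        return !av_strncasecmp(iv, "0x", 2);
    }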



Re: [FFmpeg-devel] [PATCH 5/6] avfilter/dnn/dnn_backend_tf: simplify the code with ff_hex_to_data

2021-05-06 Thread James Almer

On 4/26/2021 7:48 AM, lance.lmw...@gmail.com wrote:

From: Limin Wang 

Please use tools/python/tf_sess_config.py to get the sess_config after this change.
Note that the byte order of the session config is now the normal order.

Signed-off-by: Limin Wang 
---
  libavfilter/dnn/dnn_backend_tf.c | 34 ++
  1 file changed, 6 insertions(+), 28 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index fb799d2..0084157 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -28,6 +28,7 @@
  #include "dnn_backend_native_layer_conv2d.h"
  #include "dnn_backend_native_layer_depth2space.h"
  #include "libavformat/avio.h"
+#include "libavformat/internal.h"
  #include "libavutil/avassert.h"
  #include "../internal.h"
  #include "dnn_backend_native_layer_pad.h"
@@ -202,35 +203,21 @@ static DNNReturnType load_tf_model(TFModel *tf_model, 
const char *model_filename
  TF_SessionOptions *sess_opts;
  const TF_Operation *init_op;
  uint8_t *sess_config = NULL;
-int sess_config_length = 0;
+int sess_config_length = ff_hex_to_data(NULL, 
tf_model->ctx.options.sess_config + 2);


This is a lavf internal function. You can't use it here.

Please revert this patch, or make a copy of this function to be used in 
lavfi.


  
  // prepare the sess config data

  if (tf_model->ctx.options.sess_config != NULL) {
  /*
  tf_model->ctx.options.sess_config is hex to present the serialized 
proto
  required by TF_SetConfig below, so we need to first generate the 
serialized
-proto in a python script, the following is a script example to generate
-serialized proto which specifies one GPU, we can change the script to 
add
-more options.
-
-import tensorflow as tf
-gpu_options = tf.GPUOptions(visible_device_list='0')
-config = tf.ConfigProto(gpu_options=gpu_options)
-s = config.SerializeToString()
-b = ''.join("%02x" % int(ord(b)) for b in s[::-1])
-print('0x%s' % b)
-
-the script output looks like: 0xab...cd, and then pass 0xab...cd to 
sess_config.
+proto in a python script, tools/python/tf_sess_config.py is a script 
example
+to generate the configs of sess_config.
  */
-char tmp[3];
-tmp[2] = '\0';
-
  if (strncmp(tf_model->ctx.options.sess_config, "0x", 2) != 0) {
  av_log(ctx, AV_LOG_ERROR, "sess_config should start with '0x'\n");
  return DNN_ERROR;
  }
  
-sess_config_length = strlen(tf_model->ctx.options.sess_config);

  if (sess_config_length % 2 != 0) {
  av_log(ctx, AV_LOG_ERROR, "the length of sess_config is not even (%s), 
"
"please re-generate the config.\n",
@@ -238,21 +225,12 @@ static DNNReturnType load_tf_model(TFModel *tf_model, 
const char *model_filename
  return DNN_ERROR;
  }
  
-sess_config_length -= 2; //ignore the first '0x'

-sess_config_length /= 2; //get the data length in byte
-
-sess_config = av_malloc(sess_config_length);
+sess_config = av_mallocz(sess_config_length + 
AV_INPUT_BUFFER_PADDING_SIZE);
  if (!sess_config) {
  av_log(ctx, AV_LOG_ERROR, "failed to allocate memory\n");
  return DNN_ERROR;
  }
-
-for (int i = 0; i < sess_config_length; i++) {
-int index = 2 + (sess_config_length - 1 - i) * 2;
-tmp[0] = tf_model->ctx.options.sess_config[index];
-tmp[1] = tf_model->ctx.options.sess_config[index + 1];
-sess_config[i] = strtol(tmp, NULL, 16);
-}
+ff_hex_to_data(sess_config, tf_model->ctx.options.sess_config + 2);
  }
  
  graph_def = read_graph(model_filename);
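
For context, the bytes decoded from the hex string form the serialized
ConfigProto that is ultimately handed to the TensorFlow C API; the surrounding
flow looks roughly like this (a sketch based on the context lines above, not
the exact code in the tree):

    TF_Status *status = TF_NewStatus();
    TF_SessionOptions *sess_opts = TF_NewSessionOptions();

    if (sess_config) {
        /* sess_config / sess_config_length come from the parsed hex string */
        TF_SetConfig(sess_opts, sess_config, sess_config_length, status);
        if (TF_GetCode(status) != TF_OK) {
            av_log(ctx, AV_LOG_ERROR, "failed to set session config\n");
            /* clean up status, options and sess_config, then return an error */
        }
    }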






Re: [FFmpeg-devel] [PATCH 1/2] avformat: add data_size for ff_hex_to_data()

2021-05-06 Thread James Almer

On 5/6/2021 10:23 PM, lance.lmw...@gmail.com wrote:

From: Limin Wang 

This prevents OOM in case the data buffer size is insufficient.

Signed-off-by: Limin Wang 
---
  libavfilter/dnn/dnn_backend_tf.c | 4 ++--
  libavformat/hls.c| 2 +-
  libavformat/internal.h   | 6 --
  libavformat/rtpdec_latm.c| 4 ++--
  libavformat/rtpdec_mpeg4.c   | 4 ++--
  libavformat/utils.c  | 7 +--
  6 files changed, 16 insertions(+), 11 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 03fe310..4eb5bec 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -219,14 +219,14 @@ static DNNReturnType load_tf_model(TFModel *tf_model, 
const char *model_filename
  return DNN_ERROR;
  }
  config = tf_model->ctx.options.sess_config + 2;
-sess_config_length = ff_hex_to_data(NULL, config);
+sess_config_length = ff_hex_to_data(NULL, 0, config);
  
  sess_config = av_mallocz(sess_config_length + AV_INPUT_BUFFER_PADDING_SIZE);

  if (!sess_config) {
  av_log(ctx, AV_LOG_ERROR, "failed to allocate memory\n");
  return DNN_ERROR;
  }
-ff_hex_to_data(sess_config, config);
+ff_hex_to_data(sess_config, sess_config_length, config);


When did this function start being used in lavfi? It's internal to lavf, 
it can't be accessed here.


Was this not tested with a shared build of the libraries?


  }
  
  graph_def = read_graph(model_filename);

diff --git a/libavformat/hls.c b/libavformat/hls.c
index 584f658..c7f9f06 100644
--- a/libavformat/hls.c
+++ b/libavformat/hls.c
@@ -800,7 +800,7 @@ static int parse_playlist(HLSContext *c, const char *url,
  if (!strcmp(info.method, "SAMPLE-AES"))
  key_type = KEY_SAMPLE_AES;
  if (!strncmp(info.iv, "0x", 2) || !strncmp(info.iv, "0X", 2)) {
-ff_hex_to_data(iv, info.iv + 2);
+ff_hex_to_data(iv, sizeof(iv), info.iv + 2);
  has_iv = 1;
  }
  av_strlcpy(key, info.uri, sizeof(key));
diff --git a/libavformat/internal.h b/libavformat/internal.h
index 7d0eab4..e0e625f 100644
--- a/libavformat/internal.h
+++ b/libavformat/internal.h
@@ -397,10 +397,12 @@ char *ff_data_to_hex(char *buf, const uint8_t *src, int 
size, int lowercase);
   * digits is ignored.
   *
   * @param data if non-null, the parsed data is written to this pointer
+ * @param data_size the data buffer size
   * @param p the string to parse
- * @return the number of bytes written (or to be written, if data is null)
+ * @return the number of bytes written (or to be written, if data is null),
+ * or a negative value in case data buffer size is insufficient.
   */
-int ff_hex_to_data(uint8_t *data, const char *p);
+int ff_hex_to_data(uint8_t *data, int data_size, const char *p);
  
  /**

   * Add packet to an AVFormatContext's packet_buffer list, determining its
diff --git a/libavformat/rtpdec_latm.c b/libavformat/rtpdec_latm.c
index 104a00a..c348cc8 100644
--- a/libavformat/rtpdec_latm.c
+++ b/libavformat/rtpdec_latm.c
@@ -91,7 +91,7 @@ static int latm_parse_packet(AVFormatContext *ctx, 
PayloadContext *data,
  
  static int parse_fmtp_config(AVStream *st, const char *value)

  {
-int len = ff_hex_to_data(NULL, value), i, ret = 0;
+int len = ff_hex_to_data(NULL, 0, value), i, ret = 0;
  GetBitContext gb;
  uint8_t *config;
  int audio_mux_version, same_time_framing, num_programs, num_layers;
@@ -100,7 +100,7 @@ static int parse_fmtp_config(AVStream *st, const char 
*value)
  config = av_mallocz(len + AV_INPUT_BUFFER_PADDING_SIZE);
  if (!config)
  return AVERROR(ENOMEM);
-ff_hex_to_data(config, value);
+ff_hex_to_data(config, len, value);
  init_get_bits(&gb, config, len*8);
  audio_mux_version = get_bits(&gb, 1);
  same_time_framing = get_bits(&gb, 1);
diff --git a/libavformat/rtpdec_mpeg4.c b/libavformat/rtpdec_mpeg4.c
index 34c7950..540192c 100644
--- a/libavformat/rtpdec_mpeg4.c
+++ b/libavformat/rtpdec_mpeg4.c
@@ -112,11 +112,11 @@ static void close_context(PayloadContext *data)
  static int parse_fmtp_config(AVCodecParameters *par, const char *value)
  {
  /* decode the hexa encoded parameter */
-int len = ff_hex_to_data(NULL, value), ret;
+int len = ff_hex_to_data(NULL, 0, value), ret;
  
  if ((ret = ff_alloc_extradata(par, len)) < 0)

  return ret;
-ff_hex_to_data(par->extradata, value);
+ff_hex_to_data(par->extradata, par->extradata_size, value);
  return 0;
  }
  
diff --git a/libavformat/utils.c b/libavformat/utils.c

index 6c8b974..7085c28 100644
--- a/libavformat/utils.c
+++ b/libavformat/utils.c
@@ -4762,7 +4762,7 @@ char *ff_data_to_hex(char *buff, const uint8_t *src, int 
s, int lowercase)
  return buff;
  }
  
-int ff_hex_to_data(uint8_t *data, const char *p)

+int 

[FFmpeg-devel] [PATCH 1/2] avformat: add data_size for ff_hex_to_data()

2021-05-06 Thread lance . lmwang
From: Limin Wang 

This prevents OOM in case the data buffer size is insufficient.

Signed-off-by: Limin Wang 
---
 libavfilter/dnn/dnn_backend_tf.c | 4 ++--
 libavformat/hls.c| 2 +-
 libavformat/internal.h   | 6 --
 libavformat/rtpdec_latm.c| 4 ++--
 libavformat/rtpdec_mpeg4.c   | 4 ++--
 libavformat/utils.c  | 7 +--
 6 files changed, 16 insertions(+), 11 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 03fe310..4eb5bec 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -219,14 +219,14 @@ static DNNReturnType load_tf_model(TFModel *tf_model, 
const char *model_filename
 return DNN_ERROR;
 }
 config = tf_model->ctx.options.sess_config + 2;
-sess_config_length = ff_hex_to_data(NULL, config);
+sess_config_length = ff_hex_to_data(NULL, 0, config);
 
 sess_config = av_mallocz(sess_config_length + 
AV_INPUT_BUFFER_PADDING_SIZE);
 if (!sess_config) {
 av_log(ctx, AV_LOG_ERROR, "failed to allocate memory\n");
 return DNN_ERROR;
 }
-ff_hex_to_data(sess_config, config);
+ff_hex_to_data(sess_config, sess_config_length, config);
 }
 
 graph_def = read_graph(model_filename);
diff --git a/libavformat/hls.c b/libavformat/hls.c
index 584f658..c7f9f06 100644
--- a/libavformat/hls.c
+++ b/libavformat/hls.c
@@ -800,7 +800,7 @@ static int parse_playlist(HLSContext *c, const char *url,
 if (!strcmp(info.method, "SAMPLE-AES"))
 key_type = KEY_SAMPLE_AES;
 if (!strncmp(info.iv, "0x", 2) || !strncmp(info.iv, "0X", 2)) {
-ff_hex_to_data(iv, info.iv + 2);
+ff_hex_to_data(iv, sizeof(iv), info.iv + 2);
 has_iv = 1;
 }
 av_strlcpy(key, info.uri, sizeof(key));
diff --git a/libavformat/internal.h b/libavformat/internal.h
index 7d0eab4..e0e625f 100644
--- a/libavformat/internal.h
+++ b/libavformat/internal.h
@@ -397,10 +397,12 @@ char *ff_data_to_hex(char *buf, const uint8_t *src, int 
size, int lowercase);
  * digits is ignored.
  *
  * @param data if non-null, the parsed data is written to this pointer
+ * @param data_size the data buffer size
  * @param p the string to parse
- * @return the number of bytes written (or to be written, if data is null)
+ * @return the number of bytes written (or to be written, if data is null),
+ * or a negative value in case data buffer size is insufficient.
  */
-int ff_hex_to_data(uint8_t *data, const char *p);
+int ff_hex_to_data(uint8_t *data, int data_size, const char *p);
 
 /**
  * Add packet to an AVFormatContext's packet_buffer list, determining its
diff --git a/libavformat/rtpdec_latm.c b/libavformat/rtpdec_latm.c
index 104a00a..c348cc8 100644
--- a/libavformat/rtpdec_latm.c
+++ b/libavformat/rtpdec_latm.c
@@ -91,7 +91,7 @@ static int latm_parse_packet(AVFormatContext *ctx, 
PayloadContext *data,
 
 static int parse_fmtp_config(AVStream *st, const char *value)
 {
-int len = ff_hex_to_data(NULL, value), i, ret = 0;
+int len = ff_hex_to_data(NULL, 0, value), i, ret = 0;
 GetBitContext gb;
 uint8_t *config;
 int audio_mux_version, same_time_framing, num_programs, num_layers;
@@ -100,7 +100,7 @@ static int parse_fmtp_config(AVStream *st, const char 
*value)
 config = av_mallocz(len + AV_INPUT_BUFFER_PADDING_SIZE);
 if (!config)
 return AVERROR(ENOMEM);
-ff_hex_to_data(config, value);
+ff_hex_to_data(config, len, value);
 init_get_bits(&gb, config, len*8);
 audio_mux_version = get_bits(&gb, 1);
 same_time_framing = get_bits(&gb, 1);
diff --git a/libavformat/rtpdec_mpeg4.c b/libavformat/rtpdec_mpeg4.c
index 34c7950..540192c 100644
--- a/libavformat/rtpdec_mpeg4.c
+++ b/libavformat/rtpdec_mpeg4.c
@@ -112,11 +112,11 @@ static void close_context(PayloadContext *data)
 static int parse_fmtp_config(AVCodecParameters *par, const char *value)
 {
 /* decode the hexa encoded parameter */
-int len = ff_hex_to_data(NULL, value), ret;
+int len = ff_hex_to_data(NULL, 0, value), ret;
 
 if ((ret = ff_alloc_extradata(par, len)) < 0)
 return ret;
-ff_hex_to_data(par->extradata, value);
+ff_hex_to_data(par->extradata, par->extradata_size, value);
 return 0;
 }
 
diff --git a/libavformat/utils.c b/libavformat/utils.c
index 6c8b974..7085c28 100644
--- a/libavformat/utils.c
+++ b/libavformat/utils.c
@@ -4762,7 +4762,7 @@ char *ff_data_to_hex(char *buff, const uint8_t *src, int 
s, int lowercase)
 return buff;
 }
 
-int ff_hex_to_data(uint8_t *data, const char *p)
+int ff_hex_to_data(uint8_t *data, int data_size, const char *p)
 {
 int c, len, v;
 
@@ -4781,8 +4781,11 @@ int ff_hex_to_data(uint8_t *data, const char *p)
 break;
 v = (v << 4) | c;
 if (v & 0x100) {
-if (data)
+if (data) {
+

[FFmpeg-devel] [PATCH] ffmpeg: return no chosen output if an uninitialized stream is unavailable

2021-05-06 Thread Jan Ekström
Otherwise the rate emulation logic in `transcode_step` never gets
hit, and the unavailability flag never gets reset, leading to an
eternal loop.

Fixes #9160
---
 fftools/ffmpeg.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c
index 3ad11452da..b3658d8f65 100644
--- a/fftools/ffmpeg.c
+++ b/fftools/ffmpeg.c
@@ -3958,7 +3958,7 @@ static OutputStream *choose_output(void)
 ost->st->index, ost->st->id, ost->initialized, 
ost->inputs_done, ost->finished);
 
 if (!ost->initialized && !ost->inputs_done)
-return ost;
+return ost->unavailable ? NULL : ost;
 
 if (!ost->finished && opts < opts_min) {
 opts_min = opts;
-- 
2.31.1



Re: [FFmpeg-devel] [PATCH] ffmpeg: return no chosen output if an uninitialized stream is unavailable

2021-05-06 Thread Jan Ekström
On Fri, May 7, 2021 at 12:22 AM Jan Ekström  wrote:
>
> Otherwise the rate emulation logic in `transcode_step` never gets
> hit, and the unavailability flag never gets reset, leading to an
> eternal loop.
>
> Fixes #9160

Sent this out as one way of dealing with this and to receive comments.
Alternatively the logic in transcode_step could be modified to be:

diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c
index b3658d8f65..4d93bf0e9b 100644
--- a/fftools/ffmpeg.c
+++ b/fftools/ffmpeg.c
@@ -4684,7 +4684,7 @@ static int transcode_step(void)
 int ret;

 ost = choose_output();
-if (!ost) {
+if (!ost || ost->unavailable) {
 if (got_eagain()) {
 reset_eagain();
 av_usleep(1);

Which I would expect to have a similar effect (have not tested yet).

Jan


Re: [FFmpeg-devel] [PATCH 3/5 v2] avformat: move AVStream.{parser, need_parsing} to AVStreamInternal

2021-05-06 Thread James Almer

On 5/6/2021 3:31 PM, Andreas Rheinhardt wrote:

James Almer:

Those are private fields, no reason to have them exposed in a public
header.

Signed-off-by: James Almer 
---
Now also porting the v4l2 outdev, which unfortunately requires an accessor.
If anyone with a v4l2 capable machine wants to check if not setting
need_parsing at all for h264 streams is an option, that'd be better.



Alternatively one could just move needs_parsing to the beginning of
AVStreamInternal.


That still ties the contents of AVStreamInternal to the ABI, even if a 
single field at the beginning, and the plan is to replace the 
AVCodecParserContext API altogether, which may include removing this 
field or even changing its semantics.


Once any change takes place, avpriv_stream_set_need_parsing() can be 
made into a no-op, or made to set a different field converting 
AVStreamParseType values as required.
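
As a rough illustration of the accessor being discussed, such an avpriv_
setter would simply forward to the internal field (hypothetical sketch; the
real signature and location may differ):

    void avpriv_stream_set_need_parsing(AVStream *st, enum AVStreamParseType type)
    {
        st->internal->need_parsing = type;
    }

Keeping this behind an avpriv_ symbol means libavdevice never sees the layout
of AVStreamInternal, which is what leaves room to move the field or change its
semantics later, as described above.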



Re: [FFmpeg-devel] [PATCH 3/5 v2] avformat: move AVStream.{parser, need_parsing} to AVStreamInternal

2021-05-06 Thread Andreas Rheinhardt
James Almer:
> Those are private fields, no reason to have them exposed in a public
> header.
> 
> Signed-off-by: James Almer 
> ---
> Now also porting the v4l2 outdev, which unfortunately requires an accessor.
> If anyone with a v4l2 capable machine wants to check if not setting
> need_parsing at all for h264 streams is an option, that'd be better.
> 

Alternatively one could just move needs_parsing to the beginning of
AVStreamInternal.

>  libavdevice/v4l2.c   |   2 +-
>  libavformat/aacdec.c |   2 +-
>  libavformat/aadec.c  |   6 +-
>  libavformat/acm.c|   2 +-
>  libavformat/asfdec_f.c   |  10 +--
>  libavformat/av1dec.c |   2 +-
>  libavformat/avformat.h   |   4 --
>  libavformat/avidec.c |  16 ++---
>  libavformat/dtshddec.c   |   2 +-
>  libavformat/electronicarts.c |   2 +-
>  libavformat/flacdec.c|   2 +-
>  libavformat/flvdec.c |   4 +-
>  libavformat/gxf.c|   6 +-
>  libavformat/img2dec.c|   4 +-
>  libavformat/internal.h   |   6 ++
>  libavformat/ipudec.c |   2 +-
>  libavformat/iv8.c|   2 +-
>  libavformat/ivfdec.c |   2 +-
>  libavformat/lmlm4.c  |   4 +-
>  libavformat/loasdec.c|   2 +-
>  libavformat/lxfdec.c |   2 +-
>  libavformat/matroskadec.c|   6 +-
>  libavformat/mgsts.c  |   2 +-
>  libavformat/mov.c|  18 +++---
>  libavformat/mp3dec.c |   2 +-
>  libavformat/mpeg.c   |   2 +-
>  libavformat/mpegts.c |  14 ++---
>  libavformat/msf.c|   2 +-
>  libavformat/mtv.c|   2 +-
>  libavformat/mxfdec.c |   8 +--
>  libavformat/ncdec.c  |   2 +-
>  libavformat/nsvdec.c |   4 +-
>  libavformat/nuv.c|   2 +-
>  libavformat/oggparseflac.c   |   2 +-
>  libavformat/oggparseogm.c|   4 +-
>  libavformat/oggparsetheora.c |   2 +-
>  libavformat/oggparsevp8.c|   2 +-
>  libavformat/omadec.c |   2 +-
>  libavformat/pva.c|   4 +-
>  libavformat/rawdec.c |   4 +-
>  libavformat/rmdec.c  |   8 +--
>  libavformat/rtpdec_asf.c |   4 +-
>  libavformat/rtsp.c   |   2 +-
>  libavformat/s337m.c  |   3 +-
>  libavformat/sdr2.c   |   2 +-
>  libavformat/segafilm.c   |   2 +-
>  libavformat/swfdec.c |   2 +-
>  libavformat/takdec.c |   2 +-
>  libavformat/ty.c |   4 +-
>  libavformat/utils.c  | 118 ++-
>  libavformat/wavdec.c |   6 +-
>  libavformat/wtvdec.c |   2 +-
>  libavformat/xvag.c   |   2 +-
>  libavformat/xwma.c   |   2 +-
>  54 files changed, 168 insertions(+), 159 deletions(-)
> 
> diff --git a/libavdevice/v4l2.c b/libavdevice/v4l2.c
> index a5149a9132..981b6207fb 100644
> --- a/libavdevice/v4l2.c
> +++ b/libavdevice/v4l2.c
> @@ -961,7 +961,7 @@ static int v4l2_read_header(AVFormatContext *ctx)
>  st->codecpar->codec_tag =
>  avcodec_pix_fmt_to_codec_tag(st->codecpar->format);
>  else if (codec_id == AV_CODEC_ID_H264) {
> -st->need_parsing = AVSTREAM_PARSE_FULL_ONCE;
> +avpriv_stream_set_need_parsing(st, AVSTREAM_PARSE_FULL_ONCE);
>  }
>  if (desired_format == V4L2_PIX_FMT_YVU420)
>  st->codecpar->codec_tag = MKTAG('Y', 'V', '1', '2');
> diff --git a/libavformat/aacdec.c b/libavformat/aacdec.c
> index ba468909e9..94e39f592f 100644
> --- a/libavformat/aacdec.c
> +++ b/libavformat/aacdec.c
> @@ -112,7 +112,7 @@ static int adts_aac_read_header(AVFormatContext *s)
>  
>  st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
>  st->codecpar->codec_id   = s->iformat->raw_codec_id;
> -st->need_parsing = AVSTREAM_PARSE_FULL_RAW;
> +st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
>  
>  ff_id3v1_read(s);
>  if ((s->pb->seekable & AVIO_SEEKABLE_NORMAL) &&
> diff --git a/libavformat/aadec.c b/libavformat/aadec.c
> index 21051d79b8..9fe24d5d53 100644
> --- a/libavformat/aadec.c
> +++ b/libavformat/aadec.c
> @@ -183,7 +183,7 @@ static int aa_read_header(AVFormatContext *s)
>  if (!strcmp(codec_name, "mp332")) {
>  st->codecpar->codec_id = AV_CODEC_ID_MP3;
>  st->codecpar->sample_rate = 22050;
> -st->need_parsing = AVSTREAM_PARSE_FULL_RAW;
> +st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
>  avpriv_set_pts_info(st, 64, 8, 32000 * TIMEPREC);
>  // encoded audio frame is MP3_FRAME_SIZE bytes (+1 with padding, 
> unlikely)
>  } else if (!strcmp(codec_name, "acelp85")) {
> @@ -192,7 +192,7 @@ static int aa_read_header(AVFormatContext *s)
>  st->codecpar->channels = 1;
>  st->codecpar->sample_rate = 8500;
>  st->codecpar->bit_rate = 8500;
> -st->need_parsing = AVSTREAM_PARSE_FULL_RAW;
> +st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
>  avpriv_set_pts_info(st, 64, 8, 8500 * 

Re: [FFmpeg-devel] [PATCH 1/5] avformat: move AVStream.last_IP_{pts, duration} to AVStreamInternal

2021-05-06 Thread James Almer

On 5/3/2021 10:31 AM, James Almer wrote:

Those are private fields, no reason to have them exposed in a public
header.

Signed-off-by: James Almer 
---
  libavformat/avformat.h |  2 --
  libavformat/internal.h |  3 +++
  libavformat/nutdec.c   |  2 +-
  libavformat/utils.c| 20 ++--
  4 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/libavformat/avformat.h b/libavformat/avformat.h
index 624d2dae2c..e62c6d1567 100644
--- a/libavformat/avformat.h
+++ b/libavformat/avformat.h
@@ -1002,8 +1002,6 @@ typedef struct AVStream {
   */
  int64_t first_dts;
  int64_t cur_dts;
-int64_t last_IP_pts;
-int last_IP_duration;
  
  /**

   * Number of packets to buffer for codec probing
diff --git a/libavformat/internal.h b/libavformat/internal.h
index 7d0eab44ac..10d8f8dfeb 100644
--- a/libavformat/internal.h
+++ b/libavformat/internal.h
@@ -365,6 +365,9 @@ struct AVStreamInternal {
   * last packet in packet_buffer for this stream when muxing.
   */
  struct PacketList *last_in_packet_buffer;
+
+int64_t last_IP_pts;
+int last_IP_duration;
  };
  
  #ifdef __GNUC__

diff --git a/libavformat/nutdec.c b/libavformat/nutdec.c
index 46f21ddd57..e709257135 100644
--- a/libavformat/nutdec.c
+++ b/libavformat/nutdec.c
@@ -1086,7 +1086,7 @@ static int decode_frame(NUTContext *nut, AVPacket *pkt, 
int frame_code)
  stc->skip_until_key_frame = 0;
  
  discard = s->streams[stream_id]->discard;

-last_IP_pts = s->streams[stream_id]->last_IP_pts;
+last_IP_pts = s->streams[stream_id]->internal->last_IP_pts;
  if ((discard >= AVDISCARD_NONKEY && !(stc->last_flags & FLAG_KEY)) ||
  (discard >= AVDISCARD_BIDIR  && last_IP_pts != AV_NOPTS_VALUE &&
   last_IP_pts > pts) ||
diff --git a/libavformat/utils.c b/libavformat/utils.c
index 49bf19b2b0..b5b0995f32 100644
--- a/libavformat/utils.c
+++ b/libavformat/utils.c
@@ -1285,28 +1285,28 @@ static void compute_pkt_fields(AVFormatContext *s, 
AVStream *st,
  /* DTS = decompression timestamp */
  /* PTS = presentation timestamp */
  if (pkt->dts == AV_NOPTS_VALUE)
-pkt->dts = st->last_IP_pts;
+pkt->dts = st->internal->last_IP_pts;
  update_initial_timestamps(s, pkt->stream_index, pkt->dts, 
pkt->pts, pkt);
  if (pkt->dts == AV_NOPTS_VALUE)
  pkt->dts = st->cur_dts;
  
  /* This is tricky: the dts must be incremented by the duration

   * of the frame we are displaying, i.e. the last I- or P-frame. */
-if (st->last_IP_duration == 0 && (uint64_t)pkt->duration <= 
INT32_MAX)
-st->last_IP_duration = pkt->duration;
+if (st->internal->last_IP_duration == 0 && (uint64_t)pkt->duration 
<= INT32_MAX)
+st->internal->last_IP_duration = pkt->duration;
  if (pkt->dts != AV_NOPTS_VALUE)
-st->cur_dts = av_sat_add64(pkt->dts, st->last_IP_duration);
+st->cur_dts = av_sat_add64(pkt->dts, 
st->internal->last_IP_duration);
  if (pkt->dts != AV_NOPTS_VALUE &&
  pkt->pts == AV_NOPTS_VALUE &&
-st->last_IP_duration > 0 &&
+st->internal->last_IP_duration > 0 &&
  ((uint64_t)st->cur_dts - (uint64_t)next_dts + 1) <= 2 &&
  next_dts != next_pts &&
  next_pts != AV_NOPTS_VALUE)
  pkt->pts = next_dts;
  
  if ((uint64_t)pkt->duration <= INT32_MAX)

-st->last_IP_duration = pkt->duration;
-st->last_IP_pts  = pkt->pts;
+st->internal->last_IP_duration = pkt->duration;
+st->internal->last_IP_pts  = pkt->pts;
  /* Cannot compute PTS if not present (we can compute it only
   * by knowing the future. */
  } else if (pkt->pts != AV_NOPTS_VALUE ||
@@ -1823,7 +1823,7 @@ void ff_read_frame_flush(AVFormatContext *s)
  av_parser_close(st->parser);
  st->parser = NULL;
  }
-st->last_IP_pts = AV_NOPTS_VALUE;
+st->internal->last_IP_pts = AV_NOPTS_VALUE;
  st->internal->last_dts_for_order_check = AV_NOPTS_VALUE;
  if (st->first_dts == AV_NOPTS_VALUE)
  st->cur_dts = RELATIVE_TS_BASE;
@@ -2840,7 +2840,7 @@ skip_duration_calc:
  
  st  = ic->streams[i];

  st->cur_dts = st->first_dts;
-st->last_IP_pts = AV_NOPTS_VALUE;
+st->internal->last_IP_pts = AV_NOPTS_VALUE;
  st->internal->last_dts_for_order_check = AV_NOPTS_VALUE;
  for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
  st->internal->pts_buffer[j] = AV_NOPTS_VALUE;
@@ -4423,7 +4423,7 @@ AVStream *avformat_new_stream(AVFormatContext *s, const 
AVCodec *c)
  st->internal->pts_wrap_reference = AV_NOPTS_VALUE;
  st->internal->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
 

[FFmpeg-devel] [PATCHv2] fate/integer.c: Connect test to fuzzer

2021-05-06 Thread Vedaa
Hi,

I have made the requisite changes. 
---
 Makefile  |  2 ++
 libavutil/tests/integer.c | 21 ++-
 libavutil/tests/integer.h | 43 +++
 tools/Makefile|  3 +++
 tools/target_int_fuzzer.c | 35 +++
 5 files changed, 85 insertions(+), 19 deletions(-)
 create mode 100644 libavutil/tests/integer.h
 create mode 100644 tools/target_int_fuzzer.c

diff --git a/Makefile b/Makefile
index 7e9d8b08c3..92fe8cac65 100644
--- a/Makefile
+++ b/Makefile
@@ -62,6 +62,8 @@ tools/target_dem_fuzzer$(EXESUF): tools/target_dem_fuzzer.o 
$(FF_DEP_LIBS)
 tools/target_io_dem_fuzzer$(EXESUF): tools/target_io_dem_fuzzer.o 
$(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) 
$(LIBFUZZER_PATH)
 
+tools/target_int_fuzzer$(EXESUF): tools/target_int_fuzzer.o $(FF_DEP_LIBS)
+   $(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) 
$(LIBFUZZER_PATH)
 
 tools/enum_options$(EXESUF): ELIBS = $(FF_EXTRALIBS)
 tools/enum_options$(EXESUF): $(FF_DEP_LIBS)
diff --git a/libavutil/tests/integer.c b/libavutil/tests/integer.c
index d2c8f2a903..1d28a3aa93 100644
--- a/libavutil/tests/integer.c
+++ b/libavutil/tests/integer.c
@@ -18,31 +18,14 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#include 
-
-#include "libavutil/avassert.h"
-#include "libavutil/integer.h"
-#include "libavutil/intmath.h"
+#include "libavutil/tests/integer.h"
 
 int main(void){
 int64_t a,b;
 
 for(a=7; a<256*256*256; a+=13215){
 for(b=3; b<256*256*256; b+=27118){
-AVInteger ai= av_int2i(a);
-AVInteger bi= av_int2i(b);
-
-av_assert0(av_i2int(ai) == a);
-av_assert0(av_i2int(bi) == b);
-av_assert0(av_i2int(av_add_i(ai,bi)) == a+b);
-av_assert0(av_i2int(av_sub_i(ai,bi)) == a-b);
-av_assert0(av_i2int(av_mul_i(ai,bi)) == a*b);
-av_assert0(av_i2int(av_shr_i(ai, 9)) == a>>9);
-av_assert0(av_i2int(av_shr_i(ai,-9)) == a<<9);
-av_assert0(av_i2int(av_shr_i(ai, 17)) == a>>17);
-av_assert0(av_i2int(av_shr_i(ai,-17)) == a<<17);
-av_assert0(av_log2_i(ai) == av_log2(a));
-av_assert0(av_i2int(av_div_i(ai,bi)) == a/b);
+TestInteger(a,b);
 }
 }
 return 0;
diff --git a/libavutil/tests/integer.h b/libavutil/tests/integer.h
new file mode 100644
index 00..4dbd239ac4
--- /dev/null
+++ b/libavutil/tests/integer.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2004 Michael Niedermayer 
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include 
+
+#include "libavutil/avassert.h"
+#include "libavutil/integer.h"
+#include "libavutil/intmath.h"
+
+static inline void TestInteger(int64_t a, int64_t b)
+{
+AVInteger ai= av_int2i(a);
+AVInteger bi= av_int2i(b);
+
+av_assert0(av_i2int(ai) == a);
+av_assert0(av_i2int(bi) == b);
+av_assert0(av_i2int(av_add_i(ai,bi)) == a+b);
+av_assert0(av_i2int(av_sub_i(ai,bi)) == a-b);
+av_assert0(av_i2int(av_mul_i(ai,bi)) == a*b);
+av_assert0(av_i2int(av_shr_i(ai, 9)) == a>>9);
+av_assert0(av_i2int(av_shr_i(ai,-9)) == a<<9);
+av_assert0(av_i2int(av_shr_i(ai, 17)) == a>>17);
+av_assert0(av_i2int(av_shr_i(ai,-17)) == a<<17);
+av_assert0(av_log2_i(ai) == av_log2(a));
+av_assert0(av_i2int(av_div_i(ai,bi)) == a/b);
+}
diff --git a/tools/Makefile b/tools/Makefile
index 82baa8eadb..fde7f08984 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -17,6 +17,9 @@ tools/target_dem_fuzzer.o: tools/target_dem_fuzzer.c
 tools/target_io_dem_fuzzer.o: tools/target_dem_fuzzer.c
$(COMPILE_C) -DIO_FLAT=0
 
+tools/target_int_fuzzer.o: tools/target_int_fuzzer.c
+   $(COMPILE_C)
+
 OUTDIRS += tools
 
 clean::
diff --git a/tools/target_int_fuzzer.c b/tools/target_int_fuzzer.c
new file mode 100644
index 00..811f410b1a
--- /dev/null
+++ b/tools/target_int_fuzzer.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2004 Michael Niedermayer 
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of 
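
The rest of the new fuzzer target is truncated above. Purely as a hypothetical
sketch of what such an entry point usually looks like (the actual
tools/target_int_fuzzer.c may differ), one would derive bounded operands from
the fuzz input and hand them to TestInteger(), keeping the values in the same
range as the original loop so the division and multiplication assertions stay
valid:

    #include <stdint.h>
    #include <string.h>

    #include "libavutil/tests/integer.h"

    int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size);

    int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
    {
        uint64_t a = 0, b = 0;

        if (size < 2 * sizeof(uint64_t))
            return 0;
        memcpy(&a, data, sizeof(a));
        memcpy(&b, data + sizeof(a), sizeof(b));
        /* constrain to the ranges used by libavutil/tests/integer.c */
        a = (a & 0xFFFFFF) + 7;
        b = (b & 0xFFFFFF) + 3;
        TestInteger(a, b);
        return 0;
    }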

[FFmpeg-devel] [PATCH] fftools/ffmpeg: fix -t inaccurate recording time

2021-05-06 Thread Shiwang.Xie

If the input start time is not 0, -t is inaccurate when doing stream copy
and records extra duration according to the input start time.
The start_time used for the check should be based on the following cases:

input video start time from 60s, duration is 300s,
1. stream copy:
   ffmpeg -ss 40 -t 60 -i in.mp4 -c copy -y out.mp4
   open_input_file() will seek to 100 and set ts_offset to -100,
   process_input() will offset pkt->pts with ts_offset to make it 0,
   so when do_streamcopy() with -t, exits when ist->pts >= recording_time.

2. stream copy with -copyts:
   ffmpeg -ss 40 -t 60 -copyts -i in.mp4 -c copy -y out.mp4
   open_input_file() will seek to 100 and set ts_offset to 0,
   process_input() will keep raw pkt->pts as ts_offset is 0,
   so when do_streamcopy() with -t, exits when
   ist->pts >= (recording_time+f->start_time+f->ctx->start_time).

3. stream copy with -copyts -start_at_zero:
   ffmpeg -ss 40 -t 60 -copyts -start_at_zero -i in.mp4 -c copy -y out.mp4
   open_input_file() will seek to 120 and set ts_offset to -60 as the start_at_zero 
option,
   process_input() will offset pkt->pts with input file start time,
   so when do_streamcopy() with -t, exits when ist->pts >= 
(recording_time+f->start_time).

0  60 40  60 360
|___|_|___|___|
  start   -ss -t

This fixes ticket #9141.

Signed-off-by: Shiwang.Xie 
---
 fftools/ffmpeg.c | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c
index 3ad11452da..fac9f67462 100644
--- a/fftools/ffmpeg.c
+++ b/fftools/ffmpeg.c
@@ -2082,9 +2082,11 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
 }
 
 if (f->recording_time != INT64_MAX) {
-start_time = f->ctx->start_time;
-if (f->start_time != AV_NOPTS_VALUE && copy_ts)
-start_time += f->start_time;
+start_time = 0;
+if (copy_ts) {
+start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
+start_time += start_at_zero ? 0 : f->ctx->start_time;
+}
 if (ist->pts >= f->recording_time + start_time) {
 close_output_stream(ost);
 return;
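
As a quick sanity check of case 2 with the numbers from the commit message
(input starts at 60s, -ss 40, -t 60; assuming f->start_time holds the -ss
value and f->ctx->start_time the container start time): the cutoff becomes
recording_time + f->start_time + f->ctx->start_time = 60 + 40 + 60 = 160,
and since -copyts keeps the original timestamps, the copied packets run from
100 to 160, i.e. exactly the requested 60 seconds.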


[FFmpeg-devel] [PATCH 2/2] GSoC: Add guided filter

2021-05-06 Thread Xuewei Meng
This version can be executed by threads at the slice level.

To-Do-List:

1. Fast guided filter

2. Improve the derain/dehaze/denoise performance of guided filter

Signed-off-by: Xuewei Meng <928826...@qq.com>
---
 doc/filters.texi |  21 +++
 libavfilter/Makefile |   1 +
 libavfilter/allfilters.c |   1 +
 libavfilter/vf_guided.c  | 426 +++
 4 files changed, 449 insertions(+)
 create mode 100644 libavfilter/vf_guided.c

diff --git a/doc/filters.texi b/doc/filters.texi
index 36e35a1..d027ce9 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -12918,6 +12918,27 @@ greyedge=difford=1:minknorm=0:sigma=2
 
 @end itemize
 
+@section guided filter
+Apply guided filter for dehazing, deraining and denoising.
+
+The filter accepts the following options:
+@table @option
+@item radius
+Set the radius in pixels.
+Allowed range is 1 to 20. Default is 3.
+
+@item eps
+Set regularization parameter.
+Allowed range is 0 to 1. Default is 0.2.
+
+@item planes
+Set planes to filter. Default is first only.
+@end table
+
+@subsection Commands
+
+This filter supports the all above options as @ref{commands}.
+
 @anchor{haldclut}
 @section haldclut
 
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 5a28736..7091508 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -284,6 +284,7 @@ OBJS-$(CONFIG_GBLUR_FILTER)  += vf_gblur.o
 OBJS-$(CONFIG_GEQ_FILTER)+= vf_geq.o
 OBJS-$(CONFIG_GRADFUN_FILTER)+= vf_gradfun.o
 OBJS-$(CONFIG_GRAPHMONITOR_FILTER)   += f_graphmonitor.o
+OBJS-$(CONFIG_GUIDED_FILTER) += vf_guided.o
 OBJS-$(CONFIG_GREYEDGE_FILTER)   += vf_colorconstancy.o
 OBJS-$(CONFIG_HALDCLUT_FILTER)   += vf_lut3d.o framesync.o
 OBJS-$(CONFIG_HFLIP_FILTER)  += vf_hflip.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index 931d7db..962f656 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -270,6 +270,7 @@ extern const AVFilter ff_vf_geq;
 extern const AVFilter ff_vf_gradfun;
 extern const AVFilter ff_vf_graphmonitor;
 extern const AVFilter ff_vf_greyedge;
+extern const AVFilter ff_vf_guided;
 extern const AVFilter ff_vf_haldclut;
 extern const AVFilter ff_vf_hflip;
 extern const AVFilter ff_vf_histeq;
diff --git a/libavfilter/vf_guided.c b/libavfilter/vf_guided.c
new file mode 100644
index 000..9a8aee5
--- /dev/null
+++ b/libavfilter/vf_guided.c
@@ -0,0 +1,426 @@
+/*
+ * Copyright (c) 2021 Xuewei Meng
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to 
deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in 
all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 
FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 
THE
+ * SOFTWARE.
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "framesync.h"
+#include "internal.h"
+#include "video.h"
+
+typedef struct GuidedContext {
+const AVClass *class;
+FFFrameSync fs;
+
+int radius;
+float eps;
+
+int planes;
+
+int width;
+int height;
+
+int nb_planes;
+int depth;
+int planewidth[4];
+int planeheight[4];
+
+int (*box)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
+} GuidedContext;
+
+#define OFFSET(x) offsetof(GuidedContext, x)
+#define FLAGS 
AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
+
+static const AVOption guided_options[] = {
+{ "radius", "set radius",   OFFSET(radius), AV_OPT_TYPE_INT,   
{.i64=3},   1,  20, FLAGS },
+{ "eps","set eps",  OFFSET(eps),AV_OPT_TYPE_FLOAT, 
{.dbl=0.2  }, 0.0,   1, FLAGS },
+{ "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT,   
{.i64=1},   0, 0xF, FLAGS },
+{ NULL }
+};
+
+typedef struct ThreadData {
+int width;
+int height;
+float *src;
+float *dst;
+int srcStride;
+int dstStride;
+} ThreadData;
+
+AVFILTER_DEFINE_CLASS(guided);
+
+static int box(AVFilterContext *ctx, void *arg, int jobnr, 
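
The code is cut off by the digest here. For background, the radius/eps options
above match the classic guided filter of He et al., which this filter
presumably implements: for each (2*radius+1)^2 box window, linear coefficients
are fitted between the guidance image I and the input p, and the output is
q = mean(a)*I + mean(b), with the coefficients box-filtered once more. A small
illustrative helper (not code from this patch):

    /* Guided-filter coefficients for one window:
     *   a = cov(I,p) / (var(I) + eps),  b = mean(p) - a * mean(I) */
    static void guided_coeffs(float mean_I, float mean_p,
                              float mean_II, float mean_Ip,
                              float eps, float *a, float *b)
    {
        float var_I  = mean_II - mean_I * mean_I;
        float cov_Ip = mean_Ip - mean_I * mean_p;

        *a = cov_Ip / (var_I + eps);
        *b = mean_p - *a * mean_I;
    }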

Re: [FFmpeg-devel] [PATCH] fftools/ffmpeg: fix -t inaccurate recording time

2021-05-06 Thread Gyan Doshi



On 2021-05-06 20:00, Shiwang Xie wrote:
Have tested several cases, will push after tomorrow if without 
objection, thanks.


Resend the patch inlined or attached with a text/x-diff or text/x-patch
MIME type so that patchwork picks it up and runs FATE on it.

Regards,
Gyan



On Thu, 29 Apr 2021, Shiwang.Xie wrote:


if input start time is not 0 -t is inaccurate doing stream copy,
will record extra duration according to input start time.
it should base on following cases:

input video start time from 60s, duration is 300s,
1. stream copy:
  ffmpeg -ss 40 -t 60 -i in.mp4 -c copy -y out.mp4
  open_input_file() will seek to 100 and set ts_offset to -100,
  process_input() will offset pkt->pts with ts_offset to make it 0,
  so when do_streamcopy() with -t, exits when ist->pts >= 
recording_time.


2. stream copy with -copyts:
  ffmpeg -ss 40 -t 60 -copyts -i in.mp4 -c copy -y out.mp4
  open_input_file() will seek to 100 and set ts_offset to 0,
  process_input() will keep raw pkt->pts as ts_offset is 0,
  so when do_streamcopy() with -t, exits when
  ist->pts >= (recording_time+f->start_time+f->ctx->start_time).

3. stream copy with -copyts -start_at_zero:
  ffmpeg -ss 40 -t 60 -copyts -start_at_zero -i in.mp4 -c copy -y 
out.mp4
  open_input_file() will seek to 120 and set ts_offset to -60 as 
start_to_zero option,

  process_input() will offset pkt->pts with input file start time,
  so when do_streamcopy() with -t, exits when ist->pts >= 
(recording_time+f->start_time).


0  60 40  60 360
|___|_|___|___|
 start   -ss -t

This fixes ticket #9141.

Signed-off-by: Shiwang.Xie 
---
fftools/ffmpeg.c | 8 +---
1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c
index 3ad11452da..fac9f67462 100644
--- a/fftools/ffmpeg.c
+++ b/fftools/ffmpeg.c
@@ -2082,9 +2082,11 @@ static void do_streamcopy(InputStream *ist, 
OutputStream *ost, const AVPacket *p

    }

    if (f->recording_time != INT64_MAX) {
-    start_time = f->ctx->start_time;
-    if (f->start_time != AV_NOPTS_VALUE && copy_ts)
-    start_time += f->start_time;
+    start_time = 0;
+    if (copy_ts) {
+    start_time += f->start_time != AV_NOPTS_VALUE ? 
f->start_time : 0;

+    start_time += start_at_zero ? 0 : f->ctx->start_time;
+    }
    if (ist->pts >= f->recording_time + start_time) {
    close_output_stream(ost);
    return;
--
2.24.3 (Apple Git-128)





Re: [FFmpeg-devel] [PATCH] fftools/ffmpeg: fix -t inaccurate recording time

2021-05-06 Thread Shiwang Xie
I have tested several cases and will push it after tomorrow if there are no
objections, thanks.


On Thu, 29 Apr 2021, Shiwang.Xie wrote:


if input start time is not 0 -t is inaccurate doing stream copy,
will record extra duration according to input start time.
it should base on following cases:

input video start time from 60s, duration is 300s,
1. stream copy:
  ffmpeg -ss 40 -t 60 -i in.mp4 -c copy -y out.mp4
  open_input_file() will seek to 100 and set ts_offset to -100,
  process_input() will offset pkt->pts with ts_offset to make it 0,
  so when do_streamcopy() with -t, exits when ist->pts >= recording_time.

2. stream copy with -copyts:
  ffmpeg -ss 40 -t 60 -copyts -i in.mp4 -c copy -y out.mp4
  open_input_file() will seek to 100 and set ts_offset to 0,
  process_input() will keep raw pkt->pts as ts_offset is 0,
  so when do_streamcopy() with -t, exits when
  ist->pts >= (recording_time+f->start_time+f->ctx->start_time).

3. stream copy with -copyts -start_at_zero:
  ffmpeg -ss 40 -t 60 -copyts -start_at_zero -i in.mp4 -c copy -y out.mp4
  open_input_file() will seek to 120 and set ts_offset to -60 as start_to_zero 
option,
  process_input() will offset pkt->pts with input file start time,
  so when do_streamcopy() with -t, exits when ist->pts >= 
(recording_time+f->start_time).

0  60 40  60 360
|___|_|___|___|
 start   -ss -t

This fixes ticket #9141.

Signed-off-by: Shiwang.Xie 
---
fftools/ffmpeg.c | 8 +---
1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c
index 3ad11452da..fac9f67462 100644
--- a/fftools/ffmpeg.c
+++ b/fftools/ffmpeg.c
@@ -2082,9 +2082,11 @@ static void do_streamcopy(InputStream *ist, OutputStream 
*ost, const AVPacket *p
}

if (f->recording_time != INT64_MAX) {
-start_time = f->ctx->start_time;
-if (f->start_time != AV_NOPTS_VALUE && copy_ts)
-start_time += f->start_time;
+start_time = 0;
+if (copy_ts) {
+start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
+start_time += start_at_zero ? 0 : f->ctx->start_time;
+}
if (ist->pts >= f->recording_time + start_time) {
close_output_stream(ost);
return;
--
2.24.3 (Apple Git-128)





Re: [FFmpeg-devel] [PATCH] avformat/utils: constrain the guaranteed lifetime of the pointer returned by avformat_index_get_entry()

2021-05-06 Thread James Almer

On 4/8/2021 2:21 PM, James Almer wrote:

This will give us more room to improve the implementation later.

Suggested-by: Anton Khirnov 
Signed-off-by: James Almer 
---
  libavformat/avformat.h | 12 ++--
  libavformat/utils.c|  4 ++--
  2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/libavformat/avformat.h b/libavformat/avformat.h
index 8600ee1bf7..8b49871cdc 100644
--- a/libavformat/avformat.h
+++ b/libavformat/avformat.h
@@ -2770,10 +2770,10 @@ int avformat_index_get_entries_count(const AVStream 
*st);
   * @return A pointer to the requested AVIndexEntry if it exists, NULL 
otherwise.
   *
   * @note The pointer returned by this function is only guaranteed to be valid
- *   until any function that could alter the stream or the AVFormatContext
- *   that contains it is called.
+ *   until any function that takes the stream or the parent AVFormatContext
+ *   as input argument is called.
   */
-const AVIndexEntry *avformat_index_get_entry(const AVStream *st, int idx);
+const AVIndexEntry *avformat_index_get_entry(AVStream *st, int idx);
  
  /**

   * Get the AVIndexEntry corresponding to the given timestamp.
@@ -2787,10 +2787,10 @@ const AVIndexEntry *avformat_index_get_entry(const 
AVStream *st, int idx);
   * @return A pointer to the requested AVIndexEntry if it exists, NULL 
otherwise.
   *
   * @note The pointer returned by this function is only guaranteed to be valid
- *   until any function that could alter the stream or the AVFormatContext
- *   that contains it is called.
+ *   until any function that takes the stream or the parent AVFormatContext
+ *   as input argument is called.
   */
-const AVIndexEntry *avformat_index_get_entry_from_timestamp(const AVStream *st,
+const AVIndexEntry *avformat_index_get_entry_from_timestamp(AVStream *st,
  int64_t 
wanted_timestamp,
  int flags);
  /**
diff --git a/libavformat/utils.c b/libavformat/utils.c
index d9971d7fd3..3ea34fa042 100644
--- a/libavformat/utils.c
+++ b/libavformat/utils.c
@@ -2169,7 +2169,7 @@ int avformat_index_get_entries_count(const AVStream *st)
  return st->internal->nb_index_entries;
  }
  
-const AVIndexEntry *avformat_index_get_entry(const AVStream *st, int idx)

+const AVIndexEntry *avformat_index_get_entry(AVStream *st, int idx)
  {
  if (idx < 0 || idx >= st->internal->nb_index_entries)
  return NULL;
@@ -2177,7 +2177,7 @@ const AVIndexEntry *avformat_index_get_entry(const 
AVStream *st, int idx)
  return &st->internal->index_entries[idx];
  }
  
-const AVIndexEntry *avformat_index_get_entry_from_timestamp(const AVStream *st,

+const AVIndexEntry *avformat_index_get_entry_from_timestamp(AVStream *st,
  int64_t 
wanted_timestamp,
  int flags)
  {


Will apply.
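
In practice the tightened wording means a caller that wants to keep the data
across further libavformat calls should copy the entry first; a minimal sketch
of the safe pattern (hypothetical caller code):

    const AVIndexEntry *entry = avformat_index_get_entry(st, 0);

    if (entry) {
        AVIndexEntry copy = *entry; /* copy before calling anything else that
                                       takes st or its AVFormatContext */
        /* use `copy` from here on; `entry` itself may be invalidated
           by any later call into libavformat */
    }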


Re: [FFmpeg-devel] [PATCH V4] [mips] Optimize H264 decoding for MIPS platform.

2021-05-06 Thread 殷时友

> 2021年4月12日 下午11:37,Shiyou Yin  写道:
> 
> v2: Fixed a build error in [PATCH 2/5].
> v3: add patch 4/5.
> v4: Fix bug in 2/5 caused by instruction 'lhu' on BIGENDIAN environment.
> 
> [PATCH v4 1/5] avcodec/mips: Restore the initialization sequence of
> [PATCH v4 2/5] avcodec/mips: Refine get_cabac_inline_mips.
> [PATCH v4 3/5] avcodec/mips: Optimize function 
> ff_h264_loop_filter_strength_msa.
> [PATCH v4 4/5] avcodec/mips: Refine ff_h264_h_lpf_luma_inter_msa
> [PATCH v4 5/5] mips: Fix potential illegal instruction error.
> 

Ping.


[FFmpeg-devel] [PATCH V2 4/4] dnn/vf_dnn_detect: add tensorflow output parse support

2021-05-06 Thread Ting Fu
The testing model is an official TensorFlow model from the GitHub repo; please refer to
https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf1_detection_zoo.md
to download the detection model you need.
For example, local testing was carried out with
'ssd_mobilenet_v2_coco_2018_03_29.tar.gz' and
used one image of a dog from
https://github.com/tensorflow/models/blob/master/research/object_detection/test_images/image1.jpg

Testing command is:
./ffmpeg -i image1.jpg -vf 
dnn_detect=dnn_backend=tensorflow:input=image_tensor:output=\
"num_detections_scores_classes_boxes":model=ssd_mobilenet_v2_coco.pb,\
showinfo -f null -

We will see a result similar to the one below:
[Parsed_showinfo_1 @ 0x33e65f0]   side data - detection bounding boxes:
[Parsed_showinfo_1 @ 0x33e65f0] source: ssd_mobilenet_v2_coco.pb
[Parsed_showinfo_1 @ 0x33e65f0] index: 0,   region: (382, 60) -> (1005, 
593), label: 18, confidence: 9834/10000.
[Parsed_showinfo_1 @ 0x33e65f0] index: 1,   region: (12, 8) -> (328, 549), 
label: 18, confidence: 8555/10000.
[Parsed_showinfo_1 @ 0x33e65f0] index: 2,   region: (293, 7) -> (682, 458), 
label: 1, confidence: 8033/10000.
[Parsed_showinfo_1 @ 0x33e65f0] index: 3,   region: (342, 0) -> (690, 325), 
label: 1, confidence: 5878/10000.

There are two boxes of dog with scores 94.05% & 93.45% and two boxes of person 
with scores 80.33% & 58.78%.

Signed-off-by: Ting Fu 
---
 libavfilter/vf_dnn_detect.c | 95 -
 1 file changed, 94 insertions(+), 1 deletion(-)

diff --git a/libavfilter/vf_dnn_detect.c b/libavfilter/vf_dnn_detect.c
index 7d39acb653..818b53a052 100644
--- a/libavfilter/vf_dnn_detect.c
+++ b/libavfilter/vf_dnn_detect.c
@@ -48,6 +48,9 @@ typedef struct DnnDetectContext {
 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
 static const AVOption dnn_detect_options[] = {
 { "dnn_backend", "DNN backend",OFFSET(backend_type), 
AV_OPT_TYPE_INT,   { .i64 = 2 },INT_MIN, INT_MAX, FLAGS, "backend" },
+#if (CONFIG_LIBTENSORFLOW == 1)
+{ "tensorflow",  "tensorflow backend flag",0,
AV_OPT_TYPE_CONST, { .i64 = 1 },0, 0, FLAGS, "backend" },
+#endif
 #if (CONFIG_LIBOPENVINO == 1)
 { "openvino","openvino backend flag",  0,
AV_OPT_TYPE_CONST, { .i64 = 2 },0, 0, FLAGS, "backend" },
 #endif
@@ -59,7 +62,7 @@ static const AVOption dnn_detect_options[] = {
 
 AVFILTER_DEFINE_CLASS(dnn_detect);
 
-static int dnn_detect_post_proc(AVFrame *frame, DNNData *output, uint32_t nb, 
AVFilterContext *filter_ctx)
+static int dnn_detect_post_proc_ov(AVFrame *frame, DNNData *output, 
AVFilterContext *filter_ctx)
 {
 DnnDetectContext *ctx = filter_ctx->priv;
 float conf_threshold = ctx->confidence;
@@ -136,6 +139,96 @@ static int dnn_detect_post_proc(AVFrame *frame, DNNData 
*output, uint32_t nb, AV
 return 0;
 }
 
+static int dnn_detect_post_proc_tf(AVFrame *frame, DNNData *output, 
AVFilterContext *filter_ctx)
+{
+DnnDetectContext *ctx = filter_ctx->priv;
+int proposal_count;
+float conf_threshold = ctx->confidence;
+float *conf, *position, *label_id, x0, y0, x1, y1;
+int nb_bboxes = 0;
+AVFrameSideData *sd;
+AVDetectionBBox *bbox;
+AVDetectionBBoxHeader *header;
+
+proposal_count = *(float *)(output[0].data);
+conf   = output[1].data;
+position   = output[3].data;
+label_id   = output[2].data;
+
+sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DETECTION_BBOXES);
+if (sd) {
+av_log(filter_ctx, AV_LOG_ERROR, "already have dnn bounding boxes in 
side data.\n");
+return -1;
+}
+
+for (int i = 0; i < proposal_count; ++i) {
+if (conf[i] < conf_threshold)
+continue;
+nb_bboxes++;
+}
+
+if (nb_bboxes == 0) {
+av_log(filter_ctx, AV_LOG_VERBOSE, "nothing detected in this 
frame.\n");
+return 0;
+}
+
+header = av_detection_bbox_create_side_data(frame, nb_bboxes);
+if (!header) {
+av_log(filter_ctx, AV_LOG_ERROR, "failed to create side data with %d 
bounding boxes\n", nb_bboxes);
+return -1;
+}
+
+av_strlcpy(header->source, ctx->dnnctx.model_filename, 
sizeof(header->source));
+
+for (int i = 0; i < proposal_count; ++i) {
+y0 = position[i * 4];
+x0 = position[i * 4 + 1];
+y1 = position[i * 4 + 2];
+x1 = position[i * 4 + 3];
+
+bbox = av_get_detection_bbox(header, i);
+
+if (conf[i] < conf_threshold) {
+continue;
+}
+
+bbox->x = (int)(x0 * frame->width);
+bbox->w = (int)(x1 * frame->width) - bbox->x;
+bbox->y = (int)(y0 * frame->height);
+bbox->h = (int)(y1 * frame->height) - bbox->y;
+
+bbox->detect_confidence = av_make_q((int)(conf[i] * 10000), 10000);
+bbox->classify_count = 0;
+
+if (ctx->labels && label_id[i] < ctx->label_count) {
+  

[FFmpeg-devel] [PATCH V2 3/4] lavfi/dnn_backend_tensorflow: support detect model

2021-05-06 Thread Ting Fu
Signed-off-by: Ting Fu 
---
 libavfilter/dnn/dnn_backend_tf.c | 39 ++--
 libavfilter/vf_dnn_detect.c  | 32 +-
 2 files changed, 63 insertions(+), 8 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index b6b1812cd9..622b5a8464 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -793,15 +793,40 @@ static DNNReturnType execute_model_tf(const DNNModel 
*model, const char *input_n
 outputs[i].data = TF_TensorData(output_tensors[i]);
 outputs[i].dt = TF_TensorType(output_tensors[i]);
 }
-if (do_ioproc) {
-if (tf_model->model->frame_post_proc != NULL) {
-tf_model->model->frame_post_proc(out_frame, outputs, 
tf_model->model->filter_ctx);
+switch (model->func_type) {
+case DFT_PROCESS_FRAME:
+//it only support 1 output if it's frame in & frame out
+if (do_ioproc) {
+if (tf_model->model->frame_post_proc != NULL) {
+tf_model->model->frame_post_proc(out_frame, outputs, 
tf_model->model->filter_ctx);
+} else {
+ff_proc_from_dnn_to_frame(out_frame, outputs, ctx);
+}
 } else {
-ff_proc_from_dnn_to_frame(out_frame, outputs, ctx);
+out_frame->width = outputs[0].width;
+out_frame->height = outputs[0].height;
+}
+break;
+case DFT_ANALYTICS_DETECT:
+if (!model->detect_post_proc) {
+av_log(ctx, AV_LOG_ERROR, "Detect filter needs provide post 
proc\n");
+return DNN_ERROR;
+}
+model->detect_post_proc(out_frame, outputs, nb_output, 
model->filter_ctx);
+break;
+default:
+for (uint32_t i = 0; i < nb_output; ++i) {
+if (output_tensors[i]) {
+TF_DeleteTensor(output_tensors[i]);
+}
 }
-} else {
-out_frame->width = outputs[0].width;
-out_frame->height = outputs[0].height;
+TF_DeleteTensor(input_tensor);
+av_freep(&output_tensors);
+av_freep(&tf_outputs);
+av_freep(&outputs);
+
+av_log(ctx, AV_LOG_ERROR, "Tensorflow backend does not support this 
kind of dnn filter now\n");
+return DNN_ERROR;
 }
 
 for (uint32_t i = 0; i < nb_output; ++i) {
diff --git a/libavfilter/vf_dnn_detect.c b/libavfilter/vf_dnn_detect.c
index 1dbe4f29a4..7d39acb653 100644
--- a/libavfilter/vf_dnn_detect.c
+++ b/libavfilter/vf_dnn_detect.c
@@ -203,10 +203,40 @@ static int read_detect_label_file(AVFilterContext 
*context)
 return 0;
 }
 
+static int check_output_nb(DnnDetectContext *ctx, DNNBackendType backend_type, 
int output_nb)
+{
+switch(backend_type) {
+case DNN_TF:
+if (output_nb != 4) {
+av_log(ctx, AV_LOG_ERROR, "Only support tensorflow detect model 
with 4 outputs, \
+   but get %d instead\n", output_nb);
+return AVERROR(EINVAL);
+}
+return 0;
+case DNN_OV:
+if (output_nb != 1) {
+av_log(ctx, AV_LOG_ERROR, "Dnn detect filter with openvino backend 
needs 1 output only, \
+   but get %d instead\n", output_nb);
+return AVERROR(EINVAL);
+}
+return 0;
+default:
+avpriv_report_missing_feature(ctx, "Dnn detect filter does not support 
current backend\n");
+return AVERROR(EINVAL);
+}
+return 0;
+}
+
 static av_cold int dnn_detect_init(AVFilterContext *context)
 {
 DnnDetectContext *ctx = context->priv;
-int ret = ff_dnn_init(&ctx->dnnctx, DFT_ANALYTICS_DETECT, context);
+DnnContext *dnn_ctx = &ctx->dnnctx;
+int ret;
+
+ret = ff_dnn_init(&ctx->dnnctx, DFT_ANALYTICS_DETECT, context);
+if (ret < 0)
+return ret;
+ret = check_output_nb(ctx, dnn_ctx->backend_type, dnn_ctx->nb_outputs);
 if (ret < 0)
 return ret;
 ff_dnn_set_detect_post_proc(&ctx->dnnctx, dnn_detect_post_proc);
-- 
2.17.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".


[FFmpeg-devel] [PATCH V2 2/4] lavfi/dnn_backend_tensorflow: add multiple outputs support

2021-05-06 Thread Ting Fu
Signed-off-by: Ting Fu 
---
 libavfilter/dnn/dnn_backend_tf.c | 49 ++---
 libavfilter/dnn_filter_common.c  | 53 ++--
 libavfilter/dnn_filter_common.h  |  6 ++--
 libavfilter/vf_derain.c  |  2 +-
 libavfilter/vf_sr.c  |  2 +-
 5 files changed, 75 insertions(+), 37 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 45da29ae70..b6b1812cd9 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -155,7 +155,7 @@ static DNNReturnType get_input_tf(void *model, DNNData 
*input, const char *input
 TF_DeleteStatus(status);
 
 // currently only NHWC is supported
-av_assert0(dims[0] == 1);
+av_assert0(dims[0] == 1 || dims[0] == -1);
 input->height = dims[1];
 input->width = dims[2];
 input->channels = dims[3];
@@ -707,7 +707,7 @@ static DNNReturnType execute_model_tf(const DNNModel 
*model, const char *input_n
 TF_Output *tf_outputs;
 TFModel *tf_model = model->model;
 TFContext *ctx = &tf_model->ctx;
-DNNData input, output;
+DNNData input, *outputs;
 TF_Tensor **output_tensors;
 TF_Output tf_input;
 TF_Tensor *input_tensor;
@@ -738,14 +738,6 @@ static DNNReturnType execute_model_tf(const DNNModel 
*model, const char *input_n
 }
 }
 
-if (nb_output != 1) {
-// currently, the filter does not need multiple outputs,
-// so we just pending the support until we really need it.
-TF_DeleteTensor(input_tensor);
-avpriv_report_missing_feature(ctx, "multiple outputs");
-return DNN_ERROR;
-}
-
 tf_outputs = av_malloc_array(nb_output, sizeof(*tf_outputs));
 if (tf_outputs == NULL) {
 TF_DeleteTensor(input_tensor);
@@ -785,23 +777,31 @@ static DNNReturnType execute_model_tf(const DNNModel 
*model, const char *input_n
 return DNN_ERROR;
 }
 
+outputs = av_malloc_array(nb_output, sizeof(*outputs));
+if (!outputs) {
+TF_DeleteTensor(input_tensor);
+av_freep(&tf_outputs);
+av_freep(&output_tensors);
+av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *outputs\n"); 
\
+return DNN_ERROR;
+}
+
 for (uint32_t i = 0; i < nb_output; ++i) {
-output.height = TF_Dim(output_tensors[i], 1);
-output.width = TF_Dim(output_tensors[i], 2);
-output.channels = TF_Dim(output_tensors[i], 3);
-output.data = TF_TensorData(output_tensors[i]);
-output.dt = TF_TensorType(output_tensors[i]);
-
-if (do_ioproc) {
-if (tf_model->model->frame_post_proc != NULL) {
-tf_model->model->frame_post_proc(out_frame, &output, tf_model->model->filter_ctx);
-} else {
-ff_proc_from_dnn_to_frame(out_frame, &output, ctx);
-}
+outputs[i].height = TF_Dim(output_tensors[i], 1);
+outputs[i].width = TF_Dim(output_tensors[i], 2);
+outputs[i].channels = TF_Dim(output_tensors[i], 3);
+outputs[i].data = TF_TensorData(output_tensors[i]);
+outputs[i].dt = TF_TensorType(output_tensors[i]);
+}
+if (do_ioproc) {
+if (tf_model->model->frame_post_proc != NULL) {
+tf_model->model->frame_post_proc(out_frame, outputs, 
tf_model->model->filter_ctx);
 } else {
-out_frame->width = output.width;
-out_frame->height = output.height;
+ff_proc_from_dnn_to_frame(out_frame, outputs, ctx);
 }
+} else {
+out_frame->width = outputs[0].width;
+out_frame->height = outputs[0].height;
 }
 
 for (uint32_t i = 0; i < nb_output; ++i) {
@@ -812,6 +812,7 @@ static DNNReturnType execute_model_tf(const DNNModel 
*model, const char *input_n
 TF_DeleteTensor(input_tensor);
 av_freep(&output_tensors);
 av_freep(&tf_outputs);
+av_freep(&outputs);
 return DNN_SUCCESS;
 }
 
diff --git a/libavfilter/dnn_filter_common.c b/libavfilter/dnn_filter_common.c
index 52c7a5392a..0ed0ac2e30 100644
--- a/libavfilter/dnn_filter_common.c
+++ b/libavfilter/dnn_filter_common.c
@@ -17,6 +17,39 @@
  */
 
 #include "dnn_filter_common.h"
+#include "libavutil/avstring.h"
+
+#define MAX_SUPPORTED_OUTPUTS_NB 4
+
+static char **separate_output_names(const char *expr, const char *val_sep, int 
*separated_nb)
+{
+char *val, **parsed_vals = NULL;
+int val_num = 0;
+if (!expr || !val_sep || !separated_nb) {
+return NULL;
+}
+
+parsed_vals = av_mallocz_array(MAX_SUPPORTED_OUTPUTS_NB, 
sizeof(*parsed_vals));
+if (!parsed_vals) {
+return NULL;
+}
+
+do {
+val = av_get_token(&expr, val_sep);
+if(val) {
+parsed_vals[val_num] = val;
+val_num++;
+}
+if (*expr) {
+expr++;
+}
+} while(*expr);
+
+parsed_vals[val_num] = NULL;
+*separated_nb = val_num;
+
+return parsed_vals;
+}
 
 int ff_dnn_init(DnnContext *ctx, DNNFunctionType func_type, 

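As a usage aside (not part of the patch set): separate_output_names() above
splits the filter's output option into individual tensor names with
av_get_token(). A standalone sketch of the same tokenizing pattern, assuming
the '&' separator the dnn filters use and the output names from the patch 4/4
testing command:

    #include <stdio.h>
    #include <libavutil/avstring.h>
    #include <libavutil/mem.h>

    int main(void)
    {
        const char *expr =
            "num_detections&detection_scores&detection_classes&detection_boxes";
        char *names[4];
        int nb = 0;

        /* av_get_token() returns a malloc'ed copy of the text up to the next
         * separator and leaves expr pointing at that separator (or at '\0'). */
        do {
            char *val = av_get_token(&expr, "&");
            if (val)
                names[nb++] = val;
            if (*expr)
                expr++;              /* skip the '&' itself */
        } while (*expr && nb < 4);

        for (int i = 0; i < nb; i++) {
            printf("output[%d] = %s\n", i, names[i]);
            av_freep(&names[i]);
        }
        return 0;
    }

For the string above this prints output[0] = num_detections through
output[3] = detection_boxes, which matches the indices the TF post
processing in patch 4/4 reads (scores at [1], classes at [2], boxes at [3]).
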
[FFmpeg-devel] [PATCH V2 1/4] dnn: add DCO_RGB color order to enum DNNColorOrder

2021-05-06 Thread Ting Fu
Adding DCO_RGB color order to DNNColorOrder, since the TensorFlow model
needs this kind of color order as input.
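
A minimal sketch (hypothetical backend, not part of this patch) of how a
get_input callback is expected to fill DNNData so that get_pixel_format() in
the diff below picks AV_PIX_FMT_RGB24; the fixed 299x299 input size is made
up for illustration:

    /* assumes the declarations from libavfilter/dnn_interface.h */
    static DNNReturnType get_input_example(void *model, DNNData *input,
                                           const char *input_name)
    {
        /* NHWC layout, as in get_input_tf() */
        input->dt       = DNN_UINT8;
        input->order    = DCO_RGB;  /* model trained on RGB24 frames */
        input->height   = 299;      /* hypothetical fixed input size */
        input->width    = 299;
        input->channels = 3;
        return DNN_SUCCESS;
    }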

Signed-off-by: Ting Fu 
---
V2:
Rebase patch to latest code

 libavfilter/dnn/dnn_backend_tf.c |  1 +
 libavfilter/dnn/dnn_io_proc.c| 14 +++---
 libavfilter/dnn_interface.h  |  1 +
 3 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 03fe310b03..45da29ae70 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -143,6 +143,7 @@ static DNNReturnType get_input_tf(void *model, DNNData 
*input, const char *input
 
 tf_output.index = 0;
 input->dt = TF_OperationOutputType(tf_output);
+input->order = DCO_RGB;
 
 status = TF_NewStatus();
 TF_GraphGetTensorShape(tf_model->graph, tf_output, dims, 4, status);
diff --git a/libavfilter/dnn/dnn_io_proc.c b/libavfilter/dnn/dnn_io_proc.c
index 5f60d68078..1e2bef3f9a 100644
--- a/libavfilter/dnn/dnn_io_proc.c
+++ b/libavfilter/dnn/dnn_io_proc.c
@@ -168,11 +168,19 @@ static DNNReturnType 
proc_from_frame_to_dnn_frameprocessing(AVFrame *frame, DNND
 
 static enum AVPixelFormat get_pixel_format(DNNData *data)
 {
-if (data->dt == DNN_UINT8 && data->order == DCO_BGR) {
-return AV_PIX_FMT_BGR24;
+if (data->dt == DNN_UINT8) {
+switch (data->order) {
+case DCO_BGR:
+return AV_PIX_FMT_BGR24;
+case DCO_RGB:
+return AV_PIX_FMT_RGB24;
+default:
+av_assert0(!"unsupported data pixel format.\n");
+return AV_PIX_FMT_BGR24;
+}
 }
 
-av_assert0(!"not supported yet.\n");
+av_assert0(!"unsupported data type.\n");
 return AV_PIX_FMT_BGR24;
 }
 
diff --git a/libavfilter/dnn_interface.h b/libavfilter/dnn_interface.h
index 799244ee14..5e9ffeb077 100644
--- a/libavfilter/dnn_interface.h
+++ b/libavfilter/dnn_interface.h
@@ -39,6 +39,7 @@ typedef enum {DNN_FLOAT = 1, DNN_UINT8 = 4} DNNDataType;
 typedef enum {
 DCO_NONE,
 DCO_BGR,
+DCO_RGB,
 } DNNColorOrder;
 
 typedef enum {
-- 
2.17.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".