[FFmpeg-devel] [ffmpeg-devel][GSoC][PATCH 1/2] libavfilter/vf_colorconstancy.c : Cleanup code for new filter

2020-07-01 Thread Yatendra Singh
Signed-off-by: Yatendra Singh 
---
 libavfilter/vf_colorconstancy.c | 46 ++---
 1 file changed, 13 insertions(+), 33 deletions(-)

diff --git a/libavfilter/vf_colorconstancy.c b/libavfilter/vf_colorconstancy.c
index eae62204b5..d974317a48 100644
--- a/libavfilter/vf_colorconstancy.c
+++ b/libavfilter/vf_colorconstancy.c
@@ -552,32 +552,6 @@ static void normalize_light(double *light)
 }
 }
 
-/**
- * Redirects to corresponding algorithm estimation function and performs 
normalization
- * after estimation.
- *
- * @param ctx the filter context.
- * @param in frame to perfrom estimation on.
- *
- * @return 0 in case of success, a negative value corresponding to an
- * AVERROR code in case of failure.
- */
-static int illumination_estimation(AVFilterContext *ctx, AVFrame *in)
-{
-ColorConstancyContext *s = ctx->priv;
-int ret;
-
-ret = filter_grey_edge(ctx, in);
-
-av_log(ctx, AV_LOG_DEBUG, "Estimated illumination= %f %f %f\n",
-   s->white[0], s->white[1], s->white[2]);
-normalize_light(s->white);
-av_log(ctx, AV_LOG_DEBUG, "Estimated illumination after normalization= %f 
%f %f\n",
-   s->white[0], s->white[1], s->white[2]);
-
-return ret;
-}
-
 /**
  * Performs simple correction via diagonal transformation model.
  *
@@ -682,12 +656,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 int ret;
 int direct = 0;
 
-ret = illumination_estimation(ctx, in);
-if (ret) {
-av_frame_free(&in);
-return ret;
-}
-
 if (av_frame_is_writable(in)) {
 direct = 1;
 out = in;
@@ -699,7 +667,19 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 }
 av_frame_copy_props(out, in);
 }
-chromatic_adaptation(ctx, in, out);
+
+if (!strcmp(ctx->filter->name, GREY_EDGE)) {
+ColorConstancyContext *s = ctx->priv;
+ret = filter_grey_edge(ctx, in);
+
+normalize_light(s->white);
+
+if (ret) {
+av_frame_free(&in);
+return ret;
+}
+chromatic_adaptation(ctx, in, out);
+}
 
 if (!direct)
 av_frame_free(&in);
-- 
2.20.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

[FFmpeg-devel] [ffmpeg-devel][GSoC][PATCH 2/2] libavfilter/vf_colorconstancy.c : Adding weighted greyedge

2020-07-01 Thread Yatendra Singh
Signed-off-by: Yatendra Singh 
---
 doc/filters.texi|  36 ++
 libavfilter/Makefile|   1 +
 libavfilter/allfilters.c|   1 +
 libavfilter/vf_colorconstancy.c | 220 
 4 files changed, 258 insertions(+)

diff --git a/doc/filters.texi b/doc/filters.texi
index 85a511b205..2946b5b9e6 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -20250,6 +20250,42 @@ 
separatefields,select=eq(mod(n,4),0)+eq(mod(n,4),3),weave
 @end example
 @end itemize
 
+@section weighted_greyedge
+Apply the color constancy filter which estimates illumination and updates the
+image colors accordingly.
+
+It accepts the following options:
+
+@table @option
+@item minknorm
+The Minkowski parameter to be used for calculating the Minkowski distance. Must
+be chosen in the range [0,20] and default value is 1. Set to 0 for getting
+max value instead of calculating Minkowski distance.
+
+@item sigma
+The standard deviation of Gaussian blur to be applied on the scene. Must be
+chosen in the range [0,1024.0] and default value = 1. floor( @var{sigma} * 
break_off_sigma(3) )
+can't be equal to 0 if @var{difford} is greater than 0.
+
+@item min_err
+The error angle between the estimated illumination and white light at which 
the algorithm stops even
+if it hasn't reached the required number of iterations @code{max_iters}. Must 
be chosen in the range
+[0.02,PI] radians with default of 0.1.
+
+@item max_iters
+The number of iterations at which the algorithm stops even if it hasn't 
reached the required
+error angle between the estimated illumination and white light @code{min_err}. 
Must be chosen in the
+range [1,100] with a default value of 10.
+
+@item kappa
+The power which is applied to the spectral weights to change the impact of 
weights on illuminant 
+estimation @code{kappa}. Must be chosen in the range [1,25] with a default 
value of 10.
+@end table
+
+@example
+ffmpeg -i mondrian.tif -vf 
"weighted_greyedge=minknorm=0:sigma=1:max_iters=50:min_err=0.02:kappa=10" 
mondrian_out.tif
+@end example
+
 @section xbr
 Apply the xBR high-quality magnification filter which is designed for pixel
 art. It follows a set of edge-detection rules, see
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 994a4172a3..6973452f8d 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -453,6 +453,7 @@ OBJS-$(CONFIG_VSTACK_FILTER) += vf_stack.o 
framesync.o
 OBJS-$(CONFIG_W3FDIF_FILTER) += vf_w3fdif.o
 OBJS-$(CONFIG_WAVEFORM_FILTER)   += vf_waveform.o
 OBJS-$(CONFIG_WEAVE_FILTER)  += vf_weave.o
+OBJS-$(CONFIG_WEIGHTED_GREYEDGE_FILTER)  += vf_colorconstancy.o
 OBJS-$(CONFIG_XBR_FILTER)+= vf_xbr.o
 OBJS-$(CONFIG_XFADE_FILTER)  += vf_xfade.o
 OBJS-$(CONFIG_XFADE_OPENCL_FILTER)   += vf_xfade_opencl.o opencl.o 
opencl/xfade.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index f2a44b0090..ad2e07f9c5 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -432,6 +432,7 @@ extern AVFilter ff_vf_vstack;
 extern AVFilter ff_vf_w3fdif;
 extern AVFilter ff_vf_waveform;
 extern AVFilter ff_vf_weave;
+extern AVFilter ff_vf_weighted_greyedge;
 extern AVFilter ff_vf_xbr;
 extern AVFilter ff_vf_xfade;
 extern AVFilter ff_vf_xfade_opencl;
diff --git a/libavfilter/vf_colorconstancy.c b/libavfilter/vf_colorconstancy.c
index d974317a48..cd9d992fdd 100644
--- a/libavfilter/vf_colorconstancy.c
+++ b/libavfilter/vf_colorconstancy.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018 Mina Sami
+ * Copyright (c) 2020 Yatendra Singh
  *
  * This file is part of FFmpeg.
  *
@@ -26,6 +27,14 @@
  *
  * @cite
  * J. van de Weijer, Th. Gevers, A. Gijsenij "Edge-Based Color Constancy".
+ *
+ * @cite
+ * J. van de Weijer, Th. Gevers, and J. Geusebroek,
+ * “Edge and corner detection by photometric quasi-invariants”.
+ *
+ * @cite
+ * A. Gijsenij, Th. Gevers, J. van de Weijer,
+ * "Improving Color Constancy by Photometric Edge Weighting".
  */
 
 #include "libavutil/imgutils.h"
@@ -40,8 +49,10 @@
 #include 
 
 #define GREY_EDGE "greyedge"
+#define WEIGHTED_GREY_EDGE "weighted_greyedge"
 
 #define SQRT3 1.73205080757
+#define NORMAL_WHITE 1/SQRT3
 
 #define NUM_PLANES3
 #define MAX_DIFF_ORD  2
@@ -77,12 +88,16 @@ typedef struct ColorConstancyContext {
 
 int difford;
 int minknorm; /**< @minknorm = 0 : getMax instead */
+int kappa;
 double sigma;
 
 int nb_threads;
 int planeheight[4];
 int planewidth[4];
 
+double min_err;
+int max_iters;
+
 int filtersize;
 double *gauss[MAX_DIFF_ORD+1];
 
@@ -608,6 +623,170 @@ static void chromatic_adaptation(AVFilterContext *ctx, 
AVFrame *in, AVFrame *out
 ctx->internal->execute(ctx, diagonal_transformation, &td, NULL, nb_jobs);
 }
 
+/**
+ * Slice function for weighted grey edge algorithm that

Re: [FFmpeg-devel] [PATCH 2/2] libavfilter/vf_colorconstancy.c : Adding weighted greyedge

2020-07-01 Thread Yatendra Singh
>
> If you are setting initial estimation to normal white, does it make any
>> difference to do a chromatic adaptation?
>>
>  I was trying to reproduce the same values as the official code in matlab
> and hence this was done.
>  It might provide a very minor improvement in speed but having an estimate
> beforehand might help converge faster.
>  I would update the next patch with chromatic adaptation.
>
 Sorry, I kind of misread this part while replying earlier.
 I was following the algorithm which was laid out in the paper which does
chromatic adaptation as the first step in the loop.
 So according to the algorithm it first performs correction based on the
initial estimate and then calculates the weights and new value of estimated
light.

>
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] [PATCH 2/2] libavfilter/vf_colorconstancy.c : Adding weighted greyedge

2020-06-30 Thread Yatendra Singh
>
> > +s->difford = 1;
>
> Why are you forcing this value?
>
The algorithm only uses first order differentials for the calculation of
the specular variant and it does not seem to be an option to use other
orders as far as I have understood from the paper and the official code.

>
> If you are setting initial estimation to normal white, does it make any
> difference to do a chromatic adaptation?
>
 I was trying to reproduce the same values as the official code in matlab
and hence this was done.
 It might provide a very minor improvement in speed but having an estimate
beforehand might help converge faster.
 I would update the next patch with chromatic adaptation.

>
> > +
> > +static const AVOption weighted_greyedge_options[] = {
> > +{ "minknorm",  "set Minkowski norm", OFFSET(minknorm),
> AV_OPT_TYPE_INT,{.i64=1},   0,20, FLAGS },
> > +{ "sigma", "set sigma",  OFFSET(sigma),
>  AV_OPT_TYPE_DOUBLE, {.dbl=1},   0.0,  1024.0, FLAGS },
> > +{ "min_err",   "set minimum angular error",  OFFSET(min_err),
>  AV_OPT_TYPE_DOUBLE, {.dbl=0.1}, 0.02, M_PI,   FLAGS },
> > +{ "max_iters", "set the maximum iterations", OFFSET(max_iters),
> AV_OPT_TYPE_INT,{.i64=10},  1,100,FLAGS },
> > +{ "kappa", "set the kappa for weights",  OFFSET(kappa),
>  AV_OPT_TYPE_INT,{.i64=10},  1,25,FLAGS },
>
> Why 25?
>
 Honestly, I have no idea as to what the actual upper limit on the power of
weights should be, so I used a random upper limit while testing.
 What would you suggest I should use?


I would also update the new patch with all the rest of the changes.
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

[FFmpeg-devel] [PATCH 1/2] libavfilter/vf_colorconstancy.c : Cleanup code for new filter

2020-06-18 Thread Yatendra Singh
Signed-off-by: Yatendra Singh 
---
 libavfilter/vf_colorconstancy.c | 47 ++---
 1 file changed, 14 insertions(+), 33 deletions(-)

diff --git a/libavfilter/vf_colorconstancy.c b/libavfilter/vf_colorconstancy.c
index eae62204b5..d36400bd35 100644
--- a/libavfilter/vf_colorconstancy.c
+++ b/libavfilter/vf_colorconstancy.c
@@ -552,32 +552,6 @@ static void normalize_light(double *light)
 }
 }
 
-/**
- * Redirects to corresponding algorithm estimation function and performs 
normalization
- * after estimation.
- *
- * @param ctx the filter context.
- * @param in frame to perfrom estimation on.
- *
- * @return 0 in case of success, a negative value corresponding to an
- * AVERROR code in case of failure.
- */
-static int illumination_estimation(AVFilterContext *ctx, AVFrame *in)
-{
-ColorConstancyContext *s = ctx->priv;
-int ret;
-
-ret = filter_grey_edge(ctx, in);
-
-av_log(ctx, AV_LOG_DEBUG, "Estimated illumination= %f %f %f\n",
-   s->white[0], s->white[1], s->white[2]);
-normalize_light(s->white);
-av_log(ctx, AV_LOG_DEBUG, "Estimated illumination after normalization= %f 
%f %f\n",
-   s->white[0], s->white[1], s->white[2]);
-
-return ret;
-}
-
 /**
  * Performs simple correction via diagonal transformation model.
  *
@@ -682,12 +656,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 int ret;
 int direct = 0;
 
-ret = illumination_estimation(ctx, in);
-if (ret) {
-av_frame_free(&in);
-return ret;
-}
-
 if (av_frame_is_writable(in)) {
 direct = 1;
 out = in;
@@ -699,7 +667,20 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 }
 av_frame_copy_props(out, in);
 }
-chromatic_adaptation(ctx, in, out);
+
+if(!strcmp(ctx->filter->name, GREY_EDGE))
+{
+ColorConstancyContext *s = ctx->priv;
+ret = filter_grey_edge(ctx, in);
+
+normalize_light(s->white);
+
+if (ret) {
+av_frame_free(&in);
+return ret;
+}
+chromatic_adaptation(ctx, in, out);
+}
 
 if (!direct)
 av_frame_free(&in);
-- 
2.20.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

[FFmpeg-devel] [PATCH 2/2] libavfilter/vf_colorconstancy.c : Adding weighted greyedge

2020-06-18 Thread Yatendra Singh
Signed-off-by: Yatendra Singh 
---
 doc/filters.texi|  36 ++
 libavfilter/Makefile|   1 +
 libavfilter/allfilters.c|   1 +
 libavfilter/vf_colorconstancy.c | 215 
 4 files changed, 253 insertions(+)

diff --git a/doc/filters.texi b/doc/filters.texi
index 85a511b205..2946b5b9e6 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -20250,6 +20250,42 @@ 
separatefields,select=eq(mod(n,4),0)+eq(mod(n,4),3),weave
 @end example
 @end itemize
 
+@section weighted_greyedge
+Apply the color constancy filter which estimates illumination and updates the
+image colors accordingly.
+
+It accepts the following options:
+
+@table @option
+@item minknorm
+The Minkowski parameter to be used for calculating the Minkowski distance. Must
+be chosen in the range [0,20] and default value is 1. Set to 0 for getting
+max value instead of calculating Minkowski distance.
+
+@item sigma
+The standard deviation of Gaussian blur to be applied on the scene. Must be
+chosen in the range [0,1024.0] and default value = 1. floor( @var{sigma} * 
break_off_sigma(3) )
+can't be equal to 0 if @var{difford} is greater than 0.
+
+@item min_err
+The error angle between the estimated illumination and white light at which 
the algorithm stops even
+if it hasn't reached the required number of iterations @code{max_iters}. Must 
be chosen in the range
+[0.02,PI] radians with default of 0.1.
+
+@item max_iters
+The number of iterations at which the algorithm stops even if it hasn't 
reached the required
+error angle between the estimated illumination and white light @code{min_err}. 
Must be chosen in the
+range [1,100] with a default value of 10.
+
+@item kappa
+The power which is applied to the spectral weights to change the impact of 
weights on illuminant 
+estimation @code{kappa}. Must be chosen in the range [1,25] with a default 
value of 10.
+@end table
+
+@example
+ffmpeg -i mondrian.tif -vf 
"weighted_greyedge=minknorm=0:sigma=1:max_iters=50:min_err=0.02:kappa=10" 
mondrian_out.tif
+@end example
+
 @section xbr
 Apply the xBR high-quality magnification filter which is designed for pixel
 art. It follows a set of edge-detection rules, see
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 994a4172a3..6973452f8d 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -453,6 +453,7 @@ OBJS-$(CONFIG_VSTACK_FILTER) += vf_stack.o 
framesync.o
 OBJS-$(CONFIG_W3FDIF_FILTER) += vf_w3fdif.o
 OBJS-$(CONFIG_WAVEFORM_FILTER)   += vf_waveform.o
 OBJS-$(CONFIG_WEAVE_FILTER)  += vf_weave.o
+OBJS-$(CONFIG_WEIGHTED_GREYEDGE_FILTER)  += vf_colorconstancy.o
 OBJS-$(CONFIG_XBR_FILTER)+= vf_xbr.o
 OBJS-$(CONFIG_XFADE_FILTER)  += vf_xfade.o
 OBJS-$(CONFIG_XFADE_OPENCL_FILTER)   += vf_xfade_opencl.o opencl.o 
opencl/xfade.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index f2a44b0090..ad2e07f9c5 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -432,6 +432,7 @@ extern AVFilter ff_vf_vstack;
 extern AVFilter ff_vf_w3fdif;
 extern AVFilter ff_vf_waveform;
 extern AVFilter ff_vf_weave;
+extern AVFilter ff_vf_weighted_greyedge;
 extern AVFilter ff_vf_xbr;
 extern AVFilter ff_vf_xfade;
 extern AVFilter ff_vf_xfade_opencl;
diff --git a/libavfilter/vf_colorconstancy.c b/libavfilter/vf_colorconstancy.c
index d36400bd35..e2e32b7ca3 100644
--- a/libavfilter/vf_colorconstancy.c
+++ b/libavfilter/vf_colorconstancy.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018 Mina Sami
+ * Copyright (c) 2020 Yatendra Singh
  *
  * This file is part of FFmpeg.
  *
@@ -26,6 +27,14 @@
  *
  * @cite
  * J. van de Weijer, Th. Gevers, A. Gijsenij "Edge-Based Color Constancy".
+ *
+ * @cite
+ * J. van de Weijer, Th. Gevers, and J. Geusebroek,
+ * “Edge and corner detection by photometric quasi-invariants”.
+ *
+ * @cite
+ * A. Gijsenij, Th. Gevers, J. van de Weijer,
+ * "Improving Color Constancy by Photometric Edge Weighting".
  */
 
 #include "libavutil/imgutils.h"
@@ -40,8 +49,10 @@
 #include 
 
 #define GREY_EDGE "greyedge"
+#define WEIGHTED_GREY_EDGE "weighted_greyedge"
 
 #define SQRT3 1.73205080757
+#define NORMAL_WHITE 1/SQRT3
 
 #define NUM_PLANES3
 #define MAX_DIFF_ORD  2
@@ -77,12 +88,16 @@ typedef struct ColorConstancyContext {
 
 int difford;
 int minknorm; /**< @minknorm = 0 : getMax instead */
+int kappa;
 double sigma;
 
 int nb_threads;
 int planeheight[4];
 int planewidth[4];
 
+double min_err;
+int max_iters;
+
 int filtersize;
 double *gauss[MAX_DIFF_ORD+1];
 
@@ -608,6 +623,162 @@ static void chromatic_adaptation(AVFilterContext *ctx, 
AVFrame *in, AVFrame *out
 ctx->internal->execute(ctx, diagonal_transformation, &td, NULL, nb_jobs);
 }
 
+/**
+ * Slice function for weighted grey edge algorithm that

Re: [FFmpeg-devel] [PATCH] [GSOC] libavfilter/vf_colorconstancy.c : Adding weighted greyedge

2020-04-18 Thread YATENDRA SINGH
>
> Not for the functions, rather for the use of the filter and its
> options. See doc/filters.texi.
>
I have updated the documentation accordingly.

Regards,
Yatendra Singh.

On Fri, Apr 17, 2020 at 12:51 PM Moritz Barsnick  wrote:

> On Thu, Apr 16, 2020 at 18:39:58 +0530, YATENDRA SINGH wrote:
> > > As Michael noted, please resend without broken line feeds. I can't read
> > > most of the diff the way it is now.
> > >
> > Sorry but I could not understand what broken by newlines mean. Can you
> > explain a little bit further?
>
> Your mailer introduced line wraps, making it impossible to apply the
> patch, and difficult to read.
>
> See what it looks like here:
> http://ffmpeg.org/pipermail/ffmpeg-devel/2020-April/260651.html
> (E.g. scroll to the bottom and check what stuff should be on one line,
> but isn't.)
>
> > > Documentation update missing (and eventually changelog).
> > >
> > Is this documentation supposed to be different from the autogenerated one
> > for the functions that I have placed?
>
> Not for the functions, rather for the use of the filter and its
> options. See doc/filters.texi.
>
> > Also git send-email is not working for some reason and I am always stuck
> on
> > the same SMTP error, so I have attached the patch updated based on the
> > suggestions.
>
> Yes, this patch is not corrupted anymore.
>
> Cheers,
> Moritz
> ___
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>
> To unsubscribe, visit link above, or email
> ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
From 465d4e4dff91167a362aa8f84ee4116cb7bd79c3 Mon Sep 17 00:00:00 2001
From: Yatendra Singh 
Date: Sat, 18 Apr 2020 13:31:03 +0530
Subject: [PATCH] libavfilter/vf_colorconstancy.c : Adding weighted greyedge

Signed-off-by: Yatendra Singh 
---
 doc/filters.texi|  34 
 libavfilter/Makefile|   1 +
 libavfilter/allfilters.c|   1 +
 libavfilter/vf_colorconstancy.c | 266 +++-
 4 files changed, 267 insertions(+), 35 deletions(-)

diff --git a/doc/filters.texi b/doc/filters.texi
index a4f99ef376..bde1dd3ce3 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -20070,6 +20070,40 @@ separatefields,select=eq(mod(n,4),0)+eq(mod(n,4),3),weave
 @end example
 @end itemize
 
+@section weighted_greyedge
+Apply the color constancy filter which estimates illumination and updates the
+image colors accordingly.
+
+It accepts the following options:
+
+@table @option
+@item difford
+The order of differentiation to be applied on the scene. Must be chosen in the range
+[0,2] and default value is 1.
+
+@item minknorm
+The Minkowski parameter to be used for calculating the Minkowski distance. Must
+be chosen in the range [0,20] and default value is 1. Set to 0 for getting
+max value instead of calculating Minkowski distance.
+
+@item sigma
+The standard deviation of Gaussian blur to be applied on the scene. Must be
+chosen in the range [0,1024.0] and default value = 1. floor( @var{sigma} * break_off_sigma(3) )
+can't be equal to 0 if @var{difford} is greater than 0.
+
+@item min_err
+The minimum angular error at which the algorithm breaks off even if it has not reached the
+required number of iterations. Must be chosen in the range [0.02,PI] radians with default of 0.1.
+
+@item max_iters
+The maximum number of iterations the algorithm performs before giving the output. Must be in the
+range [1,100] with a default value of 10.
+@end table
+
+@example
+ffmpeg -i 1.tif -vf "weighted_greyedge=difford=1:minknorm=2:sigma=2:max_iters=50" 1o.tif
+@end example
+
 @section xbr
 Apply the xBR high-quality magnification filter which is designed for pixel
 art. It follows a set of edge-detection rules, see
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index ecbc628868..ba546c32b0 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -448,6 +448,7 @@ OBJS-$(CONFIG_VSTACK_FILTER) += vf_stack.o framesync.o
 OBJS-$(CONFIG_W3FDIF_FILTER) += vf_w3fdif.o
 OBJS-$(CONFIG_WAVEFORM_FILTER)   += vf_waveform.o
 OBJS-$(CONFIG_WEAVE_FILTER)  += vf_weave.o
+OBJS-$(CONFIG_WEIGHTED_GREYEDGE_FILTER)  += vf_colorconstancy.o
 OBJS-$(CONFIG_XBR_FILTER)+= vf_xbr.o
 OBJS-$(CONFIG_XFADE_FILTER)  += vf_xfade.o
 OBJS-$(CONFIG_XFADE_OPENCL_FILTER)   += vf_xfade_opencl.o opencl.o opencl/xfade.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index fb32bef788..da2adbed21 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -427,6 +427,7 @@ extern AVFilter ff_vf_vstack;
 extern AVFilter ff_vf_w3fdif;
 extern AVFilter ff_vf_waveform

Re: [FFmpeg-devel] [PATCH] [GSOC] libavfilter/vf_colorconstancy.c : Adding weighted greyedge

2020-04-16 Thread YATENDRA SINGH
>
> As Michael noted, please resend without broken line feeds. I can't read
> most of the diff the way it is now.
>
> Sorry, but I could not understand what "broken line feeds" means. Can you
explain a little bit further?


> Documentation update missing (and eventually changelog).
>
Is this documentation supposed to be different from the autogenerated one
for the functions that I have placed?


> > +#if CONFIG_WEIGHTED_GREYEDGE_FILTER
> Shouldn't sections of your code also be disabled if this is not set,
> not only the options? And intermingled with #if CONFIG_GREYEDGE_FILTER?
>
> The specific parts of the code activated based on the filter name, and if
the filter is not set, wouldn't it be impossible to call that filter?

Also git send-email is not working for some reason and I am always stuck on
the same SMTP error, so I have attached the patch updated based on the
suggestions.
Please point out any mistakes I may have made as I do not have much
experience in submitting work through patches.

Regards,
Yatendra Singh.
From c19098133770e4ed59372d8f57fdc871723ac52c Mon Sep 17 00:00:00 2001
From: Yatendra Singh 
Date: Thu, 16 Apr 2020 18:22:55 +0530
Subject: [PATCH] libavfilter/vf_colorconstancy.c : Adding weighted greyedge

Signed-off-by: Yatendra Singh 
---
 libavfilter/Makefile|   1 +
 libavfilter/allfilters.c|   1 +
 libavfilter/vf_colorconstancy.c | 265 +++-
 3 files changed, 232 insertions(+), 35 deletions(-)

diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index ecbc628868..ba546c32b0 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -448,6 +448,7 @@ OBJS-$(CONFIG_VSTACK_FILTER) += vf_stack.o framesync.o
 OBJS-$(CONFIG_W3FDIF_FILTER) += vf_w3fdif.o
 OBJS-$(CONFIG_WAVEFORM_FILTER)   += vf_waveform.o
 OBJS-$(CONFIG_WEAVE_FILTER)  += vf_weave.o
+OBJS-$(CONFIG_WEIGHTED_GREYEDGE_FILTER)  += vf_colorconstancy.o
 OBJS-$(CONFIG_XBR_FILTER)+= vf_xbr.o
 OBJS-$(CONFIG_XFADE_FILTER)  += vf_xfade.o
 OBJS-$(CONFIG_XFADE_OPENCL_FILTER)   += vf_xfade_opencl.o opencl.o opencl/xfade.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index fb32bef788..da2adbed21 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -427,6 +427,7 @@ extern AVFilter ff_vf_vstack;
 extern AVFilter ff_vf_w3fdif;
 extern AVFilter ff_vf_waveform;
 extern AVFilter ff_vf_weave;
+extern AVFilter ff_vf_weighted_greyedge;
 extern AVFilter ff_vf_xbr;
 extern AVFilter ff_vf_xfade;
 extern AVFilter ff_vf_xfade_opencl;
diff --git a/libavfilter/vf_colorconstancy.c b/libavfilter/vf_colorconstancy.c
index eae62204b5..27a2fc264e 100644
--- a/libavfilter/vf_colorconstancy.c
+++ b/libavfilter/vf_colorconstancy.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018 Mina Sami
+ * Copyright (c) 2020 Yatendra Singh
  *
  * This file is part of FFmpeg.
  *
@@ -26,6 +27,14 @@
  *
  * @cite
  * J. van de Weijer, Th. Gevers, A. Gijsenij "Edge-Based Color Constancy".
+ *
+ * @cite
+ * J. van de Weijer, Th. Gevers, and J. Geusebroek,
+ * “Edge and corner detection by photometric quasi-invariants”.
+ *
+ * @cite
+ * A. Gijsenij, Th. Gevers, J. van de Weijer,
+ * "Improving Color Constancy by Photometric Edge Weighting".
  */
 
 #include "libavutil/imgutils.h"
@@ -40,8 +49,10 @@
 #include 
 
 #define GREY_EDGE "greyedge"
+#define WEIGHTED_GREY_EDGE "weighted_greyedge"
 
 #define SQRT3 1.73205080757
+#define NORAMAL_WHITE 1/SQRT3
 
 #define NUM_PLANES3
 #define MAX_DIFF_ORD  2
@@ -83,6 +94,11 @@ typedef struct ColorConstancyContext {
 int planeheight[4];
 int planewidth[4];
 
+double min_err;
+int max_iters;
+
+double *weight_info[2];
+
 int filtersize;
 double *gauss[MAX_DIFF_ORD+1];
 
@@ -552,32 +568,6 @@ static void normalize_light(double *light)
 }
 }
 
-/**
- * Redirects to corresponding algorithm estimation function and performs normalization
- * after estimation.
- *
- * @param ctx the filter context.
- * @param in frame to perfrom estimation on.
- *
- * @return 0 in case of success, a negative value corresponding to an
- * AVERROR code in case of failure.
- */
-static int illumination_estimation(AVFilterContext *ctx, AVFrame *in)
-{
-ColorConstancyContext *s = ctx->priv;
-int ret;
-
-ret = filter_grey_edge(ctx, in);
-
-av_log(ctx, AV_LOG_DEBUG, "Estimated illumination= %f %f %f\n",
-   s->white[0], s->white[1], s->white[2]);
-normalize_light(s->white);
-av_log(ctx, AV_LOG_DEBUG, "Estimated illumination after normalization= %f %f %f\n",
-   s->white[0], s->white[1], s->white[2]);
-
-return ret;
-}
-
 /**
  * Performs simple correction via diagonal transformation model.
  *
@@ -634,6 +624,162 @@ static void chromatic_adaptation(AVFilterCon

[FFmpeg-devel] [PATCH] [GSOC] libavfilter/vf_colorconstancy.c : Adding weighted greyedge

2020-04-14 Thread YATENDRA SINGH
Signed-off-by: Yatendra Singh 
---
 libavfilter/Makefile|   1 +
 libavfilter/allfilters.c|   1 +
 libavfilter/vf_colorconstancy.c | 265 +++-
 3 files changed, 232 insertions(+), 35 deletions(-)

diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index e6cfcd9487..c90aadae98 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -447,6 +447,7 @@ OBJS-$(CONFIG_VSTACK_FILTER) +=
vf_stack.o framesync.o
 OBJS-$(CONFIG_W3FDIF_FILTER) += vf_w3fdif.o
 OBJS-$(CONFIG_WAVEFORM_FILTER)   += vf_waveform.o
 OBJS-$(CONFIG_WEAVE_FILTER)  += vf_weave.o
+OBJS-$(CONFIG_WEIGHTED_GREYEDGE_FILTER)  += vf_colorconstancy.o
 OBJS-$(CONFIG_XBR_FILTER)+= vf_xbr.o
 OBJS-$(CONFIG_XFADE_FILTER)  += vf_xfade.o
 OBJS-$(CONFIG_XFADE_OPENCL_FILTER)   += vf_xfade_opencl.o opencl.o
opencl/xfade.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index 501e5d041b..594b497f0b 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -426,6 +426,7 @@ extern AVFilter ff_vf_vstack;
 extern AVFilter ff_vf_w3fdif;
 extern AVFilter ff_vf_waveform;
 extern AVFilter ff_vf_weave;
+extern AVFilter ff_vf_weighted_greyedge;
 extern AVFilter ff_vf_xbr;
 extern AVFilter ff_vf_xfade;
 extern AVFilter ff_vf_xfade_opencl;
diff --git a/libavfilter/vf_colorconstancy.c
b/libavfilter/vf_colorconstancy.c
index eae62204b5..49508ae65e 100644
--- a/libavfilter/vf_colorconstancy.c
+++ b/libavfilter/vf_colorconstancy.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018 Mina Sami
+ * Copyright (c) 2020 Yatendra Singh
  *
  * This file is part of FFmpeg.
  *
@@ -26,6 +27,14 @@
  *
  * @cite
  * J. van de Weijer, Th. Gevers, A. Gijsenij "Edge-Based Color Constancy".
+ *
+ * @cite
+ * J. van de Weijer, Th. Gevers, and J. Geusebroek,
+ * “Edge and corner detection by photometric quasi-invariants”.
+ *
+ * @cite
+ * A. Gijsenij, Th. Gevers, J. van de Weijer,
+ * "Improving Color Constancy by Photometric Edge Weighting".
  */

 #include "libavutil/imgutils.h"
@@ -40,8 +49,10 @@
 #include 

 #define GREY_EDGE "greyedge"
+#define WEIGHTED_GREY_EDGE "weighted_greyedge"

 #define SQRT3 1.73205080757
+#define NORAMAL_WHITE 1/SQRT3

 #define NUM_PLANES3
 #define MAX_DIFF_ORD  2
@@ -83,6 +94,11 @@ typedef struct ColorConstancyContext {
 int planeheight[4];
 int planewidth[4];

+double min_err;
+int max_iters;
+
+double *weight_info[2];
+
 int filtersize;
 double *gauss[MAX_DIFF_ORD+1];

@@ -552,32 +568,6 @@ static void normalize_light(double *light)
 }
 }

-/**
- * Redirects to corresponding algorithm estimation function and performs
normalization
- * after estimation.
- *
- * @param ctx the filter context.
- * @param in frame to perfrom estimation on.
- *
- * @return 0 in case of success, a negative value corresponding to an
- * AVERROR code in case of failure.
- */
-static int illumination_estimation(AVFilterContext *ctx, AVFrame *in)
-{
-ColorConstancyContext *s = ctx->priv;
-int ret;
-
-ret = filter_grey_edge(ctx, in);
-
-av_log(ctx, AV_LOG_DEBUG, "Estimated illumination= %f %f %f\n",
-   s->white[0], s->white[1], s->white[2]);
-normalize_light(s->white);
-av_log(ctx, AV_LOG_DEBUG, "Estimated illumination after normalization=
%f %f %f\n",
-   s->white[0], s->white[1], s->white[2]);
-
-return ret;
-}
-
 /**
  * Performs simple correction via diagonal transformation model.
  *
@@ -634,6 +624,162 @@ static void chromatic_adaptation(AVFilterContext
*ctx, AVFrame *in, AVFrame *out
 ctx->internal->execute(ctx, diagonal_transformation, &td, NULL,
nb_jobs);
 }

+/**
+ * Slice function for weighted grey edge algorithm that does partial
summing/maximizing
+ * of gaussian derivatives.
+ *
+ * @param ctx the filter context.
+ * @param arg data to be passed between threads.
+ * @param jobnr current job number.
+ * @param nb_jobs total number of jobs.
+ *
+ * @return 0.
+ */
+static int filter_slice_weighted_greyedge(AVFilterContext* ctx, void* arg,
int jobnr, int nb_jobs)
+{
+ColorConstancyContext *s = ctx->priv;
+ThreadData *td = arg;
+AVFrame *in= td->in;
+int minknorm   = s->minknorm;
+const uint8_t thresh = 255;
+int plane;
+
+int height_max  = FFMAX3(s->planeheight[0], s->planeheight[1],
s->planeheight[2]);
+int width_max   = FFMAX3(s->planewidth[0], s->planewidth[1],
s->planewidth[2]);
+
+memset(s->weight_info[0], 0, height_max * width_max * sizeof(double));
+memset(s->weight_info[1], 0, height_max * width_max * sizeof(double));
+
+for (plane = 0; plane < NUM_PLANES; plane++)
+{
+const int height= s->planeheight[plane];
+const int width = s->planewidth[plane];
+const int in_line

Re: [FFmpeg-devel] Regarding GSoC 2020 project proposal

2020-03-04 Thread YATENDRA SINGH
Thank you for explaining the procedure.
I have posted my own project proposal on the page you had instructed me to.
Looking forward to the feedback.

Regards,
Yatendra Singh.

On Tue, Mar 3, 2020 at 10:19 PM Pedro Arthur  wrote:

> Hi
>
> Em ter., 3 de mar. de 2020 às 09:24, YATENDRA SINGH
>  escreveu:
> >
> > Hi,
> > I am a third year CSE student at the Indian Institute of Technology
> Bhilai,
> > and would like to contribute to ffmpeg this year. I have
> > relevant experience with Machine Learning and would like to work on
> > improving the video frame interpolation already implemented. With such a
> > plethora of great Machine Learning Algorithms being published every year
> at
> > prestigious conferences I would aim to read the relevant academic papers
> > and implement the best suited technique for the task. For example, Depth
> > Aware Video Frame Interpolation (DAIN CVPR-2019) is supposedly the state
> of
> > the art method on Vimeo90k and MiddleBury
> > <https://paperswithcode.com/task/video-frame-interpolation> but at the
> same
> > time Frame Interpolation with Generative Adversarial Network(FIGAN), uses
> > not CNN but multi-scale synthesis( MS ) to get higher speeds.
> > Looking forward to hearing from you soon.
> >
> > Yatendra SIngh
> > Frame Interpolation with Multi-Scale Deep Loss Functions and Generative
> > Adversarial NetworksFrame Interpolation with Multi-Scale Deep Loss
> > Functions and Generative Adversarial NetworksFrame Interpolation with
> > Multi-Scale Deep Loss Functions and Generative Adversarial Networks
>
> I suppose this project is your own idea as it is not listed in the
> projects page, right?
>
> I think it would be good to add your idea under "Your Own Project Idea"
> section in [1] adding as much information as possible so that we can
> evaluate your idea and possibly assign a mentor / backup mentor.
> A few things I think are important to evaluate your project are:
> *have a well defined "expected result", will it be a filter? or
> something else? we already have a dnn module and a dnn_processing
> filter, will your project be using it?
>
> *what is the amount of work that will be done during the project, more
> or less this is related to above "expected result"
>
> *define a qualification task, we can discuss it after the above is defined
>
> *sell your idea (not strictly necessary but may help evaluating your
> project), why is it useful feature to have, what improvements it
> brings, etc
>
> [1] -
> https://trac.ffmpeg.org/wiki/SponsoringPrograms/GSoC/2020#YourOwnProjectIdea
> ___
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>
> To unsubscribe, visit link above, or email
> ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

Re: [FFmpeg-devel] Regarding GSoC 2020 project proposal

2020-03-03 Thread YATENDRA SINGH
Sorry for unintended bold words ending the previous mail.

On Tue, Mar 3, 2020 at 5:53 PM YATENDRA SINGH 
wrote:

> Hi,
> I am a third year CSE student at the Indian Institute of Technology
> Bhilai, and would like to contribute to ffmpeg this year. I have
> relevant experience with Machine Learning and would like to work on
> improving the video frame interpolation already implemented. With such a
> plethora of great Machine Learning Algorithms being published every year at
> prestigious conferences I would aim to read the relevant academic papers
> and implement the best suited technique for the task. For example, Depth
> Aware Video Frame Interpolation (DAIN CVPR-2019) is supposedly the state of
> the art method on Vimeo90k and MiddleBury
> <https://paperswithcode.com/task/video-frame-interpolation> but at the
> same time Frame Interpolation with Generative Adversarial Network(FIGAN),
> uses not CNN but multi-scale synthesis( MS ) to get higher speeds.
> Looking forward to hearing from you soon.
>
> Yatendra SIngh
> Frame Interpolation with Multi-Scale Deep Loss Functions and Generative
> Adversarial NetworksFrame Interpolation with Multi-Scale Deep Loss
> Functions and Generative Adversarial NetworksFrame Interpolation with
> Multi-Scale Deep Loss Functions and Generative Adversarial Networks
>
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".

[FFmpeg-devel] Regarding GSoC 2020 project proposal

2020-03-03 Thread YATENDRA SINGH
Hi,
I am a third year CSE student at the Indian Institute of Technology Bhilai,
and would like to contribute to ffmpeg this year. I have
relevant experience with Machine Learning and would like to work on
improving the video frame interpolation already implemented. With such a
plethora of great Machine Learning Algorithms being published every year at
prestigious conferences I would aim to read the relevant academic papers
and implement the best suited technique for the task. For example, Depth
Aware Video Frame Interpolation (DAIN CVPR-2019) is supposedly the state of
the art method on Vimeo90k and MiddleBury
<https://paperswithcode.com/task/video-frame-interpolation> but at the same
time Frame Interpolation with Generative Adversarial Network(FIGAN), uses
not CNN but multi-scale synthesis( MS ) to get higher speeds.
Looking forward to hearing from you soon.

Yatendra SIngh
Frame Interpolation with Multi-Scale Deep Loss Functions and Generative
Adversarial NetworksFrame Interpolation with Multi-Scale Deep Loss
Functions and Generative Adversarial NetworksFrame Interpolation with
Multi-Scale Deep Loss Functions and Generative Adversarial Networks
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".