[FFmpeg-devel] [PATCH 3/4] libavcodec: v4l2m2m: adjust formatting

2018-08-03 Thread Lukas Rusak
Just some simple formatting fixes that unify the code style.
---
 libavcodec/v4l2_buffers.c | 23 +++
 libavcodec/v4l2_buffers.h |  1 -
 2 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/libavcodec/v4l2_buffers.c b/libavcodec/v4l2_buffers.c
index e5c46ac81e..897c3c4636 100644
--- a/libavcodec/v4l2_buffers.c
+++ b/libavcodec/v4l2_buffers.c
@@ -401,7 +401,8 @@ static int v4l2_bufref_to_buf(V4L2Buffer *out, int plane, const uint8_t* data, i
     bytesused = FFMIN(size, out->plane_info[plane].length);
     length = out->plane_info[plane].length;
 
-    memcpy(out->plane_info[plane].mm_addr, data, FFMIN(size, out->plane_info[plane].length));
+    memcpy(out->plane_info[plane].mm_addr, data,
+           FFMIN(size, out->plane_info[plane].length));
 
 if (V4L2_TYPE_IS_MULTIPLANAR(out->buf.type)) {
 out->planes[plane].bytesused = bytesused;
@@ -425,7 +426,10 @@ int ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer* out)
     int i, ret;
 
     for(i = 0; i < out->num_planes; i++) {
-        ret = v4l2_bufref_to_buf(out, i, frame->buf[i]->data, frame->buf[i]->size, frame->buf[i]);
+        ret = v4l2_bufref_to_buf(out, i,
+                                 frame->buf[i]->data,
+                                 frame->buf[i]->size,
+                                 frame->buf[i]);
         if (ret)
             return ret;
     }
@@ -480,8 +484,8 @@ int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf)
     /* 2. get frame information */
     frame->key_frame = !!(avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME);
     frame->color_primaries = v4l2_get_color_primaries(avbuf);
-    frame->colorspace = v4l2_get_color_space(avbuf);
     frame->color_range = v4l2_get_color_range(avbuf);
+    frame->colorspace = v4l2_get_color_space(avbuf);
     frame->color_trc = v4l2_get_color_trc(avbuf);
     frame->pts = v4l2_get_pts(avbuf);
 
@@ -507,7 +511,8 @@ int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf)
     if (ret)
         return ret;
 
-    pkt->size = V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type) ? avbuf->buf.m.planes[0].bytesused : avbuf->buf.bytesused;
+    pkt->size = V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type) ?
+                avbuf->buf.m.planes[0].bytesused : avbuf->buf.bytesused;
     pkt->data = pkt->buf->data;
 
 if (avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME)
@@ -563,6 +568,7 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index)
 /* in MP, the V4L2 API states that buf.length means num_planes */
 if (avbuf->num_planes >= avbuf->buf.length)
 break;
+
 if (avbuf->buf.m.planes[avbuf->num_planes].length)
 avbuf->num_planes++;
 }
@@ -579,12 +585,14 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index)
             avbuf->plane_info[i].length = avbuf->buf.m.planes[i].length;
             avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.m.planes[i].length,
                                                 PROT_READ | PROT_WRITE, MAP_SHARED,
-                                                buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.planes[i].m.mem_offset);
+                                                buf_to_m2mctx(avbuf)->fd,
+                                                avbuf->buf.m.planes[i].m.mem_offset);
         } else {
             avbuf->plane_info[i].length = avbuf->buf.length;
             avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.length,
                                                 PROT_READ | PROT_WRITE, MAP_SHARED,
-                                                buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.offset);
+                                                buf_to_m2mctx(avbuf)->fd,
+                                                avbuf->buf.m.offset);
         }
 
 if (avbuf->plane_info[i].mm_addr == MAP_FAILED)
@@ -594,9 +602,8 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index)
 avbuf->status = V4L2BUF_AVAILABLE;
 
 if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
-avbuf->buf.m.planes = avbuf->planes;
 avbuf->buf.length   = avbuf->num_planes;
-
+avbuf->buf.m.planes = avbuf->planes;
 } else {
 avbuf->buf.bytesused = avbuf->planes[0].bytesused;
 avbuf->buf.length= avbuf->planes[0].length;
diff --git a/libavcodec/v4l2_buffers.h b/libavcodec/v4l2_buffers.h
index a8a50ecc65..c609a6c676 100644
--- a/libavcodec/v4l2_buffers.h
+++ b/libavcodec/v4l2_buffers.h
@@ -131,5 +131,4 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index);
  */
 int ff_v4l2_buffer_enqueue(V4L2Buffer* avbuf);
 
-
 #endif // AVCODEC_V4L2_BUFFERS_H
-- 
2.17.1



[FFmpeg-devel] [PATCH 3/4] avcodec/ivi: Only clip samples when needed in ivi_output_plane()

2018-08-03 Thread Michael Niedermayer
435740 -> 396078 dezicycles

Signed-off-by: Michael Niedermayer 
---
 libavcodec/ivi.c | 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/libavcodec/ivi.c b/libavcodec/ivi.c
index cea40d82ca..ebb7f03007 100644
--- a/libavcodec/ivi.c
+++ b/libavcodec/ivi.c
@@ -913,8 +913,15 @@ static void ivi_output_plane(IVIPlaneDesc *plane, uint8_t *dst, ptrdiff_t dst_pi
 return;
 
 for (y = 0; y < plane->height; y++) {
-for (x = 0; x < plane->width; x++)
-dst[x] = av_clip_uint8(src[x] + 128);
+int m = 0;
+for (x = 0; x < plane->width; x++) {
+int t = src[x] + 128;
+dst[x] = t;
+m |= t;
+}
+if (m & ~255)
+for (x = 0; x < plane->width; x++)
+dst[x] = av_clip_uint8(src[x] + 128);
 src += pitch;
 dst += dst_pitch;
 }
-- 
2.18.0
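
For clarity, the trick in this patch: every shifted sample is stored unclipped while being OR-accumulated into m; any value outside 0..255 sets a bit above bit 7, so `m & ~255` is nonzero and the clipping pass runs only for rows that actually need it. A standalone sketch of the same idea (illustrative C, not the ivi.c code itself):

    #include <stdint.h>

    static inline uint8_t clip_uint8(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }

    static void shift_row(uint8_t *dst, const int16_t *src, int w)
    {
        int m = 0;
        for (int x = 0; x < w; x++) {
            int t = src[x] + 128;
            dst[x] = t;   /* truncating store, correct when t is in range */
            m |= t;       /* out-of-range values set bits above bit 7 */
        }
        if (m & ~255)     /* at least one sample needs clipping */
            for (int x = 0; x < w; x++)
                dst[x] = clip_uint8(src[x] + 128);
    }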



[FFmpeg-devel] [PATCH 4/4] avcodec/ivi: Factor width dereference out of the loops in ivi_output_plane()

2018-08-03 Thread Michael Niedermayer
396078 -> 268468 dezicycles

Signed-off-by: Michael Niedermayer 
---
 libavcodec/ivi.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/libavcodec/ivi.c b/libavcodec/ivi.c
index ebb7f03007..b23d4af27e 100644
--- a/libavcodec/ivi.c
+++ b/libavcodec/ivi.c
@@ -914,13 +914,14 @@ static void ivi_output_plane(IVIPlaneDesc *plane, uint8_t *dst, ptrdiff_t dst_pi
 
 for (y = 0; y < plane->height; y++) {
 int m = 0;
-for (x = 0; x < plane->width; x++) {
+int w = plane->width;
+for (x = 0; x < w; x++) {
 int t = src[x] + 128;
 dst[x] = t;
 m |= t;
 }
 if (m & ~255)
-for (x = 0; x < plane->width; x++)
+for (x = 0; x < w; x++)
 dst[x] = av_clip_uint8(src[x] + 128);
 src += pitch;
 dst += dst_pitch;
-- 
2.18.0



[FFmpeg-devel] [PATCH 2/4] avcodec/indeo5: require initial valid intra/gop headers not just later

2018-08-03 Thread Michael Niedermayer
Fixes: Timeout
Fixes: 9308/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_INDEO5_fuzzer-5284853581873152

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer 
---
 libavcodec/indeo5.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/libavcodec/indeo5.c b/libavcodec/indeo5.c
index b39cffd9a9..7b9da53df4 100644
--- a/libavcodec/indeo5.c
+++ b/libavcodec/indeo5.c
@@ -642,6 +642,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
 IVI45DecContext  *ctx = avctx->priv_data;
 int result;
 
+ctx->gop_invalid = 1;
+
 ff_ivi_init_static_vlc();
 
 /* copy rvmap tables in our context so we can apply changes to them */
-- 
2.18.0



[FFmpeg-devel] [PATCH 1/4] avcodec/microdvddec: limit style characters in parsing

2018-08-03 Thread Michael Niedermayer
Fixes: Timeout
Fixes: 9293/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_MICRODVD_fuzzer-5643972541153280

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer 
---
 libavcodec/microdvddec.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libavcodec/microdvddec.c b/libavcodec/microdvddec.c
index 4a34267793..dad0ec8a22 100644
--- a/libavcodec/microdvddec.c
+++ b/libavcodec/microdvddec.c
@@ -99,7 +99,7 @@ static char *microdvd_load_tags(struct microdvd_tag *tags, char *s)
 case 'Y':
 tag.persistent = MICRODVD_PERSISTENT_ON;
 case 'y':
-while (*s && *s != '}') {
+while (*s && *s != '}' && s - start < 256) {
 int style_index = indexof(MICRODVD_STYLES, *s);
 
 if (style_index >= 0)
-- 
2.18.0



[FFmpeg-devel] [PATCH 4/4] libavcodec: v4l2m2m: fix error handling during buffer init

2018-08-03 Thread Lukas Rusak
From: Jorge Ramirez-Ortiz 

Signed-off-by: Jorge Ramirez-Ortiz 
---
 libavcodec/v4l2_context.c | 19 ---
 libavcodec/v4l2_m2m_dec.c |  9 +++--
 2 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/libavcodec/v4l2_context.c b/libavcodec/v4l2_context.c
index 9457fadb1e..fd3161ce2f 100644
--- a/libavcodec/v4l2_context.c
+++ b/libavcodec/v4l2_context.c
@@ -263,6 +263,12 @@ static V4L2Buffer* v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout)
     /* if we are draining and there are no more capture buffers queued in the driver we are done */
     if (!V4L2_TYPE_IS_OUTPUT(ctx->type) && ctx_to_m2mctx(ctx)->draining) {
         for (i = 0; i < ctx->num_buffers; i++) {
+            /* capture buffer initialization happens during decode hence
+             * detection happens at runtime
+             */
+            if (!ctx->buffers)
+                break;
+
             if (ctx->buffers[i].status == V4L2BUF_IN_DRIVER)
                 goto start;
         }
@@ -724,9 +730,8 @@ int ff_v4l2_context_init(V4L2Context* ctx)
         ctx->buffers[i].context = ctx;
         ret = ff_v4l2_buffer_initialize(&ctx->buffers[i], i);
         if (ret < 0) {
-            av_log(logger(ctx), AV_LOG_ERROR, "%s buffer initialization (%s)\n", ctx->name, av_err2str(ret));
-            av_free(ctx->buffers);
-            return ret;
+            av_log(logger(ctx), AV_LOG_ERROR, "%s buffer[%d] initialization (%s)\n", ctx->name, i, av_err2str(ret));
+            goto error;
         }
     }
 
@@ -739,4 +744,12 @@ int ff_v4l2_context_init(V4L2Context* ctx)
                                  V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].bytesperline : ctx->format.fmt.pix.bytesperline);
 
 return 0;
+
+error:
+v4l2_release_buffers(ctx);
+
+av_free(ctx->buffers);
+ctx->buffers = NULL;
+
+return ret;
 }
diff --git a/libavcodec/v4l2_m2m_dec.c b/libavcodec/v4l2_m2m_dec.c
index 29d894492f..c4f4f7837f 100644
--- a/libavcodec/v4l2_m2m_dec.c
+++ b/libavcodec/v4l2_m2m_dec.c
@@ -92,8 +92,8 @@ static int v4l2_try_start(AVCodecContext *avctx)
 if (!capture->buffers) {
 ret = ff_v4l2_context_init(capture);
 if (ret) {
-av_log(avctx, AV_LOG_DEBUG, "can't request output buffers\n");
-return ret;
+av_log(avctx, AV_LOG_ERROR, "can't request capture buffers\n");
+return AVERROR(ENOMEM);
 }
 }
 
@@ -157,6 +157,11 @@ static int v4l2_receive_frame(AVCodecContext *avctx, AVFrame *frame)
         ret = v4l2_try_start(avctx);
         if (ret) {
             av_packet_unref(&avpkt);
+
+            /* can't recover */
+            if (ret == AVERROR(ENOMEM))
+                return ret;
+
             return 0;
         }
     }
-- 
2.17.1



[FFmpeg-devel] [PATCH 1/4] libavcodec: v4l2m2m: fix indentation and add M2MDEC_CLASS

2018-08-03 Thread Lukas Rusak
This just makes the M2MDEC_CLASS similar to how it is done in rkmpp. It looks
clean and has proper indentation.
---
 libavcodec/v4l2_m2m_dec.c | 46 ---
 1 file changed, 24 insertions(+), 22 deletions(-)

diff --git a/libavcodec/v4l2_m2m_dec.c b/libavcodec/v4l2_m2m_dec.c
index 710e40efd8..7926e25efa 100644
--- a/libavcodec/v4l2_m2m_dec.c
+++ b/libavcodec/v4l2_m2m_dec.c
@@ -205,29 +205,31 @@ static const AVOption options[] = {
 { NULL},
 };
 
+#define M2MDEC_CLASS(NAME) \
+static const AVClass v4l2_m2m_ ## NAME ## _dec_class = { \
+.class_name = #NAME "_v4l2_m2m_decoder", \
+.item_name  = av_default_item_name, \
+.option = options, \
+.version= LIBAVUTIL_VERSION_INT, \
+};
+
 #define M2MDEC(NAME, LONGNAME, CODEC, bsf_name) \
-static const AVClass v4l2_m2m_ ## NAME ## _dec_class = {\
-    .class_name = #NAME "_v4l2_m2m_decoder",\
-    .item_name  = av_default_item_name,\
-    .option     = options,\
-    .version    = LIBAVUTIL_VERSION_INT,\
-};\
-\
-AVCodec ff_ ## NAME ## _v4l2m2m_decoder = { \
-    .name           = #NAME "_v4l2m2m" ,\
-    .long_name      = NULL_IF_CONFIG_SMALL("V4L2 mem2mem " LONGNAME " decoder wrapper"),\
-    .type           = AVMEDIA_TYPE_VIDEO,\
-    .id             = CODEC ,\
-    .priv_data_size = sizeof(V4L2m2mPriv),\
-    .priv_class     = &v4l2_m2m_ ## NAME ## _dec_class,\
-    .init           = v4l2_decode_init,\
-    .receive_frame  = v4l2_receive_frame,\
-    .close          = ff_v4l2_m2m_codec_end,\
-    .bsfs           = bsf_name, \
-    .capabilities   = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY | \
-                      AV_CODEC_CAP_AVOID_PROBING, \
-    .wrapper_name   = "v4l2m2m", \
-};
+M2MDEC_CLASS(NAME) \
+AVCodec ff_ ## NAME ## _v4l2m2m_decoder = { \
+    .name           = #NAME "_v4l2m2m" , \
+    .long_name      = NULL_IF_CONFIG_SMALL("V4L2 mem2mem " LONGNAME " decoder wrapper"), \
+    .type           = AVMEDIA_TYPE_VIDEO, \
+    .id             = CODEC , \
+    .priv_data_size = sizeof(V4L2m2mPriv), \
+    .priv_class     = &v4l2_m2m_ ## NAME ## _dec_class, \
+    .init           = v4l2_decode_init, \
+    .receive_frame  = v4l2_receive_frame, \
+    .close          = ff_v4l2_m2m_codec_end, \
+    .bsfs           = bsf_name, \
+    .capabilities   = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY | \
+                      AV_CODEC_CAP_AVOID_PROBING, \
+    .wrapper_name   = "v4l2m2m", \
+};
 
 M2MDEC(h264,  "H.264", AV_CODEC_ID_H264,   "h264_mp4toannexb");
 M2MDEC(hevc,  "HEVC",  AV_CODEC_ID_HEVC,   "hevc_mp4toannexb");
-- 
2.17.1



[FFmpeg-devel] [PATCH 2/4] libavcodec: v4l2m2m: output AVDRMFrameDescriptor

2018-08-03 Thread Lukas Rusak
This allows for a zero-copy output by exporting the v4l2 buffer then wrapping 
that buffer
in the AVDRMFrameDescriptor like it is done in rkmpp.

This has been in use for quite some time with great success on many platforms 
including:
 - Amlogic S905
 - Raspberry Pi
 - i.MX6
 - Dragonboard 410c

This was developed in conjunction with Kodi to allow handling the zero-copy
buffer rendering. A simple utility for testing is also available here:
https://github.com/BayLibre/ffmpeg-drm

todo:
 - allow selecting pixel format output from decoder
 - allow configuring amount of output and capture buffers

V2:
 - allow selecting AV_PIX_FMT_DRM_PRIME

V3:
 - use get_format to select AV_PIX_FMT_DRM_PRIME
 - use hw_configs
 - add handling of AV_PIX_FMT_YUV420P format (for raspberry pi)
 - add handling of AV_PIX_FMT_YUYV422 format (for i.MX6 coda decoder)
---
 libavcodec/v4l2_buffers.c | 216 --
 libavcodec/v4l2_buffers.h |   4 +
 libavcodec/v4l2_context.c |  40 ++-
 libavcodec/v4l2_m2m.c |   4 +-
 libavcodec/v4l2_m2m.h |   3 +
 libavcodec/v4l2_m2m_dec.c |  23 
 6 files changed, 253 insertions(+), 37 deletions(-)

diff --git a/libavcodec/v4l2_buffers.c b/libavcodec/v4l2_buffers.c
index aef911f3bb..e5c46ac81e 100644
--- a/libavcodec/v4l2_buffers.c
+++ b/libavcodec/v4l2_buffers.c
@@ -21,6 +21,7 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+#include 
 #include 
 #include 
 #include 
@@ -29,6 +30,7 @@
 #include 
 #include "libavcodec/avcodec.h"
 #include "libavcodec/internal.h"
+#include "libavutil/hwcontext.h"
 #include "v4l2_context.h"
 #include "v4l2_buffers.h"
 #include "v4l2_m2m.h"
@@ -203,7 +205,79 @@ static enum AVColorTransferCharacteristic v4l2_get_color_trc(V4L2Buffer *buf)
     return AVCOL_TRC_UNSPECIFIED;
 }
 
-static void v4l2_free_buffer(void *opaque, uint8_t *unused)
+static uint8_t * v4l2_get_drm_frame(V4L2Buffer *avbuf)
+{
+    AVDRMFrameDescriptor *drm_desc = &avbuf->drm_frame;
+AVDRMLayerDescriptor *layer;
+
+/* fill the DRM frame descriptor */
+drm_desc->nb_objects = avbuf->num_planes;
+drm_desc->nb_layers = 1;
+
+    layer = &drm_desc->layers[0];
+layer->nb_planes = avbuf->num_planes;
+
+for (int i = 0; i < avbuf->num_planes; i++) {
+layer->planes[i].object_index = i;
+layer->planes[i].offset = 0;
+layer->planes[i].pitch = avbuf->plane_info[i].bytesperline;
+}
+
+switch (avbuf->context->av_pix_fmt) {
+case AV_PIX_FMT_YUYV422:
+
+layer->format = DRM_FORMAT_YUYV;
+layer->nb_planes = 1;
+
+break;
+
+case AV_PIX_FMT_NV12:
+case AV_PIX_FMT_NV21:
+
+layer->format = avbuf->context->av_pix_fmt == AV_PIX_FMT_NV12 ?
+DRM_FORMAT_NV12 : DRM_FORMAT_NV21;
+
+if (avbuf->num_planes > 1)
+break;
+
+layer->nb_planes = 2;
+
+layer->planes[1].object_index = 0;
+layer->planes[1].offset = avbuf->plane_info[0].bytesperline *
+avbuf->context->format.fmt.pix.height;
+layer->planes[1].pitch = avbuf->plane_info[0].bytesperline;
+break;
+
+case AV_PIX_FMT_YUV420P:
+
+layer->format = DRM_FORMAT_YUV420;
+
+if (avbuf->num_planes > 1)
+break;
+
+layer->nb_planes = 3;
+
+layer->planes[1].object_index = 0;
+layer->planes[1].offset = avbuf->plane_info[0].bytesperline *
+avbuf->context->format.fmt.pix.height;
+layer->planes[1].pitch = avbuf->plane_info[0].bytesperline >> 1;
+
+layer->planes[2].object_index = 0;
+layer->planes[2].offset = layer->planes[1].offset +
+((avbuf->plane_info[0].bytesperline *
+  avbuf->context->format.fmt.pix.height) >> 2);
+layer->planes[2].pitch = avbuf->plane_info[0].bytesperline >> 1;
+break;
+
+default:
+drm_desc->nb_layers = 0;
+break;
+}
+
+return (uint8_t *) drm_desc;
+}
+
+static void v4l2_free_buffer(void *opaque, uint8_t *data)
 {
 V4L2Buffer* avbuf = opaque;
 V4L2m2mContext *s = buf_to_m2mctx(avbuf);
@@ -227,27 +301,47 @@ static void v4l2_free_buffer(void *opaque, uint8_t *unused)
 }
 }
 
-static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf)
+static int v4l2_buffer_export_drm(V4L2Buffer* avbuf)
 {
-V4L2m2mContext *s = buf_to_m2mctx(in);
+struct v4l2_exportbuffer expbuf;
+int i, ret;
 
-if (plane >= in->num_planes)
-return AVERROR(EINVAL);
+for (i = 0; i < avbuf->num_planes; i++) {
+        memset(&expbuf, 0, sizeof(expbuf));
 
-    /* even though most encoders return 0 in data_offset encoding vp8 does require this value */
-    *buf = av_buffer_create((char *)in->plane_info[plane].mm_addr + in->planes[plane].data_offset,
-                            in->plane_info[plane].length, v4l2_free_buffer, in, 0);
-if (!*buf)
-return AVERROR(ENOMEM);
+expbuf.index = avbuf->buf.index;
+expbuf.type = 

Re: [FFmpeg-devel] [PATCH 4/7] Adds gray floating-point pixel formats.

2018-08-03 Thread Sergey Lavrushkin
2018-08-04 0:11 GMT+03:00 Michael Niedermayer :

> On Fri, Aug 03, 2018 at 10:33:00PM +0300, Sergey Lavrushkin wrote:
> > 2018-08-03 16:07 GMT+03:00 Michael Niedermayer :
> >
> > > On Thu, Aug 02, 2018 at 09:52:45PM +0300, Sergey Lavrushkin wrote:
> > > > This patch adds two floating-point gray formats to use them in sr
> filter
> > > for
> > > > conversion with libswscale. I added conversion from uint gray to
> float
> > > and
> > > > backwards in swscale_unscaled.c, that is enough for sr filter. But
> for
> > > > proper format addition, should I add anything else?
> > > >
> > > > ---
> > > >  libavutil/pixdesc.c   | 22 ++
> > > >  libavutil/pixfmt.h|  5 
> > > >  libswscale/swscale_internal.h |  7 ++
> > > >  libswscale/swscale_unscaled.c | 54 +++++--
> > > >  libswscale/utils.c|  5 +++-
> > >
> > > please split this in a patch or libavutil and one for libswscale
> > > they also need some version.h bump
> > >
> >
> > Ok.
> >
> > > also fate tests need an update, (make fate) fails otherwise, the update
> > > should be part of the patch that causes the failure otherwise
> >
> >
> > In one test for these formats I get:
> >
> > filter-pixfmts-scale
> > grayf32be   grayf32le   monob
> >  f01cb0b623357387827902d9d0963435
> >
> > I guess, it is because I only implemented conversion in swscale_unscaled.
> > What can I do to fix it? Should I implement conversion for scaling or maybe
> > change something in the test, so it would not check these formats (if it is
> > possible).
> > Anyway, I need to know what changes I should make and where.
>
> well, swscale shouldn't really have formats only half supported,
> so for any supported format in and out it should work with any
> width / height in / out
>
> There's a wide range of possibilities how to implement this.
> The correct / ideal way is of course to implement a full floating point
> path for scaling alongside the integer code.
> A simpler approach would be to convert from/to float to/from integers and
> use the existing code. (this of course has the disadvantage of losing
> precision)
>

Well, I want to implement the simpler approach, as I still have to finish
correcting the sr filter.
But I need some explanation regarding what I should add. If I understand
correctly, I need to add conversion from float to the ff_sws_init_input_funcs
function in libavswscale/input.c and conversion to float to the
ff_sws_init_output_funcs function in libswscale/output.c.
If I am not mistaken, in the first case I need to provide c->lumToYV12, and in
the second case yuv2plane1 and yuv2planeX. So, in the first case, to what
format should I add conversion, specifically what number of bits per pixel
should be used? As I look through other conversion functions, it seems that
somewhere uint8 is used and somewhere uint16.
Is it somehow determined later during scaling? If I am going to convert to
uint8 from my float format, should I define somewhere that I am converting to
uint8?
And in the second case, I don't completely understand what these two functions
are doing, especially the last one with filters. Are these also just simple
conversions, or do these functions cover something else? And in their
descriptions it is written that:

 * @param src  scaled source data, 15 bits for 8-10-bit output,
 *             19 bits for 16-bit output (in int32_t)
 * @param dest pointer to the output plane. For >8-bit
 *             output, this is in uint16_t

In my case, the output is 32-bit. Does this mean that the float type is
basically not supported and I also have to modify something in scaling? If so,
what should I add?



> [...]
> > > +const uint8_t *srcPtr = src[0];
> > > > +float *dstPtr = (float *)(dst[0] + dstStride[0] * srcSliceY);
> > > > +
> > > > +for (y = 0; y < srcSliceH; ++y){
> > > > +for (x = 0; x < c->srcW; ++x){
> > > > +dstPtr[x] = (float)srcPtr[x] / 255.0f;
> > >
> > > division is slow. This should either be a multiplication with the
> > > inverse or a LUT with 8bit index changing to float.
> > >
> > > The faster of them should be used
> > >
> >
> > LUT seems to be faster. Can I place it in SwsContext and initialize it in
> > sws_init_context when necessary?
>
> yes of course
>
> thanks
>
>
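
For illustration, a minimal sketch of the LUT approach discussed above, for 8-bit gray to float (the names and placement are illustrative, not the final swscale integration):

    #include <stddef.h>
    #include <stdint.h>

    /* 256-entry table built once at init; the per-pixel conversion then
     * needs no division (or even multiplication) at all */
    static float gray8_to_float_lut[256];

    static void init_gray8_to_float_lut(void)
    {
        for (int i = 0; i < 256; i++)
            gray8_to_float_lut[i] = i * (1.0f / 255.0f);
    }

    static void gray8_to_grayf32(const uint8_t *src, ptrdiff_t src_linesize,
                                 float *dst, ptrdiff_t dst_linesize,
                                 int w, int h)
    {
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++)
                dst[x] = gray8_to_float_lut[src[x]];
            src += src_linesize;
            dst += dst_linesize / sizeof(*dst); /* linesize is in bytes */
        }
    }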


Re: [FFmpeg-devel] [PATCH 4/7] Adds gray floating-point pixel formats.

2018-08-03 Thread Michael Niedermayer
On Fri, Aug 03, 2018 at 10:33:00PM +0300, Sergey Lavrushkin wrote:
> 2018-08-03 16:07 GMT+03:00 Michael Niedermayer :
> 
> > On Thu, Aug 02, 2018 at 09:52:45PM +0300, Sergey Lavrushkin wrote:
> > > This patch adds two floating-point gray formats to use them in sr filter
> > for
> > > conversion with libswscale. I added conversion from uint gray to float
> > and
> > > backwards in swscale_unscaled.c, that is enough for sr filter. But for
> > > proper format addition, should I add anything else?
> > >
> > > ---
> > >  libavutil/pixdesc.c   | 22 ++
> > >  libavutil/pixfmt.h|  5 
> > >  libswscale/swscale_internal.h |  7 ++
> > >  libswscale/swscale_unscaled.c | 54 +++++--
> > >  libswscale/utils.c|  5 +++-
> >
> > please split this in a patch or libavutil and one for libswscale
> > they also need some version.h bump
> >
> 
> Ok.
> 
> > also fate tests need an update, (make fate) fails otherwise, the update
> > should be part of the patch that causes the failure otherwise
> 
> 
> In one test for these formats I get:
> 
> filter-pixfmts-scale
> grayf32be   grayf32le   monob
>  f01cb0b623357387827902d9d0963435
> 
> I guess, it is because I only implemented conversion in swscale_unscaled.
> What can I do to fix it? Should I implement conversion for scaling or maybe
> change something in the test, so it would not check these formats (if it is
> possible).
> Anyway, I need to know what changes I should make and where.

well, swscale shouldn't really have formats only half supported,
so for any supported format in and out it should work with any
width / height in / out

There's a wide range of possibilities how to implement this.
The correct / ideal way is of course to implement a full floating point path
for scaling alongside the integer code.
A simpler approach would be to convert from/to float to/from integers and use
the existing code. (this of course has the disadvantage of losing precision)


[...]
> > +const uint8_t *srcPtr = src[0];
> > > +float *dstPtr = (float *)(dst[0] + dstStride[0] * srcSliceY);
> > > +
> > > +for (y = 0; y < srcSliceH; ++y){
> > > +for (x = 0; x < c->srcW; ++x){
> > > +dstPtr[x] = (float)srcPtr[x] / 255.0f;
> >
> > division is slow. This should either be a multiplication with the
> > inverse or a LUT with 8bit index changing to float.
> >
> > The faster of them should be used
> >
> 
> LUT seems to be faster. Can I place it in SwsContext and initialize it in
> sws_init_context when necessary?

yes of course

thanks

[...]
-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

It is dangerous to be right in matters on which the established authorities
are wrong. -- Voltaire




Re: [FFmpeg-devel] [PATCH] avformat/tls_schannel: Fix use of uninitialized variable

2018-08-03 Thread Paweł Wegner

One could copy the initialization to the top from the while loop:
  init_sec_buffer(&outbuf[0], SECBUFFER_TOKEN, NULL, 0);
  init_sec_buffer(&outbuf[1], SECBUFFER_ALERT, NULL, 0);
  init_sec_buffer(&outbuf[2], SECBUFFER_EMPTY, NULL, 0);
  init_sec_buffer_desc(&outbuf_desc, outbuf, 3);

But memset is shorter. Current code will crash when there is any failure
before this initialization.
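
For reference, a minimal sketch of the memset variant, reusing the outbuf / outbuf_desc names and the init_sec_buffer_desc() helper quoted above (SecBuffer and SecBufferDesc come from the Windows SSPI headers):

    SecBuffer outbuf[3];
    SecBufferDesc outbuf_desc;

    /* zero all three buffers up front so any failure path hit before the
     * loop's init_sec_buffer() calls never sees uninitialized data */
    memset(outbuf, 0, sizeof(outbuf));
    init_sec_buffer_desc(&outbuf_desc, outbuf, 3);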



Re: [FFmpeg-devel] [PATCH]lavf/dashdec: Do not copy url on init copy

2018-08-03 Thread Colin NG
How about having a different memory location for the init_section of each
representation?


rep_dest->init_section = (struct fragment *) av_mallocz(sizeof(struct fragment));



From: ffmpeg-devel  on behalf of Carl Eugen Hoyos 
Sent: July 31, 2018 6:06 PM
To: FFmpeg development discussions and patches
Subject: [FFmpeg-devel] [PATCH]lavf/dashdec: Do not copy url on init copy

Hi!

Attached patch fixes ticket #7338, better fix welcome!

Please comment, Carl Eugen


Re: [FFmpeg-devel] [PATCH 4/7] Adds gray floating-point pixel formats.

2018-08-03 Thread Sergey Lavrushkin
2018-08-03 16:07 GMT+03:00 Michael Niedermayer :

> On Thu, Aug 02, 2018 at 09:52:45PM +0300, Sergey Lavrushkin wrote:
> > This patch adds two floating-point gray formats to use them in sr filter
> for
> > conversion with libswscale. I added conversion from uint gray to float
> and
> > backwards in swscale_unscaled.c, that is enough for sr filter. But for
> > proper format addition, should I add anything else?
> >
> > ---
> >  libavutil/pixdesc.c   | 22 ++
> >  libavutil/pixfmt.h|  5 
> >  libswscale/swscale_internal.h |  7 ++
> > >  libswscale/swscale_unscaled.c | 54 +++++--
> >  libswscale/utils.c|  5 +++-
>
> please split this in a patch or libavutil and one for libswscale
> they also need some version.h bump
>

Ok.

> also fate tests need an update, (make fate) fails otherwise, the update
> should be part of the patch that causes the failure otherwise


In one test for these formats I get:

filter-pixfmts-scale
grayf32be   grayf32le   monob
 f01cb0b623357387827902d9d0963435

I guess, it is because I only implemented conversion in swscale_unscaled.
What can I do to fix it? Should I implement conversion for scaling or maybe
change something in the test, so it would not check these formats (if it is
possible).
Anyway, I need to know what changes I should make and where.


> >  5 files changed, 90 insertions(+), 3 deletions(-)
> >
> > diff --git a/libavutil/pixdesc.c b/libavutil/pixdesc.c
> > index 96e079584a..7d307d9120 100644
> > --- a/libavutil/pixdesc.c
> > +++ b/libavutil/pixdesc.c
> > @@ -2198,6 +2198,28 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
> >  .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA |
> >   AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_FLOAT,
> >  },
> > +[AV_PIX_FMT_GRAYF32BE] = {
> > +.name = "grayf32be",
> > +.nb_components = 1,
> > +.log2_chroma_w = 0,
> > +.log2_chroma_h = 0,
> > +.comp = {
> > +{ 0, 4, 0, 0, 32, 3, 31, 1 },   /* Y */
> > +},
> > +.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_FLOAT,
> > +.alias = "yf32be",
> > +},
> > +[AV_PIX_FMT_GRAYF32LE] = {
> > +.name = "grayf32le",
> > +.nb_components = 1,
> > +.log2_chroma_w = 0,
> > +.log2_chroma_h = 0,
> > +.comp = {
> > +{ 0, 4, 0, 0, 32, 3, 31, 1 },   /* Y */
> > +},
> > +.flags = AV_PIX_FMT_FLAG_FLOAT,
> > +.alias = "yf32le",
> > +},
> >  [AV_PIX_FMT_DRM_PRIME] = {
> >  .name = "drm_prime",
> >  .flags = AV_PIX_FMT_FLAG_HWACCEL,
>
> > diff --git a/libavutil/pixfmt.h b/libavutil/pixfmt.h
> > index 2b3307845e..aa9a4f60c1 100644
> > --- a/libavutil/pixfmt.h
> > +++ b/libavutil/pixfmt.h
> > @@ -320,6 +320,9 @@ enum AVPixelFormat {
> >  AV_PIX_FMT_GBRAPF32BE, ///< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian
> >  AV_PIX_FMT_GBRAPF32LE, ///< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian
> >
> > +AV_PIX_FMT_GRAYF32BE,  ///< IEEE-754 single precision Y, 32bpp, big-endian
> > +AV_PIX_FMT_GRAYF32LE,  ///< IEEE-754 single precision Y, 32bpp, little-endian
> > +
> >  /**
> >   * DRM-managed buffers exposed through PRIME buffer sharing.
> >   *
>
> new enum values can only be added in such a way that no value of an existing
> enum changes. This would change the value of the following enums


Ok.

> @@ -405,6 +408,8 @@ enum AVPixelFormat {
> >  #define AV_PIX_FMT_GBRPF32AV_PIX_FMT_NE(GBRPF32BE,  GBRPF32LE)
> >  #define AV_PIX_FMT_GBRAPF32   AV_PIX_FMT_NE(GBRAPF32BE, GBRAPF32LE)
> >
> > +#define AV_PIX_FMT_GRAYF32 AV_PIX_FMT_NE(GRAYF32BE, GRAYF32LE)
> > +
> >  #define AV_PIX_FMT_YUVA420P9  AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE)
> >  #define AV_PIX_FMT_YUVA422P9  AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE)
> >  #define AV_PIX_FMT_YUVA444P9  AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE)
> > diff --git a/libswscale/swscale_internal.h b/libswscale/swscale_internal.h
> > index 1703856ab2..4a2cdfe658 100644
> > --- a/libswscale/swscale_internal.h
> > +++ b/libswscale/swscale_internal.h
> > @@ -764,6 +764,13 @@ static av_always_inline int isAnyRGB(enum AVPixelFormat pix_fmt)
> >  pix_fmt == AV_PIX_FMT_MONOBLACK || pix_fmt == AV_PIX_FMT_MONOWHITE;
> >  }
> >
> > +static av_always_inline int isFloat(enum AVPixelFormat pix_fmt)
> > +{
> > +const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
> > +av_assert0(desc);
> > +return desc->flags & AV_PIX_FMT_FLAG_FLOAT;
> > +}
> > +
> >  static av_always_inline int isALPHA(enum AVPixelFormat pix_fmt)
> >  {
> >  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
>
> > diff --git a/libswscale/swscale_unscaled.c b/libswscale/swscale_unscaled.c
> > index 6480070cbf..f5b4c9be9d 100644
> > --- 

Re: [FFmpeg-devel] Mixture of homographies video stabilization

2018-08-03 Thread Michael Niedermayer
On Fri, Aug 03, 2018 at 01:29:27PM +0100, Matthew Lai wrote:
> Hi Michael,
> 
> The operations needed are multiplications, SVD, inversion, scaling, and
> additions, as far as I can tell. Convolutions would probably also be
> useful. They are certainly possible to implement directly, but it's
> unlikely that any naive implementation will be within an order of magnitude
> in performance to an optimized BLAS library. That may or may not matter - I
> don't have a good idea of where the performance bottlenecks will be yet.
> The matrices are 4x4.

How do you intend to organize the data for these 4x4 matrices?
I mean, if you have a rather large number of them and pass them all
together to an external lib which then sends them to the GPU to process,
that should be faster than some native implementation (which would be CPU
based presumably).
OTOH, one matrix at a time, I don't see how the use of an external lib would
make any sense.
Matrix addition, subtraction and scaling are just adding or scaling the
16 elements. I would expect a naive implementation to beat an external lib
as long as both do it on the CPU. Because an external lib has to work with
any NxM sized matrix while we would implement this for just 4x4, it's trivial
to fully unroll the loop or just do 4 load, add, store in SIMD or even
skip the load/store and keep things in SIMD registers.
About SVD: do you really need SVD? I mean, is that the correct tool here and
not just slower than alternatives?
Are these 4x4 matrices general matrices, or do they have some other
properties, like being positive definite or something?
I guess I should read the paper, I might do that later ...
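
For illustration, the kind of trivial fixed-size code meant above (a plain C sketch; a SIMD version would do one load/add/store per row, and a compiler will typically unroll and vectorize this on its own):

    typedef struct Mat4 { float m[4][4]; } Mat4;

    /* 4x4 add: 16 scalar adds, no external BLAS call overhead */
    static void mat4_add(Mat4 *dst, const Mat4 *a, const Mat4 *b)
    {
        for (int i = 0; i < 4; i++)
            for (int j = 0; j < 4; j++)
                dst->m[i][j] = a->m[i][j] + b->m[i][j];
    }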




> 
> Motion estimation is one way, but most state of the art implementations
> don't use it because it's too noisy (motion from low contrast regions).
> Most use something like a corner detector to find sharp feature points, and
> track them (this can potentially be implemented using motion estimation,
> but only around those points).

That's what I meant. The area that you want to match up could use the existing
code or share something with it.
Other people had previously complained loudly about duplication of ME code.
And while it's not always possible to avoid "duplication", I think this should
be looked into if it can be done in this case.

thx


> 
> Matthew
> 
> On Fri, Aug 3, 2018 at 2:05 AM Michael Niedermayer 
> wrote:
> 
> > On Thu, Aug 02, 2018 at 05:24:08PM +0100, Matthew Lai wrote:
> > > Ah ok thanks! I'm surprised no one has needed a linear algebra library. I
> > > guess there's OpenCV and people use it to do the heavy lifting?
> > >
> > > Will look into the API more.
> >
> > A lot of the linear algebra we needed has been implemented directly.
> > Some of it is shared and available from libavutil, like lls.*
> >
> > Can you elaborate on what exact linear algebra operations are needed?
> > also what amounts of data (matrix types/sizes) and speed requirements this
> > has
> >
> > Also IIUC (please correct me if I am wrong) this uses motion estimation
> > at one step. IIRC ronald (and others) want motion estimation to be
> > factored and shared and not duplicated. The current motion estimation is
> > in libavcodec/motion_est*.
> >
> > Thanks
> >
> > >
> > > Thanks
> > > matthew
> > >
> > > On Thu, Aug 2, 2018 at 3:31 PM Paul B Mahol  wrote:
> > >
> > > > On 8/2/18, Matthew Lai  wrote:
> > > > > Hello!
> > > > >
> > > > > I want to write a more advanced video stabilizer for libavfilter (*),
> > > > > implementing the algorithm described here -
> > > > >
> > > >
> > https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37744.pdf
> > > > > The focus of the paper is rolling shutter removal, but it builds on
> > top
> > > > of
> > > > > another algorithm that does full frame stabilization, and the new
> > > > algorithm
> > > > > does that as well.
> > > > >
> > > > > This is the algorithm used in YouTube's stabilizing filter, and is
> > state
> > > > of
> > > > > the art. Adobe calls it Warp Stabilizer (it's the same thing as far
> > as I
> > > > > can tell from public information anyways).
> > > > >
> > > > > 3 questions:
> > > > > 1. Is there a linear algebra library already in use? I didn't see
> > > > anything
> > > > > in configure, but would be surprised if none of the existing filters
> > work
> > > > > with matrices?
> > > >
> > > > There is no such library here used. There are indeed video/audio
> > > > filters that work with matrices.
> > > >
> > > > > 2. Is there anything to watch out for re. a high frame delay (say a
> > few
> > > > > hundred frames)? Looking at the API, I don't see a callback to flush
> > out
> > > > > remaining frames when input frames are finished? Is doing it in two
> > > > passes
> > > > > the only option?
> > > >
> > > > It is handled internally, there are two internal APIs, activate one and
> > > > legacy.
> > > > With legacy you can flush frames when you receive last frame from
> > input.
> > > > With newer, activate API, its similar.

Re: [FFmpeg-devel] [PATCH 1/2] docs/filters: add documentation to all existing OpenCL filters

2018-08-03 Thread Gyan Doshi



On 03-08-2018 08:46 PM, Danil Iashchenko wrote:


+For most general-purpose filters, no conversion is required.
+
+In case a file contains an alpha channel supported by the format, then explicit conversion is a requirement.


This is unclear. So, if the input's pixel format has alpha, we should
convert? To what? In the example below, the conversion target also has
alpha. Does RGB24 need to be converted?



+@item
+Insert a PNG logo in the top-left corner of the INPUT. Explicit format conversion is a must.
+@example
+-i INPUT -i LOGO -filter_complex "[0:v]hwupload[a], [1:v]format=yuva420p, hwupload[b], [a][b]overlay_opencl, hwdownload" OUTPUT


PNGs, afaik, are always full chroma. Shouldn't we convert to yuva444p?

Regards,
Gyan




Re: [FFmpeg-devel] First Patch for hlsenc.c for https://trac.ffmpeg.org/ticket/7281

2018-08-03 Thread Ronak
>> I have read this patch; there are some problems with it.
>> 
>> 1. there may be a problem when the duration is not the same for every
>> fragment, for example:
>> liuqideMacBook-Pro:xxx liuqi$ ./ffmpeg -v quiet -i 
>> ~/Movies/Test/bbb_sunflower_1080p_30fps_normal.mp4 -c copy -f hls 
>> -hls_list_size 0 output_test.m3u8
>> liuqideMacBook-Pro:xxx liuqi$ head -n 10  output_test.m3u8
>> #EXTM3U
>> #EXT-X-VERSION:3
>> #EXT-X-TARGETDURATION:8
>> #EXT-X-MEDIA-SEQUENCE:0
>> #EXTINF:3.87,
>> output_test0.ts
>> #EXTINF:7.30,
>> output_test1.ts
>> #EXTINF:8.33,
>> output_test2.ts
>> 
>> output_test0.ts's duration is shorter than output_test1.ts's; the
>> #EXT-X-TARGETDURATION needs to be updated to the longest duration.
>> This operation (checking the longest duration) happens every time a
>> fragment write completes.
>> It will not be updated if the update is moved to hls_write_header,
>> 
> 
> This is a problem in the code that splits the mpegts files. I've filed a 
> separate issue for this here: https://trac.ffmpeg.org/ticket/7341. Mpegts 
> segmentation should be following the hls_time parameter (or the default 
> length).
 Whatever hls_time is set to, the split point is decided by keyframe position;
 this happens when the GOP size is not constant.
 
> 
> This is happening now with fMP4 assets, but not with mpegts.
 Whether fmp4 or mpegts, both need the duration refresh problem fixed.
 
 for example:
 
 liuqideMacBook-Pro:xxx liuqi$ ./ffmpeg -ss -v quiet -i 
 ~/Movies/Test/bbb_sunflower_1080p_30fps_normal.mp4 -c copy -f hls 
 -hls_list_size 0 -hls_segment_type fmp4 -hls_time 3 output_test.m3u8
 liuqideMacBook-Pro:xxx liuqi$ head -n 10  output_test.m3u8
 #EXTM3U
 #EXT-X-VERSION:7
 #EXT-X-TARGETDURATION:8
 #EXT-X-MEDIA-SEQUENCE:0
 #EXT-X-MAP:URI="init.mp4"
 #EXTINF:3.87,
 output_test0.m4s
 #EXTINF:7.30,
 output_test1.m4s
 #EXTINF:8.33,
 liuqideMacBook-Pro:xxx liuqi$
>>> 
>>> This is after your patch:
>>> liuqideMacBook-Pro:xxx liuqi$  ./ffmpeg -ss 17 -v quiet -i 
>>> ~/Movies/Test/bbb_sunflower_1080p_30fps_normal.mp4 -c copy -f hls 
>>> -hls_list_size 0 -hls_segment_type fmp4 -hls_time 3 output_test.m3u8
>>> liuqideMacBook-Pro:xxx liuqi$ head -n 10  output_test.m3u8
>>> #EXTM3U
>>> #EXT-X-VERSION:7
>>> #EXT-X-TARGETDURATION:3
>>> #EXT-X-MEDIA-SEQUENCE:0
>>> #EXT-X-MAP:URI="init.mp4"
>>> #EXTINF:3.87,
>>> output_test0.m4s
>>> #EXTINF:7.30,
>>> output_test1.m4s
>>> #EXTINF:8.33,
>>> 
>>> The RFC https://www.rfc-editor.org/rfc/rfc8216.txt describe:
>>> 
>>> 4.3.3.1.  EXT-X-TARGETDURATION
>>> 
>>> The EXT-X-TARGETDURATION tag specifies the maximum Media Segment
>>> duration.  The EXTINF duration of each Media Segment in the Playlist
>>> file, when rounded to the nearest integer, MUST be less than or equal
>>> to the target duration; longer segments can trigger playback stalls
>>> or other errors.  It applies to the entire Playlist file.  Its format
>>> is:
>>> 
>>> #EXT-X-TARGETDURATION:
>>> 
>>> where s is a decimal-integer indicating the target duration in
>>> seconds.  The EXT-X-TARGETDURATION tag is REQUIRED.
>>> 
>>> your patch makes the EXT-X-TARGETDURATION less than EXTINF:7.30 and
>>> EXTINF:8.33
>> 
>> 
>> 2. the version may need to be updated when hls_segment_type or
>> append_list etc. are used, since those operations are supported by
>> different m3u8 versions.
> 
> I don't follow what you mean here. The version number is known up front, 
> based on the options that were passed in. It should be illegal to switch 
> between versions when trying to update an existing manifest. When can 
> this legitimately happen?
 there may be some players that cannot support a high m3u8 version, for
 example an old parser or player that just supports VERSION 3.
 We must think about all players and parsers, because ffmpeg is not
 used only by myself.
 
 Or what about getting the #EXT-X-VERSION position and updating it in place,
 like the data shifting in flvenc.c or movenc.c?
 
> 
>> 3. the segment list vs->segments needs to be updated when the hls_list_size option is set.
>> 
> 
> What do you mean by this and where should I do it?
 for example, if hls_list_size is 4, the m3u8 list should refresh every time
 a new fragment is made.
 
 first time:
 1.m4s
 2.m4s
 3.m4s
 4.m4s
 
 second time:
 2.m4s
 3.m4s
 4.m4s
 5.m4s
>>> 
>>> after your patch:
>>> 
>>> liuqideMacBook-Pro:xxx liuqi$  ./ffmpeg -v quiet -i 
>>> ~/Movies/Test/bbb_sunflower_1080p_30fps_normal.mp4 -c copy -f hls 
>>> -hls_list_size 4 -hls_segment_type fmp4 -hls_time 3 -t 50 output_test.m3u8
>>> liuqideMacBook-Pro:xxx liuqi$ cat output_test.m3u8
>>> #EXTM3U
>>> #EXT-X-VERSION:7
>>> #EXT-X-TARGETDURATION:3
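
To make the RFC 8216 rule quoted above concrete, a sketch of the recomputation (an illustrative helper, not hlsenc.c code): EXT-X-TARGETDURATION must be at least every EXTINF rounded to the nearest integer, so it has to be re-evaluated whenever a segment is appended:

    #include <math.h>

    static int hls_target_duration(const double *durations, int n)
    {
        int target = 0;
        for (int i = 0; i < n; i++) {
            int d = (int)lrint(durations[i]); /* round to nearest integer */
            if (d > target)
                target = d;
        }
        return target;
    }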
>>> 

Re: [FFmpeg-devel] [PATCH 1/4] vf_tonemap: Update the default peak values

2018-08-03 Thread Song, Ruiling


> -Original Message-
> From: ffmpeg-devel [mailto:ffmpeg-devel-boun...@ffmpeg.org] On Behalf Of
> Vittorio Giovara
> Sent: Wednesday, July 25, 2018 8:47 AM
> To: ffmpeg-devel@ffmpeg.org
> Subject: [FFmpeg-devel] [PATCH 1/4] vf_tonemap: Update the default peak
> values
> 
> When there is no metadata attached to a frame, take into account both
> the PQ and HLG transfers, and change the HLG default value to 10:
> the value of 12 is the maximum range in scene referred light, but
> the reference OOTF maps this from 0 to 1000 cd/m² on the ideal HLG
> monitor.
The patch-set looks good to me.

Ruiling


Re: [FFmpeg-devel] [PATCH] avfilter/vf_hue: 10bit support

2018-08-03 Thread Reto Kromer
Michael Niedermayer wrote:

>Signed-off-by: Michael Niedermayer 
>---
> libavfilter/vf_hue.c | 103 +++
> +++-
> 1 file changed, 92 insertions(+), 11 deletions(-)

On my side it works fine, but I don't have any official status in
the project.

Best regards, Reto


AV Preservation by reto.ch
chemin du Suchet 5 | 1024 Ecublens | Switzerland
Web:  | Twitter: @retoch



[FFmpeg-devel] [PATCH 1/2] docs/filters: add documentation to all existing OpenCL filters

2018-08-03 Thread Danil Iashchenko
docs/filters: add documentation to all existing OpenCL filters

---

Thanks, fixed!

Danil.

 doc/filters.texi | 415 +++
 1 file changed, 415 insertions(+)

diff --git a/doc/filters.texi b/doc/filters.texi
index 705d48e..363fe7d 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -17545,6 +17545,421 @@ pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1.
 
 @c man end VIDEO FILTERS
 
+@chapter OpenCL Video Filters
+@c man begin OPENCL VIDEO FILTERS
+
+Below is a description of the currently available OpenCL video filters.
+
+To enable compilation of these filters you need to configure FFmpeg with
+@code{--enable-opencl}.
+
+Running OpenCL filters requires you to initialize a hardware device and to pass that device to all filters in any filter graph.
+@table @option
+
+@item -init_hw_device opencl[=@var{name}][:@var{device}[,@var{key=value}...]]
+Initialise a new hardware device of type @var{opencl} called @var{name}, using the
+given device parameters.
+
+@item -filter_hw_device @var{name}
+Pass the hardware device called @var{name} to all filters in any filter graph.
+
+@end table
+
+For more detailed information see @url{https://www.ffmpeg.org/ffmpeg.html#Advanced-Video-options}
+
+@itemize
+@item
+Example of choosing the first device on the second platform and running avgblur_opencl filter with default parameters on it.
+@example
+-init_hw_device opencl=gpu:1.0 -filter_hw_device gpu -i INPUT -vf "hwupload, avgblur_opencl, hwdownload" OUTPUT
+@end example
+@end itemize
+
+For most general-purpose filters, no conversion is required.
+
+In case a file contains an alpha channel supported by the format, then explicit conversion is a requirement.
+
+@itemize
+@item
+Insert a JPG logo in the top-left corner of the INPUT. No conversion needed.
+@example
+-i INPUT -i LOGO -filter_complex "[0:v]hwupload[a], [1:v]hwupload[b], [a][b]overlay_opencl, hwdownload" OUTPUT
+@end example
+@item
+Insert a PNG logo in the top-left corner of the INPUT. Explicit format conversion is a must.
+@example
+-i INPUT -i LOGO -filter_complex "[0:v]hwupload[a], [1:v]format=yuva420p, hwupload[b], [a][b]overlay_opencl, hwdownload" OUTPUT
+@end example
+@end itemize
+
+@section avgblur_opencl
+
+Apply average blur filter.
+
+The filter accepts the following options:
+
+@table @option
+@item sizeX
+Set horizontal radius size.
+Range is @code{[1, 1024]} and default value is @code{1}.
+
+@item planes
+Set which planes to filter. Default value is @code{0xf}, by which all planes are processed.
+
+@item sizeY
+Set vertical radius size. Range is @code{[1, 1024]} and default value is @code{0}. If zero, @code{sizeX} value will be used.
+@end table
+
+@subsection Example
+
+@itemize
+@item
+Apply average blur filter with horizontal and vertical size of 3, setting each pixel of the output to the average value of the 7x7 region centered on it in the input. For pixels on the edges of the image, the region does not extend beyond the image boundaries, and so out-of-range coordinates are not used in the calculations.
+@example
+-i INPUT -vf "hwupload, avgblur_opencl=3, hwdownload" OUTPUT
+@end example
+@end itemize
+
+@section boxblur_opencl
+
+Apply a boxblur algorithm to the input video.
+
+It accepts the following parameters:
+
+@table @option
+
+@item luma_radius, lr
+@item luma_power, lp
+@item chroma_radius, cr
+@item chroma_power, cp
+@item alpha_radius, ar
+@item alpha_power, ap
+
+@end table
+
+A description of the accepted options follows.
+
+@table @option
+@item luma_radius, lr
+@item chroma_radius, cr
+@item alpha_radius, ar
+Set an expression for the box radius in pixels used for blurring the
+corresponding input plane.
+
+The radius value must be a non-negative number, and must not be
+greater than the value of the expression @code{min(w,h)/2} for the
+luma and alpha planes, and of @code{min(cw,ch)/2} for the chroma
+planes.
+
+Default value for @option{luma_radius} is "2". If not specified,
+@option{chroma_radius} and @option{alpha_radius} default to the
+corresponding value set for @option{luma_radius}.
+
+The expressions can contain the following constants:
+@table @option
+@item w
+@item h
+The input width and height in pixels.
+
+@item cw
+@item ch
+The input chroma image width and height in pixels.
+
+@item hsub
+@item vsub
+The horizontal and vertical chroma subsample values. For example, for the
+pixel format "yuv422p", @var{hsub} is 2 and @var{vsub} is 1.
+@end table
+
+@item luma_power, lp
+@item chroma_power, cp
+@item alpha_power, ap
+Specify how many times the boxblur filter is applied to the
+corresponding plane.
+
+Default value for @option{luma_power} is 2. If not specified,
+@option{chroma_power} and @option{alpha_power} default to the
+corresponding value set for @option{luma_power}.
+
+A value of 0 will disable the effect.
+@end table
+
+@subsection Examples
+
+Apply boxblur filter, setting each pixel of the output to the average value of 

Re: [FFmpeg-devel] [PATCH] avfilter/vf_hue: 10bit support

2018-08-03 Thread Tobias Rapp

On 03.08.2018 16:34, Michael Niedermayer wrote:

Signed-off-by: Michael Niedermayer 
---
  libavfilter/vf_hue.c | 103 ++-
  1 file changed, 92 insertions(+), 11 deletions(-)

[...]


Tested here successfully with 10-bit yuvtestsrc data and different hue 
filter option values for h/s/b.


Regards,
Tobias



[FFmpeg-devel] [PATCH] avfilter/vf_hue: 10bit support

2018-08-03 Thread Michael Niedermayer
Signed-off-by: Michael Niedermayer 
---
 libavfilter/vf_hue.c | 103 ++-
 1 file changed, 92 insertions(+), 11 deletions(-)

diff --git a/libavfilter/vf_hue.c b/libavfilter/vf_hue.c
index 45a5a1a92f..32b33c 100644
--- a/libavfilter/vf_hue.c
+++ b/libavfilter/vf_hue.c
@@ -80,6 +80,9 @@ typedef struct HueContext {
 uint8_t  lut_l[256];
 uint8_t  lut_u[256][256];
 uint8_t  lut_v[256][256];
+uint16_t  lut_l16[65536];
+uint16_t  lut_u10[1024][1024];
+uint16_t  lut_v10[1024][1024];
 } HueContext;
 
 #define OFFSET(x) offsetof(HueContext, x)
@@ -117,6 +120,9 @@ static inline void create_luma_lut(HueContext *h)
 for (i = 0; i < 256; i++) {
 h->lut_l[i] = av_clip_uint8(i + b * 25.5);
 }
+for (i = 0; i < 65536; i++) {
+h->lut_l16[i] = av_clip_uintp2(i + b * 102.4, 10);
+}
 }
 
 static inline void create_chrominance_lut(HueContext *h, const int32_t c,
@@ -148,6 +154,25 @@ static inline void create_chrominance_lut(HueContext *h, const int32_t c,
 h->lut_v[i][j] = av_clip_uint8(new_v);
 }
 }
+for (i = 0; i < 1024; i++) {
+for (j = 0; j < 1024; j++) {
+u = i - 512;
+v = j - 512;
+/*
+ * Apply the rotation of the vector : (c * u) - (s * v)
+ *(s * u) + (c * v)
+ * De-normalize the components (without forgetting to scale 512
+ * by << 16)
+ * Finally scale back the result by >> 16
+ */
+new_u = ((c * u) - (s * v) + (1 << 15) + (512 << 16)) >> 16;
+new_v = ((s * u) + (c * v) + (1 << 15) + (512 << 16)) >> 16;
+
+/* Prevent a potential overflow */
+h->lut_u10[i][j] = av_clip_uintp2(new_u, 10);
+h->lut_v10[i][j] = av_clip_uintp2(new_v, 10);
+}
+}
 }
 
 static int set_expr(AVExpr **pexpr_ptr, char **expr_ptr,
@@ -231,6 +256,11 @@ static int query_formats(AVFilterContext *ctx)
 AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUV440P,
 AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P,
 AV_PIX_FMT_YUVA420P,
+AV_PIX_FMT_YUV444P10,  AV_PIX_FMT_YUV422P10,
+AV_PIX_FMT_YUV420P10,
+AV_PIX_FMT_YUV440P10,
+AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA422P10,
+AV_PIX_FMT_YUVA420P10,
 AV_PIX_FMT_NONE
 };
 AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
@@ -271,6 +301,22 @@ static void apply_luma_lut(HueContext *s,
 }
 }
 
+static void apply_luma_lut10(HueContext *s,
+ uint16_t *ldst, const int dst_linesize,
+ uint16_t *lsrc, const int src_linesize,
+ int w, int h)
+{
+int i;
+
+while (h--) {
+for (i = 0; i < w; i++)
+ldst[i] = s->lut_l16[lsrc[i]];
+
+lsrc += src_linesize;
+ldst += dst_linesize;
+}
+}
+
 static void apply_lut(HueContext *s,
   uint8_t *udst, uint8_t *vdst, const int dst_linesize,
   uint8_t *usrc, uint8_t *vsrc, const int src_linesize,
@@ -294,6 +340,29 @@ static void apply_lut(HueContext *s,
 }
 }
 
+static void apply_lut10(HueContext *s,
+  uint16_t *udst, uint16_t *vdst, const int dst_linesize,
+  uint16_t *usrc, uint16_t *vsrc, const int src_linesize,
+  int w, int h)
+{
+int i;
+
+while (h--) {
+for (i = 0; i < w; i++) {
+const int u = av_clip_uintp2(usrc[i], 10);
+const int v = av_clip_uintp2(vsrc[i], 10);
+
+udst[i] = s->lut_u10[u][v];
+vdst[i] = s->lut_v10[u][v];
+}
+
+usrc += src_linesize;
+vsrc += src_linesize;
+udst += dst_linesize;
+vdst += dst_linesize;
+}
+}
+
 #define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
 #define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))
 
@@ -305,6 +374,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
 const int32_t old_hue_sin = hue->hue_sin, old_hue_cos = hue->hue_cos;
 const float old_brightness = hue->brightness;
 int direct = 0;
+const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+const int bps = desc->comp[0].depth > 8 ? 2 : 1;
 
 if (av_frame_is_writable(inpic)) {
 direct = 1;
@@ -367,21 +438,31 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
 if (!direct) {
 if (!hue->brightness)
 av_image_copy_plane(outpic->data[0], outpic->linesize[0],
-inpic->data[0],  inpic->linesize[0],
-inlink->w, inlink->h);
+inpic->data[0],   inpic->linesize[0],
+inlink->w * bps, inlink->h);
 if (inpic->data[3])
 

Re: [FFmpeg-devel] Mixture of homographies video stabilization

2018-08-03 Thread Matthew Lai
I just realized: OpenCV has functions to do all the homography hard work
already, so I'll probably use that since OpenCV is already a dependency.

On Fri, Aug 3, 2018 at 1:29 PM Matthew Lai  wrote:

> Hi Michael,
>
> The operations needed are multiplications, SVD, inversion, scaling, and
> additions, as far as I can tell. Convolutions would probably also be
> useful. They are certainly possible to implement directly, but it's
> unlikely that any naive implementation will be within an order of magnitude
> in performance to an optimized BLAS library. That may or may not matter - I
> don't have a good idea of where the performance bottlenecks will be yet.
> The matrices are 4x4.
>
> Motion estimation is one way, but most state of the art implementations
> don't use it because it's too noisy (motion from low contrast regions).
> Most use something like a corner detector to find sharp feature points, and
> track them (this can potentially be implemented using motion estimation,
> but only around those points).
>
> Matthew
>
> On Fri, Aug 3, 2018 at 2:05 AM Michael Niedermayer 
> wrote:
>
>> On Thu, Aug 02, 2018 at 05:24:08PM +0100, Matthew Lai wrote:
>> > Ah ok thanks! I'm surprised no one has needed a linear algebra library. I
>> > guess there's OpenCV and people use it to do the heavy lifting?
>> >
>> > Will look into the API more.
>>
>> A lot of the linear algebra we needed has been implemented directly.
>> Some of it is shared and available from libavutil, like lls.*
>>
>> Can you elaborate on what exact linear algebra operations are needed?
>> also what amounts of data (matrix types/sizes) and speed requirements
>> this has
>>
>> Also IIUC (please correct me if I am wrong) this uses motion estimation
>> at one step. IIRC ronald (and others) want motion estimation to be
>> factored and shared and not duplicated. The current motion estimation is
>> in libavcodec/motion_est*.
>>
>> Thanks
>>
>> >
>> > Thanks
>> > matthew
>> >
>> > On Thu, Aug 2, 2018 at 3:31 PM Paul B Mahol  wrote:
>> >
>> > > On 8/2/18, Matthew Lai  wrote:
>> > > > Hello!
>> > > >
>> > > > I want to write a more advanced video stabilizer for libavfilter
>> (*),
>> > > > implementing the algorithm described here -
>> > > >
>> > >
>> https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37744.pdf
>> > > > The focus of the paper is rolling shutter removal, but it builds on
>> top
>> > > of
>> > > > another algorithm that does full frame stabilization, and the new
>> > > algorithm
>> > > > does that as well.
>> > > >
>> > > > This is the algorithm used in YouTube's stabilizing filter, and is
>> state
>> > > of
>> > > > the art. Adobe calls it Warp Stabilizer (it's the same thing as far
>> as I
>> > > > can tell from public information anyways).
>> > > >
>> > > > 3 questions:
>> > > > 1. Is there a linear algebra library already in use? I didn't see
>> > > anything
>> > > > in configure, but would be surprised if none of the existing
>> filters work
>> > > > with matrices?
>> > >
>> > > There is no such library here used. There are indeed video/audio
>> > > filters that work with matrices.
>> > >
>> > > > 2. Is there anything to watch out for re. a high frame delay (say a
>> few
>> > > > hundred frames)? Looking at the API, I don't see a callback to
>> flush out
>> > > > remaining frames when input frames are finished? Is doing it in two
>> > > passes
>> > > > the only option?
>> > >
>> > > It is handled internally; there are two internal APIs, the activate
>> > > one and the legacy one.
>> > > With legacy, you can flush frames when you receive the last frame
>> > > from the input.
>> > > With the newer activate API, it's similar.
>> > >
>> > > > 3. doc/writing_filters.txt says only slice threading is available.
>> That's
>> > > > not really possible with this filter, but frame threading is. Can I
>> just
>> > > > buffer frames internally (which I need to do anyways to smooth out
>> > > motion),
>> > > > and do my own threading?
>> > >
>> > > You could do it.
>> > >
>> > > >
>> > > > * vid.stab is good for what it does, but it only does rotation and
>> > > > translation, and doesn't handle zoom, perspective distortion, or
>> rolling
>> > > > shutter. This means it's limited when it comes to things like scuba
>> > > diving
>> > > > videos, where the camera is filming the seabed at a small distance
>> and at
>> > > > an angle.
>> > > >
>> > > > Thanks!
>> > > > Matthew
>> > > > ___
>> > > > ffmpeg-devel mailing list
>> > > > ffmpeg-devel@ffmpeg.org
>> > > > http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>> > > >
>> > > ___
>> > > ffmpeg-devel mailing list
>> > > ffmpeg-devel@ffmpeg.org
>> > > http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>> > >
>> > ___
>> > ffmpeg-devel mailing list
>> > ffmpeg-devel@ffmpeg.org
>> > http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>>
>> --
>> Michael GnuPG fingerprint: 

Re: [FFmpeg-devel] [PATCH 1/7] libavfilter: Adds on the fly generation of default DNN models for tensorflow backend instead of storing binary model.

2018-08-03 Thread Michael Niedermayer
On Thu, Aug 02, 2018 at 09:52:42PM +0300, Sergey Lavrushkin wrote:
> This patch provides on-the-fly generation of default DNN models for the
> tensorflow backend, which eliminates data duplication for the model
> weights. Also, files with internal weights were replaced with
> automatically generated files for the models I trained. Scripts for
> training and generating these files can be found here:
> https://github.com/HighVoltageRocknRoll/sr
> 

[...]
> +static TF_Operation* add_conv_layers(TFModel* tf_model, const float** 
> consts, const int64_t** consts_dims,
> + const int* consts_dims_len, const 
> char** activations,
> + TF_Operation* input_op, int layers_num)
> +{
> +int i;
> +TF_OperationDescription* op_desc;
> +TF_Operation* op;
> +TF_Operation* transpose_op;
> +TF_Output input;
> +int64_t strides[] = {1, 1, 1, 1};
> +int32_t* transpose_perm;
> +TF_Tensor* tensor;
> +int64_t transpose_perm_shape[] = {4};
> +char name_buffer[256];
> +
> +op_desc = TF_NewOperation(tf_model->graph, "Const", "transpose_perm");
> +TF_SetAttrType(op_desc, "dtype", TF_INT32);
> +tensor = TF_AllocateTensor(TF_INT32, transpose_perm_shape, 1, 4 * 
> sizeof(int32_t));
> +transpose_perm = (int32_t*)TF_TensorData(tensor);
> +transpose_perm[0] = 1;
> +transpose_perm[1] = 2;
> +transpose_perm[2] = 3;
> +transpose_perm[3] = 0;
> +TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
> +if (TF_GetCode(tf_model->status) != TF_OK){
> +return NULL;
> +}
> +transpose_op = TF_FinishOperation(op_desc, tf_model->status);
> +if (TF_GetCode(tf_model->status) != TF_OK){
> +return NULL;
> +}
> +
> +input.index = 0;
> +for (i = 0; i < layers_num; ++i){

> +sprintf(name_buffer, "conv_kernel%d", i);

sprintf() should normally not be used, as it's too easy to end up
overwriting the output buffer.
snprintf() is a safer alternative.
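
For the loop above, that would be e.g.:

    snprintf(name_buffer, sizeof(name_buffer), "conv_kernel%d", i);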

[...]
-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

Complexity theory is the science of finding the exact solution to an
approximation. Benchmarking OTOH is finding an approximation of the exact


___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH 4/7] Adds gray floating-point pixel formats.

2018-08-03 Thread Michael Niedermayer
On Thu, Aug 02, 2018 at 09:52:45PM +0300, Sergey Lavrushkin wrote:
> This patch adds two floating-point gray formats for use in the sr filter
> for conversion with libswscale. I added conversion from uint gray to float
> and back in swscale_unscaled.c, which is enough for the sr filter. But for
> a proper format addition, should I add anything else?
> 
> ---
>  libavutil/pixdesc.c   | 22 ++
>  libavutil/pixfmt.h|  5 
>  libswscale/swscale_internal.h |  7 ++
>  libswscale/swscale_unscaled.c | 54 
> +--
>  libswscale/utils.c|  5 +++-

please split this into one patch for libavutil and one for libswscale;
they also need a version.h bump.

also, the fate tests need an update; (make fate) fails otherwise. The update
should be part of the patch that causes the failure.


>  5 files changed, 90 insertions(+), 3 deletions(-)
> 
> diff --git a/libavutil/pixdesc.c b/libavutil/pixdesc.c
> index 96e079584a..7d307d9120 100644
> --- a/libavutil/pixdesc.c
> +++ b/libavutil/pixdesc.c
> @@ -2198,6 +2198,28 @@ static const AVPixFmtDescriptor 
> av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
>  .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA |
>   AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_FLOAT,
>  },
> +[AV_PIX_FMT_GRAYF32BE] = {
> +.name = "grayf32be",
> +.nb_components = 1,
> +.log2_chroma_w = 0,
> +.log2_chroma_h = 0,
> +.comp = {
> +{ 0, 4, 0, 0, 32, 3, 31, 1 },   /* Y */
> +},
> +.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_FLOAT,
> +.alias = "yf32be",
> +},
> +[AV_PIX_FMT_GRAYF32LE] = {
> +.name = "grayf32le",
> +.nb_components = 1,
> +.log2_chroma_w = 0,
> +.log2_chroma_h = 0,
> +.comp = {
> +{ 0, 4, 0, 0, 32, 3, 31, 1 },   /* Y */
> +},
> +.flags = AV_PIX_FMT_FLAG_FLOAT,
> +.alias = "yf32le",
> +},
>  [AV_PIX_FMT_DRM_PRIME] = {
>  .name = "drm_prime",
>  .flags = AV_PIX_FMT_FLAG_HWACCEL,

> diff --git a/libavutil/pixfmt.h b/libavutil/pixfmt.h
> index 2b3307845e..aa9a4f60c1 100644
> --- a/libavutil/pixfmt.h
> +++ b/libavutil/pixfmt.h
> @@ -320,6 +320,9 @@ enum AVPixelFormat {
>  AV_PIX_FMT_GBRAPF32BE, ///< IEEE-754 single precision planar GBRA 
> 4:4:4:4, 128bpp, big-endian
>  AV_PIX_FMT_GBRAPF32LE, ///< IEEE-754 single precision planar GBRA 
> 4:4:4:4, 128bpp, little-endian
>  
> +AV_PIX_FMT_GRAYF32BE,  ///< IEEE-754 single precision Y, 32bpp, 
> big-endian
> +AV_PIX_FMT_GRAYF32LE,  ///< IEEE-754 single precision Y, 32bpp, 
> little-endian
> +
>  /**
>   * DRM-managed buffers exposed through PRIME buffer sharing.
>   *

new enum values can only be added in such a way that no value of an existing
enum changes. This would change the values of the following enums.
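
A toy illustration (hypothetical enum, not the real pixfmt list) of why
appending at the end is the only safe option:

enum Example {
    EX_OLD_A, /* 0 */
    EX_OLD_B, /* 1 */
    /* inserting a value here would renumber EX_OLD_C and break ABI */
    EX_OLD_C, /* 2 */
    EX_NEW,   /* 3: appended, so every existing value stays stable */
    EX_NB     /* count sentinel stays last */
};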



> @@ -405,6 +408,8 @@ enum AVPixelFormat {
>  #define AV_PIX_FMT_GBRPF32AV_PIX_FMT_NE(GBRPF32BE,  GBRPF32LE)
>  #define AV_PIX_FMT_GBRAPF32   AV_PIX_FMT_NE(GBRAPF32BE, GBRAPF32LE)
>  
> +#define AV_PIX_FMT_GRAYF32 AV_PIX_FMT_NE(GRAYF32BE, GRAYF32LE)
> +
>  #define AV_PIX_FMT_YUVA420P9  AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE)
>  #define AV_PIX_FMT_YUVA422P9  AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE)
>  #define AV_PIX_FMT_YUVA444P9  AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE)
> diff --git a/libswscale/swscale_internal.h b/libswscale/swscale_internal.h
> index 1703856ab2..4a2cdfe658 100644
> --- a/libswscale/swscale_internal.h
> +++ b/libswscale/swscale_internal.h
> @@ -764,6 +764,13 @@ static av_always_inline int isAnyRGB(enum AVPixelFormat 
> pix_fmt)
>  pix_fmt == AV_PIX_FMT_MONOBLACK || pix_fmt == 
> AV_PIX_FMT_MONOWHITE;
>  }
>  
> +static av_always_inline int isFloat(enum AVPixelFormat pix_fmt)
> +{
> +const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
> +av_assert0(desc);
> +return desc->flags & AV_PIX_FMT_FLAG_FLOAT;
> +}
> +
>  static av_always_inline int isALPHA(enum AVPixelFormat pix_fmt)
>  {
>  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);

> diff --git a/libswscale/swscale_unscaled.c b/libswscale/swscale_unscaled.c
> index 6480070cbf..f5b4c9be9d 100644
> --- a/libswscale/swscale_unscaled.c
> +++ b/libswscale/swscale_unscaled.c
> @@ -1467,6 +1467,46 @@ static int yvu9ToYv12Wrapper(SwsContext *c, const 
> uint8_t *src[],
>  return srcSliceH;
>  }
>  
> +static int uint_y_to_float_y_wrapper(SwsContext *c, const uint8_t *src[],
> + int srcStride[], int srcSliceY,
> + int srcSliceH, uint8_t *dst[], int 
> dstStride[])
> +{
> +int y, x;
> +int dstStrideFloat = dstStride[0] >> 2;;

there's a stray ';' there.
Also, newly added strides should probably be ptrdiff_t.
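
A sketch of the suggested change:

    ptrdiff_t dstStrideFloat = dstStride[0] >> 2;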


> +const uint8_t *srcPtr = src[0];
> +float *dstPtr = (float *)(dst[0] + dstStride[0] * srcSliceY);
> +
> +

Re: [FFmpeg-devel] Mixture of homographies video stabilization

2018-08-03 Thread Matthew Lai
Hi Michael,

The operations needed are multiplications, SVD, inversion, scaling, and
additions, as far as I can tell. Convolutions would probably also be
useful. They are certainly possible to implement directly, but it's
unlikely that any naive implementation will be within an order of magnitude
in performance to an optimized BLAS library. That may or may not matter - I
don't have a good idea of where the performance bottlenecks will be yet.
The matrices are 4x4.
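
For scale, a naive 4x4 multiply is tiny; a sketch of what these operations
look like without BLAS (the question is only whether hot paths, like
per-frame SVDs, justify an optimized library):

/* Naive 4x4 matrix multiply, C = A * B, row-major. */
static void mat4_mul(const double A[16], const double B[16], double C[16])
{
    int i, j, k;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++) {
            double s = 0.0;
            for (k = 0; k < 4; k++)
                s += A[4 * i + k] * B[4 * k + j];
            C[4 * i + j] = s;
        }
}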

Motion estimation is one way, but most state of the art implementations
don't use it because it's too noisy (motion from low contrast regions).
Most use something like a corner detector to find sharp feature points, and
track them (this can potentially be implemented using motion estimation,
but only around those points).
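
As a concrete example of the corner-detector step, the Harris response over
a windowed gradient structure tensor (a sketch; sxx/sxy/syy are assumed to
be the window sums of gx*gx, gx*gy, gy*gy):

/* M = [sxx sxy; sxy syy], R = det(M) - k * trace(M)^2.
 * Pixels with a large positive R are corner candidates to track. */
static float harris_response(float sxx, float sxy, float syy)
{
    const float k = 0.04f; /* usual empirical constant */
    float det   = sxx * syy - sxy * sxy;
    float trace = sxx + syy;
    return det - k * trace * trace;
}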

Matthew

On Fri, Aug 3, 2018 at 2:05 AM Michael Niedermayer 
wrote:

> On Thu, Aug 02, 2018 at 05:24:08PM +0100, Matthew Lai wrote:
> > Ah ok thanks! I'm surprised no one has needed a linear algebra library. I
> > guess there's OpenCV and people use it to do the heavy lifting?
> >
> > Will look into the API more.
>
> A lot of the linear algebra we needed has been implemented directly.
> Some of it is shared and available from libavutil, like lls.*
>
> Can you elaborate on what exact linear algebra operations are needed,
> and also what amounts of data (matrix types/sizes) and speed requirements
> this has?
>
> Also, IIUC (please correct me if I am wrong) this uses motion estimation
> at one step. IIRC Ronald (and others) want motion estimation to be
> factored out and shared, not duplicated. The current motion estimation
> is in libavcodec/motion_est*.
>
> Thanks
>
> >
> > Thanks
> > matthew
> >
> > On Thu, Aug 2, 2018 at 3:31 PM Paul B Mahol  wrote:
> >
> > > On 8/2/18, Matthew Lai  wrote:
> > > > Hello!
> > > >
> > > > I want to write a more advanced video stabilizer for libavfilter (*),
> > > > implementing the algorithm described here -
> > > >
> > >
> https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37744.pdf
> > > > The focus of the paper is rolling shutter removal, but it builds on
> top
> > > of
> > > > another algorithm that does full frame stabilization, and the new
> > > algorithm
> > > > does that as well.
> > > >
> > > > This is the algorithm used in YouTube's stabilizing filter, and is
> state
> > > of
> > > > the art. Adobe calls it Warp Stabilizer (it's the same thing as far
> as I
> > > > can tell from public information anyways).
> > > >
> > > > 3 questions:
> > > > 1. Is there a linear algebra library already in use? I didn't see
> > > anything
> > > > in configure, but would be surprised if none of the existing filters
> work
> > > > with matrices?
> > >
> > > There is no such library used here. There are indeed video/audio
> > > filters that work with matrices.
> > >
> > > > 2. Is there anything to watch out for re. a high frame delay (say a
> few
> > > > hundred frames)? Looking at the API, I don't see a callback to flush
> out
> > > > remaining frames when input frames are finished? Is doing it in two
> > > passes
> > > > the only option?
> > >
> > > It is handled internally; there are two internal APIs, the activate
> > > one and the legacy one.
> > > With legacy, you can flush frames when you receive the last frame from
> > > the input.
> > > With the newer activate API, it's similar.
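
A rough sketch of that flush pattern with the activate API, for
illustration only; queue_push()/queue_pop() are hypothetical buffering
helpers, not real libavfilter API:

static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *frame;
    int64_t pts;
    int status, ret;

    ret = ff_inlink_consume_frame(inlink, &frame);
    if (ret < 0)
        return ret;
    if (ret > 0)
        queue_push(ctx->priv, frame);          /* buffer internally */

    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        while ((frame = queue_pop(ctx->priv))) /* drain at EOF */
            if ((ret = ff_filter_frame(outlink, frame)) < 0)
                return ret;
        ff_outlink_set_status(outlink, status, pts);
        return 0;
    }

    FF_FILTER_FORWARD_WANTED(outlink, inlink);
    return FFERROR_NOT_READY;
}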
> > >
> > > > 3. doc/writing_filters.txt says only slice threading is available.
> That's
> > > > not really possible with this filter, but frame threading is. Can I
> just
> > > > buffer frames internally (which I need to do anyways to smooth out
> > > motion),
> > > > and do my own threading?
> > >
> > > You could do it.
> > >
> > > >
> > > > * vid.stab is good for what it does, but it only does rotation and
> > > > translation, and doesn't handle zoom, perspective distortion, or
> rolling
> > > > shutter. This means it's limited when it comes to things like scuba
> > > diving
> > > > videos, where the camera is filming the seabed at a small distance
> and at
> > > > an angle.
> > > >
> > > > Thanks!
> > > > Matthew
> > > > ___
> > > > ffmpeg-devel mailing list
> > > > ffmpeg-devel@ffmpeg.org
> > > > http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
> > > >
> > > ___
> > > ffmpeg-devel mailing list
> > > ffmpeg-devel@ffmpeg.org
> > > http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
> > >
> > ___
> > ffmpeg-devel mailing list
> > ffmpeg-devel@ffmpeg.org
> > http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>
> --
> Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB
>
> It is dangerous to be right in matters on which the established authorities
> are wrong. -- Voltaire
> ___
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>
___
ffmpeg-devel mailing 

[FFmpeg-devel] [PATCH v2] lavf: add raw AVS2 demuxer

2018-08-03 Thread hwren
Signed-off-by: hwren 
---
 libavformat/Makefile |  1 +
 libavformat/allformats.c |  1 +
 libavformat/davs2.c  | 71 
 3 files changed, 73 insertions(+)
 create mode 100644 libavformat/davs2.c

diff --git a/libavformat/Makefile b/libavformat/Makefile
index f2f3aab..c4534b8 100644
--- a/libavformat/Makefile
+++ b/libavformat/Makefile
@@ -110,6 +110,7 @@ OBJS-$(CONFIG_AST_DEMUXER)   += ast.o astdec.o
 OBJS-$(CONFIG_AST_MUXER) += ast.o astenc.o
 OBJS-$(CONFIG_AU_DEMUXER)+= au.o pcm.o
 OBJS-$(CONFIG_AU_MUXER)  += au.o rawenc.o
+OBJS-$(CONFIG_AVS2_DEMUXER)  += davs2.o rawdec.o
 OBJS-$(CONFIG_AVI_DEMUXER)   += avidec.o
OBJS-$(CONFIG_AVI_MUXER) += avienc.o mpegtsenc.o avlanguage.o rawutils.o
 OBJS-$(CONFIG_AVM2_MUXER)+= swfenc.o swf.o
diff --git a/libavformat/allformats.c b/libavformat/allformats.c
index adcc8d9..38eaeea 100644
--- a/libavformat/allformats.c
+++ b/libavformat/allformats.c
@@ -76,6 +76,7 @@ extern AVInputFormat  ff_avisynth_demuxer;
 extern AVOutputFormat ff_avm2_muxer;
 extern AVInputFormat  ff_avr_demuxer;
 extern AVInputFormat  ff_avs_demuxer;
+extern AVInputFormat  ff_avs2_demuxer;
 extern AVInputFormat  ff_bethsoftvid_demuxer;
 extern AVInputFormat  ff_bfi_demuxer;
 extern AVInputFormat  ff_bintext_demuxer;
diff --git a/libavformat/davs2.c b/libavformat/davs2.c
new file mode 100644
index 000..73daa69
--- /dev/null
+++ b/libavformat/davs2.c
@@ -0,0 +1,71 @@
+/*
+ * AVS2 video stream probe.
+ *
+ * Copyright (C) 2018 Huiwen Ren, 
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "rawdec.h"
+#include "libavcodec/internal.h"
+#include "libavutil/intreadwrite.h"
+
+#define ISSQH(x)  ((x) == 0xB0 )
+#define ISEND(x)  ((x) == 0xB1 )
+#define ISPIC(x)  ((x) == 0xB3 || (x) == 0xB6)
+#define ISUNIT(x) ( ISSQH(x) || ISEND(x) || (x) == 0xB2 || ISPIC(x) || (x) == 0xB5 || (x) == 0xB7 )
+#define ISAVS2(x) ((x) == 0x20 || (x) == 0x22 || (x) == 0x30 || (x) == 0x32 )
+
+static int avs2_probe(AVProbeData *p)
+{
+    uint32_t code = -1, hds = 0, pic = 0, seq = 0;
+    uint8_t state = 0;
+    const uint8_t *ptr = p->buf, *end = p->buf + p->buf_size, *sqb = 0;
+
+    if (AV_RB32(p->buf) != 0x1B0) {
+        return 0;
+    }
+
+    while (ptr < end) {
+        ptr = avpriv_find_start_code(ptr, end, &code);
+        state = code & 0xFF;
+        if ((code & 0xffffff00) == 0x100) {
+            if (ISUNIT(state)) {
+                if (sqb && !hds) {
+                    hds = ptr - sqb;
+                }
+                if (ISSQH(state)) {
+                    if (!ISAVS2(*ptr))
+                        return 0;
+                    sqb = ptr;
+                    seq++;
+                } else if (ISPIC(state)) {
+                    pic++;
+                } else if (ISEND(state)) {
+                    break;
+                }
+            }
+        }
+    }
+    if (seq && hds >= 21 && pic) {
+        return AVPROBE_SCORE_EXTENSION + 2; // more than cavs
+    }
+
+    return 0;
+}
+
+FF_DEF_RAWVIDEO_DEMUXER(avs2, "raw AVS2-P2/IEEE1857.4", avs2_probe, "avs,avs2", AV_CODEC_ID_AVS2)
-- 
2.7.4

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel