This patchset enhances Alexis Ballier's original patch and validates
    it using Qualcomm's Venus hardware (driver recently landed upstream
    [1]).

    This has been tested on Qualcomm's DragonBoard 410c and 820c.
    The configure/make scripts have been validated on Ubuntu 10.04 and
    16.04.

    Tested decoders:
           - h264
           - h263
           - mpeg4
           - vp8
           - vp9
           - hevc

    Tested encoders:
           - h264
           - h263
           - mpeg4

    Tested transcoding (concurrent encoding/decoding)
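
    Decoding and encoding can be exercised with standard ffmpeg
    invocations such as the following sketch (file names and the bitrate
    are placeholders, not part of this patchset):

        ffmpeg -i input.mp4 -c:v h264_v4l2m2m -b:v 2M encoded.mp4
        ffmpeg -c:v h264_v4l2m2m -i input.mp4 -f null -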

    Some of the changes introduced:
        - v4l2: code cleanup and abstractions added
        - v4l2: follow the new encode/decode api.
        - v4l2: fix display size for NV12 output pool.
        - v4l2: handle EOS.
        - v4l2: vp8 and mpeg4 decoding and encoding.
        - v4l2: hevc and vp9 support.
        - v4l2: generate EOF on dequeue errors.
        - v4l2: h264_mp4toannexb filtering.
        - v4l2: fixed make install and fate issues.
        - v4l2: codecs enabled/disabled depending on pixfmt defined (see the configure note after this list)
        - v4l2: pass timebase/framerate to the context
        - v4l2: runtime decoder reconfiguration.
        - v4l2: add more frame information
        - v4l2: free hardware resources on last reference being released
        - v4l2: encoding: disable b-frames for upstreaming (patch required)
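
    As noted in the pixfmt item above, which of the *_v4l2m2m codecs get
    built is decided at configure time from the pixel formats exposed by
    the installed linux/videodev2.h; the v4l2_m2m component itself is
    autodetected and can be switched off explicitly. A minimal sketch,
    run from the source tree:

        ./configure                      # autodetects v4l2_m2m from the kernel headers
        ./configure --disable-v4l2_m2m   # build without the V4L2 mem2mem codecs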

    [1] https://lwn.net/Articles/697956/

    Reviewed-by: Jorge Ramirez <jorge.ramirez-or...@linaro.org>
    Reviewed-by: Alexis Ballier <aball...@gentoo.org>
    Tested-by: Jorge Ramirez <jorge.ramirez-or...@linaro.org>
---
 Changelog                 |   1 +
 configure                 |  28 +++
 libavcodec/Makefile       |  15 ++
 libavcodec/allcodecs.c    |   9 +
 libavcodec/v4l2_buffers.c | 448 ++++++++++++++++++++++++++++++++++
 libavcodec/v4l2_buffers.h | 121 ++++++++++
 libavcodec/v4l2_context.c | 604 ++++++++++++++++++++++++++++++++++++++++++++++
 libavcodec/v4l2_context.h | 206 ++++++++++++++++
 libavcodec/v4l2_fmt.c     | 182 ++++++++++++++
 libavcodec/v4l2_fmt.h     |  34 +++
 libavcodec/v4l2_m2m.c     | 331 +++++++++++++++++++++++++
 libavcodec/v4l2_m2m.h     |  94 ++++++++
 libavcodec/v4l2_m2m_dec.c | 232 ++++++++++++++++++
 libavcodec/v4l2_m2m_enc.c | 350 +++++++++++++++++++++++++++
 14 files changed, 2655 insertions(+)
 create mode 100644 libavcodec/v4l2_buffers.c
 create mode 100644 libavcodec/v4l2_buffers.h
 create mode 100644 libavcodec/v4l2_context.c
 create mode 100644 libavcodec/v4l2_context.h
 create mode 100644 libavcodec/v4l2_fmt.c
 create mode 100644 libavcodec/v4l2_fmt.h
 create mode 100644 libavcodec/v4l2_m2m.c
 create mode 100644 libavcodec/v4l2_m2m.h
 create mode 100644 libavcodec/v4l2_m2m_dec.c
 create mode 100644 libavcodec/v4l2_m2m_enc.c

diff --git a/Changelog b/Changelog
index cae5254..95f70f0 100644
--- a/Changelog
+++ b/Changelog
@@ -43,6 +43,7 @@ version <next>:
 - add --disable-autodetect build switch
 - drop deprecated qtkit input device (use avfoundation instead)
 - despill video filter
+- V4L2 mem2mem HW assisted codecs
 
 version 3.3:
 - CrystalHD decoder moved to new decode API
diff --git a/configure b/configure
index 2f3fa2b..93ec18c 100755
--- a/configure
+++ b/configure
@@ -185,6 +185,7 @@ Individual component options:
   --enable-filter=NAME     enable filter NAME
   --disable-filter=NAME    disable filter NAME
   --disable-filters        disable all filters
+  --disable-v4l2_m2m       disable V4L2 mem2mem code [autodetect]
 
 External library support:
 
@@ -1609,6 +1610,7 @@ HWACCEL_AUTODETECT_LIBRARY_LIST="
     vda
     vdpau
     videotoolbox_hwaccel
+    v4l2_m2m
     xvmc
 "
 
@@ -2740,6 +2742,7 @@ omx_rpi_select="omx"
 qsvdec_select="qsv"
 qsvenc_select="qsv"
 vaapi_encode_deps="vaapi"
+v4l2_m2m_deps_any="linux_videodev2_h"
 
 hwupload_cuda_filter_deps="cuda"
 scale_npp_filter_deps="cuda libnpp"
@@ -2749,6 +2752,8 @@ nvenc_deps="cuda"
 nvenc_deps_any="dlopen LoadLibrary"
 nvenc_encoder_deps="nvenc"
 
+h263_v4l2m2m_decoder_deps="v4l2_m2m h263_v4l2_m2m"
+h263_v4l2m2m_encoder_deps="v4l2_m2m h263_v4l2_m2m"
 h264_crystalhd_decoder_select="crystalhd h264_mp4toannexb_bsf h264_parser"
 h264_cuvid_decoder_deps="cuda cuvid"
 h264_cuvid_decoder_select="h264_mp4toannexb_bsf"
@@ -2767,6 +2772,8 @@ h264_vda_decoder_deps="vda"
 h264_vda_decoder_select="h264_decoder"
 h264_vdpau_decoder_deps="vdpau"
 h264_vdpau_decoder_select="h264_decoder"
+h264_v4l2m2m_decoder_deps="v4l2_m2m h264_v4l2_m2m"
+h264_v4l2m2m_encoder_deps="v4l2_m2m h264_v4l2_m2m"
 hevc_cuvid_decoder_deps="cuda cuvid"
 hevc_cuvid_decoder_select="hevc_mp4toannexb_bsf"
 hevc_mediacodec_decoder_deps="mediacodec"
@@ -2778,12 +2785,15 @@ hevc_qsv_encoder_deps="libmfx"
 hevc_qsv_encoder_select="hevcparse qsvenc"
 hevc_vaapi_encoder_deps="VAEncPictureParameterBufferHEVC"
 hevc_vaapi_encoder_select="vaapi_encode golomb"
+hevc_v4l2m2m_decoder_deps="v4l2_m2m hevc_v4l2_m2m"
+hevc_v4l2m2m_encoder_deps="v4l2_m2m hevc_v4l2_m2m"
 mjpeg_cuvid_decoder_deps="cuda cuvid"
 mjpeg_vaapi_encoder_deps="VAEncPictureParameterBufferJPEG"
 mjpeg_vaapi_encoder_select="vaapi_encode jpegtables"
 mpeg1_cuvid_decoder_deps="cuda cuvid"
 mpeg1_vdpau_decoder_deps="vdpau"
 mpeg1_vdpau_decoder_select="mpeg1video_decoder"
+mpeg1_v4l2m2m_decoder_deps="v4l2_m2m mpeg1_v4l2_m2m"
 mpeg2_crystalhd_decoder_select="crystalhd"
 mpeg2_cuvid_decoder_deps="cuda cuvid"
 mpeg2_mmal_decoder_deps="mmal"
@@ -2794,6 +2804,7 @@ mpeg2_qsv_encoder_deps="libmfx"
 mpeg2_qsv_encoder_select="qsvenc"
 mpeg2_vaapi_encoder_deps="VAEncPictureParameterBufferMPEG2"
 mpeg2_vaapi_encoder_select="vaapi_encode"
+mpeg2_v4l2m2m_decoder_deps="v4l2_m2m mpeg2_v4l2_m2m"
 mpeg4_crystalhd_decoder_select="crystalhd"
 mpeg4_cuvid_decoder_deps="cuda cuvid"
 mpeg4_mediacodec_decoder_deps="mediacodec"
@@ -2801,6 +2812,8 @@ mpeg4_mmal_decoder_deps="mmal"
 mpeg4_omx_encoder_deps="omx"
 mpeg4_vdpau_decoder_deps="vdpau"
 mpeg4_vdpau_decoder_select="mpeg4_decoder"
+mpeg4_v4l2m2m_decoder_deps="v4l2_m2m mpeg4_v4l2_m2m"
+mpeg4_v4l2m2m_encoder_deps="v4l2_m2m mpeg4_v4l2_m2m"
 mpeg_vdpau_decoder_deps="vdpau"
 mpeg_vdpau_decoder_select="mpeg2video_decoder"
 msmpeg4_crystalhd_decoder_select="crystalhd"
@@ -2811,16 +2824,20 @@ vc1_cuvid_decoder_deps="cuda cuvid"
 vc1_mmal_decoder_deps="mmal"
 vc1_vdpau_decoder_deps="vdpau"
 vc1_vdpau_decoder_select="vc1_decoder"
+vc1_v4l2m2m_decoder_deps="v4l2_m2m vc1_v4l2_m2m"
 vp8_cuvid_decoder_deps="cuda cuvid"
 vp8_mediacodec_decoder_deps="mediacodec"
 vp8_qsv_decoder_deps="libmfx"
 vp8_qsv_decoder_select="qsvdec vp8_qsv_hwaccel vp8_parser"
 vp8_vaapi_encoder_deps="VAEncPictureParameterBufferVP8"
 vp8_vaapi_encoder_select="vaapi_encode"
+vp8_v4l2m2m_decoder_deps="v4l2_m2m vp8_v4l2_m2m"
+vp8_v4l2m2m_encoder_deps="v4l2_m2m vp8_v4l2_m2m"
 vp9_cuvid_decoder_deps="cuda cuvid"
 vp9_mediacodec_decoder_deps="mediacodec"
 vp9_vaapi_encoder_deps="VAEncPictureParameterBufferVP9"
 vp9_vaapi_encoder_select="vaapi_encode"
+vp9_v4l2m2m_decoder_deps="v4l2_m2m vp9_v4l2_m2m"
 wmv3_crystalhd_decoder_select="crystalhd"
 wmv3_vdpau_decoder_select="vc1_vdpau_decoder"
 
@@ -6079,9 +6096,20 @@ perl -v            > /dev/null 2>&1 && enable perl      || disable perl
 pod2man --help     > /dev/null 2>&1 && enable pod2man   || disable pod2man
 rsync --help 2> /dev/null | grep -q 'contimeout' && enable rsync_contimeout || disable rsync_contimeout
 
+# check V4L2 codecs available in the API
 check_header linux/fb.h
 check_header linux/videodev2.h
 check_code cc linux/videodev2.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_safe struct_v4l2_frmivalenum_discrete
+check_code cc linux/videodev2.h "int i = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_VIDEO_M2M | V4L2_BUF_FLAG_LAST;" || disable v4l2_m2m
+check_code cc linux/videodev2.h "int i = V4L2_PIX_FMT_VC1_ANNEX_G;" && enable vc1_v4l2_m2m
+check_code cc linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG1;" && enable mpeg1_v4l2_m2m
+check_code cc linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG2;" && enable mpeg2_v4l2_m2m
+check_code cc linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG4;" && enable mpeg4_v4l2_m2m
+check_code cc linux/videodev2.h "int i = V4L2_PIX_FMT_HEVC;" && enable hevc_v4l2_m2m
+check_code cc linux/videodev2.h "int i = V4L2_PIX_FMT_H263;" && enable h263_v4l2_m2m
+check_code cc linux/videodev2.h "int i = V4L2_PIX_FMT_H264;" && enable h264_v4l2_m2m
+check_code cc linux/videodev2.h "int i = V4L2_PIX_FMT_VP8;" && enable vp8_v4l2_m2m
+check_code cc linux/videodev2.h "int i = V4L2_PIX_FMT_VP9;" && enable vp9_v4l2_m2m
 
 check_header sys/videoio.h
 check_code cc sys/videoio.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_safe struct_v4l2_frmivalenum_discrete
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 999632c..68193ec 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -137,6 +137,7 @@ OBJS-$(CONFIG_VIDEODSP)                += videodsp.o
 OBJS-$(CONFIG_VP3DSP)                  += vp3dsp.o
 OBJS-$(CONFIG_VP56DSP)                 += vp56dsp.o
 OBJS-$(CONFIG_VP8DSP)                  += vp8dsp.o
+OBJS-$(CONFIG_V4L2_M2M)                += v4l2_m2m.o v4l2_context.o v4l2_buffers.o v4l2_fmt.o
 OBJS-$(CONFIG_WMA_FREQS)               += wma_freqs.o
 OBJS-$(CONFIG_WMV2DSP)                 += wmv2dsp.o
 
@@ -323,6 +324,8 @@ OBJS-$(CONFIG_H263_DECODER)            += h263dec.o h263.o ituh263dec.o        \
                                           intelh263dec.o h263data.o
 OBJS-$(CONFIG_H263_ENCODER)            += mpeg4videoenc.o mpeg4video.o  \
                                           h263.o ituh263enc.o flvenc.o h263data.o
+OBJS-$(CONFIG_H263_V4L2M2M_DECODER)    += v4l2_m2m_dec.o
+OBJS-$(CONFIG_H263_V4L2M2M_ENCODER)    += v4l2_m2m_enc.o
 OBJS-$(CONFIG_H264_DECODER)            += h264dec.o h264_cabac.o h264_cavlc.o \
                                           h264_direct.o h264_loopfilter.o  \
                                           h264_mb.o h264_picture.o \
@@ -340,6 +343,8 @@ OBJS-$(CONFIG_H264_QSV_DECODER)        += qsvdec_h2645.o
 OBJS-$(CONFIG_H264_QSV_ENCODER)        += qsvenc_h264.o
 OBJS-$(CONFIG_H264_VAAPI_ENCODER)      += vaapi_encode_h264.o vaapi_encode_h26x.o
 OBJS-$(CONFIG_H264_VIDEOTOOLBOX_ENCODER) += videotoolboxenc.o
+OBJS-$(CONFIG_H264_V4L2M2M_DECODER)    += v4l2_m2m_dec.o
+OBJS-$(CONFIG_H264_V4L2M2M_ENCODER)    += v4l2_m2m_enc.o
 OBJS-$(CONFIG_HAP_DECODER)             += hapdec.o hap.o
 OBJS-$(CONFIG_HAP_ENCODER)             += hapenc.o hap.o
 OBJS-$(CONFIG_HEVC_DECODER)            += hevcdec.o hevc_mvs.o \
@@ -353,6 +358,8 @@ OBJS-$(CONFIG_HEVC_QSV_DECODER)        += qsvdec_h2645.o
 OBJS-$(CONFIG_HEVC_QSV_ENCODER)        += qsvenc_hevc.o hevc_ps_enc.o       \
                                           hevc_data.o
 OBJS-$(CONFIG_HEVC_VAAPI_ENCODER)      += vaapi_encode_h265.o vaapi_encode_h26x.o
+OBJS-$(CONFIG_HEVC_V4L2M2M_DECODER)    += v4l2_m2m_dec.o
+OBJS-$(CONFIG_HEVC_V4L2M2M_ENCODER)    += v4l2_m2m_enc.o
 OBJS-$(CONFIG_HNM4_VIDEO_DECODER)      += hnm4video.o
 OBJS-$(CONFIG_HQ_HQA_DECODER)          += hq_hqa.o hq_hqadata.o hq_hqadsp.o \
                                           canopus.o
@@ -422,6 +429,7 @@ OBJS-$(CONFIG_MPC8_DECODER)            += mpc8.o mpc.o
 OBJS-$(CONFIG_MPEGVIDEO_DECODER)       += mpeg12dec.o mpeg12.o mpeg12data.o
 OBJS-$(CONFIG_MPEG1VIDEO_DECODER)      += mpeg12dec.o mpeg12.o mpeg12data.o
 OBJS-$(CONFIG_MPEG1VIDEO_ENCODER)      += mpeg12enc.o mpeg12.o
+OBJS-$(CONFIG_MPEG1_V4L2M2M_DECODER)   += v4l2_m2m_dec.o
 OBJS-$(CONFIG_MPEG2_MMAL_DECODER)      += mmaldec.o
 OBJS-$(CONFIG_MPEG2_QSV_DECODER)       += qsvdec_other.o
 OBJS-$(CONFIG_MPEG2_QSV_ENCODER)       += qsvenc_mpeg2.o
@@ -429,9 +437,12 @@ OBJS-$(CONFIG_MPEG2VIDEO_DECODER)      += mpeg12dec.o mpeg12.o mpeg12data.o
 OBJS-$(CONFIG_MPEG2VIDEO_ENCODER)      += mpeg12enc.o mpeg12.o
 OBJS-$(CONFIG_MPEG2_MEDIACODEC_DECODER) += mediacodecdec.o
 OBJS-$(CONFIG_MPEG2_VAAPI_ENCODER)     += vaapi_encode_mpeg2.o
+OBJS-$(CONFIG_MPEG2_V4L2M2M_DECODER)   += v4l2_m2m_dec.o
 OBJS-$(CONFIG_MPEG4_DECODER)           += xvididct.o
 OBJS-$(CONFIG_MPEG4_MEDIACODEC_DECODER) += mediacodecdec.o
 OBJS-$(CONFIG_MPEG4_OMX_ENCODER)       += omx.o
+OBJS-$(CONFIG_MPEG4_V4L2M2M_DECODER)   += v4l2_m2m_dec.o
+OBJS-$(CONFIG_MPEG4_V4L2M2M_ENCODER)   += v4l2_m2m_enc.o
 OBJS-$(CONFIG_MPL2_DECODER)            += mpl2dec.o ass.o
 OBJS-$(CONFIG_MSA1_DECODER)            += mss3.o
 OBJS-$(CONFIG_MSCC_DECODER)            += mscc.o
@@ -605,6 +616,7 @@ OBJS-$(CONFIG_VC1_DECODER)             += vc1dec.o vc1_block.o vc1_loopfilter.o
 OBJS-$(CONFIG_VC1_CUVID_DECODER)       += cuvid.o
 OBJS-$(CONFIG_VC1_MMAL_DECODER)        += mmaldec.o
 OBJS-$(CONFIG_VC1_QSV_DECODER)         += qsvdec_other.o
+OBJS-$(CONFIG_VC1_V4L2M2M_DECODER)     += v4l2_m2m_dec.o
 OBJS-$(CONFIG_VC2_ENCODER)             += vc2enc.o vc2enc_dwt.o diractab.o
 OBJS-$(CONFIG_VCR1_DECODER)            += vcr1.o
 OBJS-$(CONFIG_VMDAUDIO_DECODER)        += vmdaudio.o
@@ -624,6 +636,8 @@ OBJS-$(CONFIG_VP8_CUVID_DECODER)       += cuvid.o
 OBJS-$(CONFIG_VP8_MEDIACODEC_DECODER)  += mediacodecdec.o
 OBJS-$(CONFIG_VP8_QSV_DECODER)         += qsvdec_other.o
 OBJS-$(CONFIG_VP8_VAAPI_ENCODER)       += vaapi_encode_vp8.o
+OBJS-$(CONFIG_VP8_V4L2M2M_DECODER)     += v4l2_m2m_dec.o
+OBJS-$(CONFIG_VP8_V4L2M2M_ENCODER)     += v4l2_m2m_enc.o
 OBJS-$(CONFIG_VP9_DECODER)             += vp9.o vp9data.o vp9dsp.o vp9lpf.o vp9recon.o \
                                           vp9block.o vp9prob.o vp9mvs.o vp56rac.o \
                                           vp9dsp_8bpp.o vp9dsp_10bpp.o vp9dsp_12bpp.o
@@ -631,6 +645,7 @@ OBJS-$(CONFIG_VP9_CUVID_DECODER)       += cuvid.o
 OBJS-$(CONFIG_VP9_MEDIACODEC_DECODER)  += mediacodecdec.o
 OBJS-$(CONFIG_VP9_VAAPI_ENCODER)       += vaapi_encode_vp9.o
 OBJS-$(CONFIG_VPLAYER_DECODER)         += textdec.o ass.o
+OBJS-$(CONFIG_VP9_V4L2M2M_DECODER)     += v4l2_m2m_dec.o
 OBJS-$(CONFIG_VQA_DECODER)             += vqavideo.o
 OBJS-$(CONFIG_WAVPACK_DECODER)         += wavpack.o
 OBJS-$(CONFIG_WAVPACK_ENCODER)         += wavpackenc.o
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index ce0bc7e..756ce56 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -208,8 +208,10 @@ static void register_all(void)
     REGISTER_ENCDEC (H263,              h263);
     REGISTER_DECODER(H263I,             h263i);
     REGISTER_ENCDEC (H263P,             h263p);
+    REGISTER_ENCDEC (H263_V4L2M2M,      h263_v4l2m2m);
     REGISTER_DECODER(H264,              h264);
     REGISTER_DECODER(H264_CRYSTALHD,    h264_crystalhd);
+    REGISTER_ENCDEC (H264_V4L2M2M,      h264_v4l2m2m);
     REGISTER_DECODER(H264_MEDIACODEC,   h264_mediacodec);
     REGISTER_DECODER(H264_MMAL,         h264_mmal);
     REGISTER_DECODER(H264_QSV,          h264_qsv);
@@ -220,6 +222,7 @@ static void register_all(void)
     REGISTER_ENCDEC (HAP,               hap);
     REGISTER_DECODER(HEVC,              hevc);
     REGISTER_DECODER(HEVC_QSV,          hevc_qsv);
+    REGISTER_ENCDEC (HEVC_V4L2M2M,      hevc_v4l2m2m);
     REGISTER_DECODER(HNM4_VIDEO,        hnm4_video);
     REGISTER_DECODER(HQ_HQA,            hq_hqa);
     REGISTER_DECODER(HQX,               hqx);
@@ -254,6 +257,7 @@ static void register_all(void)
     REGISTER_ENCDEC (MPEG2VIDEO,        mpeg2video);
     REGISTER_ENCDEC (MPEG4,             mpeg4);
     REGISTER_DECODER(MPEG4_CRYSTALHD,   mpeg4_crystalhd);
+    REGISTER_ENCDEC (MPEG4_V4L2M2M,     mpeg4_v4l2m2m);
     REGISTER_DECODER(MPEG4_MMAL,        mpeg4_mmal);
 #if FF_API_VDPAU
     REGISTER_DECODER(MPEG4_VDPAU,       mpeg4_vdpau);
@@ -263,8 +267,10 @@ static void register_all(void)
     REGISTER_DECODER(MPEG_VDPAU,        mpeg_vdpau);
     REGISTER_DECODER(MPEG1_VDPAU,       mpeg1_vdpau);
 #endif
+    REGISTER_DECODER(MPEG1_V4L2M2M,     mpeg1_v4l2m2m);
     REGISTER_DECODER(MPEG2_MMAL,        mpeg2_mmal);
     REGISTER_DECODER(MPEG2_CRYSTALHD,   mpeg2_crystalhd);
+    REGISTER_DECODER(MPEG2_V4L2M2M,     mpeg2_v4l2m2m);
     REGISTER_DECODER(MPEG2_QSV,         mpeg2_qsv);
     REGISTER_DECODER(MPEG2_MEDIACODEC,  mpeg2_mediacodec);
     REGISTER_DECODER(MSA1,              msa1);
@@ -362,6 +368,7 @@ static void register_all(void)
     REGISTER_DECODER(VC1IMAGE,          vc1image);
     REGISTER_DECODER(VC1_MMAL,          vc1_mmal);
     REGISTER_DECODER(VC1_QSV,           vc1_qsv);
+    REGISTER_DECODER(VC1_V4L2M2M,       vc1_v4l2m2m);
     REGISTER_ENCODER(VC2,               vc2);
     REGISTER_DECODER(VCR1,              vcr1);
     REGISTER_DECODER(VMDVIDEO,          vmdvideo);
@@ -373,7 +380,9 @@ static void register_all(void)
     REGISTER_DECODER(VP6F,              vp6f);
     REGISTER_DECODER(VP7,               vp7);
     REGISTER_DECODER(VP8,               vp8);
+    REGISTER_ENCDEC (VP8_V4L2M2M,       vp8_v4l2m2m);
     REGISTER_DECODER(VP9,               vp9);
+    REGISTER_DECODER(VP9_V4L2M2M,       vp9_v4l2m2m);
     REGISTER_DECODER(VQA,               vqa);
     REGISTER_DECODER(BITPACKED,         bitpacked);
     REGISTER_DECODER(WEBP,              webp);
diff --git a/libavcodec/v4l2_buffers.c b/libavcodec/v4l2_buffers.c
new file mode 100644
index 0000000..939bbd4
--- /dev/null
+++ b/libavcodec/v4l2_buffers.c
@@ -0,0 +1,448 @@
+/*
+ * V4L2 buffer helper functions.
+ *
+ * Copyright (C) 2017 Alexis Ballier <aball...@gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-or...@linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/videodev2.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <poll.h>
+#include "libavcodec/avcodec.h"
+#include "libavcodec/internal.h"
+#include "v4l2_context.h"
+#include "v4l2_buffers.h"
+#include "v4l2_m2m.h"
+
+#define USEC_PER_SEC 1000000
+
+static inline V4L2m2mContext *buf_to_m2mctx(V4L2Buffer *buf)
+{
+    return V4L2_TYPE_IS_OUTPUT(buf->context->type) ?
+        container_of(buf->context, V4L2m2mContext, output) :
+        container_of(buf->context, V4L2m2mContext, capture);
+}
+
+static inline void v4l2_set_pts(V4L2Buffer *out, int64_t pts)
+{
+    V4L2m2mContext *s = buf_to_m2mctx(out);
+    AVRational v4l2_timebase = { 1, USEC_PER_SEC };
+    int64_t v4l2_pts;
+
+    if (pts == AV_NOPTS_VALUE)
+        pts = 0;
+
+    /* convert pts to v4l2 timebase */
+    v4l2_pts = av_rescale_q(pts, s->avctx->time_base, v4l2_timebase);
+    out->buf.timestamp.tv_usec = v4l2_pts % USEC_PER_SEC;
+    out->buf.timestamp.tv_sec = v4l2_pts / USEC_PER_SEC;
+}
+
+static inline uint64_t v4l2_get_pts(V4L2Buffer *avbuf)
+{
+    V4L2m2mContext *s = buf_to_m2mctx(avbuf);
+    AVRational v4l2_timebase = { 1, USEC_PER_SEC };
+    int64_t v4l2_pts;
+
+    /* convert pts back to encoder timebase */
+    v4l2_pts = avbuf->buf.timestamp.tv_sec * USEC_PER_SEC + avbuf->buf.timestamp.tv_usec;
+
+    return av_rescale_q(v4l2_pts, v4l2_timebase, s->avctx->time_base);
+}
+
+static enum AVColorPrimaries v4l2_get_color_primaries(V4L2Buffer *buf)
+{
+    enum v4l2_ycbcr_encoding ycbcr;
+    enum v4l2_colorspace cs;
+
+    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
+        buf->context->format.fmt.pix_mp.colorspace :
+        buf->context->format.fmt.pix.colorspace;
+
+    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
+        buf->context->format.fmt.pix_mp.ycbcr_enc:
+        buf->context->format.fmt.pix.ycbcr_enc;
+
+    switch(ycbcr) {
+    case V4L2_YCBCR_ENC_XV709:
+    case V4L2_YCBCR_ENC_709: return AVCOL_PRI_BT709;
+    case V4L2_YCBCR_ENC_XV601:
+    case V4L2_YCBCR_ENC_601:return AVCOL_PRI_BT470M;
+    default:
+        break;
+    }
+
+    switch(cs) {
+    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_PRI_BT470BG;
+    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_PRI_SMPTE170M;
+    case V4L2_COLORSPACE_SMPTE240M: return AVCOL_PRI_SMPTE240M;
+    case V4L2_COLORSPACE_BT2020: return AVCOL_PRI_BT2020;
+    default:
+        break;
+    }
+
+    return AVCOL_PRI_UNSPECIFIED;
+}
+
+static enum AVColorRange v4l2_get_color_range(V4L2Buffer *buf)
+{
+    enum v4l2_quantization qt;
+
+    qt = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
+        buf->context->format.fmt.pix_mp.quantization :
+        buf->context->format.fmt.pix.quantization;
+
+    switch (qt) {
+    case V4L2_QUANTIZATION_LIM_RANGE: return AVCOL_RANGE_MPEG;
+    case V4L2_QUANTIZATION_FULL_RANGE: return AVCOL_RANGE_JPEG;
+    default:
+        break;
+    }
+
+     return AVCOL_RANGE_UNSPECIFIED;
+}
+
+static enum AVColorSpace v4l2_get_color_space(V4L2Buffer *buf)
+{
+    enum v4l2_ycbcr_encoding ycbcr;
+    enum v4l2_colorspace cs;
+
+    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
+        buf->context->format.fmt.pix_mp.colorspace :
+        buf->context->format.fmt.pix.colorspace;
+
+    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
+        buf->context->format.fmt.pix_mp.ycbcr_enc:
+        buf->context->format.fmt.pix.ycbcr_enc;
+
+    switch(cs) {
+    case V4L2_COLORSPACE_SRGB: return AVCOL_SPC_RGB;
+    case V4L2_COLORSPACE_REC709: return AVCOL_SPC_BT709;
+    case V4L2_COLORSPACE_470_SYSTEM_M: return AVCOL_SPC_FCC;
+    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_SPC_BT470BG;
+    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_SPC_SMPTE170M;
+    case V4L2_COLORSPACE_SMPTE240M: return AVCOL_SPC_SMPTE240M;
+    case V4L2_COLORSPACE_BT2020:
+        if (ycbcr == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
+            return AVCOL_SPC_BT2020_CL;
+        else
+             return AVCOL_SPC_BT2020_NCL;
+    default:
+        break;
+    }
+
+    return AVCOL_SPC_UNSPECIFIED;
+}
+
+static enum AVColorTransferCharacteristic v4l2_get_color_trc(V4L2Buffer *buf)
+{
+    enum v4l2_ycbcr_encoding ycbcr;
+    enum v4l2_xfer_func xfer;
+    enum v4l2_colorspace cs;
+
+    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
+        buf->context->format.fmt.pix_mp.colorspace :
+        buf->context->format.fmt.pix.colorspace;
+
+    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
+        buf->context->format.fmt.pix_mp.ycbcr_enc:
+        buf->context->format.fmt.pix.ycbcr_enc;
+
+    xfer = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
+        buf->context->format.fmt.pix_mp.xfer_func:
+        buf->context->format.fmt.pix.xfer_func;
+
+    switch (xfer) {
+    case V4L2_XFER_FUNC_709: return AVCOL_TRC_BT709;
+    case V4L2_XFER_FUNC_SRGB: return AVCOL_TRC_IEC61966_2_1;
+    default:
+        break;
+    }
+
+    switch (cs) {
+    case V4L2_COLORSPACE_470_SYSTEM_M: return AVCOL_TRC_GAMMA22;
+    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_TRC_GAMMA28;
+    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_TRC_SMPTE170M;
+    case V4L2_COLORSPACE_SMPTE240M: return AVCOL_TRC_SMPTE240M;
+    default:
+        break;
+    }
+
+    switch (ycbcr) {
+    case V4L2_YCBCR_ENC_XV709:
+    case V4L2_YCBCR_ENC_XV601: return AVCOL_TRC_BT1361_ECG;
+    default:
+        break;
+    }
+
+    return AVCOL_TRC_UNSPECIFIED;
+}
+
+static void v4l2_free_buffer(void *opaque, uint8_t *unused)
+{
+    V4L2Buffer* avbuf = opaque;
+    V4L2m2mContext *s = buf_to_m2mctx(avbuf);
+
+    atomic_fetch_sub_explicit(&s->refcount, 1, memory_order_acq_rel);
+    if (s->reinit) {
+        if (!atomic_load(&s->refcount))
+            sem_post(&s->refsync);
+        return;
+    }
+
+    if (avbuf->context->streamon) {
+        ff_v4l2_buffer_enqueue(avbuf);
+        return;
+    }
+
+    if (!atomic_load(&s->refcount))
+        ff_v4l2_m2m_codec_end(s->avctx);
+}
+
+static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf)
+{
+    V4L2m2mContext *s = buf_to_m2mctx(in);
+
+    if (plane >= in->num_planes)
+        return AVERROR(EINVAL);
+
+    /* even though most encoders return 0 in data_offset, vp8 encoding does require this value */
+    *buf = av_buffer_create((char *)in->plane_info[plane].mm_addr + in->planes[plane].data_offset,
+                            in->plane_info[plane].length, v4l2_free_buffer, in, 0);
+    if (!*buf)
+        return AVERROR(ENOMEM);
+
+    in->status = V4L2BUF_RET_USER;
+    atomic_fetch_add_explicit(&s->refcount, 1, memory_order_relaxed);
+
+    return 0;
+}
+
+static int v4l2_bufref_to_buf(V4L2Buffer *out, int plane, const uint8_t* data, int size, AVBufferRef* bref)
+{
+    if (plane >= out->num_planes)
+        return AVERROR(EINVAL);
+
+    memcpy(out->plane_info[plane].mm_addr, data, FFMIN(size, out->plane_info[plane].length));
+
+    out->planes[plane].bytesused = FFMIN(size, out->plane_info[plane].length);
+    out->planes[plane].length = out->plane_info[plane].length;
+
+    return 0;
+}
+
+/******************************************************************************
+ *
+ *              V4L2Buffer interface
+ *
+ ******************************************************************************/
+
+int ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer* out)
+{
+    int i, ret;
+
+    for(i = 0; i < out->num_planes; i++) {
+        ret = v4l2_bufref_to_buf(out, i, frame->buf[i]->data, frame->buf[i]->size, frame->buf[i]);
+        if (ret)
+            return ret;
+    }
+
+    v4l2_set_pts(out, frame->pts);
+
+    return 0;
+}
+
+int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf)
+{
+    V4L2m2mContext *s = buf_to_m2mctx(avbuf);
+    int i, ret;
+
+    av_frame_unref(frame);
+
+    /* 1. get references to the actual data */
+    for (i = 0; i < avbuf->num_planes; i++) {
+        ret = v4l2_buf_to_bufref(avbuf, i, &frame->buf[i]);
+        if (ret)
+            return ret;
+
+        frame->linesize[i] = avbuf->plane_info[i].bytesperline;
+        frame->data[i] = frame->buf[i]->data;
+    }
+
+    /* 1.1 fixup special cases */
+    switch (avbuf->context->av_pix_fmt) {
+    case AV_PIX_FMT_NV12:
+        if (avbuf->num_planes > 1)
+            break;
+        frame->linesize[1] = avbuf->plane_info[0].bytesperline;
+        frame->data[1] = frame->buf[0]->data + avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height;
+        break;
+    default:
+        break;
+    }
+
+    /* 2. get frame information */
+    frame->key_frame = !!(avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME);
+    frame->format = avbuf->context->av_pix_fmt;
+    frame->color_primaries = v4l2_get_color_primaries(avbuf);
+    frame->colorspace = v4l2_get_color_space(avbuf);
+    frame->color_range = v4l2_get_color_range(avbuf);
+    frame->color_trc = v4l2_get_color_trc(avbuf);
+    frame->pts = v4l2_get_pts(avbuf);
+
+    /* these two values are also updated during re-init in v4l2_handle_event() */
+    frame->height = s->output.height;
+    frame->width = s->output.width;
+
+    /* 3. report errors upstream */
+    if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
+        av_log(avbuf->context->log_ctx, AV_LOG_ERROR, "%s: driver decode error\n", avbuf->context->name);
+        frame->decode_error_flags |= FF_DECODE_ERROR_INVALID_BITSTREAM;
+    }
+
+    return 0;
+}
+
+int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf)
+{
+    int ret;
+
+    av_packet_unref(pkt);
+    ret = v4l2_buf_to_bufref(avbuf, 0, &pkt->buf);
+    if (ret)
+        return ret;
+
+    pkt->size = V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type) ? avbuf->buf.m.planes[0].bytesused : avbuf->buf.bytesused;
+    pkt->data = pkt->buf->data;
+
+    if (avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME)
+        pkt->flags |= AV_PKT_FLAG_KEY;
+
+    if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
+        av_log(avbuf->context->log_ctx, AV_LOG_ERROR, "%s driver encode error\n", avbuf->context->name);
+        pkt->flags |= AV_PKT_FLAG_CORRUPT;
+    }
+
+    pkt->dts = pkt->pts = v4l2_get_pts(avbuf);
+
+    return 0;
+}
+
+int ff_v4l2_buffer_avpkt_to_buf(const AVPacket *pkt, V4L2Buffer *out)
+{
+    int ret;
+
+    ret = v4l2_bufref_to_buf(out, 0, pkt->data, pkt->size, pkt->buf);
+    if (ret)
+        return ret;
+
+    v4l2_set_pts(out, pkt->pts);
+
+    if (pkt->flags & AV_PKT_FLAG_KEY)
+        out->flags = V4L2_BUF_FLAG_KEYFRAME;
+
+    return 0;
+}
+
+int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index)
+{
+    V4L2Context *ctx = avbuf->context;
+    int ret, i;
+
+    avbuf->buf.memory = V4L2_MEMORY_MMAP;
+    avbuf->buf.type = ctx->type;
+    avbuf->buf.index = index;
+
+    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
+        avbuf->buf.length = VIDEO_MAX_PLANES;
+        avbuf->buf.m.planes = avbuf->planes;
+    }
+
+    ret = ioctl(ctx->fd, VIDIOC_QUERYBUF, &avbuf->buf);
+    if (ret < 0)
+        return AVERROR(errno);
+
+    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
+        avbuf->num_planes = 0;
+        for (;;) {
+            /* in MP, the V4L2 API states that buf.length means num_planes */
+            if (avbuf->num_planes >= avbuf->buf.length)
+                break;
+            if (avbuf->buf.m.planes[avbuf->num_planes].length)
+                avbuf->num_planes++;
+        }
+    } else
+        avbuf->num_planes = 1;
+
+    for (i = 0; i < avbuf->num_planes; i++) {
+
+        avbuf->plane_info[i].bytesperline = V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ?
+            ctx->format.fmt.pix_mp.plane_fmt[i].bytesperline :
+            ctx->format.fmt.pix.bytesperline;
+
+        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
+            avbuf->plane_info[i].length = avbuf->buf.m.planes[i].length;
+            avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.m.planes[i].length,
+                                           PROT_READ | PROT_WRITE, MAP_SHARED,
+                                           ctx->fd, avbuf->buf.m.planes[i].m.mem_offset);
+        } else {
+            avbuf->plane_info[i].length = avbuf->buf.length;
+            avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.length,
+                                          PROT_READ | PROT_WRITE, MAP_SHARED,
+                                          ctx->fd, avbuf->buf.m.offset);
+        }
+
+        if (avbuf->plane_info[i].mm_addr == MAP_FAILED)
+            return AVERROR(ENOMEM);
+    }
+
+    avbuf->status = V4L2BUF_AVAILABLE;
+
+    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
+        return 0;
+
+    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
+        avbuf->buf.m.planes = avbuf->planes;
+        avbuf->buf.length   = avbuf->num_planes;
+
+    } else {
+        avbuf->buf.bytesused = avbuf->planes[0].bytesused;
+        avbuf->buf.length    = avbuf->planes[0].length;
+    }
+
+    return ff_v4l2_buffer_enqueue(avbuf);
+}
+
+int ff_v4l2_buffer_enqueue(V4L2Buffer* avbuf)
+{
+    int ret;
+
+    avbuf->buf.flags = avbuf->flags;
+
+    ret = ioctl(avbuf->context->fd, VIDIOC_QBUF, &avbuf->buf);
+    if (ret < 0)
+        return AVERROR(errno);
+
+    avbuf->status = V4L2BUF_IN_DRIVER;
+
+    return 0;
+}
diff --git a/libavcodec/v4l2_buffers.h b/libavcodec/v4l2_buffers.h
new file mode 100644
index 0000000..8901a09
--- /dev/null
+++ b/libavcodec/v4l2_buffers.h
@@ -0,0 +1,121 @@
+/*
+ * V4L2 buffer helper functions.
+ *
+ * Copyright (C) 2017 Alexis Ballier <aball...@gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-or...@linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_V4L2_BUFFERS_H
+#define AVCODEC_V4L2_BUFFERS_H
+
+enum V4L2Buffer_status {
+    V4L2BUF_AVAILABLE,
+    V4L2BUF_IN_DRIVER,
+    V4L2BUF_RET_USER,
+};
+
+/**
+ * V4L2Buffer (wrapper for v4l2_buffer management)
+ */
+typedef struct V4L2Buffer {
+    /* each buffer needs to have a reference to its context */
+    struct V4L2Context *context;
+
+    /* keep track of the mmap address and mmap length */
+    struct V4L2Plane_info {
+        int bytesperline;
+        void * mm_addr;
+        size_t length;
+    } plane_info[VIDEO_MAX_PLANES];
+
+    int num_planes;
+
+    /* the v4l2_buffer buf.m.planes pointer uses the planes[] mem */
+    struct v4l2_buffer buf;
+    struct v4l2_plane planes[VIDEO_MAX_PLANES];
+
+    int flags;
+    enum V4L2Buffer_status status;
+
+} V4L2Buffer;
+
+/**
+ * Extracts the data from a V4L2Buffer to an AVFrame
+ *
+ * @param[in] frame The AVFrame to push the information to
+ * @param[in] buf The V4L2Buffer to get the information from
+ *
+ * @returns 0 in case of success, EINVAL if the number of planes is incorrect,
+ * ENOMEM if the AVBufferRef can't be created.
+ */
+int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *buf);
+
+/**
+ * Extracts the data from a V4L2Buffer to an AVPacket
+ *
+ * @param[in] pkt The AVPacket to push the information to
+ * @param[in] buf The V4L2Buffer to get the information from
+ *
+ * @returns 0 in case of success, EINVAL if the number of planes is incorrect,
+ * ENOMEM if the AVBufferRef can't be created.
+ *
+ */
+int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *buf);
+
+/**
+ * Extracts the data from an AVPacket to a V4L2Buffer
+ *
+ * @param[in]  pkt   AVPacket to get the data from
+ * @param[in]  out   V4L2Buffer to push the information to
+ *
+ * @returns 0 in case of success, negative otherwise
+ */
+int ff_v4l2_buffer_avpkt_to_buf(const AVPacket *pkt, V4L2Buffer *out);
+
+/**
+ * Extracts the data from an AVFrame to a V4L2Buffer
+ *
+ * @param[in]  frame AVFrame to get the data from
+ * @param[in]  out   V4L2Buffer to push the information to
+ *
+ * @returns 0 in case of success, negative otherwise
+ */
+int ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer* out);
+
+/**
+ * Initializes a V4L2Buffer
+ *
+ * @param[in]  avbuf V4L2Buffer to initialize
+ * @param[in]  index v4l2 buffer id
+ *
+ * @returns 0 in case of success, negative otherwise
+ */
+int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index);
+
+/**
+ * Enqueues a V4L2Buffer
+ *
+ * @param[in] avbuf V4L2Buffer to push to the driver
+ *
+ * @returns 0 in case of success, negative otherwise
+ */
+int ff_v4l2_buffer_enqueue(V4L2Buffer* avbuf);
+
+
+#endif // AVCODEC_V4L2_BUFFERS_H
diff --git a/libavcodec/v4l2_context.c b/libavcodec/v4l2_context.c
new file mode 100644
index 0000000..d6282f3
--- /dev/null
+++ b/libavcodec/v4l2_context.c
@@ -0,0 +1,604 @@
+/*
+ * V4L2 context helper functions.
+ *
+ * Copyright (C) 2017 Alexis Ballier <aball...@gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-or...@linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/videodev2.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <poll.h>
+#include "libavcodec/avcodec.h"
+#include "libavcodec/internal.h"
+#include "v4l2_buffers.h"
+#include "v4l2_fmt.h"
+#include "v4l2_m2m.h"
+
+static inline V4L2m2mContext *ctx_to_m2mctx(V4L2Context *ctx)
+{
+    return V4L2_TYPE_IS_OUTPUT(ctx->type) ?
+        container_of(ctx, V4L2m2mContext, output) :
+        container_of(ctx, V4L2m2mContext, capture);
+}
+
+static inline unsigned int v4l2_get_width(V4L2Context *ctx, struct v4l2_format *fmt)
+{
+    return V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? fmt->fmt.pix_mp.width : fmt->fmt.pix.width;
+}
+
+static inline unsigned int v4l2_get_height(V4L2Context *ctx, struct v4l2_format *fmt)
+{
+    return V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? fmt->fmt.pix_mp.height : fmt->fmt.pix.height;
+}
+
+static inline int v4l2_type_supported(V4L2Context *ctx)
+{
+    return ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
+        ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
+        ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+        ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT;
+}
+
+static inline void v4l2_save_to_context(V4L2Context* ctx, uint32_t v4l2_fmt)
+{
+    ctx->format.type = ctx->type;
+
+    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
+        /* this is to handle the reconfiguration of the capture stream at runtime */
+        ctx->format.fmt.pix_mp.height = ctx->height;
+        ctx->format.fmt.pix_mp.width = ctx->width;
+        if (v4l2_fmt)
+            ctx->format.fmt.pix_mp.pixelformat = v4l2_fmt;
+    } else {
+        ctx->format.fmt.pix.height = ctx->height;
+        ctx->format.fmt.pix.width = ctx->width;
+        if (v4l2_fmt)
+            ctx->format.fmt.pix.pixelformat = v4l2_fmt;
+    }
+}
+
+static int v4l2_handle_event(V4L2Context *ctx)
+{
+    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
+    struct v4l2_format cap_fmt = s->capture.format;
+    struct v4l2_format out_fmt = s->output.format;
+    struct v4l2_event evt;
+    int ret;
+
+    ret = ioctl(ctx->fd, VIDIOC_DQEVENT, &evt);
+    if (ret < 0) {
+        av_log(ctx->log_ctx, AV_LOG_ERROR, "%s VIDIOC_DQEVENT\n", ctx->name);
+        return 0;
+    }
+
+    ret = ioctl(s->fd, VIDIOC_G_FMT, &cap_fmt);
+    if (ret) {
+        av_log(s->capture.log_ctx, AV_LOG_ERROR, "%s VIDIOC_G_FMT\n", s->capture.name);
+        return 0;
+    }
+
+    ret = ioctl(s->fd, VIDIOC_G_FMT, &out_fmt);
+    if (ret) {
+        av_log(s->output.log_ctx, AV_LOG_ERROR, "%s VIDIOC_G_FMT\n", s->output.name);
+        return 0;
+    }
+
+    if (evt.type != V4L2_EVENT_SOURCE_CHANGE)
+        return 0;
+
+    if (v4l2_get_height(&s->output, &s->output.format) != v4l2_get_height(&s->output, &out_fmt) ||
+        v4l2_get_width(&s->output, &s->output.format) != v4l2_get_width(&s->output, &out_fmt)) {
+
+        av_log(s->output.log_ctx, AV_LOG_DEBUG, "%s changed (%dx%d) -> (%dx%d)\n",
+            s->output.name,
+            v4l2_get_width(&s->output, &s->output.format), v4l2_get_height(&s->output, &s->output.format),
+            v4l2_get_width(&s->output, &out_fmt), v4l2_get_height(&s->output, &out_fmt));
+
+        /* full reinit required to queue new output buffers since the current ones
+           are too small */
+        s->full_reinit = 1;
+
+        s->output.height = v4l2_get_height(ctx, &out_fmt);
+        s->output.width = v4l2_get_width(ctx, &out_fmt);
+    }
+
+    if (v4l2_get_height(&s->capture, &s->capture.format) != v4l2_get_height(&s->capture, &cap_fmt) ||
+        v4l2_get_width(&s->capture, &s->capture.format) != v4l2_get_width(&s->capture, &cap_fmt)) {
+
+        av_log(s->capture.log_ctx, AV_LOG_DEBUG, "%s changed (%dx%d) -> (%dx%d)\n",
+            s->capture.name,
+            v4l2_get_width(&s->capture, &s->capture.format), v4l2_get_height(&s->capture, &s->capture.format),
+            v4l2_get_width(&s->capture, &cap_fmt), v4l2_get_height(&s->capture, &cap_fmt));
+
+        s->capture.height = v4l2_get_height(ctx, &cap_fmt);
+        s->capture.width = v4l2_get_width(ctx, &cap_fmt);
+
+        /* streamoff capture and unmap and remap new capture buffers */
+        ret = ff_v4l2_m2m_codec_reinit(s);
+        if (ret)
+            av_log(ctx->log_ctx, AV_LOG_ERROR, "ff_v4l2_m2m_codec_reinit\n");
+
+        /* let the caller function know that reinit was executed */
+        return 1;
+    }
+
+    return 0;
+}
+
+static int v4l2_stop_decode(V4L2Context *ctx)
+{
+    struct v4l2_decoder_cmd cmd = {
+        .cmd = V4L2_DEC_CMD_STOP,
+    };
+    int ret;
+
+    ret = ioctl(ctx->fd, VIDIOC_DECODER_CMD, &cmd);
+    if (ret) {
+        /* DECODER_CMD is optional */
+        if (errno == ENOTTY)
+            return ff_v4l2_context_set_status(ctx, VIDIOC_STREAMOFF);
+    }
+
+    return 0;
+}
+
+static int v4l2_stop_encode(V4L2Context *ctx)
+{
+    struct v4l2_encoder_cmd cmd = {
+        .cmd = V4L2_ENC_CMD_STOP,
+    };
+    int ret;
+
+    ret = ioctl(ctx->fd, VIDIOC_ENCODER_CMD, &cmd);
+    if (ret) {
+        /* ENCODER_CMD is optional */
+        if (errno == ENOTTY)
+            return ff_v4l2_context_set_status(ctx, VIDIOC_STREAMOFF);
+    }
+
+    return 0;
+}
+
+static V4L2Buffer* v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout)
+{
+    struct v4l2_plane planes[VIDEO_MAX_PLANES];
+    struct v4l2_buffer buf = { 0 };
+    V4L2Buffer* avbuf = NULL;
+    struct pollfd pfd = {
+        .events =  POLLIN | POLLRDNORM | POLLPRI | POLLOUT | POLLWRNORM, /* default blocking capture */
+        .fd = ctx->fd,
+    };
+    int ret;
+
+    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
+        pfd.events =  POLLOUT | POLLWRNORM;
+
+    for (;;) {
+        ret = poll(&pfd, 1, timeout);
+        if (ret > 0)
+            break;
+        if (errno == EINTR)
+            continue;
+        return NULL;
+    }
+
+    /* 0. handle errors */
+    if (pfd.revents & POLLERR) {
+        av_log(ctx->log_ctx, AV_LOG_WARNING, "%s POLLERR\n", ctx->name);
+        return NULL;
+    }
+
+    /* 1. handle resolution changes */
+    if (pfd.revents & POLLPRI) {
+        ret = v4l2_handle_event(ctx);
+        if (ret) {
+            /* drop the buffer (if there was one) since we had to reconfigure capture (unmap all buffers) */
+            return NULL;
+        }
+    }
+
+    /* 2. dequeue the buffer */
+    if (pfd.revents & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) {
+
+        if (!V4L2_TYPE_IS_OUTPUT(ctx->type)) {
+            /* there is a capture buffer ready */
+            if (pfd.revents & (POLLIN | POLLRDNORM))
+                goto dequeue;
+
+            /* the driver is ready to accept more input; instead of waiting for the capture
+             * buffer to complete we return NULL so input can proceed (we are single threaded)
+             */
+            if (pfd.revents & (POLLOUT | POLLWRNORM))
+                return NULL;
+        }
+
+dequeue:
+        memset(&buf, 0, sizeof(buf));
+        buf.memory = V4L2_MEMORY_MMAP;
+        buf.type = ctx->type;
+        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
+            memset(planes, 0, sizeof(planes));
+            buf.length = VIDEO_MAX_PLANES;
+            buf.m.planes = planes;
+        }
+
+        ret = ioctl(ctx->fd, VIDIOC_DQBUF, &buf);
+        if (ret) {
+            if (errno != EAGAIN) {
+                ctx->done = errno;
+                if (errno != EPIPE)
+                    av_log(ctx->log_ctx, AV_LOG_DEBUG, "%s VIDIOC_DQBUF, errno (%s)\n",
+                        ctx->name, av_err2str(AVERROR(errno)));
+            }
+        } else {
+            avbuf = &ctx->buffers[buf.index];
+            avbuf->status = V4L2BUF_AVAILABLE;
+            avbuf->buf = buf;
+            if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
+                memcpy(avbuf->planes, planes, sizeof(planes));
+                avbuf->buf.m.planes = avbuf->planes;
+            }
+        }
+    }
+
+    return avbuf;
+}
+
+static V4L2Buffer* v4l2_getfree_v4l2buf(V4L2Context *ctx)
+{
+    int timeout = 0; /* return when no more buffers to dequeue */
+    int i;
+
+    /* get back as many output buffers as possible */
+    if (V4L2_TYPE_IS_OUTPUT(ctx->type)) {
+          do {
+          } while (v4l2_dequeue_v4l2buf(ctx, timeout));
+    }
+
+    for (i = 0; i < ctx->num_buffers; i++) {
+        if (ctx->buffers[i].status == V4L2BUF_AVAILABLE)
+            return &ctx->buffers[i];
+    }
+
+    return NULL;
+}
+
+static int v4l2_release_buffers(V4L2Context* ctx)
+{
+    struct v4l2_requestbuffers req = {
+        .memory = V4L2_MEMORY_MMAP,
+        .type = ctx->type,
+        .count = 0, /* 0 -> unmaps buffers from the driver */
+    };
+    int i, j;
+
+    for (i = 0; i < ctx->num_buffers; i++) {
+        V4L2Buffer *buffer = &ctx->buffers[i];
+
+        for (j = 0; j < buffer->num_planes; j++) {
+            struct V4L2Plane_info *p = &buffer->plane_info[j];
+            if (p->mm_addr && p->length)
+                if (munmap(p->mm_addr, p->length) < 0)
+                    av_log(ctx->log_ctx, AV_LOG_ERROR, "%s unmap plane (%s))\n", ctx->name, av_err2str(AVERROR(errno)));
+        }
+    }
+
+    return ioctl(ctx->fd, VIDIOC_REQBUFS, &req);
+}
+
+static inline int v4l2_try_raw_format(V4L2Context* ctx, enum AVPixelFormat pixfmt)
+{
+    struct v4l2_format *fmt = &ctx->format;
+    uint32_t v4l2_fmt;
+    int ret;
+
+    v4l2_fmt = ff_v4l2_format_avfmt_to_v4l2(pixfmt);
+    if (!v4l2_fmt)
+        return AVERROR(EINVAL);
+
+    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type))
+        fmt->fmt.pix_mp.pixelformat = v4l2_fmt;
+    else
+        fmt->fmt.pix.pixelformat = v4l2_fmt;
+
+    fmt->type = ctx->type;
+
+    ret = ioctl(ctx->fd, VIDIOC_TRY_FMT, fmt);
+    if (ret)
+        return AVERROR(EINVAL);
+
+    return 0;
+}
+
+static int v4l2_query_raw_format(V4L2Context* ctx, int set)
+{
+    enum AVPixelFormat pixfmt = ctx->av_pix_fmt;
+    struct v4l2_fmtdesc fdesc;
+    int ret;
+
+    memset(&fdesc, 0, sizeof(fdesc));
+    fdesc.type = ctx->type;
+
+    if (pixfmt != AV_PIX_FMT_NONE) {
+        ret = v4l2_try_raw_format(ctx, pixfmt);
+        if (ret)
+            pixfmt = AV_PIX_FMT_NONE;
+        else
+            return 0;
+    }
+
+    for (;;) {
+        ret = ioctl(ctx->fd, VIDIOC_ENUM_FMT, &fdesc);
+        if (ret)
+            return AVERROR(EINVAL);
+
+        pixfmt = ff_v4l2_format_v4l2_to_avfmt(fdesc.pixelformat, AV_CODEC_ID_RAWVIDEO);
+        ret = v4l2_try_raw_format(ctx, pixfmt);
+        if (ret){
+            fdesc.index++;
+            continue;
+        }
+
+        if (set)
+            ctx->av_pix_fmt = pixfmt;
+
+        return 0;
+    }
+
+    return AVERROR(EINVAL);
+}
+
+static int v4l2_query_coded_format(V4L2Context* ctx, uint32_t *p)
+{
+    struct v4l2_fmtdesc fdesc;
+    uint32_t v4l2_fmt;
+    int ret;
+
+    /* translate to a valid v4l2 format */
+    v4l2_fmt = ff_v4l2_format_avcodec_to_v4l2(ctx->av_codec_id);
+    if (!v4l2_fmt)
+        return AVERROR(EINVAL);
+
+    /* check if the driver supports this format */
+    memset(&fdesc, 0, sizeof(fdesc));
+    fdesc.type = ctx->type;
+
+    for (;;) {
+        ret = ioctl(ctx->fd, VIDIOC_ENUM_FMT, &fdesc);
+        if (ret)
+            return AVERROR(EINVAL);
+
+        if (fdesc.pixelformat == v4l2_fmt)
+            break;
+
+        fdesc.index++;
+    }
+
+    *p = v4l2_fmt;
+
+    return 0;
+}
+
+ /*****************************************************************************
+  *
+  *             V4L2 Context Interface
+  *
+  *****************************************************************************/
+
+int ff_v4l2_context_set_status(V4L2Context* ctx, int cmd)
+{
+    int type = ctx->type;
+    int ret;
+
+    ret = ioctl(ctx->fd, cmd, &type);
+    if (ret < 0)
+        return AVERROR(errno);
+
+    ctx->streamon = (cmd == VIDIOC_STREAMON);
+
+    return 0;
+}
+
+int ff_v4l2_context_enqueue_frame(V4L2Context* ctx, const AVFrame* frame)
+{
+    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
+    V4L2Buffer* avbuf;
+    int ret;
+
+    if (!frame) {
+        ret = v4l2_stop_encode(ctx);
+        if (ret)
+            av_log(ctx->log_ctx, AV_LOG_ERROR, "%s stop_encode\n", ctx->name);
+        s->draining= 1;
+        return 0;
+    }
+
+    avbuf = v4l2_getfree_v4l2buf(ctx);
+    if (!avbuf)
+        return AVERROR(ENOMEM);
+
+    ret = ff_v4l2_buffer_avframe_to_buf(frame, avbuf);
+    if (ret)
+        return ret;
+
+    return ff_v4l2_buffer_enqueue(avbuf);
+}
+
+int ff_v4l2_context_enqueue_packet(V4L2Context* ctx, const AVPacket* pkt)
+{
+    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
+    V4L2Buffer* avbuf;
+    int ret;
+
+    if (!pkt->size) {
+        ret = v4l2_stop_decode(ctx);
+        if (ret)
+            av_log(ctx->log_ctx, AV_LOG_ERROR, "%s stop_decode\n", ctx->name);
+        s->draining = 1;
+        return 0;
+    }
+
+    avbuf = v4l2_getfree_v4l2buf(ctx);
+    if (!avbuf)
+        return AVERROR(ENOMEM);
+
+    ret = ff_v4l2_buffer_avpkt_to_buf(pkt, avbuf);
+    if (ret)
+        return ret;
+
+    return ff_v4l2_buffer_enqueue(avbuf);
+}
+
+int ff_v4l2_context_dequeue_frame(V4L2Context* ctx, AVFrame* frame)
+{
+    V4L2Buffer* avbuf = NULL;
+
+    /* blocks until:
+     *  1. decoded frame available
+     *  2. an input buffer is ready to be dequeued
+     */
+    avbuf = v4l2_dequeue_v4l2buf(ctx, -1);
+    if (!avbuf) {
+        if (ctx->done)
+            return AVERROR_EOF;
+
+        return AVERROR(EAGAIN);
+    }
+
+    return ff_v4l2_buffer_buf_to_avframe(frame, avbuf);
+}
+
+int ff_v4l2_context_dequeue_packet(V4L2Context* ctx, AVPacket* pkt)
+{
+    V4L2Buffer* avbuf = NULL;
+
+    /* blocks until:
+     *  1. encoded packet available
+     *  2. an input buffer ready to be dequeued
+     */
+    avbuf = v4l2_dequeue_v4l2buf(ctx, -1);
+    if (!avbuf) {
+        if (ctx->done)
+            return AVERROR_EOF;
+
+        return AVERROR(EAGAIN);
+    }
+
+    return ff_v4l2_buffer_buf_to_avpkt(pkt, avbuf);
+}
+
+int ff_v4l2_context_format(V4L2Context* ctx, int set)
+{
+    uint32_t v4l2_fmt;
+    int ret;
+
+    if  (ctx->av_codec_id == AV_CODEC_ID_RAWVIDEO) {
+        ret = v4l2_query_raw_format(ctx, set);
+        if (ret)
+            return ret;
+
+        v4l2_save_to_context(ctx, 0);
+        if (set)
+            return ioctl(ctx->fd, VIDIOC_S_FMT, &ctx->format);
+
+        return ret;
+    }
+
+    ret = v4l2_query_coded_format(ctx, &v4l2_fmt);
+    if (ret)
+        return ret;
+
+    v4l2_save_to_context(ctx, v4l2_fmt);
+    if (set)
+        return ioctl(ctx->fd, VIDIOC_S_FMT, &ctx->format);
+
+    return ioctl(ctx->fd, VIDIOC_TRY_FMT, &ctx->format);
+}
+
+void ff_v4l2_context_release(V4L2Context* ctx)
+{
+    int ret;
+
+    if (!ctx->buffers)
+        return;
+
+    ret = v4l2_release_buffers(ctx);
+    if (ret)
+        av_log(ctx->log_ctx, AV_LOG_WARNING, "V4L2 failed to unmap the %s buffers\n", ctx->name);
+
+    av_free(ctx->buffers);
+    ctx->buffers = NULL;
+}
+
+int ff_v4l2_context_init(V4L2Context* ctx, int lazy_init)
+{
+    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
+    struct v4l2_requestbuffers req;
+    int ret, i;
+
+    if (!v4l2_type_supported(ctx)) {
+        av_log(ctx->log_ctx, AV_LOG_ERROR, "type %i not supported\n", ctx->type);
+        return AVERROR_PATCHWELCOME;
+    }
+
+    if (lazy_init)
+        return 0;
+
+    ret = ioctl(s->fd, VIDIOC_G_FMT, &ctx->format);
+    if (ret)
+        av_log(ctx->log_ctx, AV_LOG_ERROR, "%s VIDIOC_G_FMT failed\n", ctx->name);
+
+    memset(&req, 0, sizeof(req));
+    req.count = ctx->num_buffers;
+    req.memory = V4L2_MEMORY_MMAP;
+    req.type = ctx->type;
+    ret = ioctl(ctx->fd, VIDIOC_REQBUFS, &req);
+    if (ret < 0)
+        return AVERROR(errno);
+
+    ctx->num_buffers = req.count;
+    ctx->buffers = av_mallocz(ctx->num_buffers * sizeof(V4L2Buffer));
+    if (!ctx->buffers) {
+            av_log(ctx->log_ctx, AV_LOG_ERROR, "%s malloc enomem\n", ctx->name);
+            return AVERROR(ENOMEM);
+    }
+
+    for (i = 0; i < req.count; i++) {
+        ctx->buffers[i].context = ctx;
+        ret = ff_v4l2_buffer_initialize(&ctx->buffers[i], i);
+        if (ret < 0) {
+            av_log(ctx->log_ctx, AV_LOG_ERROR, "%s buffer initialization (%s)\n", ctx->name, av_err2str(ret));
+            av_free(ctx->buffers);
+            return ret;
+        }
+    }
+
+    av_log(ctx->log_ctx, AV_LOG_DEBUG, "%s: %s %02d buffers initialized: %04ux%04u, sizeimage %08u, bytesperline %08u\n", ctx->name,
+        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? av_fourcc2str(ctx->format.fmt.pix_mp.pixelformat) : av_fourcc2str(ctx->format.fmt.pix.pixelformat),
+        req.count,
+        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.width : ctx->format.fmt.pix.width,
+        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.height: ctx->format.fmt.pix.height,
+        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage : ctx->format.fmt.pix.sizeimage,
+        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].bytesperline : ctx->format.fmt.pix.bytesperline);
+
+    return 0;
+}
diff --git a/libavcodec/v4l2_context.h b/libavcodec/v4l2_context.h
new file mode 100644
index 0000000..e3b5489
--- /dev/null
+++ b/libavcodec/v4l2_context.h
@@ -0,0 +1,206 @@
+/*
+ * V4L2 context helper functions.
+ *
+ * Copyright (C) 2017 Alexis Ballier <aball...@gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-or...@linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_V4L2_CONTEXT_H
+#define AVCODEC_V4L2_CONTEXT_H
+
+#include <stdatomic.h>
+#include "libavcodec/avcodec.h"
+#include "libavutil/pixfmt.h"
+#include "libavutil/frame.h"
+#include "libavutil/buffer.h"
+#include "v4l2_buffers.h"
+
+typedef struct V4L2Context {
+
+    /**
+     * Log context
+     */
+    void *log_ctx;
+
+    /**
+     * Lazy Initialization: set to one if the context can not initialize its
+     * buffers until it has queried the driver for formats and sizes.
+     */
+    int lazy_init;
+
+    /**
+     * context name: must be set before calling ff_v4l2_context_init().
+     */
+    const char* name;
+
+    /**
+     * File descriptor obtained from opening the associated device.
+     * Must be set before calling ff_v4l2_context_init().
+     * Readonly after init.
+     */
+    int fd;
+
+    /**
+     * Type of this buffer context.
+     * See V4L2_BUF_TYPE_VIDEO_* in videodev2.h
+     * Must be set before calling ff_v4l2_context_init().
+     * Readonly after init.
+     */
+    enum v4l2_buf_type type;
+
+    /**
+     * AVPixelFormat corresponding to this buffer context.
+     * AV_PIX_FMT_NONE means this is an encoded stream.
+     */
+    enum AVPixelFormat av_pix_fmt;
+
+    /**
+     * AVCodecID corresponding to this buffer context.
+     * AV_CODEC_ID_RAWVIDEO means this is a raw stream and av_pix_fmt must be set to a valid value.
+     */
+    enum AVCodecID av_codec_id;
+
+    /**
+     * Format returned by the driver after initializing the buffer context.
+     * Must be set before calling ff_v4l2_context_init().
+     * ff_v4l2_context_format() can set it.
+     * Readonly after init.
+     */
+    struct v4l2_format format;
+
+    /**
+     * Width and height of the frames it produces (in case of a capture context, e.g. when decoding)
+     * or accepts (in case of an output context, e.g. when encoding).
+     *
+     * For an output context, this must be set before calling ff_v4l2_context_init().
+     * For a capture context during decoding, it will be set after having received the
+     * information from the driver, at which point we can initialize the buffers.
+     */
+    int width, height;
+
+    /**
+     * Whether the stream has been started (VIDIOC_STREAMON has been sent).
+     */
+    int streamon;
+
+    /**
+     *
+     * Before calling ff_v4l2_context_init() this is the number of buffers we would like to have available.
+     * ff_v4l2_context_init() asks for (min_buffers + num_buffers) and sets this value to the actual number
+     * of buffers the driver gave us.
+     *
+     * Readonly after init.
+     */
+    int num_buffers;
+
+    /**
+     * Indexed array of V4L2Buffers
+     */
+    V4L2Buffer *buffers;
+
+    /**
+     *  Either no more buffers available or an unrecoverable error was notified
+     *  by the V4L2 kernel driver: once set the context has to be exited.
+     */
+    int done;
+
+} V4L2Context;
+
+/**
+ * Initializes a V4L2Context.
+ *
+ * @param[in] ctx A pointer to a V4L2Context. See V4L2Context description for required variables.
+ * @param[in] lazy_init Whether to postpone the buffer initialization (see V4L2Context.lazy_init).
+ * @return 0 in case of success, a negative value representing the error otherwise.
+ */
+int ff_v4l2_context_init(V4L2Context* ctx, int lazy_init);
+
+/**
+ * Queries or sets the format of a V4L2Context.
+ *
+ * @param[in] ctx A pointer to a V4L2Context. See V4L2Context description for required variables.
+ * @param[in] set When non-zero the format is programmed in the driver, otherwise it is only probed.
+ *
+ * @return 0 in case of success, a negative value representing the error otherwise.
+ */
+int ff_v4l2_context_format(V4L2Context* ctx, int set);
+
+/**
+ * Releases a V4L2Context.
+ *
+ * @param[in] ctx A pointer to a V4L2Context.
+ *               The caller is responsible for freeing it.
+ *               It must not be used after calling this function.
+ */
+void ff_v4l2_context_release(V4L2Context* ctx);
+
+/**
+ * Sets the status of a V4L2Context.
+ *
+ * @param[in] ctx A pointer to a V4L2Context.
+ * @param[in] cmd The status to set (VIDIOC_STREAMON or VIDIOC_STREAMOFF).
+ *                Warning: If VIDIOC_STREAMOFF is sent to a buffer context that still has some frames buffered,
+ *                those frames will be dropped.
+ * @return 0 in case of success, a negative value representing the error otherwise.
+ */
+int ff_v4l2_context_set_status(V4L2Context* ctx, int cmd);
+
+/**
+ * Dequeues a buffer from a V4L2Context to an AVPacket.
+ *
+ * The pkt must be non-NULL.
+ * @param[in] ctx The V4L2Context to dequeue from.
+ * @param[inout] pkt The AVPacket to dequeue to.
+ * @return 0 in case of success, AVERROR(EAGAIN) if no buffer was ready, or another negative error code otherwise.
+ */
+int ff_v4l2_context_dequeue_packet(V4L2Context* ctx, AVPacket* pkt);
+
+/**
+ * Dequeues a buffer from a V4L2Context to an AVFrame.
+ *
+ * The frame must be non-NULL.
+ * @param[in] ctx The V4L2Context to dequeue from.
+ * @param[inout] f The AVFrame to dequeue to.
+ * @return 0 in case of success, AVERROR(EAGAIN) if no buffer was ready, or another negative error code otherwise.
+ */
+int ff_v4l2_context_dequeue_frame(V4L2Context* ctx, AVFrame* f);
+
+/**
+ * Enqueues a buffer to a V4L2Context from an AVPacket.
+ *
+ * The packet must be non-NULL.
+ * When the packet size is zero, no buffer is queued; instead, a V4L2_DEC_CMD_STOP command is sent to the driver.
+ *
+ * @param[in] ctx The V4L2Context to enqueue to.
+ * @param[in] pkt A pointer to an AVPacket.
+ * @return 0 in case of success, a negative error otherwise.
+ */
+int ff_v4l2_context_enqueue_packet(V4L2Context* ctx, const AVPacket* pkt);
+
+/**
+ * Enqueues a buffer to a V4L2Context from an AVFrame.
+ *
+ * The frame must be non-NULL.
+ *
+ * @param[in] ctx The V4L2Context to enqueue to.
+ * @param[in] f A pointer to an AVFrame to enqueue.
+ * @return 0 in case of success, a negative error otherwise.
+ */
+int ff_v4l2_context_enqueue_frame(V4L2Context* ctx, const AVFrame* f);
+
+#endif /* AVCODEC_V4L2_CONTEXT_H */
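
For reviewers, a minimal sketch of how a decoder is expected to drive the
context API above (illustrative only, not part of the patch; the helper name
and the error handling are simplified):

    /* feed one compressed packet and try to collect one decoded frame;
     * output/capture are the two V4L2Context queues owned by the codec */
    static int decode_step(V4L2Context *output, V4L2Context *capture,
                           const AVPacket *pkt, AVFrame *frame)
    {
        /* a 0-sized packet is translated into a V4L2_DEC_CMD_STOP (drain) */
        int ret = ff_v4l2_context_enqueue_packet(output, pkt);
        if (ret < 0 && ret != AVERROR(ENOMEM))
            return ret;

        /* AVERROR(EAGAIN) simply means no frame is ready yet */
        return ff_v4l2_context_dequeue_frame(capture, frame);
    }
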
diff --git a/libavcodec/v4l2_fmt.c b/libavcodec/v4l2_fmt.c
new file mode 100644
index 0000000..a7ce308
--- /dev/null
+++ b/libavcodec/v4l2_fmt.c
@@ -0,0 +1,182 @@
+/*
+ * V4L2 format helper functions
+ *
+ * Copyright (C) 2017 Alexis Ballier <aball...@gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-or...@linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/videodev2.h>
+#include <search.h>
+#include "v4l2_fmt.h"
+
+#define V4L2_FMT(x) V4L2_PIX_FMT_##x
+#define AV_CODEC(x) AV_CODEC_ID_##x
+#define AV_FMT(x)   AV_PIX_FMT_##x
+
+static const struct fmt_conversion {
+    enum AVPixelFormat avfmt;
+    enum AVCodecID avcodec;
+    uint32_t v4l2_fmt;
+} fmt_map[] = {
+    { AV_FMT(RGB555LE),    AV_CODEC(RAWVIDEO),    V4L2_FMT(RGB555) },
+    { AV_FMT(RGB555BE),    AV_CODEC(RAWVIDEO),    V4L2_FMT(RGB555X) },
+    { AV_FMT(RGB565LE),    AV_CODEC(RAWVIDEO),    V4L2_FMT(RGB565) },
+    { AV_FMT(RGB565BE),    AV_CODEC(RAWVIDEO),    V4L2_FMT(RGB565X) },
+    { AV_FMT(BGR24),       AV_CODEC(RAWVIDEO),    V4L2_FMT(BGR24) },
+    { AV_FMT(RGB24),       AV_CODEC(RAWVIDEO),    V4L2_FMT(RGB24) },
+    { AV_FMT(BGR0),        AV_CODEC(RAWVIDEO),    V4L2_FMT(BGR32) },
+    { AV_FMT(0RGB),        AV_CODEC(RAWVIDEO),    V4L2_FMT(RGB32) },
+    { AV_FMT(GRAY8),       AV_CODEC(RAWVIDEO),    V4L2_FMT(GREY) },
+    { AV_FMT(YUV420P),     AV_CODEC(RAWVIDEO),    V4L2_FMT(YUV420) },
+    { AV_FMT(YUYV422),     AV_CODEC(RAWVIDEO),    V4L2_FMT(YUYV) },
+    { AV_FMT(UYVY422),     AV_CODEC(RAWVIDEO),    V4L2_FMT(UYVY) },
+    { AV_FMT(YUV422P),     AV_CODEC(RAWVIDEO),    V4L2_FMT(YUV422P) },
+    { AV_FMT(YUV411P),     AV_CODEC(RAWVIDEO),    V4L2_FMT(YUV411P) },
+    { AV_FMT(YUV410P),     AV_CODEC(RAWVIDEO),    V4L2_FMT(YUV410) },
+    { AV_FMT(YUV410P),     AV_CODEC(RAWVIDEO),    V4L2_FMT(YVU410) },
+    { AV_FMT(NV12),        AV_CODEC(RAWVIDEO),    V4L2_FMT(NV12) },
+    { AV_FMT(NONE),        AV_CODEC(MJPEG),       V4L2_FMT(MJPEG) },
+    { AV_FMT(NONE),        AV_CODEC(MJPEG),       V4L2_FMT(JPEG) },
+#ifdef V4L2_PIX_FMT_SRGGB8
+    { AV_FMT(BAYER_BGGR8), AV_CODEC(RAWVIDEO),    V4L2_FMT(SBGGR8) },
+    { AV_FMT(BAYER_GBRG8), AV_CODEC(RAWVIDEO),    V4L2_FMT(SGBRG8) },
+    { AV_FMT(BAYER_GRBG8), AV_CODEC(RAWVIDEO),    V4L2_FMT(SGRBG8) },
+    { AV_FMT(BAYER_RGGB8), AV_CODEC(RAWVIDEO),    V4L2_FMT(SRGGB8) },
+#endif
+#ifdef V4L2_PIX_FMT_Y16
+    { AV_FMT(GRAY16LE),    AV_CODEC(RAWVIDEO),    V4L2_FMT(Y16) },
+#endif
+#ifdef V4L2_PIX_FMT_NV12M
+    { AV_FMT(NV12),        AV_CODEC(RAWVIDEO),    V4L2_FMT(NV12M) },
+#endif
+#ifdef V4L2_PIX_FMT_NV21M
+    { AV_FMT(NV21),        AV_CODEC(RAWVIDEO),    V4L2_FMT(NV21M) },
+#endif
+#ifdef V4L2_PIX_FMT_YUV420M
+    { AV_FMT(YUV420P),     AV_CODEC(RAWVIDEO),    V4L2_FMT(YUV420M) },
+#endif
+#ifdef V4L2_PIX_FMT_NV16M
+    { AV_FMT(NV16),        AV_CODEC(RAWVIDEO),    V4L2_FMT(NV16M) },
+#endif
+#ifdef V4L2_PIX_FMT_H263
+    { AV_FMT(NONE),        AV_CODEC(H263),        V4L2_FMT(H263) },
+#endif
+#ifdef V4L2_PIX_FMT_H264
+    { AV_FMT(NONE),        AV_CODEC(H264),        V4L2_FMT(H264) },
+#endif
+#ifdef V4L2_PIX_FMT_MPEG4
+    { AV_FMT(NONE),        AV_CODEC(MPEG4),       V4L2_FMT(MPEG4) },
+#endif
+#ifdef V4L2_PIX_FMT_CPIA1
+    { AV_FMT(NONE),        AV_CODEC(CPIA),        V4L2_FMT(CPIA1) },
+#endif
+#ifdef V4L2_PIX_FMT_DV
+    { AV_FMT(NONE),        AV_CODEC(DVVIDEO),     V4L2_FMT(DV) },
+#endif
+#ifdef V4L2_PIX_FMT_MPEG1
+    { AV_FMT(NONE),        AV_CODEC(MPEG1VIDEO),  V4L2_FMT(MPEG1) },
+#endif
+#ifdef V4L2_PIX_FMT_MPEG2
+    { AV_FMT(NONE),        AV_CODEC(MPEG2VIDEO),  V4L2_FMT(MPEG2) },
+#endif
+#ifdef V4L2_PIX_FMT_VP8
+    { AV_FMT(NONE),        AV_CODEC(VP8),         V4L2_FMT(VP8) },
+#endif
+#ifdef V4L2_PIX_FMT_VP9
+    { AV_FMT(NONE),        AV_CODEC(VP9),         V4L2_FMT(VP9) },
+#endif
+#ifdef V4L2_PIX_FMT_HEVC
+    { AV_FMT(NONE),        AV_CODEC(HEVC),        V4L2_FMT(HEVC) },
+#endif
+#ifdef V4L2_PIX_FMT_VC1_ANNEX_G
+    { AV_FMT(NONE),        AV_CODEC(VC1),         V4L2_FMT(VC1_ANNEX_G) },
+#endif
+};
+
+static int match_codec(const void *a, const void *b)
+{
+    if (*(enum AVCodecID *)a == ((struct fmt_conversion *)b)->avcodec)
+        return 0;
+
+    return 1;
+}
+
+uint32_t ff_v4l2_format_avcodec_to_v4l2(enum AVCodecID avcodec)
+{
+    size_t len = FF_ARRAY_ELEMS(fmt_map);
+    struct fmt_conversion *item;
+
+    item = lfind(&avcodec, fmt_map, &len, sizeof(fmt_map[0]), match_codec);
+    if (item)
+        return item->v4l2_fmt;
+
+    return 0;
+}
+
+static int match_fmt(const void *a, const void *b)
+{
+    if (*(enum AVPixelFormat *)a == ((struct fmt_conversion *)b)->avfmt)
+        return 0;
+
+    return 1;
+}
+
+uint32_t ff_v4l2_format_avfmt_to_v4l2(enum AVPixelFormat avfmt)
+{
+    size_t len = FF_ARRAY_ELEMS(fmt_map);
+    struct fmt_conversion *item;
+
+    item = lfind(&avfmt, fmt_map, &len, sizeof(fmt_map[0]), match_fmt);
+    if (item)
+        return item->v4l2_fmt;
+
+    return 0;
+}
+
+struct v4l2fmt_avcodec_pair {
+    enum AVCodecID avcodec;
+    uint32_t v4l2_fmt;
+};
+
+static int match_codecfmt(const void *a, const void *b)
+{
+    struct v4l2fmt_avcodec_pair *key = (struct v4l2fmt_avcodec_pair *) a;
+    struct fmt_conversion *item = (struct fmt_conversion *) b;
+
+    if (key->avcodec == item->avcodec && key->v4l2_fmt == item->v4l2_fmt)
+        return 0;
+
+    return 1;
+}
+
+enum AVPixelFormat ff_v4l2_format_v4l2_to_avfmt(uint32_t v4l2_fmt, enum AVCodecID avcodec)
+{
+    struct v4l2fmt_avcodec_pair const key = {
+        .v4l2_fmt = v4l2_fmt,
+        .avcodec = avcodec,
+    };
+    size_t len = FF_ARRAY_ELEMS(fmt_map);
+    struct fmt_conversion *item;
+
+    item = lfind(&key, fmt_map, &len, sizeof(fmt_map[0]), match_codecfmt);
+    if (item)
+        return item->avfmt;
+
+    return AV_PIX_FMT_NONE;
+}
diff --git a/libavcodec/v4l2_fmt.h b/libavcodec/v4l2_fmt.h
new file mode 100644
index 0000000..0136002
--- /dev/null
+++ b/libavcodec/v4l2_fmt.h
@@ -0,0 +1,34 @@
+/*
+ * V4L2 format helper functions
+ *
+ * Copyright (C) 2017 Alexis Ballier <aball...@gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-or...@linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_V4L2_FMT_H
+#define AVCODEC_V4L2_FMT_H
+
+#include "libavcodec/avcodec.h"
+#include "libavutil/pixfmt.h"
+
+enum AVPixelFormat ff_v4l2_format_v4l2_to_avfmt(uint32_t v4l2_fmt, enum AVCodecID avcodec);
+uint32_t ff_v4l2_format_avcodec_to_v4l2(enum AVCodecID avcodec);
+uint32_t ff_v4l2_format_avfmt_to_v4l2(enum AVPixelFormat avfmt);
+
+#endif /* AVCODEC_V4L2_FMT_H */
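
A small sketch of how the three lookup helpers are meant to be used
(illustrative only; the expected results follow from the fmt_map table above):

    static void fmt_lookup_example(void)
    {
        /* codec id -> V4L2 pixelformat of the coded stream (0 if unmapped) */
        uint32_t coded = ff_v4l2_format_avcodec_to_v4l2(AV_CODEC_ID_H264);    /* V4L2_PIX_FMT_H264 */

        /* raw AVPixelFormat -> V4L2 pixelformat */
        uint32_t raw = ff_v4l2_format_avfmt_to_v4l2(AV_PIX_FMT_NV12);         /* V4L2_PIX_FMT_NV12 */

        /* and back: V4L2 pixelformat (+ codec id to disambiguate) -> AVPixelFormat */
        enum AVPixelFormat p = ff_v4l2_format_v4l2_to_avfmt(raw, AV_CODEC_ID_RAWVIDEO); /* AV_PIX_FMT_NV12 */

        (void)coded; (void)p;
    }
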
diff --git a/libavcodec/v4l2_m2m.c b/libavcodec/v4l2_m2m.c
new file mode 100644
index 0000000..87827d8
--- /dev/null
+++ b/libavcodec/v4l2_m2m.c
@@ -0,0 +1,331 @@
+/*
+ * V4L mem2mem
+ *
+ * Copyright (C) 2017 Alexis Ballier <aball...@gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-or...@linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/videodev2.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include "libavcodec/avcodec.h"
+#include "libavcodec/internal.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/pixfmt.h"
+#include "v4l2_context.h"
+#include "v4l2_fmt.h"
+#include "v4l2_m2m.h"
+
+static inline int v4l2_splane_video(struct v4l2_capability *cap)
+{
+    if (cap->capabilities & (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT) &&
+        cap->capabilities & V4L2_CAP_STREAMING)
+        return 1;
+
+    if (cap->capabilities & V4L2_CAP_VIDEO_M2M)
+        return 1;
+
+    return 0;
+}
+
+static inline int v4l2_mplane_video(struct v4l2_capability *cap)
+{
+    if (cap->capabilities & (V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE) &&
+        cap->capabilities & V4L2_CAP_STREAMING)
+        return 1;
+
+    if (cap->capabilities & V4L2_CAP_VIDEO_M2M_MPLANE)
+        return 1;
+
+    return 0;
+}
+
+static int v4l2_prepare_contexts(V4L2m2mContext* s)
+{
+    struct v4l2_capability cap;
+    int ret;
+
+    s->capture.log_ctx = s->output.log_ctx = s->avctx;
+    s->capture.done = s->output.done = 0;
+    s->capture.fd = s->output.fd = s->fd;
+    s->capture.name = "capture";
+    s->output.name = "output ";
+    atomic_init(&s->refcount, 0);
+    sem_init(&s->refsync, 0, 0);
+
+    memset(&cap, 0, sizeof(cap));
+    ret = ioctl(s->fd, VIDIOC_QUERYCAP, &cap);
+    if (ret < 0)
+        return ret;
+
+    av_log(s->avctx, AV_LOG_INFO, "driver '%s' on card '%s'\n", cap.driver, cap.card);
+
+    if (v4l2_mplane_video(&cap)) {
+        s->capture.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+        s->output.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+        return 0;
+    }
+
+    if (v4l2_splane_video(&cap)) {
+        s->capture.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+        s->output.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+        return 0;
+    }
+
+    return AVERROR(EINVAL);
+}
+
+static int v4l2_probe_driver(V4L2m2mContext* s)
+{
+    int ret;
+
+    s->fd = open(s->devname, O_RDWR | O_NONBLOCK, 0);
+    if (s->fd < 0)
+        return AVERROR(errno);
+
+    ret = v4l2_prepare_contexts(s);
+    if (ret < 0)
+        goto done;
+
+    ret = ff_v4l2_context_format(&s->output, 0);
+    if (ret) {
+        av_log(s->avctx, AV_LOG_DEBUG, "input format not supported\n");
+        goto done;
+    }
+
+    ret = ff_v4l2_context_format(&s->capture, 0);
+    if (ret) {
+        av_log(s->avctx, AV_LOG_DEBUG, "output format not supported\n");
+        goto done;
+    }
+
+done:
+    while(close(s->fd) == -1 && errno == EINTR);
+    s->fd = -1;
+
+    return ret;
+}
+
+static int v4l2_configure_contexts(V4L2m2mContext* s)
+{
+    void *log_ctx = s->avctx;
+    int ret;
+
+    s->fd = open(s->devname, O_RDWR | O_NONBLOCK, 0);
+    if (s->fd < 0)
+        return AVERROR(errno);
+
+    ret = v4l2_prepare_contexts(s);
+    if (ret < 0)
+        goto error;
+
+    ret = ff_v4l2_context_format(&s->output, 1);
+    if (ret) {
+        av_log(log_ctx, AV_LOG_ERROR, "can't set input format\n");
+        goto error;
+    }
+
+    ret = ff_v4l2_context_format(&s->capture, 1);
+    if (ret) {
+        av_log(log_ctx, AV_LOG_ERROR, "can't to set output format\n");
+        goto error;
+    }
+
+    ret = ff_v4l2_context_init(&s->output, s->output.lazy_init);
+    if (ret) {
+        av_log(log_ctx, AV_LOG_ERROR, "no output context's buffers\n");
+        goto error;
+    }
+
+    ret = ff_v4l2_context_init(&s->capture, s->capture.lazy_init);
+    if (ret) {
+        av_log(log_ctx, AV_LOG_ERROR, "no capture context's buffers\n");
+        goto error;
+    }
+
+error:
+    if (ret) {
+        while(close(s->fd) == -1 && errno == EINTR);
+        s->fd = -1;
+    }
+
+    return ret;
+}
+
+/******************************************************************************
+ *
+ *                  V4L2 M2M Interface
+ *
+ ******************************************************************************/
+
+int ff_v4l2_m2m_codec_reinit(V4L2m2mContext* s)
+{
+    int ret;
+
+    if (s->full_reinit) {
+        av_log(s->avctx, AV_LOG_DEBUG, "%s full reinit\n", s->devname);
+
+        s->reinit = 1;
+        if (atomic_load(&s->refcount))
+            while(sem_wait(&s->refsync) == -1 && errno == EINTR);
+
+        /* close the driver */
+        ff_v4l2_m2m_codec_end(s->avctx);
+
+        /* start again now that we know the stream dimensions */
+        s->full_reinit = 0;
+        s->draining = 0;
+        s->reinit = 0;
+
+        ret = ff_v4l2_m2m_codec_init(s->avctx);
+        if (ret)
+            av_log(s->avctx, AV_LOG_ERROR, "%s full reinit failed\n", s->devname);
+
+        return ret;
+    }
+
+    /* 1. reinit in progress */
+    s->reinit = 1;
+
+    av_log(s->avctx, AV_LOG_DEBUG, "reinit context\n");
+
+    /* 2. streamoff */
+    ret = ff_v4l2_context_set_status(&s->capture, VIDIOC_STREAMOFF);
+    if (ret)
+        av_log(s->avctx, AV_LOG_ERROR, "capture VIDIOC_STREAMOFF\n");
+
+    /* 3. unmap the capture buffers (v4l2 and ffmpeg):
+     *    we must wait for all references to be released before being allowed
+     *    to queue new buffers.
+     */
+    av_log(s->avctx, AV_LOG_DEBUG, "waiting for user to release 
AVBufferRefs\n");
+    if (atomic_load(&s->refcount))
+        while(sem_wait(&s->refsync) == -1 && errno == EINTR);
+
+    ff_v4l2_context_release(&s->capture);
+
+    /* 4. query the new format */
+    ret = ff_v4l2_context_format(&s->capture, 1);
+    if (ret) {
+        av_log(s->avctx, AV_LOG_ERROR, "setting capture format\n");
+        return ret;
+    }
+
+    /* 5. do lazy initialization */
+    ret = ff_v4l2_context_init(&s->capture, s->capture.lazy_init);
+    if (ret) {
+        av_log(s->avctx, AV_LOG_ERROR, "capture buffers lazy init\n");
+        return ret;
+    }
+
+    /* 6. update AVCodecContext */
+    ret = ff_set_dimensions(s->avctx, s->capture.width, s->capture.height);
+    if (ret < 0)
+        av_log(s->avctx, AV_LOG_WARNING, "update avcodec height and width\n");
+
+    /* 7. complete reinit */
+    sem_destroy(&s->refsync);
+    sem_init(&s->refsync, 0, 0);
+    s->draining = 0;
+    s->reinit = 0;
+
+    return 0;
+}
+
+int ff_v4l2_m2m_codec_end(AVCodecContext *avctx)
+{
+    V4L2m2mContext* s = avctx->priv_data;
+    int ret;
+
+    ret = ff_v4l2_context_set_status(&s->output, VIDIOC_STREAMOFF);
+    if (ret)
+        av_log(s->output.log_ctx, AV_LOG_ERROR, "VIDIOC_STREAMOFF %s\n", s->output.name);
+
+    ret = ff_v4l2_context_set_status(&s->capture, VIDIOC_STREAMOFF);
+    if (ret)
+        av_log(s->capture.log_ctx, AV_LOG_ERROR, "VIDIOC_STREAMOFF %s\n", s->capture.name);
+
+    ff_v4l2_context_release(&s->output);
+
+    if (atomic_load(&s->refcount)) {
+        av_log(s->capture.log_ctx, AV_LOG_DEBUG, "ff_v4l2_m2m_codec_end leaving pending buffers\n");
+
+        return 0;
+    }
+
+    ff_v4l2_context_release(&s->capture);
+    sem_destroy(&s->refsync);
+
+    /* release the hardware */
+    while (close(s->fd) == -1 && errno == EINTR);
+    s->fd = -1;
+
+    return 0;
+}
+
+int ff_v4l2_m2m_codec_init(AVCodecContext *avctx)
+{
+    int ret = AVERROR(EINVAL);
+    struct dirent *entry;
+    char node[PATH_MAX];
+    DIR *dirp;
+
+    V4L2m2mContext *s = avctx->priv_data;
+    s->avctx = avctx;
+
+    if (s->devname[0] != 0)
+        return v4l2_configure_contexts(s);
+
+    dirp = opendir("/dev");
+    if (!dirp)
+        return AVERROR(errno);
+
+    for (entry = readdir(dirp); entry; entry = readdir(dirp)) {
+
+        if (strncmp(entry->d_name, "video", 5))
+            continue;
+
+        snprintf(node, sizeof(node), "/dev/%s", entry->d_name);
+        av_log(s->avctx, AV_LOG_DEBUG, "probing device %s\n", node);
+        strncpy(s->devname, node, strlen(node) + 1);
+        ret = v4l2_probe_driver(s);
+        if (!ret)
+                break;
+    }
+
+    closedir(dirp);
+
+    if (ret) {
+        av_log(s->avctx, AV_LOG_ERROR, "Could not find a valid device\n");
+        memset(s->devname, 0, sizeof(s->devname));
+
+        return ret;
+    }
+
+    av_log(s->avctx, AV_LOG_INFO, "Using device %s\n", node);
+    ret = v4l2_configure_contexts(s);
+    if (ret)
+        memset(s->devname, 0, sizeof(s->devname));
+
+    return ret;
+}
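
The probing above boils down to a VIDIOC_QUERYCAP check on every /dev/video*
node. A self-contained sketch of that check (illustrative only; "/dev/video0"
is just an example node):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        struct v4l2_capability cap = { 0 };
        int fd = open("/dev/video0", O_RDWR | O_NONBLOCK, 0);

        if (fd < 0 || ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
            perror("VIDIOC_QUERYCAP");
            return 1;
        }

        /* same single/multi-planar distinction made by v4l2_prepare_contexts() */
        if (cap.capabilities & V4L2_CAP_VIDEO_M2M_MPLANE)
            printf("%s: mem2mem capable (multi-planar)\n", (const char *)cap.card);
        else if (cap.capabilities & V4L2_CAP_VIDEO_M2M)
            printf("%s: mem2mem capable (single-planar)\n", (const char *)cap.card);

        close(fd);
        return 0;
    }
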
diff --git a/libavcodec/v4l2_m2m.h b/libavcodec/v4l2_m2m.h
new file mode 100644
index 0000000..f09cea5
--- /dev/null
+++ b/libavcodec/v4l2_m2m.h
@@ -0,0 +1,94 @@
+/*
+ * V4L2 mem2mem helper functions
+ *
+ * Copyright (C) 2017 Alexis Ballier <aball...@gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-or...@linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_V4L2_M2M_H
+#define AVCODEC_V4L2_M2M_H
+
+#include <semaphore.h>
+#include <unistd.h>
+#include <dirent.h>
+#include "libavcodec/avcodec.h"
+#include "v4l2_context.h"
+
+#define container_of(ptr, type, member) ({ \
+        const __typeof__(((type *)0)->member ) *__mptr = (ptr); \
+        (type *)((char *)__mptr - offsetof(type,member) );})
+
+#define V4L_M2M_DEFAULT_OPTS \
+    { "num_output_buffers", "Number of buffers in the output context",\
+        OFFSET(output.num_buffers), AV_OPT_TYPE_INT, { .i64 = 16 }, 6, INT_MAX, FLAGS }
+
+typedef struct V4L2m2mContext
+{
+    AVClass *class;
+    char devname[PATH_MAX];
+    int fd;
+
+    /* the codec context queues */
+    V4L2Context capture;
+    V4L2Context output;
+
+    /* refcount of buffers held by the user */
+    atomic_uint refcount;
+
+    /* dynamic stream reconfig */
+    AVCodecContext *avctx;
+    sem_t refsync;
+    int full_reinit;
+    int reinit;
+
+    /* null frame or packet received */
+    int draining;
+} V4L2m2mContext;
+
+/**
+ * Probes the video nodes looking for the required codec capabilities.
+ *
+ * @param[in] avctx The AVCodecContext instantiated by the encoder/decoder.
+ *
+ * @returns 0 if a driver is found, a negative number otherwise.
+ */
+int ff_v4l2_m2m_codec_init(AVCodecContext *avctx);
+
+/**
+ * Releases all the codec resources if all AVBufferRefs have been returned to the
+ * ctx. Otherwise the driver is kept open.
+ *
+ * @param[in] avctx The AVCodecContext instantiated by the encoder/decoder.
+ *
+ * @returns 0
+ *
+ */
+int ff_v4l2_m2m_codec_end(AVCodecContext *avctx);
+
+/**
+ * Reinitializes the V4L2m2mContext when the driver can't continue processing
+ * with the running parameters (i.e., changes in format or resolution).
+ *
+ * @param[in] ctx The V4L2m2mContext instantiated by the encoder/decoder.
+ *
+ * @returns 0 in case of success, negative number otherwise
+ */
+int ff_v4l2_m2m_codec_reinit(V4L2m2mContext *ctx);
+
+#endif /* AVCODEC_V4L2_M2M_H */
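
The container_of() macro mirrors the kernel helper of the same name; one
plausible use (illustrative only) is recovering the owning V4L2m2mContext from
a pointer to one of its embedded queues:

    /* given a V4L2Context that is known to be s->output or s->capture,
     * walk back to the enclosing V4L2m2mContext */
    static inline V4L2m2mContext *ctx_to_m2mctx(V4L2Context *ctx)
    {
        return V4L2_TYPE_IS_OUTPUT(ctx->type) ?
            container_of(ctx, V4L2m2mContext, output) :
            container_of(ctx, V4L2m2mContext, capture);
    }
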
diff --git a/libavcodec/v4l2_m2m_dec.c b/libavcodec/v4l2_m2m_dec.c
new file mode 100644
index 0000000..25f3a26
--- /dev/null
+++ b/libavcodec/v4l2_m2m_dec.c
@@ -0,0 +1,232 @@
+/*
+ * V4L2 mem2mem decoders
+ *
+ * Copyright (C) 2017 Alexis Ballier <aball...@gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-or...@linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/videodev2.h>
+#include <sys/ioctl.h>
+#include "libavutil/pixfmt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "libavcodec/avcodec.h"
+#include "libavcodec/decode.h"
+
+#include "v4l2_context.h"
+#include "v4l2_m2m.h"
+#include "v4l2_fmt.h"
+
+static int v4l2_try_start(AVCodecContext *avctx)
+{
+    V4L2m2mContext *s = avctx->priv_data;
+    V4L2Context *const capture = &s->capture;
+    V4L2Context *const output = &s->output;
+    struct v4l2_selection selection = { 0 };
+    int ret;
+
+    if (output->streamon && capture->streamon)
+        return 0;
+
+    /* 1. start the output process */
+    if (!output->streamon) {
+        ret = ff_v4l2_context_set_status(output, VIDIOC_STREAMON);
+        if (ret < 0) {
+            av_log(avctx, AV_LOG_DEBUG, "VIDIOC_STREAMON on output context\n");
+            return ret;
+        }
+    }
+
+    /* 2. get the capture format */
+    capture->format.type = capture->type;
+    ret = ioctl(capture->fd, VIDIOC_G_FMT, &capture->format);
+    if (ret) {
+        av_log(avctx, AV_LOG_WARNING, "VIDIOC_G_FMT ioctl\n");
+        return ret;
+    }
+
+    /* 2.1 update the AVCodecContext */
+    avctx->pix_fmt = ff_v4l2_format_v4l2_to_avfmt(capture->format.fmt.pix_mp.pixelformat, AV_CODEC_ID_RAWVIDEO);
+    capture->av_pix_fmt = avctx->pix_fmt;
+
+    /* 3. set the crop parameters */
+    selection.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    selection.r.height = avctx->coded_height;
+    selection.r.width = avctx->coded_width;
+    ret = ioctl(s->fd, VIDIOC_S_SELECTION, &selection);
+    if (!ret) {
+        ret = ioctl(s->fd, VIDIOC_G_SELECTION, &selection);
+        if (ret) {
+            av_log(avctx, AV_LOG_WARNING, "VIDIOC_G_SELECTION ioctl\n");
+        } else {
+            av_log(avctx, AV_LOG_DEBUG, "crop output %dx%d\n", 
selection.r.width, selection.r.height);
+            /* update the size of the resulting frame */
+            capture->height = selection.r.height;
+            capture->width  = selection.r.width;
+        }
+    }
+
+    /* 4. init the capture context now that we have the capture format */
+    if (!capture->buffers) {
+        ret = ff_v4l2_context_init(capture, 0);
+        if (ret) {
+            av_log(avctx, AV_LOG_DEBUG, "can't request output buffers\n");
+            return ret;
+        }
+    }
+
+    /* 5. start the capture process */
+    ret = ff_v4l2_context_set_status(capture, VIDIOC_STREAMON);
+    if (ret) {
+        av_log(avctx, AV_LOG_DEBUG, "VIDIOC_STREAMON, on capture context\n");
+        return ret;
+    }
+
+    return 0;
+}
+
+static int v4l2_prepare_decoder(V4L2m2mContext *s)
+{
+    struct v4l2_event_subscription sub;
+    V4L2Context *output = &s->output;
+    int ret;
+
+    /**
+     * requirements
+     */
+    memset(&sub, 0, sizeof(sub));
+    sub.type = V4L2_EVENT_SOURCE_CHANGE;
+    ret = ioctl(s->fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
+    if (ret < 0) {
+        if (output->height == 0 || output->width == 0) {
+            av_log(s->avctx, AV_LOG_ERROR,
+                "the v4l2 driver does not support VIDIOC_SUBSCRIBE_EVENT\n"
+                "you must provide codec_height and codec_width on input\n");
+            return ret;
+        }
+    }
+
+    return 0;
+}
+
+static int v4l2_receive_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+    V4L2m2mContext *s = avctx->priv_data;
+    V4L2Context *const capture = &s->capture;
+    V4L2Context *const output = &s->output;
+    AVPacket avpkt = {0};
+    int ret;
+
+    ret = ff_decode_get_packet(avctx, &avpkt);
+    if (ret < 0 && ret != AVERROR_EOF)
+        return ret;
+
+    if (s->draining)
+        goto dequeue;
+
+    ret = ff_v4l2_context_enqueue_packet(output, &avpkt);
+    if (ret < 0) {
+        if (ret != AVERROR(ENOMEM))
+            return ret;
+        /* no input buffers available, continue dequeuing */
+    }
+
+    if (avpkt.size) {
+        ret = v4l2_try_start(avctx);
+        if (ret)
+            return 0;
+    }
+
+dequeue:
+    return ff_v4l2_context_dequeue_frame(capture, frame);
+}
+
+static av_cold int v4l2_decode_init(AVCodecContext *avctx)
+{
+    V4L2m2mContext *s = avctx->priv_data;
+    V4L2Context *capture = &s->capture;
+    V4L2Context *output = &s->output;
+    int ret;
+
+    /* if these dimensions are out of range, an event will be raised by the v4l2
+     * driver; this event will trigger a full pipeline reconfig and the proper
+     * values will be read from the driver
+     */
+    output->height = capture->height = avctx->coded_height;
+    output->width = capture->width = avctx->coded_width;
+
+    output->av_codec_id = avctx->codec_id;
+    output->av_pix_fmt  = AV_PIX_FMT_NONE;
+
+    /*
+     * The buffers associated with this context cannot be initialized without
+     * additional information available in the kernel driver.
+     * Postpone requesting the buffers until we know more about the frames.
+     */
+    capture->lazy_init = 1;
+    capture->av_codec_id = AV_CODEC_ID_RAWVIDEO;
+    capture->av_pix_fmt = avctx->pix_fmt;
+
+    ret = ff_v4l2_m2m_codec_init(avctx);
+    if (ret)
+        return ret;
+
+    return v4l2_prepare_decoder(s);
+}
+
+#define OFFSET(x) offsetof(V4L2m2mContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
+
+static const AVOption options[] = {
+    V4L_M2M_DEFAULT_OPTS,
+    { "num_capture_buffers", "Number of buffers in the capture context",
+        OFFSET(capture.num_buffers), AV_OPT_TYPE_INT, {.i64 = 20}, 20, INT_MAX, FLAGS },
+    { NULL },
+};
+
+#define M2MDEC(NAME, LONGNAME, CODEC, bsf_name) \
+static const AVClass v4l2_m2m_ ## NAME ## _dec_class = {\
+    .class_name = #NAME "_v4l2_m2m_decoder",\
+    .item_name  = av_default_item_name,\
+    .option     = options,\
+    .version    = LIBAVUTIL_VERSION_INT,\
+};\
+\
+AVCodec ff_ ## NAME ## _v4l2m2m_decoder = { \
+    .name           = #NAME "_v4l2m2m",\
+    .long_name      = NULL_IF_CONFIG_SMALL("V4L2 mem2mem " LONGNAME " decoder wrapper"),\
+    .type           = AVMEDIA_TYPE_VIDEO,\
+    .id             = CODEC,\
+    .priv_data_size = sizeof(V4L2m2mContext),\
+    .priv_class     = &v4l2_m2m_ ## NAME ## _dec_class,\
+    .init           = v4l2_decode_init,\
+    .receive_frame  = v4l2_receive_frame,\
+    .close          = ff_v4l2_m2m_codec_end,\
+    .bsfs           = bsf_name, \
+};
+
+M2MDEC(h264,  "H.264", AV_CODEC_ID_H264,       "h264_mp4toannexb");
+M2MDEC(hevc,  "HEVC",  AV_CODEC_ID_HEVC,       "hevc_mp4toannexb");
+M2MDEC(mpeg1, "MPEG1", AV_CODEC_ID_MPEG1VIDEO, NULL);
+M2MDEC(mpeg2, "MPEG2", AV_CODEC_ID_MPEG2VIDEO, NULL);
+M2MDEC(mpeg4, "MPEG4", AV_CODEC_ID_MPEG4,      NULL);
+M2MDEC(h263,  "H.263", AV_CODEC_ID_H263,       NULL);
+M2MDEC(vc1 ,  "VC1",   AV_CODEC_ID_VC1,        NULL);
+M2MDEC(vp8,   "VP8",   AV_CODEC_ID_VP8,        NULL);
+M2MDEC(vp9,   "VP9",   AV_CODEC_ID_VP9,        NULL);
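
From the libavcodec user's point of view the wrappers register under the
"<codec>_v4l2m2m" names defined above. A minimal sketch of opening the H.264
decoder explicitly (illustrative only; demuxing and error paths are omitted,
and the coded dimensions are example values):

    #include <libavcodec/avcodec.h>

    static int open_h264_v4l2m2m_decoder(AVCodecContext **pavctx)
    {
        AVCodec *codec = avcodec_find_decoder_by_name("h264_v4l2m2m");
        AVCodecContext *avctx;

        if (!codec)
            return AVERROR_DECODER_NOT_FOUND;

        avctx = avcodec_alloc_context3(codec);
        if (!avctx)
            return AVERROR(ENOMEM);

        /* helps drivers that cannot report V4L2_EVENT_SOURCE_CHANGE */
        avctx->coded_width  = 1920;
        avctx->coded_height = 1080;

        *pavctx = avctx;
        return avcodec_open2(avctx, codec, NULL);
    }

On the command line the same decoder can be forced with "-c:v h264_v4l2m2m"
placed before the input file.
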
diff --git a/libavcodec/v4l2_m2m_enc.c b/libavcodec/v4l2_m2m_enc.c
new file mode 100644
index 0000000..9d1d303
--- /dev/null
+++ b/libavcodec/v4l2_m2m_enc.c
@@ -0,0 +1,350 @@
+/*
+ * V4L2 mem2mem encoders
+ *
+ * Copyright (C) 2017 Alexis Ballier <aball...@gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-or...@linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/videodev2.h>
+#include <sys/ioctl.h>
+#include <search.h>
+#include "libavcodec/avcodec.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/pixfmt.h"
+#include "libavutil/opt.h"
+#include "v4l2_context.h"
+#include "v4l2_m2m.h"
+
+#define MPEG_CID(x) V4L2_CID_MPEG_VIDEO_##x
+#define MPEG_VIDEO(x) V4L2_MPEG_VIDEO_##x
+
+static inline void v4l2_set_timeperframe(V4L2m2mContext *s, unsigned int num, unsigned int den)
+{
+    struct v4l2_streamparm parm = { 0 };
+
+    parm.type = V4L2_TYPE_IS_MULTIPLANAR(s->output.type) ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE : V4L2_BUF_TYPE_VIDEO_OUTPUT;
+    parm.parm.output.timeperframe.denominator = den;
+    parm.parm.output.timeperframe.numerator = num;
+
+    if (ioctl(s->fd, VIDIOC_S_PARM, &parm) < 0)
+        av_log(s->avctx, AV_LOG_WARNING, "Failed to set timeperframe");
+}
+
+static inline void v4l2_set_ext_ctrl(V4L2m2mContext *s, unsigned int id, signed int value, const char *name)
+{
+    struct v4l2_ext_controls ctrls = { 0 };
+    struct v4l2_ext_control ctrl = { 0 };
+
+    /* set ctrls */
+    ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+    ctrls.controls = &ctrl;
+    ctrls.count = 1;
+
+    /* set ctrl */
+    ctrl.value = value;
+    ctrl.id = id;
+
+    if (ioctl(s->fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0)
+        av_log(s->avctx, AV_LOG_WARNING, "Failed to set %s\n", name);
+    else
+        av_log(s->avctx, AV_LOG_DEBUG, "Encoder: %s = %d\n", name, value);
+}
+
+static inline int v4l2_get_ext_ctrl(V4L2m2mContext *s, unsigned int id, signed int *value, const char *name)
+{
+    struct v4l2_ext_controls ctrls = { 0 };
+    struct v4l2_ext_control ctrl = { 0 };
+    int ret;
+
+    /* set ctrls */
+    ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+    ctrls.controls = &ctrl;
+    ctrls.count = 1;
+
+    /* set ctrl */
+    ctrl.id = id;
+
+    ret = ioctl(s->fd, VIDIOC_G_EXT_CTRLS, &ctrls);
+    if (ret < 0) {
+        av_log(s->avctx, AV_LOG_WARNING, "Failed to set %s\n", name);
+        return ret;
+    }
+
+    *value = ctrl.value;
+
+    return 0;
+}
+
+static int match_profile(const void *a, const void *b)
+{
+    if (*(unsigned int *)a == *(unsigned int *)b)
+        return 0;
+
+    return 1;
+}
+
+static inline int v4l2_h264_profile_from_ff(int p)
+{
+    struct h264_profile  {
+        unsigned int ffmpeg_val;
+        unsigned int v4l2_val;
+    } *val, profile[] = {
+        { FF_PROFILE_H264_CONSTRAINED_BASELINE, MPEG_VIDEO(H264_PROFILE_CONSTRAINED_BASELINE) },
+        { FF_PROFILE_H264_HIGH_444_PREDICTIVE, MPEG_VIDEO(H264_PROFILE_HIGH_444_PREDICTIVE) },
+        { FF_PROFILE_H264_HIGH_422_INTRA, MPEG_VIDEO(H264_PROFILE_HIGH_422_INTRA) },
+        { FF_PROFILE_H264_HIGH_444_INTRA, MPEG_VIDEO(H264_PROFILE_HIGH_444_INTRA) },
+        { FF_PROFILE_H264_HIGH_10_INTRA, MPEG_VIDEO(H264_PROFILE_HIGH_10_INTRA) },
+        { FF_PROFILE_H264_HIGH_422, MPEG_VIDEO(H264_PROFILE_HIGH_422) },
+        { FF_PROFILE_H264_BASELINE, MPEG_VIDEO(H264_PROFILE_BASELINE) },
+        { FF_PROFILE_H264_EXTENDED, MPEG_VIDEO(H264_PROFILE_EXTENDED) },
+        { FF_PROFILE_H264_HIGH_10, MPEG_VIDEO(H264_PROFILE_HIGH_10) },
+        { FF_PROFILE_H264_MAIN, MPEG_VIDEO(H264_PROFILE_MAIN) },
+        { FF_PROFILE_H264_HIGH, MPEG_VIDEO(H264_PROFILE_HIGH) },
+    };
+    size_t len = FF_ARRAY_ELEMS(profile);
+
+    val = lfind(&p, profile, &len, sizeof(profile[0]), match_profile);
+    if (val)
+        return val->v4l2_val;
+
+    return AVERROR(ENOENT);
+}
+
+static inline int v4l2_mpeg4_profile_from_ff(int p)
+{
+    struct mpeg4_profile {
+        unsigned int ffmpeg_val;
+        unsigned int v4l2_val;
+    } *val, profile[] = {
+        { FF_PROFILE_MPEG4_ADVANCED_CODING, MPEG_VIDEO(MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY) },
+        { FF_PROFILE_MPEG4_ADVANCED_SIMPLE, MPEG_VIDEO(MPEG4_PROFILE_ADVANCED_SIMPLE) },
+        { FF_PROFILE_MPEG4_SIMPLE_SCALABLE, MPEG_VIDEO(MPEG4_PROFILE_SIMPLE_SCALABLE) },
+        { FF_PROFILE_MPEG4_SIMPLE, MPEG_VIDEO(MPEG4_PROFILE_SIMPLE) },
+        { FF_PROFILE_MPEG4_CORE, MPEG_VIDEO(MPEG4_PROFILE_CORE) },
+    };
+    size_t len = FF_ARRAY_ELEMS(profile);
+
+    val = lfind(&p, profile, &len, sizeof(profile[0]), match_profile);
+    if (val)
+        return val->v4l2_val;
+
+    return AVERROR(ENOENT);
+}
+
+static int v4l2_check_b_frame_support(V4L2m2mContext *s)
+{
+    if (s->avctx->max_b_frames)
+        av_log(s->avctx, AV_LOG_WARNING, "Encoder does not support b-frames 
yet\n");
+
+    v4l2_set_ext_ctrl(s, MPEG_CID(B_FRAMES), 0, "number of B-frames");
+    v4l2_get_ext_ctrl(s, MPEG_CID(B_FRAMES), &s->avctx->max_b_frames, "number of B-frames");
+    if (s->avctx->max_b_frames == 0)
+        return 0;
+
+    avpriv_report_missing_feature(s->avctx, "DTS/PTS calculation for V4L2 
encoding");
+
+    return AVERROR_PATCHWELCOME;
+}
+
+static int v4l2_prepare_encoder(V4L2m2mContext *s)
+{
+    AVCodecContext *avctx = s->avctx;
+    int qmin_cid, qmax_cid, qmin, qmax;
+    int ret, val;
+
+    /**
+     * requirements
+     */
+    ret = v4l2_check_b_frame_support(s);
+    if (ret)
+        return ret;
+
+    /**
+     * settings
+     */
+    if (avctx->framerate.num || avctx->framerate.den)
+        v4l2_set_timeperframe(s, avctx->framerate.num, avctx->framerate.den);
+
+    /* set ext ctrls */
+    v4l2_set_ext_ctrl(s, MPEG_CID(HEADER_MODE), MPEG_VIDEO(HEADER_MODE_SEPARATE), "header mode");
+    v4l2_set_ext_ctrl(s, MPEG_CID(BITRATE), avctx->bit_rate, "bit rate");
+    v4l2_set_ext_ctrl(s, MPEG_CID(GOP_SIZE), avctx->gop_size, "gop size");
+
+    av_log(avctx, AV_LOG_DEBUG,
+        "Encoder Context: id (%d), profile (%d), frame rate(%d/%d), number 
b-frames (%d), "
+        "gop size (%d), bit rate (%ld), qmin (%d), qmax (%d)\n",
+        avctx->codec_id, avctx->profile, avctx->framerate.num, 
avctx->framerate.den,
+        avctx->max_b_frames, avctx->gop_size, avctx->bit_rate, avctx->qmin, 
avctx->qmax);
+
+    switch (avctx->codec_id) {
+    case AV_CODEC_ID_H264:
+        val = v4l2_h264_profile_from_ff(avctx->profile);
+        if (val < 0)
+            av_log(avctx, AV_LOG_WARNING, "h264 profile not found\n");
+        else
+            v4l2_set_ext_ctrl(s, MPEG_CID(H264_PROFILE), val, "h264 profile");
+        qmin_cid = MPEG_CID(H264_MIN_QP);
+        qmax_cid = MPEG_CID(H264_MAX_QP);
+        qmin = 0;
+        qmax = 51;
+        break;
+    case AV_CODEC_ID_MPEG4:
+        val = v4l2_mpeg4_profile_from_ff(avctx->profile);
+        if (val < 0)
+            av_log(avctx, AV_LOG_WARNING, "mpeg4 profile not found\n");
+        else
+            v4l2_set_ext_ctrl(s, MPEG_CID(MPEG4_PROFILE), val, "mpeg4 
profile");
+        qmin_cid = MPEG_CID(MPEG4_MIN_QP);
+        qmax_cid = MPEG_CID(MPEG4_MAX_QP);
+        if (avctx->flags & AV_CODEC_FLAG_QPEL)
+            v4l2_set_ext_ctrl(s, MPEG_CID(MPEG4_QPEL), 1, "qpel");
+        qmin = 1;
+        qmax = 31;
+        break;
+    case AV_CODEC_ID_H263:
+        qmin_cid = MPEG_CID(H263_MIN_QP);
+        qmax_cid = MPEG_CID(H263_MAX_QP);
+        qmin = 1;
+        qmax = 31;
+        break;
+    case AV_CODEC_ID_VP8:
+        qmin_cid = MPEG_CID(VPX_MIN_QP);
+        qmax_cid = MPEG_CID(VPX_MAX_QP);
+        qmin = 0;
+        qmax = 127;
+        break;
+    case AV_CODEC_ID_VP9:
+        qmin_cid = MPEG_CID(VPX_MIN_QP);
+        qmax_cid = MPEG_CID(VPX_MAX_QP);
+        qmin = 0;
+        qmax = 255;
+        break;
+    default:
+        return 0;
+    }
+
+    if (qmin != avctx->qmin || qmax != avctx->qmax)
+        av_log(avctx, AV_LOG_WARNING, "Encoder adjusted: qmin (%d), qmax 
(%d)\n", qmin, qmax);
+
+    v4l2_set_ext_ctrl(s, qmin_cid, qmin, "minimum video quantizer scale");
+    v4l2_set_ext_ctrl(s, qmax_cid, qmax, "maximum video quantizer scale");
+
+    return 0;
+}
+
+static int v4l2_send_frame(AVCodecContext *avctx, const AVFrame *frame)
+{
+    V4L2m2mContext *s = avctx->priv_data;
+    V4L2Context *const output = &s->output;
+
+    return ff_v4l2_context_enqueue_frame(output, frame);
+}
+
+static int v4l2_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
+{
+    V4L2m2mContext *s = avctx->priv_data;
+    V4L2Context *const capture = &s->capture;
+    V4L2Context *const output = &s->output;
+    int ret;
+
+    if (s->draining)
+        goto dequeue;
+
+    if (!output->streamon) {
+        ret = ff_v4l2_context_set_status(output, VIDIOC_STREAMON);
+        if (ret) {
+            av_log(avctx, AV_LOG_ERROR, "VIDIOC_STREAMOFF failed on output 
context\n");
+            return ret;
+        }
+    }
+
+    if (!capture->streamon) {
+        ret = ff_v4l2_context_set_status(capture, VIDIOC_STREAMON);
+        if (ret) {
+            av_log(avctx, AV_LOG_ERROR, "VIDIOC_STREAMON failed on capture 
context\n");
+            return ret;
+        }
+    }
+
+dequeue:
+    return ff_v4l2_context_dequeue_packet(capture, avpkt);
+}
+
+static av_cold int v4l2_encode_init(AVCodecContext *avctx)
+{
+    V4L2m2mContext *s = avctx->priv_data;
+    V4L2Context *capture = &s->capture;
+    V4L2Context *output = &s->output;
+    int ret;
+
+    /* common settings output/capture */
+    output->height = capture->height = avctx->height;
+    output->width = capture->width = avctx->width;
+
+    /* output context */
+    output->av_codec_id = AV_CODEC_ID_RAWVIDEO;
+    output->av_pix_fmt = avctx->pix_fmt;
+
+    /* capture context */
+    capture->av_codec_id = avctx->codec_id;
+    capture->av_pix_fmt = AV_PIX_FMT_NONE;
+
+    ret = ff_v4l2_m2m_codec_init(avctx);
+    if (ret)
+        return ret;
+
+    return v4l2_prepare_encoder(s);
+}
+
+#define OFFSET(x) offsetof(V4L2m2mContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
+
+static const AVOption options[] = {
+    V4L_M2M_DEFAULT_OPTS,
+    { "num_capture_buffers", "Number of buffers in the capture context",
+        OFFSET(capture.num_buffers), AV_OPT_TYPE_INT, {.i64 = 4 }, 4, INT_MAX, FLAGS },
+    { NULL },
+};
+
+#define M2MENC(NAME, LONGNAME, CODEC) \
+static const AVClass v4l2_m2m_ ## NAME ## _enc_class = {\
+    .class_name = #NAME "_v4l2_m2m_encoder",\
+    .item_name  = av_default_item_name,\
+    .option     = options,\
+    .version    = LIBAVUTIL_VERSION_INT,\
+};\
+\
+AVCodec ff_ ## NAME ## _v4l2m2m_encoder = { \
+    .name           = #NAME "_v4l2m2m",\
+    .long_name      = NULL_IF_CONFIG_SMALL("V4L2 mem2mem " LONGNAME " encoder wrapper"),\
+    .type           = AVMEDIA_TYPE_VIDEO,\
+    .id             = CODEC,\
+    .priv_data_size = sizeof(V4L2m2mContext),\
+    .priv_class     = &v4l2_m2m_ ## NAME ##_enc_class,\
+    .init           = v4l2_encode_init,\
+    .send_frame     = v4l2_send_frame,\
+    .receive_packet = v4l2_receive_packet,\
+    .close          = ff_v4l2_m2m_codec_end,\
+};
+
+M2MENC(mpeg4, "MPEG4", AV_CODEC_ID_MPEG4);
+M2MENC(h263, "H.263", AV_CODEC_ID_H263);
+M2MENC(h264, "H.264", AV_CODEC_ID_H264);
+M2MENC(hevc, "HEVC",  AV_CODEC_ID_HEVC);
+M2MENC(vp8,  "VP8",   AV_CODEC_ID_VP8);
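
And the matching encoder side, showing the fields that v4l2_prepare_encoder()
forwards to the driver (illustrative only; the 1280x720 NV12 at 30 fps values
are just examples):

    #include <libavcodec/avcodec.h>

    static int open_h264_v4l2m2m_encoder(AVCodecContext **pavctx)
    {
        AVCodec *codec = avcodec_find_encoder_by_name("h264_v4l2m2m");
        AVCodecContext *avctx;

        if (!codec)
            return AVERROR_ENCODER_NOT_FOUND;

        avctx = avcodec_alloc_context3(codec);
        if (!avctx)
            return AVERROR(ENOMEM);

        avctx->width     = 1280;
        avctx->height    = 720;
        avctx->pix_fmt   = AV_PIX_FMT_NV12;
        avctx->time_base = (AVRational){ 1, 30 };
        avctx->framerate = (AVRational){ 30, 1 };  /* ends up in VIDIOC_S_PARM */
        avctx->bit_rate  = 4000000;                /* V4L2_CID_MPEG_VIDEO_BITRATE */
        avctx->gop_size  = 30;                     /* V4L2_CID_MPEG_VIDEO_GOP_SIZE */

        *pavctx = avctx;
        return avcodec_open2(avctx, codec, NULL);
    }
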
-- 
2.7.4
