The following command shows how to apply the transpose_vulkan filter:

ffmpeg -init_hw_device vulkan -i input.264 -vf \
    hwupload=extra_hw_frames=16,transpose_vulkan,hwdownload,format=yuv420p output.264
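
For reference (not part of this patch): the filter is expected to chain with other Vulkan filters inside a single hwupload/hwdownload pair, avoiding an extra download/upload round trip. The scale_vulkan stage and the 1280x720 size below are only an illustrative assumption, as is the build configuration (the new configure dependency is "vulkan spirv_compiler", e.g. a build with --enable-vulkan --enable-libglslang):

ffmpeg -init_hw_device vulkan -i input.264 -vf \
    hwupload=extra_hw_frames=16,scale_vulkan=w=1280:h=720,transpose_vulkan,hwdownload,format=yuv420p output.264
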
Signed-off-by: Wu Jianhua <jianhua...@intel.com>
---
 configure                         |   1 +
 libavfilter/Makefile              |   1 +
 libavfilter/allfilters.c          |   1 +
 libavfilter/vf_transpose_vulkan.c | 316 ++++++++++++++++++++++++++++++
 4 files changed, 319 insertions(+)
 create mode 100644 libavfilter/vf_transpose_vulkan.c

diff --git a/configure b/configure
index a98a18abaa..12cb49e877 100755
--- a/configure
+++ b/configure
@@ -3718,6 +3718,7 @@ tonemap_vaapi_filter_deps="vaapi VAProcFilterParameterBufferHDRToneMapping"
 tonemap_opencl_filter_deps="opencl const_nan"
 transpose_opencl_filter_deps="opencl"
 transpose_vaapi_filter_deps="vaapi VAProcPipelineCaps_rotation_flags"
+transpose_vulkan_filter_deps="vulkan spirv_compiler"
 unsharp_opencl_filter_deps="opencl"
 uspp_filter_deps="gpl avcodec"
 vaguedenoiser_filter_deps="gpl"
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index c8082c4a2f..8744cc3c63 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -483,6 +483,7 @@ OBJS-$(CONFIG_TRANSPOSE_FILTER)              += vf_transpose.o
 OBJS-$(CONFIG_TRANSPOSE_NPP_FILTER)          += vf_transpose_npp.o
 OBJS-$(CONFIG_TRANSPOSE_OPENCL_FILTER)       += vf_transpose_opencl.o opencl.o opencl/transpose.o
 OBJS-$(CONFIG_TRANSPOSE_VAAPI_FILTER)        += vf_transpose_vaapi.o vaapi_vpp.o
+OBJS-$(CONFIG_TRANSPOSE_VULKAN_FILTER)       += vf_transpose_vulkan.o vulkan.o vulkan_filter.o
 OBJS-$(CONFIG_TRIM_FILTER)                   += trim.o
 OBJS-$(CONFIG_UNPREMULTIPLY_FILTER)          += vf_premultiply.o framesync.o
 OBJS-$(CONFIG_UNSHARP_FILTER)                += vf_unsharp.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index b1af2cbcc8..9e16b4e71e 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -462,6 +462,7 @@ extern const AVFilter ff_vf_transpose;
 extern const AVFilter ff_vf_transpose_npp;
 extern const AVFilter ff_vf_transpose_opencl;
 extern const AVFilter ff_vf_transpose_vaapi;
+extern const AVFilter ff_vf_transpose_vulkan;
 extern const AVFilter ff_vf_trim;
 extern const AVFilter ff_vf_unpremultiply;
 extern const AVFilter ff_vf_unsharp;
diff --git a/libavfilter/vf_transpose_vulkan.c b/libavfilter/vf_transpose_vulkan.c
new file mode 100644
index 0000000000..c9bae413c3
--- /dev/null
+++ b/libavfilter/vf_transpose_vulkan.c
@@ -0,0 +1,316 @@
+/*
+ * copyright (c) 2021 Wu Jianhua <jianhua...@intel.com>
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/random_seed.h"
+#include "libavutil/opt.h"
+#include "vulkan_filter.h"
+#include "internal.h"
+
+#define CGS 32
+
+typedef struct TransposeVulkanContext {
+    FFVulkanContext vkctx;
+    FFVkQueueFamilyCtx qf;
+    FFVkExecContext *exec;
+    FFVulkanPipeline *pl;
+
+    VkDescriptorImageInfo input_images[3];
+    VkDescriptorImageInfo output_images[3];
+
+    int initialized;
+} TransposeVulkanContext;
+
+static av_cold int init_filter(AVFilterContext *ctx, AVFrame *in)
+{
+    int err = 0;
+    FFVkSPIRVShader *shd;
+    TransposeVulkanContext *s = ctx->priv;
+    FFVulkanContext *vkctx = &s->vkctx;
+    const int planes = av_pix_fmt_count_planes(s->vkctx.output_format);
+
+    FFVulkanDescriptorSetBinding image_descs[] = {
+        {
+            .name       = "input_images",
+            .type       = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+            .dimensions = 2,
+            .elems      = planes,
+            .stages     = VK_SHADER_STAGE_COMPUTE_BIT,
+            .updater    = s->input_images,
+        },
+        {
+            .name       = "output_images",
+            .type       = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
+            .mem_layout = ff_vk_shader_rep_fmt(s->vkctx.output_format),
+            .mem_quali  = "writeonly",
+            .dimensions = 2,
+            .elems      = planes,
+            .stages     = VK_SHADER_STAGE_COMPUTE_BIT,
+            .updater    = s->output_images,
+        },
+    };
+
+    image_descs[0].sampler = ff_vk_init_sampler(vkctx, 1, VK_FILTER_LINEAR);
+    if (!image_descs[0].sampler)
+        return AVERROR_EXTERNAL;
+
+    ff_vk_qf_init(vkctx, &s->qf, VK_QUEUE_COMPUTE_BIT, 0);
+
+    {
+        s->pl = ff_vk_create_pipeline(vkctx, &s->qf);
+        if (!s->pl)
+            return AVERROR(ENOMEM);
+
+        shd = ff_vk_init_shader(s->pl, "transpose_compute", image_descs[0].stages);
+        if (!shd)
+            return AVERROR(ENOMEM);
+
+        ff_vk_set_compute_shader_sizes(shd, (int [3]){ CGS, 1, 1 });
+        RET(ff_vk_add_descriptor_set(vkctx, s->pl, shd, image_descs, FF_ARRAY_ELEMS(image_descs), 0));
+
+        GLSLC(0, void main()                                            );
+        GLSLC(0, {                                                      );
+        GLSLC(1,     ivec2 size;                                        );
+        GLSLC(1,     const ivec2 pos = ivec2(gl_GlobalInvocationID.xy); );
+        for (int i = 0; i < planes; i++) {
+            GLSLC(0,                                                    );
+            GLSLF(1, size = imageSize(output_images[%i]);             ,i);
+            GLSLC(1, if (IS_WITHIN(pos, size)) {                        );
+            GLSLF(2,     vec4 res = texture(input_images[%i], pos.yx);,i);
+            GLSLF(2,     imageStore(output_images[%i], pos, res);     ,i);
+            GLSLC(1, }                                                  );
+        }
+        GLSLC(0, }                                                      );
+
+        RET(ff_vk_compile_shader(vkctx, shd, "main"));
+        RET(ff_vk_init_pipeline_layout(vkctx, s->pl));
+        RET(ff_vk_init_compute_pipeline(vkctx, s->pl));
+    }
+
+    RET(ff_vk_create_exec_ctx(vkctx, &s->exec, &s->qf));
+    s->initialized = 1;
+
+fail:
+    return err;
+}
+
+static int process_frames(AVFilterContext *avctx, AVFrame *outframe, AVFrame *inframe)
+{
+    int err = 0;
+    VkCommandBuffer cmd_buf;
+    TransposeVulkanContext *s = avctx->priv;
+    FFVulkanContext *vkctx = &s->vkctx;
+    FFVulkanFunctions *vk = &s->vkctx.vkfn;
+    const int planes = av_pix_fmt_count_planes(s->vkctx.output_format);
+
+    AVVkFrame *in  = (AVVkFrame *)inframe->data[0];
+    AVVkFrame *out = (AVVkFrame *)outframe->data[0];
+
+    const VkFormat *input_formats  = av_vkfmt_from_pixfmt(s->vkctx.input_format);
+    const VkFormat *output_formats = av_vkfmt_from_pixfmt(s->vkctx.output_format);
+
+    ff_vk_start_exec_recording(vkctx, s->exec);
+    cmd_buf = ff_vk_get_exec_buf(s->exec);
+
+    for (int i = 0; i < planes; i++) {
+        RET(ff_vk_create_imageview(vkctx, s->exec,
+                                   &s->input_images[i].imageView, in->img[i],
+                                   input_formats[i],
+                                   ff_comp_identity_map));
+
+        RET(ff_vk_create_imageview(vkctx, s->exec,
+                                   &s->output_images[i].imageView, out->img[i],
+                                   output_formats[i],
+                                   ff_comp_identity_map));
+
+        s->input_images[i].imageLayout  = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+        s->output_images[i].imageLayout = VK_IMAGE_LAYOUT_GENERAL;
+    }
+
+    ff_vk_update_descriptor_set(vkctx, s->pl, 0);
+
+    for (int i = 0; i < planes; i++) {
+        VkImageMemoryBarrier barriers[] = {
+            {
+                .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+                .srcAccessMask = 0,
+                .dstAccessMask = VK_ACCESS_SHADER_READ_BIT,
+                .oldLayout = in->layout[i],
+                .newLayout = s->input_images[i].imageLayout,
+                .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .image = in->img[i],
+                .subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+                .subresourceRange.levelCount = 1,
+                .subresourceRange.layerCount = 1,
+            },
+            {
+                .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+                .srcAccessMask = 0,
+                .dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
+                .oldLayout = out->layout[i],
+                .newLayout = s->output_images[i].imageLayout,
+                .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                .image = out->img[i],
+                .subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+                .subresourceRange.levelCount = 1,
+                .subresourceRange.layerCount = 1,
+            },
+        };
+
+        vk->CmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+                               VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0,
+                               0, NULL, 0, NULL, FF_ARRAY_ELEMS(barriers), barriers);
+
+        in->layout[i]  = barriers[0].newLayout;
+        in->access[i]  = barriers[0].dstAccessMask;
+
+        out->layout[i] = barriers[1].newLayout;
+        out->access[i] = barriers[1].dstAccessMask;
+    }
+
+    ff_vk_bind_pipeline_exec(vkctx, s->exec, s->pl);
+    vk->CmdDispatch(cmd_buf, FFALIGN(s->vkctx.output_width, CGS)/CGS,
+                    s->vkctx.output_height, 1);
+
+    ff_vk_add_exec_dep(vkctx, s->exec, inframe, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
+    ff_vk_add_exec_dep(vkctx, s->exec, outframe, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
+
+    err = ff_vk_submit_exec_queue(vkctx, s->exec);
+    if (err)
+        return err;
+
+    ff_vk_qf_rotate(&s->qf);
+
+    return 0;
+
+fail:
+    ff_vk_discard_exec_deps(s->exec);
+    return err;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+    int err;
+    AVFrame *out = NULL;
+    AVFilterContext *ctx = inlink->dst;
+    TransposeVulkanContext *s = ctx->priv;
+    AVFilterLink *outlink = ctx->outputs[0];
+
+    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+    if (!out) {
+        err = AVERROR(ENOMEM);
+        goto fail;
+    }
+
+    if (!s->initialized)
+        RET(init_filter(ctx, in));
+
+    RET(process_frames(ctx, out, in));
+
+    RET(av_frame_copy_props(out, in));
+
+    if (in->sample_aspect_ratio.num)
+        out->sample_aspect_ratio = in->sample_aspect_ratio;
+    else {
+        out->sample_aspect_ratio.num = in->sample_aspect_ratio.den;
+        out->sample_aspect_ratio.den = in->sample_aspect_ratio.num;
+    }
+
+    av_frame_free(&in);
+
+    return ff_filter_frame(outlink, out);
+
+fail:
+    av_frame_free(&in);
+    av_frame_free(&out);
+    return err;
+}
+
+static av_cold void transpose_vulkan_uninit(AVFilterContext *avctx)
+{
+    TransposeVulkanContext *s = avctx->priv;
+    ff_vk_uninit(&s->vkctx);
+
+    s->initialized = 0;
+}
+
+static int config_props_output(AVFilterLink *outlink)
+{
+    int err = 0;
+    AVFilterContext *avctx = outlink->src;
+    TransposeVulkanContext *s = avctx->priv;
+    FFVulkanContext *vkctx = &s->vkctx;
+    AVFilterLink *inlink = avctx->inputs[0];
+
+    vkctx->output_width  = inlink->h;
+    vkctx->output_height = inlink->w;
+
+    RET(ff_vk_filter_config_output(outlink));
+
+    outlink->w = inlink->h;
+    outlink->h = inlink->w;
+
+    if (inlink->sample_aspect_ratio.num)
+        outlink->sample_aspect_ratio = av_div_q((AVRational) { 1, 1 },
+                                                inlink->sample_aspect_ratio);
+    else
+        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+
+    err = 0;
+
+fail:
+    return err;
+}
+
+static const AVOption transpose_vulkan_options[] = {
+    { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(transpose_vulkan);
+
+static const AVFilterPad transpose_vulkan_inputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .filter_frame = &filter_frame,
+        .config_props = &ff_vk_filter_config_input,
+    }
+};
+
+static const AVFilterPad transpose_vulkan_outputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .config_props = &config_props_output,
+    }
+};
+
+const AVFilter ff_vf_transpose_vulkan = {
+    .name           = "transpose_vulkan",
+    .description    = NULL_IF_CONFIG_SMALL("Transpose Vulkan Filter"),
+    .priv_size      = sizeof(TransposeVulkanContext),
+    .init           = &ff_vk_filter_init,
+    .uninit         = &transpose_vulkan_uninit,
+    FILTER_INPUTS(transpose_vulkan_inputs),
+    FILTER_OUTPUTS(transpose_vulkan_outputs),
+    FILTER_SINGLE_PIXFMT(AV_PIX_FMT_VULKAN),
+    .priv_class     = &transpose_vulkan_class,
+    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
+};
-- 
2.25.1