Take into account dma fences in dirtyfb callback. If there are no
unsignaled dma fences, perform the flush immediately. If there are
unsignaled dma fences, perform invalidate and add a callback which will
queue a flush when the fence gets signaled.

v4:
 - Move invalidate before callback is added
v3:
 - Check frontbuffer bits before adding any fence cb
 - Flush only when adding fence cb succeeds
v2: Use dma_resv_get_singleton

Signed-off-by: Jouni Högander <jouni.hogan...@intel.com>
---
 drivers/gpu/drm/i915/display/intel_fb.c | 60 +++++++++++++++++++++++--
 1 file changed, 57 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_fb.c 
b/drivers/gpu/drm/i915/display/intel_fb.c
index b1bfbbef89b5..e7678571b0d7 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -7,6 +7,9 @@
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_modeset_helper.h>
 
+#include <linux/dma-fence.h>
+#include <linux/dma-resv.h>
+
 #include "i915_drv.h"
 #include "intel_display.h"
 #include "intel_display_types.h"
@@ -1897,6 +1900,21 @@ static int intel_user_framebuffer_create_handle(struct 
drm_framebuffer *fb,
        return drm_gem_handle_create(file, &obj->base, handle);
 }
 
+struct frontbuffer_fence_cb {
+       struct dma_fence_cb base;
+       struct intel_frontbuffer *front;
+};
+
+static void intel_user_framebuffer_fence_wake(struct dma_fence *dma,
+                                             struct dma_fence_cb *data)
+{
+       struct frontbuffer_fence_cb *cb = container_of(data, typeof(*cb), base);
+
+       intel_frontbuffer_queue_flush(cb->front);
+       kfree(cb);
+       dma_fence_put(dma);
+}
+
 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
                                        struct drm_file *file,
                                        unsigned int flags, unsigned int color,
@@ -1904,11 +1922,47 @@ static int intel_user_framebuffer_dirty(struct 
drm_framebuffer *fb,
                                        unsigned int num_clips)
 {
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       struct intel_frontbuffer *front = to_intel_frontbuffer(fb);
+       struct dma_fence *fence;
+       struct frontbuffer_fence_cb *cb;
+       int ret = 0;
 
-       i915_gem_object_flush_if_display(obj);
-       intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
+       if (!atomic_read(&front->bits))
+               return 0;
 
-       return 0;
+       if (dma_resv_test_signaled(obj->base.resv, dma_resv_usage_rw(false)))
+               goto flush;
+
+       ret = dma_resv_get_singleton(obj->base.resv, dma_resv_usage_rw(false),
+                                    &fence);
+       if (ret || !fence)
+               goto flush;
+
+       cb = kmalloc(sizeof(*cb), GFP_KERNEL);
+       if (!cb) {
+               dma_fence_put(fence);
+               ret = -ENOMEM;
+               goto flush;
+       }
+
+       cb->front = front;
+
+       intel_frontbuffer_invalidate(front, ORIGIN_DIRTYFB);
+
+       ret = dma_fence_add_callback(fence, &cb->base,
+                                    intel_user_framebuffer_fence_wake);
+       if (ret) {
+               intel_user_framebuffer_fence_wake(fence, &cb->base);
+               if (ret == -ENOENT)
+                       ret = 0;
+       }
+
+       return ret;
+
+flush:
+       i915_gem_object_flush_if_display(obj);
+       intel_frontbuffer_flush(front, ORIGIN_DIRTYFB);
+       return ret;
 }
 
 static const struct drm_framebuffer_funcs intel_fb_funcs = {
-- 
2.34.1

Reply via email to