On 12.10.22 14:32, Simon Ser wrote:
Introduce a new DRM_IOCTL_SYNCOBJ_EVENTFD IOCTL which signals an
eventfd from a syncobj.

This is useful for Wayland compositors to handle wait-before-submit.
Wayland clients can send a timeline point to the compositor before
the point has materialized, and the compositor can then wait for the
point to materialize via this new IOCTL.

The existing DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT IOCTL is not suitable
because it blocks. Compositors want to integrate the wait with
their poll(2)-based event loop.

v2:
- Wait for fence when flags is zero
- Improve documentation (Pekka)
- Rename IOCTL (Christian)
- Fix typo in drm_syncobj_add_eventfd() (Christian)

Signed-off-by: Simon Ser <cont...@emersion.fr>
Cc: Jason Ekstrand <ja...@jlekstrand.net>
Cc: Daniel Vetter <dan...@ffwll.ch>
Cc: Christian König <christian.koe...@amd.com>
Cc: Bas Nieuwenhuizen <b...@basnieuwenhuizen.nl>
Cc: Daniel Stone <dan...@fooishbar.org>
Cc: Pekka Paalanen <ppaala...@gmail.com>
Cc: James Jones <jajo...@nvidia.com>

Reviewed-by: Christian König <christian.koe...@amd.com>

---

Toy user-space available at:
https://paste.sr.ht/~emersion/674bd4fc614570f262b5bb2213be5c77d68944ac
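
For illustration, here is a minimal user-space sketch (separate from the
toy linked above) of how a compositor might register an eventfd for a
pending timeline point and pick it up from its event loop. It assumes an
already-open DRM fd (drm_fd), a syncobj handle and point obtained
elsewhere, libdrm's drmIoctl(), and the updated uapi <drm.h> from this
patch (pulled in via <xf86drm.h>); error handling is omitted:

#include <stdint.h>
#include <poll.h>
#include <sys/eventfd.h>
#include <xf86drm.h>

/* Hypothetical helper: returns an eventfd that becomes readable once a
 * fence is available for (handle, point). */
static int syncobj_point_to_eventfd(int drm_fd, uint32_t handle, uint64_t point)
{
	int ev_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);

	struct drm_syncobj_eventfd args = {
		.handle = handle,
		/* Wait for the fence to materialize, not to signal */
		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
		.point = point,
		.fd = ev_fd,
	};
	drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &args);

	return ev_fd;
}

/* In the compositor's poll(2)-based loop the eventfd is just another fd. */
static void wait_demo(int drm_fd, uint32_t handle, uint64_t point)
{
	struct pollfd pfd = {
		.fd = syncobj_point_to_eventfd(drm_fd, handle, point),
		.events = POLLIN,
	};
	poll(&pfd, 1, -1);
	/* A fence for (handle, point) is now available; submit work. */
}

With flags set to zero the eventfd instead fires once the point has
actually signalled, per the uapi documentation below.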

  drivers/gpu/drm/drm_internal.h |   2 +
  drivers/gpu/drm/drm_ioctl.c    |   2 +
  drivers/gpu/drm/drm_syncobj.c  | 143 +++++++++++++++++++++++++++++++--
  include/drm/drm_syncobj.h      |   6 +-
  include/uapi/drm/drm.h         |  22 +++++
  5 files changed, 168 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 1fbbc19f1ac0..ca320e155b69 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -242,6 +242,8 @@ int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_private);
  int drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_private);
+int drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_private);
  int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_private);
  int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index ca2a6e6101dc..95ec9a02f8a7 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -702,6 +702,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
                      DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl,
                      DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_EVENTFD, drm_syncobj_eventfd_ioctl,
+                     DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl,
                      DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 0c2be8360525..659577ad8c07 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -185,6 +185,7 @@
#include <linux/anon_inodes.h>
  #include <linux/dma-fence-unwrap.h>
+#include <linux/eventfd.h>
  #include <linux/file.h>
  #include <linux/fs.h>
  #include <linux/sched/signal.h>
@@ -212,6 +213,20 @@ struct syncobj_wait_entry {
  static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
                                      struct syncobj_wait_entry *wait);
+struct syncobj_eventfd_entry {
+       struct list_head node;
+       struct dma_fence *fence;
+       struct dma_fence_cb fence_cb;
+       struct drm_syncobj *syncobj;
+       struct eventfd_ctx *ev_fd_ctx;
+       u64 point;
+       u32 flags;
+};
+
+static void
+syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
+                          struct syncobj_eventfd_entry *entry);
+
  /**
   * drm_syncobj_find - lookup and reference a sync object.
   * @file_private: drm file private pointer
@@ -274,6 +289,27 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
        spin_unlock(&syncobj->lock);
  }
+static void
+syncobj_eventfd_entry_free(struct syncobj_eventfd_entry *entry)
+{
+       eventfd_ctx_put(entry->ev_fd_ctx);
+       dma_fence_put(entry->fence);
+       /* This happens either inside the syncobj lock, or after the node has
+        * already been removed from the list */
+       list_del(&entry->node);
+       kfree(entry);
+}
+
+static void
+drm_syncobj_add_eventfd(struct drm_syncobj *syncobj,
+                       struct syncobj_eventfd_entry *entry)
+{
+       spin_lock(&syncobj->lock);
+       list_add_tail(&entry->node, &syncobj->ev_fd_list);
+       syncobj_eventfd_entry_func(syncobj, entry);
+       spin_unlock(&syncobj->lock);
+}
+
  /**
   * drm_syncobj_add_point - add new timeline point to the syncobj
   * @syncobj: sync object to add timeline point do
@@ -288,7 +324,8 @@ void drm_syncobj_add_point(struct drm_syncobj *syncobj,
                           struct dma_fence *fence,
                           uint64_t point)
  {
-       struct syncobj_wait_entry *cur, *tmp;
+       struct syncobj_wait_entry *wait_cur, *wait_tmp;
+       struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
        struct dma_fence *prev;
dma_fence_get(fence);
@@ -302,8 +339,10 @@ void drm_syncobj_add_point(struct drm_syncobj *syncobj,
        dma_fence_chain_init(chain, prev, fence, point);
        rcu_assign_pointer(syncobj->fence, &chain->base);
- list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
-               syncobj_wait_syncobj_func(syncobj, cur);
+       list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
+               syncobj_wait_syncobj_func(syncobj, wait_cur);
+       list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
+               syncobj_eventfd_entry_func(syncobj, ev_fd_cur);
        spin_unlock(&syncobj->lock);
/* Walk the chain once to trigger garbage collection */
@@ -323,7 +362,8 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
                               struct dma_fence *fence)
  {
        struct dma_fence *old_fence;
-       struct syncobj_wait_entry *cur, *tmp;
+       struct syncobj_wait_entry *wait_cur, *wait_tmp;
+       struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
if (fence)
                dma_fence_get(fence);
@@ -335,8 +375,10 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
        rcu_assign_pointer(syncobj->fence, fence);
if (fence != old_fence) {
-               list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
-                       syncobj_wait_syncobj_func(syncobj, cur);
+               list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
+                       syncobj_wait_syncobj_func(syncobj, wait_cur);
+               list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
+                       syncobj_eventfd_entry_func(syncobj, ev_fd_cur);
        }
spin_unlock(&syncobj->lock);
@@ -472,7 +514,13 @@ void drm_syncobj_free(struct kref *kref)
        struct drm_syncobj *syncobj = container_of(kref,
                                                   struct drm_syncobj,
                                                   refcount);
+       struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
+
        drm_syncobj_replace_fence(syncobj, NULL);
+
+       list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
+               syncobj_eventfd_entry_free(ev_fd_cur);
+
        kfree(syncobj);
  }
  EXPORT_SYMBOL(drm_syncobj_free);
@@ -501,6 +549,7 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
kref_init(&syncobj->refcount);
        INIT_LIST_HEAD(&syncobj->cb_list);
+       INIT_LIST_HEAD(&syncobj->ev_fd_list);
        spin_lock_init(&syncobj->lock);
if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
@@ -1304,6 +1353,88 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
        return ret;
  }
+static void syncobj_eventfd_entry_fence_func(struct dma_fence *fence,
+                                            struct dma_fence_cb *cb)
+{
+       struct syncobj_eventfd_entry *entry =
+               container_of(cb, struct syncobj_eventfd_entry, fence_cb);
+
+       eventfd_signal(entry->ev_fd_ctx, 1);
+       syncobj_eventfd_entry_free(entry);
+}
+
+static void
+syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
+                          struct syncobj_eventfd_entry *entry)
+{
+       int ret;
+       struct dma_fence *fence;
+
+       /* This happens inside the syncobj lock */
+       fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+       ret = dma_fence_chain_find_seqno(&fence, entry->point);
+       if (ret != 0 || !fence) {
+               dma_fence_put(fence);
+               return;
+       }
+
+       list_del_init(&entry->node);
+       entry->fence = fence;
+
+       if (entry->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) {
+               eventfd_signal(entry->ev_fd_ctx, 1);
+               syncobj_eventfd_entry_free(entry);
+       } else {
+               ret = dma_fence_add_callback(fence, &entry->fence_cb,
+                                            syncobj_eventfd_entry_fence_func);
+               if (ret == -ENOENT) {
+                       eventfd_signal(entry->ev_fd_ctx, 1);
+                       syncobj_eventfd_entry_free(entry);
+               }
+       }
+}
+
+int
+drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_private)
+{
+       struct drm_syncobj_eventfd *args = data;
+       struct drm_syncobj *syncobj;
+       struct eventfd_ctx *ev_fd_ctx;
+       struct syncobj_eventfd_entry *entry;
+
+       if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+               return -EOPNOTSUPP;
+
+       if (args->flags & ~DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)
+               return -EINVAL;
+
+       if (args->pad)
+               return -EINVAL;
+
+       syncobj = drm_syncobj_find(file_private, args->handle);
+       if (!syncobj)
+               return -ENOENT;
+
+       ev_fd_ctx = eventfd_ctx_fdget(args->fd);
+       if (IS_ERR(ev_fd_ctx))
+               return PTR_ERR(ev_fd_ctx);
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry) {
+               eventfd_ctx_put(ev_fd_ctx);
+               return -ENOMEM;
+       }
+       entry->syncobj = syncobj;
+       entry->ev_fd_ctx = ev_fd_ctx;
+       entry->point = args->point;
+       entry->flags = args->flags;
+
+       drm_syncobj_add_eventfd(syncobj, entry);
+       drm_syncobj_put(syncobj);
+
+       return 0;
+}
int
  drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 6cf7243a1dc5..b40052132e52 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -54,7 +54,11 @@ struct drm_syncobj {
         */
        struct list_head cb_list;
        /**
-        * @lock: Protects &cb_list and write-locks &fence.
+        * @ev_fd_list: List of registered eventfds.
+        */
+       struct list_head ev_fd_list;
+       /**
+        * @lock: Protects &cb_list and &ev_fd_list, and write-locks &fence.
         */
        spinlock_t lock;
        /**
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 642808520d92..5ac0a48b0169 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -909,6 +909,27 @@ struct drm_syncobj_timeline_wait {
        __u32 pad;
  };
+/**
+ * struct drm_syncobj_eventfd
+ * @handle: syncobj handle.
+ * @flags: Zero to wait for the point to be signalled, or
+ *         &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
+ *         available for the point.
+ * @point: syncobj timeline point (set to zero for binary syncobjs).
+ * @fd: Existing eventfd to send events to.
+ * @pad: Must be zero.
+ *
+ * Register an eventfd to be signalled by a syncobj. The eventfd counter will
+ * be incremented by one.
+ */
+struct drm_syncobj_eventfd {
+       __u32 handle;
+       __u32 flags;
+       __u64 point;
+       __s32 fd;
+       __u32 pad;
+};
+
struct drm_syncobj_array {
        __u64 handles;
@@ -1095,6 +1116,7 @@ extern "C" {
  #define DRM_IOCTL_SYNCOBJ_QUERY               DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
  #define DRM_IOCTL_SYNCOBJ_TRANSFER    DRM_IOWR(0xCC, struct drm_syncobj_transfer)
  #define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL     DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
+#define DRM_IOCTL_SYNCOBJ_EVENTFD      DRM_IOWR(0xCE, struct drm_syncobj_eventfd)
/**
   * DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata.
