Module: Mesa
Branch: main
Commit: 9ddab162b7841d440523a094ef7005bb79cc8d16
URL:    
http://cgit.freedesktop.org/mesa/mesa/commit/?id=9ddab162b7841d440523a094ef7005bb79cc8d16

Author: Jason Ekstrand <[email protected]>
Date:   Thu Mar 24 17:05:08 2022 -0500

vulkan/queue: Add a submit mode enum

This encapsulates all three possible submit modes: immediate, deferred,
and threaded.  It's more clear than the has_thread boolean combined with
device-level checks.

Reviewed-by: Lionel Landwerlin <[email protected]>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/15566>

---

 src/vulkan/runtime/vk_device.c | 18 ++++++++++++++
 src/vulkan/runtime/vk_device.h | 52 +++++++++++++++++++++++++++++++++++++++
 src/vulkan/runtime/vk_queue.c  | 55 ++++++++++++++++++------------------------
 src/vulkan/runtime/vk_queue.h  | 19 +++++++++++++--
 4 files changed, 110 insertions(+), 34 deletions(-)

diff --git a/src/vulkan/runtime/vk_device.c b/src/vulkan/runtime/vk_device.c
index 627ec61a8fe..8ced6b425d7 100644
--- a/src/vulkan/runtime/vk_device.c
+++ b/src/vulkan/runtime/vk_device.c
@@ -139,6 +139,24 @@ vk_device_init(struct vk_device *device,
 
    device->timeline_mode = get_timeline_mode(physical_device);
 
+   switch (device->timeline_mode) {
+   case VK_DEVICE_TIMELINE_MODE_NONE:
+   case VK_DEVICE_TIMELINE_MODE_NATIVE:
+      device->submit_mode = VK_QUEUE_SUBMIT_MODE_IMMEDIATE;
+      break;
+
+   case VK_DEVICE_TIMELINE_MODE_EMULATED:
+      device->submit_mode = VK_QUEUE_SUBMIT_MODE_DEFERRED;
+      break;
+
+   case VK_DEVICE_TIMELINE_MODE_ASSISTED:
+      device->submit_mode = VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND;
+      break;
+
+   default:
+      unreachable("Invalid timeline mode");
+   }
+
 #ifdef ANDROID
    mtx_init(&device->swapchain_private_mtx, mtx_plain);
    device->swapchain_private = NULL;
diff --git a/src/vulkan/runtime/vk_device.h b/src/vulkan/runtime/vk_device.h
index d40f195c74a..2b7606cf315 100644
--- a/src/vulkan/runtime/vk_device.h
+++ b/src/vulkan/runtime/vk_device.h
@@ -36,6 +36,56 @@ extern "C" {
 
 struct vk_sync;
 
+enum vk_queue_submit_mode {
+   /** Submits happen immediately
+    *
+    * `vkQueueSubmit()` and `vkQueueBindSparse()` call
+    * `vk_queue::driver_submit` directly for all submits and the last call to
+    * `vk_queue::driver_submit` will have completed by the time
+    * `vkQueueSubmit()` or `vkQueueBindSparse()` return.
+    */
+   VK_QUEUE_SUBMIT_MODE_IMMEDIATE,
+
+   /** Submits may be deferred until a future `vk_queue_flush()`
+    *
+    * Submits are added to the queue and `vk_queue_flush()` is called.
+    * However, any submits with unsatisfied dependencies will be left on the
+    * queue until a future `vk_queue_flush()` call.  This is used for
+    * implementing emulated timeline semaphores without threading.
+    */
+   VK_QUEUE_SUBMIT_MODE_DEFERRED,
+
+   /** Submits will be added to the queue and handled later by a thread
+    *
+    * This places additional requirements on the vk_sync types used by the
+    * driver:
+    *
+    *    1. All `vk_sync` types which support `VK_SYNC_FEATURE_GPU_WAIT` also
+    *       support `VK_SYNC_FEATURE_WAIT_PENDING` so that the threads can
+    *       sort out when a given submit has all its dependencies resolved.
+    *
+    *    2. All binary `vk_sync` types which support `VK_SYNC_FEATURE_GPU_WAIT`
+    *       also support `VK_SYNC_FEATURE_CPU_RESET` so we can reset
+    *       semaphores after waiting on them.
+    *
+    *    3. All vk_sync types used as permanent payloads of semaphores support
+    *       `vk_sync_type::move` so that it can move the pending signal into a
+    *       temporary vk_sync and reset the semaphore.
+    *
+    * This is required for shared timeline semaphores where we need to handle
+    * wait-before-signal by threading in the driver if we ever see an
+    * unresolved dependency.
+    */
+   VK_QUEUE_SUBMIT_MODE_THREADED,
+
+   /** Threaded but only if we need it to resolve dependencies
+    *
+    * This imposes all the same requirements on `vk_sync` types as
+    * `VK_QUEUE_SUBMIT_MODE_THREADED`.
+    */
+   VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND,
+};
+
 struct vk_device {
    struct vk_object_base base;
    VkAllocationCallbacks alloc;
@@ -178,6 +228,8 @@ struct vk_device {
       VK_DEVICE_TIMELINE_MODE_NATIVE,
    } timeline_mode;
 
+   enum vk_queue_submit_mode submit_mode;
+
 #ifdef ANDROID
    mtx_t swapchain_private_mtx;
    struct hash_table *swapchain_private;
diff --git a/src/vulkan/runtime/vk_queue.c b/src/vulkan/runtime/vk_queue.c
index 38b8b4e0f22..f650cfefdd1 100644
--- a/src/vulkan/runtime/vk_queue.c
+++ b/src/vulkan/runtime/vk_queue.c
@@ -62,6 +62,10 @@ vk_queue_init(struct vk_queue *queue, struct vk_device 
*device,
    assert(index_in_family < pCreateInfo->queueCount);
    queue->index_in_family = index_in_family;
 
+   queue->submit.mode = device->submit_mode;
+   if (queue->submit.mode == VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND)
+      queue->submit.mode = VK_QUEUE_SUBMIT_MODE_IMMEDIATE;
+
    list_inithead(&queue->submit.submits);
 
    ret = mtx_init(&queue->submit.mutex, mtx_plain);
@@ -95,12 +99,6 @@ fail_mutex:
    return result;
 }
 
-static bool
-vk_queue_has_submit_thread(struct vk_queue *queue)
-{
-   return queue->submit.has_thread;
-}
-
 VkResult
 _vk_queue_set_lost(struct vk_queue *queue,
                    const char *file, int line,
@@ -380,8 +378,7 @@ vk_queue_flush(struct vk_queue *queue, uint32_t 
*submit_count_out)
 {
    VkResult result = VK_SUCCESS;
 
-   assert(queue->base.device->timeline_mode ==
-          VK_DEVICE_TIMELINE_MODE_EMULATED);
+   assert(queue->submit.mode == VK_QUEUE_SUBMIT_MODE_DEFERRED);
 
    mtx_lock(&queue->submit.mutex);
 
@@ -443,9 +440,6 @@ vk_queue_submit_thread_func(void *_data)
    struct vk_queue *queue = _data;
    VkResult result;
 
-   assert(queue->base.device->timeline_mode ==
-          VK_DEVICE_TIMELINE_MODE_ASSISTED);
-
    mtx_lock(&queue->submit.mutex);
 
    while (queue->submit.thread_run) {
@@ -515,7 +509,7 @@ vk_queue_enable_submit_thread(struct vk_queue *queue)
    if (ret == thrd_error)
       return vk_errorf(queue, VK_ERROR_UNKNOWN, "thrd_create failed");
 
-   queue->submit.has_thread = true;
+   queue->submit.mode = VK_QUEUE_SUBMIT_MODE_THREADED;
 
    return VK_SUCCESS;
 }
@@ -533,7 +527,8 @@ vk_queue_disable_submit_thread(struct vk_queue *queue)
 
    thrd_join(queue->submit.thread, NULL);
 
-   queue->submit.has_thread = false;
+   assert(list_is_empty(&queue->submit.submits));
+   queue->submit.mode = VK_QUEUE_SUBMIT_MODE_IMMEDIATE;
 }
 
 struct vulkan_submit_info {
@@ -785,7 +780,7 @@ vk_queue_submit(struct vk_queue *queue,
 
    switch (queue->base.device->timeline_mode) {
    case VK_DEVICE_TIMELINE_MODE_ASSISTED:
-      if (!vk_queue_has_submit_thread(queue)) {
+      if (queue->submit.mode != VK_QUEUE_SUBMIT_MODE_THREADED) {
          static int force_submit_thread = -1;
          if (unlikely(force_submit_thread < 0)) {
             force_submit_thread =
@@ -808,7 +803,7 @@ vk_queue_submit(struct vk_queue *queue,
             goto fail;
       }
 
-      if (vk_queue_has_submit_thread(queue)) {
+      if (queue->submit.mode == VK_QUEUE_SUBMIT_MODE_THREADED) {
          if (has_binary_permanent_semaphore_wait) {
             for (uint32_t i = 0; i < info->wait_count; i++) {
                VK_FROM_HANDLE(vk_semaphore, semaphore,
@@ -1048,26 +1043,22 @@ vk_queue_signal_sync(struct vk_queue *queue,
    };
 
    VkResult result;
-   switch (queue->base.device->timeline_mode) {
-   case VK_DEVICE_TIMELINE_MODE_ASSISTED:
-      if (vk_queue_has_submit_thread(queue)) {
-         vk_queue_push_submit(queue, submit);
-         return VK_SUCCESS;
-      } else {
-         result = vk_queue_submit_final(queue, submit);
-         vk_queue_submit_destroy(queue, submit);
-         return result;
-      }
+   switch (queue->submit.mode) {
+   case VK_QUEUE_SUBMIT_MODE_IMMEDIATE:
+      result = vk_queue_submit_final(queue, submit);
+      vk_queue_submit_destroy(queue, submit);
+      return result;
 
-   case VK_DEVICE_TIMELINE_MODE_EMULATED:
+   case VK_QUEUE_SUBMIT_MODE_DEFERRED:
       vk_queue_push_submit(queue, submit);
       return vk_device_flush(queue->base.device);
 
-   case VK_DEVICE_TIMELINE_MODE_NONE:
-   case VK_DEVICE_TIMELINE_MODE_NATIVE:
-      result = vk_queue_submit_final(queue, submit);
-      vk_queue_submit_destroy(queue, submit);
-      return result;
+   case VK_QUEUE_SUBMIT_MODE_THREADED:
+      vk_queue_push_submit(queue, submit);
+      return VK_SUCCESS;
+
+   case VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND:
+      unreachable("Invalid vk_queue::submit.mode");
    }
    unreachable("Invalid timeline mode");
 }
@@ -1075,7 +1066,7 @@ vk_queue_signal_sync(struct vk_queue *queue,
 void
 vk_queue_finish(struct vk_queue *queue)
 {
-   if (vk_queue_has_submit_thread(queue))
+   if (queue->submit.mode == VK_QUEUE_SUBMIT_MODE_THREADED)
       vk_queue_disable_submit_thread(queue);
 
    while (!list_is_empty(&queue->submit.submits)) {
diff --git a/src/vulkan/runtime/vk_queue.h b/src/vulkan/runtime/vk_queue.h
index 88f6da6c037..5cd715221ea 100644
--- a/src/vulkan/runtime/vk_queue.h
+++ b/src/vulkan/runtime/vk_queue.h
@@ -24,7 +24,7 @@
 #ifndef VK_QUEUE_H
 #define VK_QUEUE_H
 
-#include "vk_object.h"
+#include "vk_device.h"
 
 #include "c11/threads.h"
 
@@ -72,6 +72,22 @@ struct vk_queue {
                              struct vk_queue_submit *submit);
 
    struct {
+      /** Current submit mode
+       *
+       * This represents the exact current submit mode for this specific queue
+       * which may be different from `vk_device::submit_mode`.  In particular,
+       * this will never be `VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND`.
+       * Instead, when the device submit mode is
+       * `VK_QUEUE_SUBMIT_MODE_THREADED_ON_DEMAND`, the queue submit mode
+       * will be one of `VK_QUEUE_SUBMIT_MODE_THREADED` or
+       * `VK_QUEUE_SUBMIT_MODE_IMMEDIATE` depending on whether or not a submit
+       * thread is currently running for this queue.  If the device submit
+       * mode is `VK_QUEUE_SUBMIT_MODE_DEFERRED`, every queue in the device
+       * will use `VK_QUEUE_SUBMIT_MODE_DEFERRED` because the deferred submit
+       * model depends on regular flushing instead of independent threads.
+       */
+      enum vk_queue_submit_mode mode;
+
       mtx_t mutex;
       cnd_t push;
       cnd_t pop;
@@ -79,7 +95,6 @@ struct vk_queue {
       struct list_head submits;
 
       bool thread_run;
-      bool has_thread;
       thrd_t thread;
    } submit;
 

Reply via email to