From: Dave Airlie <airl...@redhat.com>

This splits the begin/end query HW emissions out into helper functions,
which makes it easier to add multiview support for queries.
---
 src/amd/vulkan/radv_query.c | 98 ++++++++++++++++++++++++++-------------------
 1 file changed, 57 insertions(+), 41 deletions(-)
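
For reference, a rough sketch (not part of this patch) of how the new
helper could be driven once multiview lands. The view_mask iteration and
the sketch_* name are hypothetical; the follow-up may slot the per-view
results differently:

static void
sketch_begin_query_multiview(struct radv_cmd_buffer *cmd_buffer,
                             struct radv_query_pool *pool,
                             uint64_t va, uint32_t view_mask)
{
        if (!view_mask) {
                /* No multiview: single emission, exactly what
                 * radv_CmdBeginQuery does after this patch. */
                emit_begin_query(cmd_buffer, va, pool->type);
                return;
        }

        /* One emission per active view, each landing in its own query
         * slot by stepping the VA by the pool stride (a guess at the
         * eventual layout). */
        for (uint32_t view = 0; view_mask; view++, view_mask >>= 1) {
                if (view_mask & 1)
                        emit_begin_query(cmd_buffer,
                                         va + (uint64_t)view * pool->stride,
                                         pool->type);
        }
}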

diff --git a/src/amd/vulkan/radv_query.c b/src/amd/vulkan/radv_query.c
index 9fee4d2b491..5fae8b65658 100644
--- a/src/amd/vulkan/radv_query.c
+++ b/src/amd/vulkan/radv_query.c
@@ -1077,33 +1077,12 @@ void radv_CmdResetQueryPool(
        }
 }
 
-void radv_CmdBeginQuery(
-    VkCommandBuffer                             commandBuffer,
-    VkQueryPool                                 queryPool,
-    uint32_t                                    query,
-    VkQueryControlFlags                         flags)
+static void emit_begin_query(struct radv_cmd_buffer *cmd_buffer,
+                            uint64_t va,
+                            VkQueryType query_type)
 {
-       RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
-       RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
        struct radeon_winsys_cs *cs = cmd_buffer->cs;
-       uint64_t va = radv_buffer_get_va(pool->bo);
-       va += pool->stride * query;
-
-       radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo, 8);
-
-       if (cmd_buffer->pending_reset_query) {
-               if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
-                       /* Only need to flush caches if the query pool size is
-                        * large enough to be resetted using the compute shader
-                        * path. Small pools don't need any cache flushes
-                        * because we use a CP dma clear.
-                        */
-                       si_emit_cache_flush(cmd_buffer);
-                       cmd_buffer->pending_reset_query = false;
-               }
-       }
-
-       switch (pool->type) {
+       switch (query_type) {
        case VK_QUERY_TYPE_OCCLUSION:
                radeon_check_space(cmd_buffer->device->ws, cs, 7);
 
@@ -1127,26 +1106,15 @@ void radv_CmdBeginQuery(
        default:
                unreachable("beginning unhandled query type");
        }
-}
+}
 
 
-void radv_CmdEndQuery(
-    VkCommandBuffer                             commandBuffer,
-    VkQueryPool                                 queryPool,
-    uint32_t                                    query)
+static void emit_end_query(struct radv_cmd_buffer *cmd_buffer,
+                          uint64_t va, uint64_t avail_va,
+                          VkQueryType query_type)
 {
-       RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
-       RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
        struct radeon_winsys_cs *cs = cmd_buffer->cs;
-       uint64_t va = radv_buffer_get_va(pool->bo);
-       uint64_t avail_va = va + pool->availability_offset + 4 * query;
-       va += pool->stride * query;
-
-       /* Do not need to add the pool BO to the list because the query must
-        * currently be active, which means the BO is already in the list.
-        */
-
-       switch (pool->type) {
+       switch (query_type) {
        case VK_QUERY_TYPE_OCCLUSION:
                radeon_check_space(cmd_buffer->device->ws, cs, 14);
 
@@ -1182,6 +1150,54 @@ void radv_CmdEndQuery(
        }
 }
 
+void radv_CmdBeginQuery(
+    VkCommandBuffer                             commandBuffer,
+    VkQueryPool                                 queryPool,
+    uint32_t                                    query,
+    VkQueryControlFlags                         flags)
+{
+       RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+       RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
+       struct radeon_winsys_cs *cs = cmd_buffer->cs;
+       uint64_t va = radv_buffer_get_va(pool->bo);
+
+       radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo, 8);
+
+       if (cmd_buffer->pending_reset_query) {
+               if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
+                       /* Only need to flush caches if the query pool size is
+                        * large enough to be reset using the compute shader
+                        * path. Small pools don't need any cache flushes
+                        * because we use a CP DMA clear.
+                        */
+                       si_emit_cache_flush(cmd_buffer);
+                       cmd_buffer->pending_reset_query = false;
+               }
+       }
+
+       va += pool->stride * query;
+
+       emit_begin_query(cmd_buffer, va, pool->type);
+}
+
+
+void radv_CmdEndQuery(
+    VkCommandBuffer                             commandBuffer,
+    VkQueryPool                                 queryPool,
+    uint32_t                                    query)
+{
+       RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+       RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
+       uint64_t va = radv_buffer_get_va(pool->bo);
+       uint64_t avail_va = va + pool->availability_offset + 4 * query;
+       va += pool->stride * query;
+
+       /* Do not need to add the pool BO to the list because the query must
+        * currently be active, which means the BO is already in the list.
+        */
+       emit_end_query(cmd_buffer, va, avail_va, pool->type);
+}
+
 void radv_CmdWriteTimestamp(
     VkCommandBuffer                             commandBuffer,
     VkPipelineStageFlagBits                     pipelineStage,
-- 
2.14.3
