Track the latest fence waited upon on each context, and only add a new
asynchronous wait if the new fence is more recent than the recorded
fence for that context. This requires us to filter out unordered
timelines, which are noted by DMA_FENCE_NO_CONTEXT. However, in the
absence of a universal identifier, we have to use our own
i915->mm.unordered_timeline token.

v2: Throw around the debug crutches
v3: Inline the likely case of the pre-allocation cache being full.
v4: Drop the pre-allocation support, we can lose the most recent fence
in case of allocation failure -- it just means we may emit more awaits
than strictly necessary but will not break.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursu...@intel.com>
Cc: Joonas Lahtinen <joonas.lahti...@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_request.c            |  66 ++++---
 drivers/gpu/drm/i915/i915_gem_timeline.c           | 194 +++++++++++++++++++++
 drivers/gpu/drm/i915/i915_gem_timeline.h           |  18 ++
 drivers/gpu/drm/i915/selftests/i915_gem_timeline.c | 123 +++++++++++++
 .../gpu/drm/i915/selftests/i915_mock_selftests.h   |   1 +
 5 files changed, 375 insertions(+), 27 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/selftests/i915_gem_timeline.c

diff --git a/drivers/gpu/drm/i915/i915_gem_request.c 
b/drivers/gpu/drm/i915/i915_gem_request.c
index 31874a38752e..f679c16aa5c6 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -714,9 +714,7 @@ int
 i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
                                 struct dma_fence *fence)
 {
-       struct dma_fence_array *array;
        int ret;
-       int i;
 
        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return 0;
@@ -728,39 +726,53 @@ i915_gem_request_await_dma_fence(struct 
drm_i915_gem_request *req,
        if (fence->context == req->fence.context)
                return 0;
 
-       if (dma_fence_is_i915(fence))
-               return i915_gem_request_await_request(req, to_request(fence));
+       /* Squash repeated waits to the same timelines, picking the latest */
+       if (fence->context != req->i915->mm.unordered_timeline &&
+           intel_timeline_sync_get(req->timeline,
+                                   fence->context, fence->seqno))
+               return 0;
 
-       if (!dma_fence_is_array(fence)) {
+       if (dma_fence_is_i915(fence)) {
+               ret = i915_gem_request_await_request(req, to_request(fence));
+               if (ret < 0)
+                       return ret;
+       } else if (!dma_fence_is_array(fence)) {
                ret = i915_sw_fence_await_dma_fence(&req->submit,
                                                    fence, I915_FENCE_TIMEOUT,
                                                    GFP_KERNEL);
-               return ret < 0 ? ret : 0;
-       }
-
-       /* Note that if the fence-array was created in signal-on-any mode,
-        * we should *not* decompose it into its individual fences. However,
-        * we don't currently store which mode the fence-array is operating
-        * in. Fortunately, the only user of signal-on-any is private to
-        * amdgpu and we should not see any incoming fence-array from
-        * sync-file being in signal-on-any mode.
-        */
-
-       array = to_dma_fence_array(fence);
-       for (i = 0; i < array->num_fences; i++) {
-               struct dma_fence *child = array->fences[i];
-
-               if (dma_fence_is_i915(child))
-                       ret = i915_gem_request_await_request(req,
-                                                            to_request(child));
-               else
-                       ret = i915_sw_fence_await_dma_fence(&req->submit,
-                                                           child, 
I915_FENCE_TIMEOUT,
-                                                           GFP_KERNEL);
                if (ret < 0)
                        return ret;
+       } else {
+               struct dma_fence_array *array = to_dma_fence_array(fence);
+               int i;
+
+               /* Note that if the fence-array was created in signal-on-any 
mode,
+                * we should *not* decompose it into its individual fences. 
However,
+                * we don't currently store which mode the fence-array is 
operating
+                * in. Fortunately, the only user of signal-on-any is private to
+                * amdgpu and we should not see any incoming fence-array from
+                * sync-file being in signal-on-any mode.
+                */
+
+               for (i = 0; i < array->num_fences; i++) {
+                       struct dma_fence *child = array->fences[i];
+
+                       if (dma_fence_is_i915(child))
+                               ret = i915_gem_request_await_request(req,
+                                                                    
to_request(child));
+                       else
+                               ret = 
i915_sw_fence_await_dma_fence(&req->submit,
+                                                                   child, 
I915_FENCE_TIMEOUT,
+                                                                   GFP_KERNEL);
+                       if (ret < 0)
+                               return ret;
+               }
        }
 
+       if (fence->context != req->i915->mm.unordered_timeline)
+               intel_timeline_sync_set(req->timeline,
+                                       fence->context, fence->seqno);
+
        return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c 
b/drivers/gpu/drm/i915/i915_gem_timeline.c
index b596ca7ee058..446eefa70e6d 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
@@ -24,6 +24,194 @@
 
 #include "i915_drv.h"
 
+#define SHIFT ilog2(NSEQMAP)
+#define MASK (NSEQMAP - 1)
+
+static void __seqmap_free(struct seqmap *p)
+{
+       unsigned int i;
+
+       if (p->height) {
+               for (; (i = ffs(p->bitmap)); p->bitmap &= ~0u << i)
+                       __seqmap_free(p->slot[i - 1]);
+       }
+
+       kfree(p);
+}
+
+static void seqmap_free(struct seqmap *seqmap)
+{
+       if (!seqmap)
+               return;
+
+       while (seqmap->parent)
+               seqmap = seqmap->parent;
+
+       __seqmap_free(seqmap);
+}
+
+static int layer_idx(const struct seqmap *p, u64 id)
+{
+       return (id >> p->height) & MASK;
+}
+
+bool intel_timeline_sync_get(struct intel_timeline *tl, u64 id, u32 seqno)
+{
+       struct seqmap *p;
+       unsigned int idx;
+
+       p = tl->sync;
+       if (!p)
+               return false;
+
+       if ((id >> SHIFT) == p->prefix)
+               goto found;
+
+       /* First climb the tree back to a parent branch */
+       do {
+               p = p->parent;
+               if (!p)
+                       return false;
+
+               if ((id >> p->height >> SHIFT) == p->prefix)
+                       break;
+       } while (1);
+
+       /* And then descend again until we find our leaf */
+       do {
+               if (!p->height)
+                       break;
+
+               p = p->slot[layer_idx(p, id)];
+               if (!p)
+                       return false;
+
+               if ((id >> p->height >> SHIFT) != p->prefix)
+                       return false;
+       } while (1);
+
+       tl->sync = p;
+found:
+       idx = id & MASK;
+       if (!(p->bitmap & BIT(idx)))
+               return false;
+
+       return i915_seqno_passed((uintptr_t)p->slot[idx], seqno);
+}
+
+int intel_timeline_sync_set(struct intel_timeline *tl, u64 id, u32 seqno)
+{
+       struct seqmap *p;
+       unsigned int idx;
+
+       /* We expect to be called in sequence following a _get(id), which
+        * should have preloaded the tl->sync hint for us.
+        */
+       p = tl->sync;
+       if (likely(p && (id >> SHIFT) == p->prefix))
+               goto set;
+
+       if (!p) {
+               p = kzalloc(sizeof(*p), GFP_KERNEL);
+               if (unlikely(!p))
+                       return -ENOMEM;
+
+               p->prefix = id >> SHIFT;
+               goto found;
+       }
+
+       /* Climb back up the tree until we find a common prefix */
+       do {
+               if (!p->parent)
+                       break;
+
+               p = p->parent;
+
+               if ((id >> p->height >> SHIFT) == p->prefix)
+                       break;
+       } while (1);
+
+       /* No shortcut, we have to descend the tree to find the right layer
+        * containing this fence.
+        *
+        * Each layer in the tree holds 16 (NSEQMAP) pointers, either fences
+        * or lower layers. Leaf nodes (height = 0) contain the fences, all
+        * other nodes (height > 0) are internal layers that point to a lower
+        * node. Each internal layer has at least 2 descendants.
+        *
+        * Starting at the top, we check whether the current prefix matches. If
+        * it doesn't, we have gone past our layer and need to insert a join
+        * into the tree, and a new leaf node as a descendant as well as the
+        * original layer.
+        *
+        * The matching prefix means we are still following the right branch
+        * of the tree. If it has height 0, we have found our leaf and just
+        * need to replace the fence slot with ourselves. If the height is
+        * not zero, our slot contains the next layer in the tree (unless
+        * it is empty, in which case we can add ourselves as a new leaf).
+        * As we descend the tree, the prefix grows (and height decreases).
+        */
+       do {
+               struct seqmap *next;
+
+               if ((id >> p->height >> SHIFT) != p->prefix) {
+                       /* insert a join above the current layer */
+                       next = kzalloc(sizeof(*next), GFP_KERNEL);
+                       if (unlikely(!next))
+                               return -ENOMEM;
+
+                       next->height = ALIGN(fls64((id >> p->height >> SHIFT) ^ 
p->prefix),
+                                           SHIFT) + p->height;
+                       next->prefix = id >> next->height >> SHIFT;
+
+                       if (p->parent)
+                               p->parent->slot[layer_idx(p->parent, id)] = 
next;
+                       next->parent = p->parent;
+
+                       idx = p->prefix >> (next->height - p->height - SHIFT) & 
MASK;
+                       next->slot[idx] = p;
+                       next->bitmap |= BIT(idx);
+                       p->parent = next;
+
+                       /* ascend to the join */
+                       p = next;
+               } else {
+                       if (!p->height)
+                               break;
+               }
+
+               /* descend into the next layer */
+               GEM_BUG_ON(!p->height);
+               idx = layer_idx(p, id);
+               next = p->slot[idx];
+               if (unlikely(!next)) {
+                       next = kzalloc(sizeof(*next), GFP_KERNEL);
+                       if (unlikely(!next))
+                               return -ENOMEM;
+
+                       p->slot[idx] = next;
+                       p->bitmap |= BIT(idx);
+                       next->parent = p;
+                       next->prefix = id >> SHIFT;
+
+                       p = next;
+                       break;
+               }
+
+               p = next;
+       } while (1);
+
+found:
+       GEM_BUG_ON(p->height);
+       GEM_BUG_ON(p->prefix != id >> SHIFT);
+       tl->sync = p;
+set:
+       idx = id & MASK;
+       p->slot[idx] = (void *)(uintptr_t)seqno;
+       p->bitmap |= BIT(idx);
+       return 0;
+}
+
 static int __i915_gem_timeline_init(struct drm_i915_private *i915,
                                    struct i915_gem_timeline *timeline,
                                    const char *name,
@@ -91,8 +279,14 @@ void i915_gem_timeline_fini(struct i915_gem_timeline 
*timeline)
                struct intel_timeline *tl = &timeline->engine[i];
 
                GEM_BUG_ON(!list_empty(&tl->requests));
+
+               seqmap_free(tl->sync);
        }
 
        list_del(&timeline->link);
        kfree(timeline->name);
 }
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_gem_timeline.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h 
b/drivers/gpu/drm/i915/i915_gem_timeline.h
index 6c53e14cab2a..64bad92320de 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.h
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -26,11 +26,23 @@
 #define I915_GEM_TIMELINE_H
 
 #include <linux/list.h>
+#include <linux/radix-tree.h>
 
+#include "i915_utils.h"
 #include "i915_gem_request.h"
 
 struct i915_gem_timeline;
 
+#define NSEQMAP 16
+
+struct seqmap {
+       u64 prefix;
+       unsigned int height;
+       unsigned int bitmap;
+       struct seqmap *parent;
+       void *slot[NSEQMAP];
+};
+
 struct intel_timeline {
        u64 fence_context;
        u32 seqno;
@@ -55,6 +67,8 @@ struct intel_timeline {
         * struct_mutex.
         */
        struct i915_gem_active last_request;
+
+       struct seqmap *sync;
        u32 sync_seqno[I915_NUM_ENGINES];
 
        struct i915_gem_timeline *common;
@@ -75,4 +89,8 @@ int i915_gem_timeline_init(struct drm_i915_private *i915,
 int i915_gem_timeline_init__global(struct drm_i915_private *i915);
 void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
 
+bool intel_timeline_sync_get(struct intel_timeline *tl, u64 id, u32 seqno);
+
+int intel_timeline_sync_set(struct intel_timeline *tl, u64 id, u32 seqno);
+
 #endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c 
b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
new file mode 100644
index 000000000000..c0bb8ecac93b
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+#include "mock_gem_device.h"
+
+static int igt_seqmap(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       const struct {
+               const char *name;
+               u32 seqno;
+               bool expected;
+               bool set;
+       } pass[] = {
+               { "unset", 0, false, false },
+               { "new", 0, false, true },
+               { "0a", 0, true, true },
+               { "1a", 1, false, true },
+               { "1b", 1, true, true },
+               { "0b", 0, true, false },
+               { "2a", 2, false, true },
+               { "4", 4, false, true },
+               { "INT_MAX", INT_MAX, false, true },
+               { "INT_MAX-1", INT_MAX-1, true, false },
+               { "INT_MAX+1", (u32)INT_MAX+1, false, true },
+               { "INT_MAX", INT_MAX, true, false },
+               { "UINT_MAX", UINT_MAX, false, true },
+               { "wrap", 0, false, true },
+               { "unwrap", UINT_MAX, true, false },
+               {},
+       }, *p;
+       struct intel_timeline *tl;
+       int order, offset;
+       int ret;
+
+       tl = &i915->gt.global_timeline.engine[RCS];
+       for (p = pass; p->name; p++) {
+               for (order = 1; order < 64; order++) {
+                       for (offset = -1; offset <= (order > 1); offset++) {
+                               u64 ctx = BIT_ULL(order) + offset;
+
+                               if (intel_timeline_sync_get(tl,
+                                                           ctx,
+                                                           p->seqno) != 
p->expected) {
+                                       pr_err("1: %s(ctx=%llu, seqno=%u) 
expected passed %s but failed\n",
+                                              p->name, ctx, p->seqno, 
yesno(p->expected));
+                                       return -EINVAL;
+                               }
+
+                               if (p->set) {
+                                       ret = intel_timeline_sync_set(tl, ctx, 
p->seqno);
+                                       if (ret)
+                                               return ret;
+                               }
+                       }
+               }
+       }
+
+       tl = &i915->gt.global_timeline.engine[BCS];
+       for (order = 1; order < 64; order++) {
+               for (offset = -1; offset <= (order > 1); offset++) {
+                       u64 ctx = BIT_ULL(order) + offset;
+
+                       for (p = pass; p->name; p++) {
+                               if (intel_timeline_sync_get(tl,
+                                                           ctx,
+                                                           p->seqno) != 
p->expected) {
+                                       pr_err("2: %s(ctx=%llu, seqno=%u) 
expected passed %s but failed\n",
+                                              p->name, ctx, p->seqno, 
yesno(p->expected));
+                                       return -EINVAL;
+                               }
+
+                               if (p->set) {
+                                       ret = intel_timeline_sync_set(tl, ctx, 
p->seqno);
+                                       if (ret)
+                                               return ret;
+                               }
+                       }
+               }
+       }
+
+       return 0;
+}
+
+int i915_gem_timeline_mock_selftests(void)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(igt_seqmap),
+       };
+       struct drm_i915_private *i915;
+       int err;
+
+       i915 = mock_gem_device();
+       if (!i915)
+               return -ENOMEM;
+
+       err = i915_subtests(tests, i915);
+       drm_dev_unref(&i915->drm);
+
+       return err;
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h 
b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index be9a9ebf5692..8d0f50c25df8 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -12,6 +12,7 @@ selftest(sanitycheck, i915_mock_sanitycheck) /* keep first 
(igt selfcheck) */
 selftest(scatterlist, scatterlist_mock_selftests)
 selftest(uncore, intel_uncore_mock_selftests)
 selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
+selftest(timelines, i915_gem_timeline_mock_selftests)
 selftest(requests, i915_gem_request_mock_selftests)
 selftest(objects, i915_gem_object_mock_selftests)
 selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
-- 
2.11.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to