[Intel-gfx] [PATCH 18/22] drm/i915: Export a preallocate variant of i915_active_acquire()

2020-05-03  Chris Wilson
Sometimes we have to be very careful not to allocate underneath a mutex
(or spinlock) and yet still want to track activity. Enter
i915_active_acquire_for_context(). This raises the activity counter on
i915_active prior to use and ensures that the fence-tree contains a slot
for the context.
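
As a rough illustration of the intended calling pattern (an editorial sketch, not part of the patch): a hypothetical caller preallocates the tree slot for its timeline before taking the allocation-hostile lock, then records the fence under that lock with __i915_active_ref(). The caller, the lock, and the second parameter of i915_active_acquire_for_context() (assumed here to be the fence context index, matching __active_lookup() below) are all assumptions.

#include <linux/err.h>
#include <linux/mutex.h>

#include "i915_active.h"
#include "i915_request.h"
#include "gt/intel_timeline.h"

/* Hypothetical caller; "lock" stands in for whatever mutex forbids allocation. */
static int track_activity_locked(struct i915_active *ref,
				 struct mutex *lock,
				 struct intel_timeline *tl,
				 struct i915_request *rq)
{
	struct dma_fence *prev;
	int err;

	/* Preallocate the slot for tl while we are still allowed to allocate. */
	err = i915_active_acquire_for_context(ref, tl->fence_context);
	if (err)
		return err;

	mutex_lock(lock);
	/* The slot already exists, so no allocation happens underneath the lock. */
	prev = __i915_active_ref(ref, tl->fence_context, &rq->fence);
	mutex_unlock(lock);

	if (!IS_ERR_OR_NULL(prev))
		dma_fence_put(prev); /* __i915_active_ref() returns a held fence */

	i915_active_release(ref); /* drop the extra acquire */
	return 0;
}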

Signed-off-by: Chris Wilson 
---
 drivers/gpu/drm/i915/i915_active.c | 107 ++---
 drivers/gpu/drm/i915/i915_active.h |   5 ++
 2 files changed, 103 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index d960d0be5bd2..71ad0d452680 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -217,11 +217,10 @@ excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
 }
 
 static struct i915_active_fence *
-active_instance(struct i915_active *ref, struct intel_timeline *tl)
+active_instance(struct i915_active *ref, u64 idx)
 {
struct active_node *node, *prealloc;
struct rb_node **p, *parent;
-   u64 idx = tl->fence_context;
 
/*
 * We track the most recently used timeline to skip a rbtree search
@@ -367,7 +366,7 @@ int i915_active_ref(struct i915_active *ref,
if (err)
return err;
 
-   active = active_instance(ref, tl);
+   active = active_instance(ref, tl->fence_context);
if (!active) {
err = -ENOMEM;
goto out;
@@ -384,32 +383,104 @@ int i915_active_ref(struct i915_active *ref,
atomic_dec(&ref->count);
}
if (!__i915_active_fence_set(active, fence))
-   atomic_inc(&ref->count);
+   __i915_active_acquire(ref);
 
 out:
i915_active_release(ref);
return err;
 }
 
-struct dma_fence *
-i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
+static struct dma_fence *
+__i915_active_set_fence(struct i915_active *ref,
+   struct i915_active_fence *active,
+   struct dma_fence *fence)
 {
struct dma_fence *prev;
 
/* We expect the caller to manage the exclusive timeline ordering */
GEM_BUG_ON(i915_active_is_idle(ref));
 
+   if (is_barrier(active)) { /* proto-node used by our idle barrier */
+   /*
+* This request is on the kernel_context timeline, and so
+* we can use it to substitute for the pending idle-barrier
+* request that we want to emit on the kernel_context.
+*/
+   __active_del_barrier(ref, node_from_active(active));
+   RCU_INIT_POINTER(active->fence, NULL);
+   atomic_dec(&ref->count);
+   }
+
rcu_read_lock();
-   prev = __i915_active_fence_set(&ref->excl, f);
+   prev = __i915_active_fence_set(active, fence);
if (prev)
prev = dma_fence_get_rcu(prev);
else
-   atomic_inc(&ref->count);
+   __i915_active_acquire(ref);
rcu_read_unlock();
 
return prev;
 }
 
+static struct i915_active_fence *
+__active_lookup(struct i915_active *ref, u64 idx)
+{
+   struct active_node *node;
+   struct rb_node *p;
+
+   /* Like active_instance() but with no malloc */
+
+   node = READ_ONCE(ref->cache);
+   if (node && node->timeline == idx)
+   return &node->base;
+
+   spin_lock_irq(&ref->tree_lock);
+   GEM_BUG_ON(i915_active_is_idle(ref));
+
+   p = ref->tree.rb_node;
+   while (p) {
+   node = rb_entry(p, struct active_node, node);
+   if (node->timeline == idx) {
+   ref->cache = node;
+   spin_unlock_irq(&ref->tree_lock);
+   return &node->base;
+   }
+
+   if (node->timeline < idx)
+   p = p->rb_right;
+   else
+   p = p->rb_left;
+   }
+
+   spin_unlock_irq(&ref->tree_lock);
+
+   return NULL;
+}
+
+struct dma_fence *
+__i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
+{
+   struct dma_fence *prev = ERR_PTR(-ENOENT);
+   struct i915_active_fence *active;
+
+   if (!i915_active_acquire_if_busy(ref))
+   return ERR_PTR(-EINVAL);
+
+   active = __active_lookup(ref, idx);
+   if (active)
+   prev = __i915_active_set_fence(ref, active, fence);
+
+   i915_active_release(ref);
+   return prev;
+}
+
+struct dma_fence *
+i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
+{
+   /* We expect the caller to manage the exclusive timeline ordering */
+   return __i915_active_set_fence(ref, &ref->excl, f);
+}
+
 bool i915_active_acquire_if_busy(struct i915_active *ref)
 {
debug_active_assert(ref);
@@ -443,6 +514,24 @@ int i915_active_acquire(struct i915_active *ref)
return err;
 }
 
+int i915_active_acquire_for_context(struct
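
The hunk adding i915_active_acquire_for_context() is cut off above. Purely as a hedged sketch of the shape the commit message describes (raise the activity counter, then make sure the fence-tree already has a slot for the context), built only from helpers visible in this patch; the body in the actual series may differ:

int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
{
	int err;

	/* Raise the activity counter (runs the optional ->active() hook on first use). */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	/*
	 * Make sure the fence-tree has a slot for idx so that a later
	 * __i915_active_ref(ref, idx, fence) never needs to allocate
	 * underneath the caller's lock.
	 */
	if (!active_instance(ref, idx)) {
		i915_active_release(ref);
		return -ENOMEM;
	}

	/* The extra acquire is dropped by a matching i915_active_release(). */
	return 0;
}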
