Re: [Intel-gfx] [PATCH 07/13] drm/i915: Add per context timelines to fence object

2015-12-21 Thread Chris Wilson
On Thu, Dec 17, 2015 at 09:49:27AM -0800, Jesse Barnes wrote:
> Yeah we definitely want this, but it'll have to be reconciled with the 
> different request->fence patches.  I'm not sure if it would be easier to move 
> to per-context seqnos first or go this route and deal with the mismatch 
> between global and per-ctx.

This patch doesn't do independent per-context seqno, so the point is moot.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre


Re: [Intel-gfx] [PATCH 07/13] drm/i915: Add per context timelines to fence object

2015-12-17 Thread Jesse Barnes
On 12/11/2015 05:11 AM, john.c.harri...@intel.com wrote:
> From: John Harrison 
> 
> The fence object used inside the request structure requires a sequence
> number. Although this is not used by the i915 driver itself, it could
> potentially be used by non-i915 code if the fence is passed outside of
> the driver. This is the intention as it allows external kernel drivers
> and user applications to wait on batch buffer completion
> asynchronously via the dma-buf fence API.
> 
> To ensure that such external users are not confused by strange things
> happening with the seqno, this patch adds a per-context timeline
> that can provide a guaranteed in-order seqno value for the fence. This
> is safe because the scheduler will not re-order batch buffers within a
> context - they are considered to be mutually dependent.
> 
> v2: New patch in series.
> 
> v3: Renamed/retyped timeline structure fields after review comments by
> Tvrtko Ursulin.
> 
> Added context information to the timeline's name string for better
> identification in debugfs output.
> 
> For: VIZ-5190
> Signed-off-by: John Harrison 
> Cc: Tvrtko Ursulin 
> ---
>  drivers/gpu/drm/i915/i915_drv.h         | 25 ---
>  drivers/gpu/drm/i915/i915_gem.c         | 80 +
>  drivers/gpu/drm/i915/i915_gem_context.c | 15 ++-
>  drivers/gpu/drm/i915/intel_lrc.c        |  8 
>  drivers/gpu/drm/i915/intel_ringbuffer.h |  1 -
>  5 files changed, 111 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index caf7897..7d6a7c0 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -841,6 +841,15 @@ struct i915_ctx_hang_stats {
>   bool banned;
>  };
>  
> +struct i915_fence_timeline {
> +	char			name[32];
> +	unsigned		fence_context;
> +	unsigned		next;
> +
> + struct intel_context *ctx;
> + struct intel_engine_cs *ring;
> +};
> +
>  /* This must match up with the value previously used for execbuf2.rsvd1. */
>  #define DEFAULT_CONTEXT_HANDLE 0
>  
> @@ -885,6 +894,7 @@ struct intel_context {
>   struct drm_i915_gem_object *state;
>   struct intel_ringbuffer *ringbuf;
>   int pin_count;
> + struct i915_fence_timeline fence_timeline;
>   } engine[I915_NUM_RINGS];
>  
>   struct list_head link;
> @@ -2177,13 +2187,10 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
>  struct drm_i915_gem_request {
>   /**
>* Underlying object for implementing the signal/wait stuff.
> -  * NB: Never call fence_later() or return this fence object to user
> -  * land! Due to lazy allocation, scheduler re-ordering, pre-emption,
> -  * etc., there is no guarantee at all about the validity or
> -  * sequentiality of the fence's seqno! It is also unsafe to let
> -  * anything outside of the i915 driver get hold of the fence object
> -  * as the clean up when decrementing the reference count requires
> -  * holding the driver mutex lock.
> +  * NB: Never return this fence object to user land! It is unsafe to
> +  * let anything outside of the i915 driver get hold of the fence
> +  * object as the clean up when decrementing the reference count
> +  * requires holding the driver mutex lock.
>*/
>   struct fence fence;
>  
> @@ -2263,6 +2270,10 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
>  struct drm_i915_gem_request **req_out);
>  void i915_gem_request_cancel(struct drm_i915_gem_request *req);
>  
> +int i915_create_fence_timeline(struct drm_device *dev,
> +struct intel_context *ctx,
> +struct intel_engine_cs *ring);
> +
>  static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req)
>  {
>   return fence_is_signaled(&req->fence);
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 0801738..7a37fb7 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2665,9 +2665,32 @@ static const char *i915_gem_request_get_driver_name(struct fence *req_fence)
>  
>  static const char *i915_gem_request_get_timeline_name(struct fence *req_fence)
>  {
> - struct drm_i915_gem_request *req = container_of(req_fence,
> -  typeof(*req), fence);
> - return req->ring->name;
> + struct drm_i915_gem_request *req;
> + struct i915_fence_timeline *timeline;
> +
> + req = container_of(req_fence, typeof(*req), fence);
> + timeline = &req->ctx->engine[req->ring->id].fence_timeline;
> +
> + return timeline->name;
> +}
> +
> +static void i915_gem_request_timeline_value_str(struct fence *req_fence, char *str, int size)
> +{
> + struct drm_i915_gem_request *req;
> 

[Intel-gfx] [PATCH 07/13] drm/i915: Add per context timelines to fence object

2015-12-11 Thread John . C . Harrison
From: John Harrison 

The fence object used inside the request structure requires a sequence
number. Although this is not used by the i915 driver itself, it could
potentially be used by non-i915 code if the fence is passed outside of
the driver. This is the intention as it allows external kernel drivers
and user applications to wait on batch buffer completion
asynchronously via the dma-buf fence API.

To ensure that such external users are not confused by strange things
happening with the seqno, this patch adds a per-context timeline
that can provide a guaranteed in-order seqno value for the fence. This
is safe because the scheduler will not re-order batch buffers within a
context - they are considered to be mutually dependent.
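
For illustration only, a minimal sketch of how such a per-context, per-engine
timeline can hand out in-order seqnos; the helper name and the locking
assumption are not taken from this patch, and it relies only on the
i915_fence_timeline structure added to i915_drv.h below:

/*
 * Hypothetical helper (not in this patch): returns the next seqno on a
 * per-context, per-engine timeline. The scheduler never re-orders
 * requests within one context/engine, so a monotonic counter is
 * sufficient; zero is skipped so it can keep meaning "no seqno
 * assigned". The caller is assumed to hold whatever lock already
 * serialises request creation for this context.
 */
static unsigned i915_fence_timeline_get_next_seqno(struct i915_fence_timeline *timeline)
{
	unsigned seqno = ++timeline->next;

	/* Reserve 0 as the "invalid" value */
	if (seqno == 0)
		seqno = ++timeline->next;

	return seqno;
}

Presumably the value returned here, together with timeline->fence_context,
is what gets passed to fence_init() when the request's fence is created.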

v2: New patch in series.

v3: Renamed/retyped timeline structure fields after review comments by
Tvrtko Ursulin.

Added context information to the timeline's name string for better
identification in debugfs output.

For: VIZ-5190
Signed-off-by: John Harrison 
Cc: Tvrtko Ursulin 
---
 drivers/gpu/drm/i915/i915_drv.h         | 25 ---
 drivers/gpu/drm/i915/i915_gem.c         | 80 +
 drivers/gpu/drm/i915/i915_gem_context.c | 15 ++-
 drivers/gpu/drm/i915/intel_lrc.c        |  8 
 drivers/gpu/drm/i915/intel_ringbuffer.h |  1 -
 5 files changed, 111 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index caf7897..7d6a7c0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -841,6 +841,15 @@ struct i915_ctx_hang_stats {
bool banned;
 };
 
+struct i915_fence_timeline {
+	char			name[32];
+	unsigned		fence_context;
+	unsigned		next;
+
+   struct intel_context *ctx;
+   struct intel_engine_cs *ring;
+};
+
 /* This must match up with the value previously used for execbuf2.rsvd1. */
 #define DEFAULT_CONTEXT_HANDLE 0
 
@@ -885,6 +894,7 @@ struct intel_context {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
int pin_count;
+   struct i915_fence_timeline fence_timeline;
} engine[I915_NUM_RINGS];
 
struct list_head link;
@@ -2177,13 +2187,10 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 struct drm_i915_gem_request {
/**
 * Underlying object for implementing the signal/wait stuff.
-* NB: Never call fence_later() or return this fence object to user
-* land! Due to lazy allocation, scheduler re-ordering, pre-emption,
-* etc., there is no guarantee at all about the validity or
-* sequentiality of the fence's seqno! It is also unsafe to let
-* anything outside of the i915 driver get hold of the fence object
-* as the clean up when decrementing the reference count requires
-* holding the driver mutex lock.
+* NB: Never return this fence object to user land! It is unsafe to
+* let anything outside of the i915 driver get hold of the fence
+* object as the clean up when decrementing the reference count
+* requires holding the driver mutex lock.
 */
struct fence fence;
 
@@ -2263,6 +2270,10 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
   struct drm_i915_gem_request **req_out);
 void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 
+int i915_create_fence_timeline(struct drm_device *dev,
+  struct intel_context *ctx,
+  struct intel_engine_cs *ring);
+
 static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req)
 {
	return fence_is_signaled(&req->fence);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0801738..7a37fb7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2665,9 +2665,32 @@ static const char *i915_gem_request_get_driver_name(struct fence *req_fence)
 
 static const char *i915_gem_request_get_timeline_name(struct fence *req_fence)
 {
-   struct drm_i915_gem_request *req = container_of(req_fence,
-typeof(*req), fence);
-   return req->ring->name;
+   struct drm_i915_gem_request *req;
+   struct i915_fence_timeline *timeline;
+
+   req = container_of(req_fence, typeof(*req), fence);
+   timeline = &req->ctx->engine[req->ring->id].fence_timeline;
+
+   return timeline->name;
+}
+
+static void i915_gem_request_timeline_value_str(struct fence *req_fence, char *str, int size)
+{
+   struct drm_i915_gem_request *req;
+
+   req = container_of(req_fence, typeof(*req), fence);
+
+   /* Last signalled timeline value ??? */
+   snprintf(str, size, "? [%d]"/*, timeline->value*/, req->ring->get_seqno(req->ring, true));
+}
+
+static
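
The quoted hunk ends before the body of i915_create_fence_timeline(), whose
prototype is added to i915_drv.h above. Purely as a sketch of what such an
initialiser might look like: the use of fence_context_alloc() and the exact
name format are assumptions here, not taken from the patch.

int i915_create_fence_timeline(struct drm_device *dev,
			       struct intel_context *ctx,
			       struct intel_engine_cs *ring)
{
	struct i915_fence_timeline *timeline = &ctx->engine[ring->id].fence_timeline;

	/* One software timeline per context/engine pair. Seqno 0 is
	 * reserved, so values are handed out starting from 1. */
	timeline->fence_context = fence_context_alloc(1);
	timeline->next = 0;
	timeline->ctx = ctx;
	timeline->ring = ring;

	/* Encode the fence context, engine and context pointer in the
	 * name so individual timelines can be told apart in debugfs. */
	snprintf(timeline->name, sizeof(timeline->name), "%u>%s:%p",
		 timeline->fence_context, ring->name, ctx);

	return 0;
}

The name built here would be the string returned by the .get_timeline_name
fence op above, which is why the commit message mentions adding context
information to it for better identification in debugfs output.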