Re: [Intel-gfx] [PATCH v10 3/9] drm/i915: vgpu pv command buffer support

2019-09-17 Thread Chris Wilson

[Intel-gfx] [PATCH v10 3/9] drm/i915: vgpu pv command buffer support

2019-09-16 Thread Xiaolin Zhang
Based on the shared memory setup between guest and GVT, this patch
introduces a simple PV command buffer ring used to perform single-direction
guest-to-GVT communication.

v1: initial support, added to address i915 PV v6 patch set comment.

Signed-off-by: Xiaolin Zhang 
---
 drivers/gpu/drm/i915/i915_pvinfo.h |   1 +
 drivers/gpu/drm/i915/i915_vgpu.c   | 193 +
 drivers/gpu/drm/i915/i915_vgpu.h   |  66 +
 3 files changed, 260 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index 3c63603..db9eebb 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -49,6 +49,7 @@ enum vgt_g2v_type {
VGT_G2V_EXECLIST_CONTEXT_CREATE,
VGT_G2V_EXECLIST_CONTEXT_DESTROY,
VGT_G2V_SHARED_PAGE_SETUP,
+   VGT_G2V_PV_SEND_TRIGGER,
VGT_G2V_MAX,
 };
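
For context, and not part of the diff: guests raise G2V events by writing
the event id into the g2v_notify register of the PVINFO page, so the new
trigger would presumably be fired along these lines (the helper name here
is made up for illustration):

static void pv_notify_gvt(struct drm_i915_private *dev_priv)
{
	I915_WRITE(vgtif_reg(g2v_notify), VGT_G2V_PV_SEND_TRIGGER);
}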
 
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 68fecfd..6e29a52 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -322,6 +322,187 @@ int intel_vgt_balloon(struct i915_ggtt *ggtt)
  * i915 vgpu PV support for Linux
  */
 
+/**
+ * wait_for_desc_update - Wait for the command buffer descriptor update.
+ * @desc: buffer descriptor
+ * @fence: response fence
+ * @status: placeholder for status
+ *
+ * GVT will update the command buffer descriptor with a new fence and
+ * status after processing the command identified by the fence. Wait
+ * for the specified fence and then read the status of the command
+ * from the descriptor.
+ *
+ * Return:
+ * *   0 response received (status is valid)
+ * *   -ETIMEDOUT no response within hardcoded timeout
+ * *   -EPROTO no response, CT buffer is in error
+ */
+static int wait_for_desc_update(struct vgpu_pv_ct_buffer_desc *desc,
+   u32 fence, u32 *status)
+{
+   int err;
+
+#define done (READ_ONCE(desc->fence) == fence)
+   err = wait_for_us(done, 5);
+   if (err)
+   err = wait_for(done, 10);
+#undef done
+
+   if (unlikely(err)) {
+   DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
+   fence, desc->fence);
+   }
+
+   *status = desc->status;
+   return err;
+}
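
An illustrative counterpart, not in this patch: for the wait above to be
safe, the host side has to publish the status before the fence, so that a
guest observing the new fence never reads a stale status. A minimal sketch
of that ordering, with a hypothetical helper name:

static void pv_desc_complete(struct vgpu_pv_ct_buffer_desc *desc,
			     u32 fence, u32 status)
{
	WRITE_ONCE(desc->status, status);
	smp_wmb();	/* publish status before the fence update */
	WRITE_ONCE(desc->fence, fence);
}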
+
+/**
+ * DOC: CTB Guest to GVT request
+ *
+ * Format of the CTB Guest to GVT request message is as follows::
+ *
+ *  +------------+---------+---------+---------+---------+
+ *  |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
+ *  +------------+---------+---------+---------+---------+
+ *  |   MESSAGE  |       MESSAGE PAYLOAD                 |
+ *  +   HEADER   +---------+---------+---------+---------+
+ *  |            |    0    |    1    |   ...   |    n    |
+ *  +============+=========+=========+=========+=========+
+ *  |  len >= 1  |  FENCE  |     request specific data   |
+ *  +------+-----+---------+---------+---------+---------+
+ *
+ *               ^-----------------len-----------------^
+ */
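
To make the layout concrete: with action[] = { 0x10, 0xA, 0xB }, len = 3 and
fence = 7, the write path below emits four dwords. The PV_CT_MSG_* shifts
come from i915_vgpu.h in this series; the action code 0x10 and the payload
values are made up for illustration:

	u32 msg[] = {
		(3 << PV_CT_MSG_LEN_SHIFT) |
		PV_CT_MSG_WRITE_FENCE_TO_DESC |
		(0x10 << PV_CT_MSG_ACTION_SHIFT),	/* DW0: header */
		7,					/* DW1: fence */
		0xA,					/* DW2: action data */
		0xB,					/* DW3: action data */
	};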
+static int pv_command_buffer_write(struct i915_virtual_gpu_pv *pv,
+   const u32 *action, u32 len /* in dwords */, u32 fence)
+{
+   struct vgpu_pv_ct_buffer_desc *desc = pv->ctb.desc;
+   u32 head = desc->head / 4;  /* in dwords */
+   u32 tail = desc->tail / 4;  /* in dwords */
+   u32 size = desc->size / 4;  /* in dwords */
+   u32 used;   /* in dwords */
+   u32 header;
+   u32 *cmds = pv->ctb.cmds;
+   unsigned int i;
+
+   GEM_BUG_ON(desc->size % 4);
+   GEM_BUG_ON(desc->head % 4);
+   GEM_BUG_ON(desc->tail % 4);
+   GEM_BUG_ON(tail >= size);
+
+   /*
+* tail == head condition indicates empty.
+*/
+   if (tail < head)
+   used = (size - head) + tail;
+   else
+   used = tail - head;
+
+   /* make sure there is space, including an extra dword for the fence */
+   if (unlikely(used + len + 1 >= size))
+   return -ENOSPC;
+
+   /*
+* Write the message. The format is the following:
+* DW0: header (including action code)
+* DW1: fence
+* DW2+: action data
+*/
+   header = (len << PV_CT_MSG_LEN_SHIFT) |
+(PV_CT_MSG_WRITE_FENCE_TO_DESC) |
+(action[0] << PV_CT_MSG_ACTION_SHIFT);
+
+   cmds[tail] = header;
+   tail = (tail + 1) % size;
+
+   cmds[tail] = fence;
+   tail = (tail + 1) % size;
+
+   for (i = 1; i < len; i++) {
+   cmds[tail] = action[i];
+   tail = (tail + 1) % size;
+   }
+
+   /* now update desc tail (back in bytes) */
+   desc->tail = tail * 4;
+   GEM_BUG_ON(desc->tail > desc->size);
+
+   return 0;
+}
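
A standalone model of the ring accounting above (all values in dwords),
showing why a message needs len + 1 free slots and why the ring must never
fill completely: tail == head would be indistinguishable from empty.

static unsigned int ring_used(unsigned int head, unsigned int tail,
			      unsigned int size)
{
	return tail < head ? (size - head) + tail : tail - head;
}

/*
 * Example: size = 16, head = 12, tail = 4 gives used = (16 - 12) + 4 = 8.
 * A len = 6 action writes 1 header + 1 fence + 5 payload = 7 dwords:
 * 8 + 6 + 1 = 15 < 16, so it is accepted. A len = 7 action would write
 * 8 dwords and leave tail == head, so 8 + 7 + 1 >= 16 rejects it
 * with -ENOSPC.
 */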
+
+static u32 pv_get_next_fence(struct i915_virtual_gpu_pv *pv)
+{
+   /* For now it's trivial */
+   return ++pv->next_fence;
+}
+
+static int pv_send(struct drm_i915_private *d
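
The archived message breaks off here, inside pv_send(). Purely as a hedged
sketch of how the helpers above plausibly chain together; the completed
signature, the vgpu.pv lookup and the notify step are guesses, not the
actual patch body:

static int pv_send(struct drm_i915_private *dev_priv,
		   const u32 *action, u32 len, u32 *status)
{
	struct i915_virtual_gpu_pv *pv = dev_priv->vgpu.pv; /* assumed field */
	u32 fence = pv_get_next_fence(pv);
	int err;

	err = pv_command_buffer_write(pv, action, len, fence);
	if (err)
		return err;

	/* assumed: kick the host via the new G2V trigger */
	I915_WRITE(vgtif_reg(g2v_notify), VGT_G2V_PV_SEND_TRIGGER);

	return wait_for_desc_update(pv->ctb.desc, fence, status);
}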