Re: [PATCH v2 RESEND] drm/i915/gvt: make gvt oblivious of kvmgt data structures

2020-02-19 Thread Julian Stecklina
On Tue, 2020-02-18 at 16:50 +0800, Zhenyu Wang wrote:
> Looks this needs some backmerge first to apply, I'll include this
> for -next pull later.

I usually base these patches on gvt-staging. If there is some other branch to
rebase them onto to make your life easier, just point me to it.

> Thanks for resend.

No problem!

Julian

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v2 RESEND] drm/i915/gvt: make gvt oblivious of kvmgt data structures

2020-02-18 Thread Zhenyu Wang
On 2020.02.17 17:38:58 +0100, Julian Stecklina wrote:
> Instead of defining KVMGT per-device state in struct intel_vgpu
> directly, add an indirection. This makes the GVT code oblivious of
> what state KVMGT needs to keep.
> 
> The intention here is to eventually make it possible to build
> hypervisor backends for the mediator, without having to touch the
> mediator itself. This is a first step.
> 
> v2:
> - rebased onto gvt-staging (no conflicts)
>

Looks this needs some backmerge first to apply, I'll include this
for -next pull later.

Thanks for resend.

> Signed-off-by: Julian Stecklina 
> Acked-by: Zhenyu Wang 
> ---
>  drivers/gpu/drm/i915/gvt/gvt.h   |  32 +---
>  drivers/gpu/drm/i915/gvt/kvmgt.c | 287 +++
>  2 files changed, 184 insertions(+), 135 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
> index 9fe9decc0d86..8cf292a8d6bd 100644
> --- a/drivers/gpu/drm/i915/gvt/gvt.h
> +++ b/drivers/gpu/drm/i915/gvt/gvt.h
> @@ -196,31 +196,8 @@ struct intel_vgpu {
>  
>   struct dentry *debugfs;
>  
> -#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
> - struct {
> - struct mdev_device *mdev;
> - struct vfio_region *region;
> - int num_regions;
> - struct eventfd_ctx *intx_trigger;
> - struct eventfd_ctx *msi_trigger;
> -
> - /*
> -  * Two caches are used to avoid mapping duplicated pages (eg.
> -  * scratch pages). This help to reduce dma setup overhead.
> -  */
> - struct rb_root gfn_cache;
> - struct rb_root dma_addr_cache;
> - unsigned long nr_cache_entries;
> - struct mutex cache_lock;
> -
> - struct notifier_block iommu_notifier;
> - struct notifier_block group_notifier;
> - struct kvm *kvm;
> - struct work_struct release_work;
> - atomic_t released;
> - struct vfio_device *vfio_device;
> - } vdev;
> -#endif
> + /* Hypervisor-specific device state. */
> + void *vdev;
>  
>   struct list_head dmabuf_obj_list_head;
>   struct mutex dmabuf_lock;
> @@ -229,6 +206,11 @@ struct intel_vgpu {
>   u32 scan_nonprivbb;
>  };
>  
> +static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
> +{
> + return vgpu->vdev;
> +}
> +
>  /* validating GM healthy status*/
>  #define vgpu_is_vm_unhealthy(ret_val) \
>   (((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
> diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c 
> b/drivers/gpu/drm/i915/gvt/kvmgt.c
> index 85e59c502ab5..9a435bc1a2f0 100644
> --- a/drivers/gpu/drm/i915/gvt/kvmgt.c
> +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
> @@ -108,6 +108,36 @@ struct gvt_dma {
>   struct kref ref;
>  };
>  
> +struct kvmgt_vdev {
> + struct intel_vgpu *vgpu;
> + struct mdev_device *mdev;
> + struct vfio_region *region;
> + int num_regions;
> + struct eventfd_ctx *intx_trigger;
> + struct eventfd_ctx *msi_trigger;
> +
> + /*
> +  * Two caches are used to avoid mapping duplicated pages (eg.
> +  * scratch pages). This help to reduce dma setup overhead.
> +  */
> + struct rb_root gfn_cache;
> + struct rb_root dma_addr_cache;
> + unsigned long nr_cache_entries;
> + struct mutex cache_lock;
> +
> + struct notifier_block iommu_notifier;
> + struct notifier_block group_notifier;
> + struct kvm *kvm;
> + struct work_struct release_work;
> + atomic_t released;
> + struct vfio_device *vfio_device;
> +};
> +
> +static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
> +{
> + return intel_vgpu_vdev(vgpu);
> +}
> +
>  static inline bool handle_valid(unsigned long handle)
>  {
>   return !!(handle & ~0xff);
> @@ -129,7 +159,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, 
> unsigned long gfn,
>   for (npage = 0; npage < total_pages; npage++) {
>   unsigned long cur_gfn = gfn + npage;
>  
> - ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
> + ret = vfio_unpin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev),
> &cur_gfn, 1);
>   WARN_ON(ret != 1);
>   }
>  }
> @@ -152,7 +182,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, 
> unsigned long gfn,
>   unsigned long cur_gfn = gfn + npage;
>   unsigned long pfn;
>  
> - ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
> + ret = vfio_pin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev),
> &cur_gfn, 1,
>IOMMU_READ | IOMMU_WRITE, &pfn);
>   if (ret != 1) {
>   gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret 
> %d\n",
> @@ -219,7 +249,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, 
> unsigned long gfn,
>  static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
>   dma_addr_t dma_addr)
>  {

[PATCH v2 RESEND] drm/i915/gvt: make gvt oblivious of kvmgt data structures

2020-02-17 Thread Julian Stecklina
Instead of defining KVMGT per-device state in struct intel_vgpu
directly, add an indirection. This makes the GVT code oblivious of
what state KVMGT needs to keep.

The intention here is to eventually make it possible to build
hypervisor backends for the mediator, without having to touch the
mediator itself. This is a first step.

v2:
- rebased onto gvt-staging (no conflicts)

Signed-off-by: Julian Stecklina 
Acked-by: Zhenyu Wang 
---
 drivers/gpu/drm/i915/gvt/gvt.h   |  32 +---
 drivers/gpu/drm/i915/gvt/kvmgt.c | 287 +++
 2 files changed, 184 insertions(+), 135 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 9fe9decc0d86..8cf292a8d6bd 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -196,31 +196,8 @@ struct intel_vgpu {
 
struct dentry *debugfs;
 
-#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
-   struct {
-   struct mdev_device *mdev;
-   struct vfio_region *region;
-   int num_regions;
-   struct eventfd_ctx *intx_trigger;
-   struct eventfd_ctx *msi_trigger;
-
-   /*
-* Two caches are used to avoid mapping duplicated pages (eg.
-* scratch pages). This help to reduce dma setup overhead.
-*/
-   struct rb_root gfn_cache;
-   struct rb_root dma_addr_cache;
-   unsigned long nr_cache_entries;
-   struct mutex cache_lock;
-
-   struct notifier_block iommu_notifier;
-   struct notifier_block group_notifier;
-   struct kvm *kvm;
-   struct work_struct release_work;
-   atomic_t released;
-   struct vfio_device *vfio_device;
-   } vdev;
-#endif
+   /* Hypervisor-specific device state. */
+   void *vdev;
 
struct list_head dmabuf_obj_list_head;
struct mutex dmabuf_lock;
@@ -229,6 +206,11 @@ struct intel_vgpu {
u32 scan_nonprivbb;
 };
 
+static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
+{
+   return vgpu->vdev;
+}
+
 /* validating GM healthy status*/
 #define vgpu_is_vm_unhealthy(ret_val) \
(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 85e59c502ab5..9a435bc1a2f0 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -108,6 +108,36 @@ struct gvt_dma {
struct kref ref;
 };
 
+struct kvmgt_vdev {
+   struct intel_vgpu *vgpu;
+   struct mdev_device *mdev;
+   struct vfio_region *region;
+   int num_regions;
+   struct eventfd_ctx *intx_trigger;
+   struct eventfd_ctx *msi_trigger;
+
+   /*
+* Two caches are used to avoid mapping duplicated pages (eg.
+* scratch pages). This help to reduce dma setup overhead.
+*/
+   struct rb_root gfn_cache;
+   struct rb_root dma_addr_cache;
+   unsigned long nr_cache_entries;
+   struct mutex cache_lock;
+
+   struct notifier_block iommu_notifier;
+   struct notifier_block group_notifier;
+   struct kvm *kvm;
+   struct work_struct release_work;
+   atomic_t released;
+   struct vfio_device *vfio_device;
+};
+
+static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
+{
+   return intel_vgpu_vdev(vgpu);
+}
+
 static inline bool handle_valid(unsigned long handle)
 {
return !!(handle & ~0xff);
@@ -129,7 +159,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, 
unsigned long gfn,
for (npage = 0; npage < total_pages; npage++) {
unsigned long cur_gfn = gfn + npage;
 
-   ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
+   ret = vfio_unpin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev),
&cur_gfn, 1);
WARN_ON(ret != 1);
}
 }
@@ -152,7 +182,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, 
unsigned long gfn,
unsigned long cur_gfn = gfn + npage;
unsigned long pfn;
 
-   ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
+   ret = vfio_pin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev),
&cur_gfn, 1,
 IOMMU_READ | IOMMU_WRITE, &pfn);
if (ret != 1) {
gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret 
%d\n",
@@ -219,7 +249,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, 
unsigned long gfn,
 static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
dma_addr_t dma_addr)
 {
-   struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
+   struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node;
struct gvt_dma *itr;
 
while (node) {
@@ -237,7 +267,7 @@ static struct gvt_dma *__gvt_cache_find_dma_addr(struct 
intel_vgpu *vgpu,
 
 static struct