Re: [Intel-gfx] [PATCH v5 04/19] kvm/vfio: Rename kvm_vfio_group to prepare for accepting vfio device fd

2023-02-27 Thread Jason Gunthorpe
On Mon, Feb 27, 2023 at 03:11:20AM -0800, Yi Liu wrote:
> Meanwhile, rename related helpers. No functional change is intended.
> 
> Signed-off-by: Yi Liu 
> Reviewed-by: Kevin Tian 
> Reviewed-by: Eric Auger 
> ---
>  virt/kvm/vfio.c | 115 
>  1 file changed, 58 insertions(+), 57 deletions(-)

Reviewed-by: Jason Gunthorpe 

Jason


[Intel-gfx] [PATCH v5 04/19] kvm/vfio: Rename kvm_vfio_group to prepare for accepting vfio device fd

2023-02-27 Thread Yi Liu
Meanwhile, rename related helpers. No functional change is intended.

Signed-off-by: Yi Liu 
Reviewed-by: Kevin Tian 
Reviewed-by: Eric Auger 
---
 virt/kvm/vfio.c | 115 
 1 file changed, 58 insertions(+), 57 deletions(-)

diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 8bac308ba630..857d6ba349e1 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -21,7 +21,7 @@
 #include 
 #endif
 
-struct kvm_vfio_group {
+struct kvm_vfio_file {
struct list_head node;
struct file *file;
 #ifdef CONFIG_SPAPR_TCE_IOMMU
@@ -30,7 +30,7 @@ struct kvm_vfio_group {
 };
 
 struct kvm_vfio {
-   struct list_head group_list;
+   struct list_head file_list;
struct mutex lock;
bool noncoherent;
 };
@@ -98,34 +98,35 @@ static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
 }
 
 static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
-struct kvm_vfio_group *kvg)
+struct kvm_vfio_file *kvf)
 {
-   if (WARN_ON_ONCE(!kvg->iommu_group))
+   if (WARN_ON_ONCE(!kvf->iommu_group))
return;
 
-   kvm_spapr_tce_release_iommu_group(kvm, kvg->iommu_group);
-   iommu_group_put(kvg->iommu_group);
-   kvg->iommu_group = NULL;
+   kvm_spapr_tce_release_iommu_group(kvm, kvf->iommu_group);
+   iommu_group_put(kvf->iommu_group);
+   kvf->iommu_group = NULL;
 }
 #endif
 
 /*
- * Groups can use the same or different IOMMU domains.  If the same then
- * adding a new group may change the coherency of groups we've previously
- * been told about.  We don't want to care about any of that so we retest
- * each group and bail as soon as we find one that's noncoherent.  This
- * means we only ever [un]register_noncoherent_dma once for the whole device.
+ * Groups/devices can use the same or different IOMMU domains. If the same
+ * then adding a new group/device may change the coherency of groups/devices
+ * we've previously been told about. We don't want to care about any of
+ * that so we retest each group/device and bail as soon as we find one that's
+ * noncoherent.  This means we only ever [un]register_noncoherent_dma once
+ * for the whole device.
  */
 static void kvm_vfio_update_coherency(struct kvm_device *dev)
 {
struct kvm_vfio *kv = dev->private;
bool noncoherent = false;
-   struct kvm_vfio_group *kvg;
+   struct kvm_vfio_file *kvf;
 
mutex_lock(&kv->lock);
 
-   list_for_each_entry(kvg, &kv->group_list, node) {
-   if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
+   list_for_each_entry(kvf, &kv->file_list, node) {
+   if (!kvm_vfio_file_enforced_coherent(kvf->file)) {
noncoherent = true;
break;
}
@@ -143,10 +144,10 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
mutex_unlock(&kv->lock);
 }
 
-static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
+static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
 {
struct kvm_vfio *kv = dev->private;
-   struct kvm_vfio_group *kvg;
+   struct kvm_vfio_file *kvf;
struct file *filp;
int ret;
 
@@ -162,27 +163,27 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
 
mutex_lock(&kv->lock);
 
-   list_for_each_entry(kvg, &kv->group_list, node) {
-   if (kvg->file == filp) {
+   list_for_each_entry(kvf, &kv->file_list, node) {
+   if (kvf->file == filp) {
ret = -EEXIST;
goto err_unlock;
}
}
 
-   kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
-   if (!kvg) {
+   kvf = kzalloc(sizeof(*kvf), GFP_KERNEL_ACCOUNT);
+   if (!kvf) {
ret = -ENOMEM;
goto err_unlock;
}
 
-   kvg->file = filp;
-   list_add_tail(&kvg->node, &kv->group_list);
+   kvf->file = filp;
+   list_add_tail(&kvf->node, &kv->file_list);
 
kvm_arch_start_assignment(dev->kvm);
 
mutex_unlock(&kv->lock);
 
-   kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
+   kvm_vfio_file_set_kvm(kvf->file, dev->kvm);
kvm_vfio_update_coherency(dev);
 
return 0;
@@ -193,10 +194,10 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
return ret;
 }
 
-static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
+static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd)
 {
struct kvm_vfio *kv = dev->private;
-   struct kvm_vfio_group *kvg;
+   struct kvm_vfio_file *kvf;
struct fd f;
int ret;
 
@@ -208,18 +209,18 @@ static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
 
mutex_lock(&kv->lock);
 
-   list_for_each_entry(kvg, &kv->group_list, node) {
-