Re: [Intel-gfx] [PATCH 03/13] vfio: Provide better generic support for open/release vfio_device_ops

2021-07-19 Thread Jason Gunthorpe
On Mon, Jul 19, 2021 at 10:01:31AM -0300, Jason Gunthorpe wrote:
> On Mon, Jul 19, 2021 at 02:58:58PM +0200, Cornelia Huck wrote:
> > > - ret = device->ops->open(device);
> > > - if (ret) {
> > > - module_put(device->dev->driver->owner);
> > > - vfio_device_put(device);
> > > - return ret;
> > > + mutex_lock(&device->dev_set->lock);
> > > + device->open_count++;
> > > + if (device->open_count == 1 && device->ops->open_device) {
> > > + ret = device->ops->open_device(device);
> > > + if (ret)
> > > + goto err_undo_count;
> > 
> > Won't that fail for mdev devices, until the patches later in this series
> > have been applied? (i.e. bad for bisect)
> 
> What are you seeing? At this point all devices will have a NULL
> open_device and skip this logic?

Oh, nevermind, I see it now. Yes, that needs fixing, thanks

Jason


Re: [Intel-gfx] [PATCH 03/13] vfio: Provide better generic support for open/release vfio_device_ops

2021-07-19 Thread Jason Gunthorpe
On Mon, Jul 19, 2021 at 02:58:58PM +0200, Cornelia Huck wrote:
> > -   ret = device->ops->open(device);
> > -   if (ret) {
> > -   module_put(device->dev->driver->owner);
> > -   vfio_device_put(device);
> > -   return ret;
> > +   mutex_lock(&device->dev_set->lock);
> > +   device->open_count++;
> > +   if (device->open_count == 1 && device->ops->open_device) {
> > +   ret = device->ops->open_device(device);
> > +   if (ret)
> > +   goto err_undo_count;
> 
> Won't that fail for mdev devices, until the patches later in this series
> have been applied? (i.e. bad for bisect)

What are you seeing? At this point all devices will have a NULL
open_device and skip this logic?

Jason


Re: [Intel-gfx] [PATCH 03/13] vfio: Provide better generic support for open/release vfio_device_ops

2021-07-19 Thread Cornelia Huck
On Wed, Jul 14 2021, Jason Gunthorpe  wrote:

> Currently the driver ops have an open/release pair that is called once
> each time a device FD is opened or closed. Add an additional set of
> open/close_device() ops which are called when the device FD is opened for
> the first time and closed for the last time.
>
> An analysis shows that all of the drivers require this semantic. Some are
> open coding it as part of their reflck implementation, and some are just
> buggy and miss it completely.
>
> To retain the current semantics PCI and FSL depend on, introduce the idea
> of a "device set" which is a grouping of vfio_device's that share the same
> lock around opening.
>
> The device set is established by providing a 'set_id' pointer. All
> vfio_device's that provide the same pointer will be joined to the same
> singleton memory and lock across the whole set. This effectively replaces
> the oddly named reflck.
>
> After conversion the set_id will be sourced from:
>  - A struct device from a fsl_mc_device (fsl)
>  - A struct pci_slot (pci)
>  - A struct pci_bus (pci)
>  - The struct vfio_device (everything)
>
> The design ensures that the above pointers are live as long as the
> vfio_device is registered, so they form reliable unique keys to group
> vfio_devices into sets.
>
> This implementation uses xarray instead of searching through the driver
> core structures, which simplifies the somewhat tricky locking in this
> area.
>
> Following patches convert all the drivers.
>
> Signed-off-by: Yishai Hadas 
> Signed-off-by: Jason Gunthorpe 
> ---
>  drivers/vfio/mdev/vfio_mdev.c |  22 ++
>  drivers/vfio/vfio.c   | 144 --
>  include/linux/mdev.h  |   2 +
>  include/linux/vfio.h  |  19 +
>  4 files changed, 165 insertions(+), 22 deletions(-)
>

(...)

> @@ -760,6 +829,13 @@ int vfio_register_group_dev(struct vfio_device *device)
>   struct iommu_group *iommu_group;
>   struct vfio_group *group;
>  
> + /*
> +  * If the driver doesn't specify a set then the device is added to a
> +  * signleton set just for itself.

s/signleton/singleton/

> +  */
> + if (!device->dev_set)
> + vfio_assign_device_set(device, device);
> +
>   iommu_group = iommu_group_get(device->dev);
>   if (!iommu_group)
>   return -EINVAL;
> @@ -1361,7 +1437,8 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
>  {
>   struct vfio_device *device;
>   struct file *filep;
> - int ret;
> + int fdno;
> + int ret = 0;
>  
>   if (0 == atomic_read(&group->container_users) ||
>   !group->container->iommu_driver || !vfio_group_viable(group))
> @@ -1375,38 +1452,38 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
>   return PTR_ERR(device);
>  
>   if (!try_module_get(device->dev->driver->owner)) {
> - vfio_device_put(device);
> - return -ENODEV;
> + ret = -ENODEV;
> + goto err_device_put;
>   }
>  
> - ret = device->ops->open(device);
> - if (ret) {
> - module_put(device->dev->driver->owner);
> - vfio_device_put(device);
> - return ret;
> + mutex_lock(&device->dev_set->lock);
> + device->open_count++;
> + if (device->open_count == 1 && device->ops->open_device) {
> + ret = device->ops->open_device(device);
> + if (ret)
> + goto err_undo_count;

Won't that fail for mdev devices, until the patches later in this series
have been applied? (i.e. bad for bisect)

> + }
> + mutex_unlock(&device->dev_set->lock);
> +
> + if (device->ops->open) {
> + ret = device->ops->open(device);
> + if (ret)
> + goto err_close_device;
>   }



[Intel-gfx] [PATCH 03/13] vfio: Provide better generic support for open/release vfio_device_ops

2021-07-14 Thread Jason Gunthorpe
Currently the driver ops have an open/release pair that is called once
each time a device FD is opened or closed. Add an additional set of
open/close_device() ops which are called when the device FD is opened for
the first time and closed for the last time.
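
As a rough illustration (the my_vfio_* names below are hypothetical, sketched
only to show the intended call pattern, not taken from this series), a
converted driver ends up providing both pairs in its ops:

#include <linux/vfio.h>

static int my_vfio_open_device(struct vfio_device *vdev)
{
	/* Runs once, when the first FD for this device is opened. */
	return 0;
}

static void my_vfio_close_device(struct vfio_device *vdev)
{
	/* Runs once, when the last FD for this device is closed. */
}

static const struct vfio_device_ops my_vfio_dev_ops = {
	.name		= "my-vfio-drv",
	.open_device	= my_vfio_open_device,	/* first open only */
	.close_device	= my_vfio_close_device,	/* last close only */
	/* .open/.release, if also provided, still run for every FD. */
};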

An analysis shows that all of the drivers require this semantic. Some are
open coding it as part of their reflck implementation, and some are just
buggy and miss it completely.

To retain the current semantics PCI and FSL depend on, introduce the idea
of a "device set" which is a grouping of vfio_device's that share the same
lock around opening.

The device set is established by providing a 'set_id' pointer. All
vfio_device's that provide the same pointer will be joined to the same
singleton memory and lock across the whole set. This effectively replaces
the oddly named reflck.

After conversion the set_id will be sourced from:
 - A struct device from a fsl_mc_device (fsl)
 - A struct pci_slot (pci)
 - A struct pci_bus (pci)
 - The struct vfio_device (everything)

The design ensures that the above pointers are live as long as the
vfio_device is registered, so they form reliable unique keys to group
vfio_devices into sets.
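
As a sketch of how a converted driver might pick its set_id (the helper and
the slot-or-bus policy below are hypothetical, meant only to illustrate the
API, not lifted from the later conversion patches):

#include <linux/pci.h>
#include <linux/vfio.h>

static int my_vfio_pci_assign_set(struct vfio_device *vdev,
				  struct pci_dev *pdev)
{
	/* All functions behind the same slot share one dev_set and lock... */
	if (pdev->slot)
		return vfio_assign_device_set(vdev, pdev->slot);
	/* ...otherwise group by bus. */
	return vfio_assign_device_set(vdev, pdev->bus);
}

This would run before vfio_register_group_dev(); a device that never assigns
a set simply gets a singleton set of its own at registration time.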

This implementation uses xarray instead of searching through the driver
core structures, which simplifies the somewhat tricky locking in this
area.

Following patches convert all the drivers.

Signed-off-by: Yishai Hadas 
Signed-off-by: Jason Gunthorpe 
---
 drivers/vfio/mdev/vfio_mdev.c |  22 ++
 drivers/vfio/vfio.c   | 144 --
 include/linux/mdev.h  |   2 +
 include/linux/vfio.h  |  19 +
 4 files changed, 165 insertions(+), 22 deletions(-)

diff --git a/drivers/vfio/mdev/vfio_mdev.c b/drivers/vfio/mdev/vfio_mdev.c
index a5c77ccb24f70a..3c384d2350b64a 100644
--- a/drivers/vfio/mdev/vfio_mdev.c
+++ b/drivers/vfio/mdev/vfio_mdev.c
@@ -17,6 +17,26 @@
 
 #include "mdev_private.h"
 
+static int vfio_mdev_open_device(struct vfio_device *core_vdev)
+{
+   struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
+   struct mdev_parent *parent = mdev->type->parent;
+
+   if (unlikely(!parent->ops->open_device))
+   return -EINVAL;
+
+   return parent->ops->open_device(mdev);
+}
+
+static void vfio_mdev_close_device(struct vfio_device *core_vdev)
+{
+   struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
+   struct mdev_parent *parent = mdev->type->parent;
+
+   if (likely(parent->ops->close_device))
+   parent->ops->close_device(mdev);
+}
+
 static int vfio_mdev_open(struct vfio_device *core_vdev)
 {
struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
@@ -100,6 +120,8 @@ static void vfio_mdev_request(struct vfio_device *core_vdev, unsigned int count)
 
 static const struct vfio_device_ops vfio_mdev_dev_ops = {
.name   = "vfio-mdev",
+   .open_device= vfio_mdev_open_device,
+   .close_device   = vfio_mdev_close_device,
.open   = vfio_mdev_open,
.release= vfio_mdev_release,
.ioctl  = vfio_mdev_unlocked_ioctl,
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index cc375df0fd5dda..6908c2ae9b36f6 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -96,6 +96,74 @@ module_param_named(enable_unsafe_noiommu_mode,
 MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode.  
This mode provides no device isolation, no DMA translation, no host kernel 
protection, cannot be used for device assignment to virtual machines, requires 
RAWIO permissions, and will taint the kernel.  If you do not know what this is 
for, step away. (default: false)");
 #endif
 
+static DEFINE_XARRAY(vfio_device_set_xa);
+
+int vfio_assign_device_set(struct vfio_device *device, void *set_id)
+{
+   struct vfio_device_set *alloc_dev_set = NULL;
+   struct vfio_device_set *dev_set;
+
+   if (WARN_ON(!set_id))
+   return -EINVAL;
+
+   /*
+* Atomically acquire a singleton object in the xarray for this set_id
+*/
+again:
+   xa_lock(&vfio_device_set_xa);
+   if (alloc_dev_set) {
+   dev_set = __xa_cmpxchg(&vfio_device_set_xa,
+  (unsigned long)set_id, NULL,
+  alloc_dev_set, GFP_KERNEL);
+   if (xa_is_err(dev_set)) {
+   xa_unlock(&vfio_device_set_xa);
+   kfree(alloc_dev_set);
+   return xa_err(dev_set);
+   }
+   if (!dev_set)
+   dev_set = alloc_dev_set;
+   } else
+   dev_set = xa_load(&vfio_device_set_xa, (unsigned long)set_id);
+   if (dev_set) {
+   dev_set->device_count++;
+   xa_unlock(&vfio_device_set_xa);
+   device->dev_set = dev_set;
+   if (dev_set != alloc_dev_set)
+   kfree(alloc_dev_set);
+   return