Use a combined macro, for_each_svm_dev(), to simplify SVM device
iteration, and IS_ERR_OR_NULL() to simplify error checking.

Suggested-by: Andy Shevchenko <andriy.shevche...@linux.intel.com>
Signed-off-by: Jacob Pan <jacob.jun....@linux.intel.com>
Reviewed-by: Eric Auger <eric.au...@redhat.com>
---
 drivers/iommu/intel-svm.c | 85 +++++++++++++++++++++++------------------------
 1 file changed, 41 insertions(+), 44 deletions(-)

diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 5aef5b7bf561..34323a15849a 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -212,6 +212,10 @@ static const struct mmu_notifier_ops intel_mmuops = {
 static DEFINE_MUTEX(pasid_mutex);
 static LIST_HEAD(global_svm_list);
 
+#define for_each_svm_dev(svm, dev)                     \
+       list_for_each_entry(sdev, &(svm)->devs, list)   \
+               if ((dev) != sdev->dev) {} else
+
 int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct 
svm_dev_ops *ops)
 {
        struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
@@ -257,15 +261,13 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int 
flags, struct svm_dev_
                                goto out;
                        }
 
-                       list_for_each_entry(sdev, &svm->devs, list) {
-                               if (dev == sdev->dev) {
-                                       if (sdev->ops != ops) {
-                                               ret = -EBUSY;
-                                               goto out;
-                                       }
-                                       sdev->users++;
-                                       goto success;
+                       for_each_svm_dev(svm, dev) {
+                               if (sdev->ops != ops) {
+                                       ret = -EBUSY;
+                                       goto out;
                                }
+                               sdev->users++;
+                               goto success;
                        }
 
                        break;
@@ -402,48 +404,43 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
                goto out;
 
        svm = ioasid_find(NULL, pasid, NULL);
-       if (IS_ERR(svm)) {
-               ret = PTR_ERR(svm);
+       if (IS_ERR_OR_NULL(svm)) {
+               ret = svm ? PTR_ERR(svm) : -EINVAL;
                goto out;
        }
 
-       if (!svm)
-               goto out;
+       for_each_svm_dev(svm, dev) {
+               ret = 0;
+               sdev->users--;
+               if (!sdev->users) {
+                       list_del_rcu(&sdev->list);
+                       /* Flush the PASID cache and IOTLB for this device.
+                        * Note that we do depend on the hardware *not* using
+                        * the PASID any more. Just as we depend on other
+                        * devices never using PASIDs that they have no right
+                        * to use. We have a *shared* PASID table, because it's
+                        * large and has to be physically contiguous. So it's
+                        * hard to be as defensive as we might like. */
+                       intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
+                       intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
+                       kfree_rcu(sdev, rcu);
+
+                       if (list_empty(&svm->devs)) {
+                               ioasid_free(svm->pasid);
+                               if (svm->mm)
+                                       mmu_notifier_unregister(&svm->notifier, 
svm->mm);
 
-       list_for_each_entry(sdev, &svm->devs, list) {
-               if (dev == sdev->dev) {
-                       ret = 0;
-                       sdev->users--;
-                       if (!sdev->users) {
-                               list_del_rcu(&sdev->list);
-                               /* Flush the PASID cache and IOTLB for this 
device.
-                                * Note that we do depend on the hardware *not* 
using
-                                * the PASID any more. Just as we depend on 
other
-                                * devices never using PASIDs that they have no 
right
-                                * to use. We have a *shared* PASID table, 
because it's
-                                * large and has to be physically contiguous. 
So it's
-                                * hard to be as defensive as we might like. */
-                               intel_pasid_tear_down_entry(iommu, dev, 
svm->pasid);
-                               intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
-                               kfree_rcu(sdev, rcu);
-
-                               if (list_empty(&svm->devs)) {
-                                       ioasid_free(svm->pasid);
-                                       if (svm->mm)
-                                               
mmu_notifier_unregister(&svm->notifier, svm->mm);
-
-                                       list_del(&svm->list);
-
-                                       /* We mandate that no page faults may 
be outstanding
-                                        * for the PASID when 
intel_svm_unbind_mm() is called.
-                                        * If that is not obeyed, subtle errors 
will happen.
-                                        * Let's make them less subtle... */
-                                       memset(svm, 0x6b, sizeof(*svm));
-                                       kfree(svm);
-                               }
+                               list_del(&svm->list);
+
+                               /* We mandate that no page faults may be 
outstanding
+                                * for the PASID when intel_svm_unbind_mm() is 
called.
+                                * If that is not obeyed, subtle errors will 
happen.
+                                * Let's make them less subtle... */
+                               memset(svm, 0x6b, sizeof(*svm));
+                               kfree(svm);
                        }
-                       break;
                }
+               break;
        }
  out:
        mutex_unlock(&pasid_mutex);
@@ -579,7 +576,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
                         * to unbind the mm while any page faults are 
outstanding.
                         * So we only need RCU to protect the internal idr 
code. */
                        rcu_read_unlock();
-                       if (IS_ERR(svm) || !svm) {
+                       if (IS_ERR_OR_NULL(svm)) {
                                pr_err("%s: Page request for invalid PASID %d: 
%08llx %08llx\n",
                                       iommu->name, req->pasid, ((unsigned long 
long *)req)[0],
                                       ((unsigned long long *)req)[1]);
-- 
2.7.4

Reply via email to