Hi Jacob,

On 8/22/20 6:35 AM, Jacob Pan wrote:
> On Intel Scalable I/O Virtualization (SIOV) enabled platforms, the
> IOMMU driver is one of the users of IOASIDs. In the normal flow,
> callers perform IOASID allocation, bind, unbind, and free, in that
> order. However, for guest SVA the IOASID free can come before the
> unbind, as the guest is untrusted. This patch registers an IOASID
> notification handler so that the IOMMU driver can tear down the PASID
> entry upon receiving an unexpected IOASID free event.
> 
> Signed-off-by: Jacob Pan <jacob.jun....@linux.intel.com>
> ---
>  drivers/iommu/intel/svm.c   | 74 ++++++++++++++++++++++++++++++++++++++++++++-
>  include/linux/intel-iommu.h |  2 ++
>  2 files changed, 75 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
> index 634e191ca2c3..600e3ae5b656 100644
> --- a/drivers/iommu/intel/svm.c
> +++ b/drivers/iommu/intel/svm.c
> @@ -95,6 +95,72 @@ static inline bool intel_svm_capable(struct intel_iommu *iommu)
>       return iommu->flags & VTD_FLAG_SVM_CAPABLE;
>  }
>  
> +#define pasid_lock_held() lock_is_held(&pasid_mutex.dep_map)
Nit: put this after the pasid_mutex definition just below?
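i.e. something like this, simply swapping the two lines (keeping your
lockdep helper as is):

        static DEFINE_MUTEX(pasid_mutex);
        #define pasid_lock_held() lock_is_held(&pasid_mutex.dep_map)
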
> +static DEFINE_MUTEX(pasid_mutex);
> +
> +static void intel_svm_free_async_fn(struct work_struct *work)
> +{
> +     struct intel_svm *svm = container_of(work, struct intel_svm, work);
> +     struct intel_svm_dev *sdev;
> +
> +     /*
> +      * Unbind all devices associated with this PASID which is
> +      * being freed by other users such as VFIO.
> +      */
> +     mutex_lock(&pasid_mutex);
> +     list_for_each_entry_rcu(sdev, &svm->devs, list, pasid_lock_held()) {
> +             /* Does not poison forward pointer */
> +             list_del_rcu(&sdev->list);
> +             spin_lock(&svm->iommu->lock);
> +             intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
> +                                     svm->pasid, true);
> +             spin_unlock(&svm->iommu->lock);
> +             kfree_rcu(sdev, rcu);
> +             /*
> +              * Free before unbind only happens with guest usaged
s/usaged/used/?
> +              * host PASIDs. IOASID free will detach private data
> +              * and free the IOASID entry.
> +              */
> +             ioasid_put(NULL, svm->pasid);
> +             if (list_empty(&svm->devs))
> +                     kfree(svm);
> +     }
> +     mutex_unlock(&pasid_mutex);
> +}
> +
> +
> +static int pasid_status_change(struct notifier_block *nb,
> +                             unsigned long code, void *data)
> +{
> +     struct ioasid_nb_args *args = (struct ioasid_nb_args *)data;
> +     struct intel_svm *svm = (struct intel_svm *)args->pdata;
> +     int ret = NOTIFY_DONE;
> +
> +     if (code == IOASID_FREE) {
> +             if (!svm)
> +                     goto done;
> +             if (args->id != svm->pasid) {
> +                     pr_warn("Notify PASID does not match data %d : %d\n",
> +                             args->id, svm->pasid);
> +                     goto done;
> +             }
> +             schedule_work(&svm->work);
> +             return NOTIFY_OK;
> +     }
> +done:
> +     return ret;
> +}
> +
> +static struct notifier_block pasid_nb = {
> +     .notifier_call = pasid_status_change,
> +};
> +
> +void intel_svm_add_pasid_notifier(void)
> +{
> +     /* Listen to all PASIDs, not specific to a set */
> +     ioasid_register_notifier(NULL, &pasid_nb);
> +}
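
I guess the caller of this will be the VT-d init path? Just so we are
on the same page, what I would expect is something along these lines
(intel_iommu_init() as the call site is my guess, it is not part of
this patch):

        int __init intel_iommu_init(void)
        {
                ...
        #ifdef CONFIG_INTEL_IOMMU_SVM
                /* listen for PASID frees that may arrive before unbind */
                intel_svm_add_pasid_notifier();
        #endif
                ...
        }
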
> +
>  void intel_svm_check(struct intel_iommu *iommu)
>  {
>       if (!pasid_supported(iommu))
> @@ -221,7 +287,6 @@ static const struct mmu_notifier_ops intel_mmuops = {
>       .invalidate_range = intel_invalidate_range,
>  };
>  
> -static DEFINE_MUTEX(pasid_mutex);
>  static LIST_HEAD(global_svm_list);
>  
>  #define for_each_svm_dev(sdev, svm, d)                       \
> @@ -342,7 +407,14 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
>                       svm->gpasid = data->gpasid;
>                       svm->flags |= SVM_FLAG_GUEST_PASID;
>               }
> +             svm->iommu = iommu;
> +             /*
> +              * Set up cleanup async work in case IOASID core notify us PASID
> +              * is freed before unbind.
> +              */
> +             INIT_WORK(&svm->work, intel_svm_free_async_fn);
>               ioasid_attach_data(data->hpasid, svm);
> +             ioasid_get(NULL, svm->pasid);
>               INIT_LIST_HEAD_RCU(&svm->devs);
>               mmput(svm->mm);
>       }
> diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
> index b1ed2f25f7c0..d36038e6ae0b 100644
> --- a/include/linux/intel-iommu.h
> +++ b/include/linux/intel-iommu.h
> @@ -744,6 +744,7 @@ void intel_svm_unbind(struct iommu_sva *handle);
>  int intel_svm_get_pasid(struct iommu_sva *handle);
>  int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
>                           struct iommu_page_response *msg);
> +void intel_svm_add_pasid_notifier(void);
>  
>  struct svm_dev_ops;
>  
> @@ -770,6 +771,7 @@ struct intel_svm {
>       int gpasid; /* In case that guest PASID is different from host PASID */
>       struct list_head devs;
>       struct list_head list;
> +     struct work_struct work; /* For deferred clean up */
>  };
>  #else
>  static inline void intel_svm_check(struct intel_iommu *iommu) {}
> 

Thanks

Eric
