This is used to protect against a potential race condition on use_count,
since probes of client drivers, which call attach_dev(), may run
concurrently.
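
As an illustration only (not part of the patch), below is a minimal
userspace sketch of the pattern being protected: two threads stand in
for concurrent attach_dev() callers, and the mutex must cover the whole
check-and-increment of use_count so that the one-time setup runs exactly
once. All names in the sketch are hypothetical stand-ins, not the
driver's code.

  /* Build: cc -pthread use_count_race.c -o use_count_race */
  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static unsigned int use_count;
  static unsigned int setup_runs; /* times the one-time setup ran */

  /* Hypothetical stand-in for tegra_smmu_as_prepare() */
  static void as_prepare(void)
  {
          pthread_mutex_lock(&lock); /* cover the whole check-and-increment */

          if (use_count > 0) {
                  use_count++;
                  pthread_mutex_unlock(&lock);
                  return;
          }

          setup_runs++; /* models the one-time setup (page directory mapping) */
          use_count++;

          pthread_mutex_unlock(&lock);
  }

  /* Each thread models a client driver probe calling attach_dev() */
  static void *probe(void *arg)
  {
          (void)arg;
          as_prepare();
          return NULL;
  }

  int main(void)
  {
          pthread_t t1, t2;

          pthread_create(&t1, NULL, probe, NULL);
          pthread_create(&t2, NULL, probe, NULL);
          pthread_join(t1, NULL);
          pthread_join(t2, NULL);

          /* With the lock held across the check, setup_runs is always 1 */
          printf("use_count=%u setup_runs=%u\n", use_count, setup_runs);
          return 0;
  }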

Reviewed-by: Dmitry Osipenko <dig...@gmail.com>
Tested-by: Dmitry Osipenko <dig...@gmail.com>
Acked-by: Thierry Reding <tred...@nvidia.com>
Signed-off-by: Nicolin Chen <nicoleots...@gmail.com>
---
 drivers/iommu/tegra-smmu.c | 34 +++++++++++++++++++++-------------
 1 file changed, 21 insertions(+), 13 deletions(-)

diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index ec4c9dafff95..6a3ecc334481 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -256,26 +256,19 @@ static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
 {
        unsigned long id;
 
-       mutex_lock(&smmu->lock);
-
        id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
-       if (id >= smmu->soc->num_asids) {
-               mutex_unlock(&smmu->lock);
+       if (id >= smmu->soc->num_asids)
                return -ENOSPC;
-       }
 
        set_bit(id, smmu->asids);
        *idp = id;
 
-       mutex_unlock(&smmu->lock);
        return 0;
 }
 
 static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
 {
-       mutex_lock(&smmu->lock);
        clear_bit(id, smmu->asids);
-       mutex_unlock(&smmu->lock);
 }
 
 static bool tegra_smmu_capable(enum iommu_cap cap)
@@ -420,17 +413,21 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
                                 struct tegra_smmu_as *as)
 {
        u32 value;
-       int err;
+       int err = 0;
+
+       mutex_lock(&smmu->lock);
 
        if (as->use_count > 0) {
                as->use_count++;
-               return 0;
+               goto unlock;
        }
 
        as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
                                  DMA_TO_DEVICE);
-       if (dma_mapping_error(smmu->dev, as->pd_dma))
-               return -ENOMEM;
+       if (dma_mapping_error(smmu->dev, as->pd_dma)) {
+               err = -ENOMEM;
+               goto unlock;
+       }
 
        /* We can't handle 64-bit DMA addresses */
        if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
@@ -453,24 +450,35 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
        as->smmu = smmu;
        as->use_count++;
 
+       mutex_unlock(&smmu->lock);
+
        return 0;
 
 err_unmap:
        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+unlock:
+       mutex_unlock(&smmu->lock);
+
        return err;
 }
 
 static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
                                    struct tegra_smmu_as *as)
 {
-       if (--as->use_count > 0)
+       mutex_lock(&smmu->lock);
+
+       if (--as->use_count > 0) {
+               mutex_unlock(&smmu->lock);
                return;
+       }
 
        tegra_smmu_free_asid(smmu, as->id);
 
        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
 
        as->smmu = NULL;
+
+       mutex_unlock(&smmu->lock);
 }
 
 static int tegra_smmu_attach_dev(struct iommu_domain *domain,
-- 
2.17.1
