Change pnv_pci_ioda_iommu_bypass_supported() to have no side effects, by
separating the part of the function that determines if bypass is
supported from the part that actually attempts to configure it.

Move the latter to a controller-specific dma_set_mask() callback.

Signed-off-by: Reza Arbab <ar...@linux.ibm.com>
---
 arch/powerpc/platforms/powernv/Kconfig    |  1 +
 arch/powerpc/platforms/powernv/pci-ioda.c | 30 ++++++++++++++++--------------
 2 files changed, 17 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 938803eab0ad..6e6e27841764 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -17,6 +17,7 @@ config PPC_POWERNV
        select PPC_DOORBELL
        select MMU_NOTIFIER
        select FORCE_SMP
+       select ARCH_HAS_DMA_SET_MASK
        default y
 
 config OPAL_PRD
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 57e6a43d9a3a..5291464930ed 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1854,32 +1854,33 @@ static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev,
                u64 dma_mask)
 {
        struct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev);
-       bool bypass;
 
        if (WARN_ON(!pe))
                return false;
 
-       bypass = pnv_ioda_pe_iommu_bypass_supported(pe, dma_mask);
+       return pnv_ioda_pe_iommu_bypass_supported(pe, dma_mask) ||
+              pnv_phb3_iommu_bypass_supported(pe, dma_mask);
+}
+
+static void pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 mask)
+{
+       struct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev);
+
+       if (!pe)
+               return;
 
-       if (!bypass && pnv_phb3_iommu_bypass_supported(pe, dma_mask)) {
+       if (!pnv_ioda_pe_iommu_bypass_supported(pe, mask) &&
+           pnv_phb3_iommu_bypass_supported(pe, mask)) {
                /* Configure the bypass mode */
                if (pnv_pci_ioda_dma_64bit_bypass(pe))
-                       return false;
+                       return;
 
                /* 4GB offset bypasses 32-bit space */
                pdev->dev.archdata.dma_offset = (1ULL << 32);
-
-               bypass = true;
        }
 
-       /*
-        * Update peer npu devices. We also do this for the special case where
-        * a 64-bit dma mask can't be fulfilled and falls back to default.
-        */
-       if (bypass || !(dma_mask >> 32) || dma_mask == DMA_BIT_MASK(64))
-               pnv_npu_try_dma_set_bypass(pdev, dma_mask);
-
-       return bypass;
+       /* Update peer npu devices */
+       pnv_npu_try_dma_set_bypass(pdev, mask);
 }
 
 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
@@ -3612,6 +3613,7 @@ static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
 static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
        .dma_dev_setup          = pnv_pci_dma_dev_setup,
        .dma_bus_setup          = pnv_pci_dma_bus_setup,
+       .dma_set_mask           = pnv_pci_ioda_dma_set_mask,
        .iommu_bypass_supported = pnv_pci_ioda_iommu_bypass_supported,
        .setup_msi_irqs         = pnv_setup_msi_irqs,
        .teardown_msi_irqs      = pnv_teardown_msi_irqs,
-- 
1.8.3.1

Reply via email to