When a device has a DMA offset, the dma_capable() result will change due
to the difference between the physical and DMA addresses.  Take that into
account.

Signed-off-by: Christoph Hellwig <h...@lst.de>
---
 kernel/dma/direct.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 8be8106270c2..d32d4f0d2c0c 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -167,7 +167,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 int dma_direct_supported(struct device *dev, u64 mask)
 {
 #ifdef CONFIG_ZONE_DMA
-       if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+       if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)))
                return 0;
 #else
        /*
@@ -176,14 +176,14 @@ int dma_direct_supported(struct device *dev, u64 mask)
         * memory, or by providing a ZONE_DMA32.  If neither is the case, the
         * architecture needs to use an IOMMU instead of the direct mapping.
         */
-       if (mask < DMA_BIT_MASK(32))
+       if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
                return 0;
 #endif
        /*
         * Various PCI/PCIe bridges have broken support for > 32bit DMA even
         * if the device itself might support it.
         */
-       if (dev->dma_32bit_limit && mask > DMA_BIT_MASK(32))
+       if (dev->dma_32bit_limit && mask > phys_to_dma(dev, DMA_BIT_MASK(32)))
                return 0;
        return 1;
 }
-- 
2.18.0

Reply via email to