Since commit 133d624b1cee ("dma: Introduce dma_max_mapping_size()")
provides a helper to query the maximum DMA mapping size, use it
instead of the open-coded swiotlb workaround.

Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda...@renesas.com>
---
 drivers/mmc/host/tmio_mmc_core.c | 17 ++++-------------
 1 file changed, 4 insertions(+), 13 deletions(-)
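
For reference, a minimal sketch of how dma_max_mapping_size() can be
used to clamp a driver-chosen request size (the function and parameter
names below are illustrative only and not part of this patch; the
helper returns SIZE_MAX when no mapping limit is known, so the clamp
is a no-op in that case):

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

/*
 * Illustrative helper (not in this patch): clamp a driver-chosen
 * maximum request size to what the DMA mapping layer (including
 * swiotlb) can actually map for this device.  dma_max_mapping_size()
 * returns SIZE_MAX when there is no limit, so the min_t() below has
 * no effect in that case.
 */
static unsigned int example_max_req_size(struct device *dev,
					 unsigned int blk_size,
					 unsigned int blk_count)
{
	return min_t(unsigned int, blk_size * blk_count,
		     dma_max_mapping_size(dev));
}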

diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 130b91c..85bd6aa6 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -26,6 +26,7 @@
 
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
 #include <linux/highmem.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -1189,19 +1190,9 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
        mmc->max_blk_size = TMIO_MAX_BLK_SIZE;
        mmc->max_blk_count = pdata->max_blk_count ? :
                (PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs;
-       mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
-       /*
-        * Since swiotlb has memory size limitation, this will calculate
-        * the maximum size locally (because we don't have any APIs for it now)
-        * and check the current max_req_size. And then, this will update
-        * the max_req_size if needed as a workaround.
-        */
-       if (swiotlb_max_segment()) {
-               unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
-
-               if (mmc->max_req_size > max_size)
-                       mmc->max_req_size = max_size;
-       }
+       mmc->max_req_size = min_t(unsigned int,
+                                 mmc->max_blk_size * mmc->max_blk_count,
+                                 dma_max_mapping_size(&pdev->dev));
        mmc->max_seg_size = mmc->max_req_size;
 
        if (mmc_can_gpio_ro(mmc))
-- 
2.7.4
