From: Todd Poynor <toddpoy...@google.com>

Some explicit memory barriers in the page table code are not necessary,
either because:

(a) The barrier follows a non-relaxed MMIO access that already performs
a read or write memory barrier.

(b) The barrier follows DMA API calls whose device-visible effects (the
IOMMU programming) are guaranteed to be flushed to the IOMMU before the
call returns, so no additional synchronization with normal memory
accesses is needed.

Signed-off-by: Todd Poynor <toddpoy...@google.com>
---
 drivers/staging/gasket/gasket_page_table.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/drivers/staging/gasket/gasket_page_table.c 
b/drivers/staging/gasket/gasket_page_table.c
index 4d2499269499b..53492f4fad6aa 100644
--- a/drivers/staging/gasket/gasket_page_table.c
+++ b/drivers/staging/gasket/gasket_page_table.c
@@ -317,8 +317,6 @@ static void gasket_free_extended_subtable(struct 
gasket_page_table *pg_tbl,
 
        /* Release the page table from the device */
        writeq(0, slot);
-       /* Force sync around the address release. */
-       mb();
 
        if (pte->dma_addr)
                dma_unmap_page(pg_tbl->device, pte->dma_addr, PAGE_SIZE,
@@ -504,8 +502,6 @@ static int gasket_perform_mapping(struct gasket_page_table 
*pg_tbl,
                                        (void *)page_to_phys(page));
                                return -1;
                        }
-                       /* Wait until the page is mapped. */
-                       mb();
                }
 
                /* Make the DMA-space address available to the device. */
@@ -604,12 +600,13 @@ static void gasket_perform_unmapping(struct 
gasket_page_table *pg_tbl,
         */
        for (i = 0; i < num_pages; i++) {
                /* release the address from the device, */
-               if (is_simple_mapping || ptes[i].status == PTE_INUSE)
+               if (is_simple_mapping || ptes[i].status == PTE_INUSE) {
                        writeq(0, &slots[i]);
-               else
+               } else {
                        ((u64 __force *)slots)[i] = 0;
-               /* Force sync around the address release. */
-               mb();
+                       /* sync above PTE update before updating mappings */
+                       wmb();
+               }
 
                /* release the address from the driver, */
                if (ptes[i].status == PTE_INUSE) {
@@ -898,8 +895,6 @@ static int gasket_alloc_extended_subtable(struct 
gasket_page_table *pg_tbl,
        /* Map the page into DMA space. */
        pte->dma_addr = dma_map_page(pg_tbl->device, pte->page, 0, PAGE_SIZE,
                                     DMA_BIDIRECTIONAL);
-       /* Wait until the page is mapped. */
-       mb();
 
        /* make the addresses available to the device */
        dma_addr = (pte->dma_addr + pte->offset) | GASKET_VALID_SLOT_FLAG;
-- 
2.18.0.597.ga71716f1ad-goog

_______________________________________________
devel mailing list
de...@linuxdriverproject.org
http://driverdev.linuxdriverproject.org/mailman/listinfo/driverdev-devel

Reply via email to