Commit 7b49682b authored by Todd Poynor, committed by Greg Kroah-Hartman

staging: gasket: page table: remove extraneous memory barriers

Some explicit memory barriers in the page table code are not necessary,
either because:

(a) The barrier follows a non-relaxed MMIO access that already performs
a read or write memory barrier.

(b) The barrier follows DMA API calls for which the device-visible
effects of IOMMU programming are guaranteed to be flushed to the IOMMU
prior to the call returning, and which therefore do not need to sync
with normal memory accesses.
Signed-off-by: Todd Poynor <toddpoynor@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 717264ba
...@@ -317,8 +317,6 @@ static void gasket_free_extended_subtable(struct gasket_page_table *pg_tbl, ...@@ -317,8 +317,6 @@ static void gasket_free_extended_subtable(struct gasket_page_table *pg_tbl,
/* Release the page table from the device */ /* Release the page table from the device */
writeq(0, slot); writeq(0, slot);
/* Force sync around the address release. */
mb();
if (pte->dma_addr) if (pte->dma_addr)
dma_unmap_page(pg_tbl->device, pte->dma_addr, PAGE_SIZE, dma_unmap_page(pg_tbl->device, pte->dma_addr, PAGE_SIZE,
...@@ -504,8 +502,6 @@ static int gasket_perform_mapping(struct gasket_page_table *pg_tbl, ...@@ -504,8 +502,6 @@ static int gasket_perform_mapping(struct gasket_page_table *pg_tbl,
(void *)page_to_phys(page)); (void *)page_to_phys(page));
return -1; return -1;
} }
/* Wait until the page is mapped. */
mb();
} }
/* Make the DMA-space address available to the device. */ /* Make the DMA-space address available to the device. */
...@@ -604,12 +600,13 @@ static void gasket_perform_unmapping(struct gasket_page_table *pg_tbl, ...@@ -604,12 +600,13 @@ static void gasket_perform_unmapping(struct gasket_page_table *pg_tbl,
*/ */
for (i = 0; i < num_pages; i++) { for (i = 0; i < num_pages; i++) {
/* release the address from the device, */ /* release the address from the device, */
if (is_simple_mapping || ptes[i].status == PTE_INUSE) if (is_simple_mapping || ptes[i].status == PTE_INUSE) {
writeq(0, &slots[i]); writeq(0, &slots[i]);
else } else {
((u64 __force *)slots)[i] = 0; ((u64 __force *)slots)[i] = 0;
/* Force sync around the address release. */ /* sync above PTE update before updating mappings */
mb(); wmb();
}
/* release the address from the driver, */ /* release the address from the driver, */
if (ptes[i].status == PTE_INUSE) { if (ptes[i].status == PTE_INUSE) {
...@@ -898,8 +895,6 @@ static int gasket_alloc_extended_subtable(struct gasket_page_table *pg_tbl, ...@@ -898,8 +895,6 @@ static int gasket_alloc_extended_subtable(struct gasket_page_table *pg_tbl,
/* Map the page into DMA space. */ /* Map the page into DMA space. */
pte->dma_addr = dma_map_page(pg_tbl->device, pte->page, 0, PAGE_SIZE, pte->dma_addr = dma_map_page(pg_tbl->device, pte->page, 0, PAGE_SIZE,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
/* Wait until the page is mapped. */
mb();
/* make the addresses available to the device */ /* make the addresses available to the device */
dma_addr = (pte->dma_addr + pte->offset) | GASKET_VALID_SLOT_FLAG; dma_addr = (pte->dma_addr + pte->offset) | GASKET_VALID_SLOT_FLAG;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment