Commit b159a68b authored by Olof Johansson's avatar Olof Johansson Committed by Linus Torvalds

[PATCH] ppc64: More IOMMU cleanups

* Tidy up some of the arguments to iommu_*()
* Comment cleanup
* Don't bump the hint to the next block for large allocs, to avoid
  fragmentation.
* Simplify vmerge logic during SG allocations
* Move the memory barriers from the bus-specific parts into the common
  code.

Some changes are mine, some are from Ben Herrenschmidt.
parent 8ccd6848
This diff is collapsed.
...@@ -99,10 +99,7 @@ void *pci_iommu_alloc_consistent(struct pci_dev *hwdev, size_t size, ...@@ -99,10 +99,7 @@ void *pci_iommu_alloc_consistent(struct pci_dev *hwdev, size_t size,
memset(ret, 0, size); memset(ret, 0, size);
/* Set up tces to cover the allocated range */ /* Set up tces to cover the allocated range */
mapping = iommu_alloc(tbl, ret, npages, PCI_DMA_BIDIRECTIONAL, NULL); mapping = iommu_alloc(tbl, ret, npages, PCI_DMA_BIDIRECTIONAL);
/* Make sure the update is visible to hardware. */
mb();
if (mapping == NO_TCE) { if (mapping == NO_TCE) {
free_pages((unsigned long)ret, order); free_pages((unsigned long)ret, order);
...@@ -145,7 +142,6 @@ dma_addr_t pci_iommu_map_single(struct pci_dev *hwdev, void *vaddr, ...@@ -145,7 +142,6 @@ dma_addr_t pci_iommu_map_single(struct pci_dev *hwdev, void *vaddr,
dma_addr_t dma_handle = NO_TCE; dma_addr_t dma_handle = NO_TCE;
unsigned long uaddr; unsigned long uaddr;
unsigned int npages; unsigned int npages;
unsigned long handle = 0;
BUG_ON(direction == PCI_DMA_NONE); BUG_ON(direction == PCI_DMA_NONE);
...@@ -156,7 +152,7 @@ dma_addr_t pci_iommu_map_single(struct pci_dev *hwdev, void *vaddr, ...@@ -156,7 +152,7 @@ dma_addr_t pci_iommu_map_single(struct pci_dev *hwdev, void *vaddr,
tbl = devnode_table(hwdev); tbl = devnode_table(hwdev);
if (tbl) { if (tbl) {
dma_handle = iommu_alloc(tbl, vaddr, npages, direction, &handle); dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
if (dma_handle == NO_TCE) { if (dma_handle == NO_TCE) {
if (printk_ratelimit()) { if (printk_ratelimit()) {
printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %p npages %d\n", printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %p npages %d\n",
...@@ -166,8 +162,6 @@ dma_addr_t pci_iommu_map_single(struct pci_dev *hwdev, void *vaddr, ...@@ -166,8 +162,6 @@ dma_addr_t pci_iommu_map_single(struct pci_dev *hwdev, void *vaddr,
dma_handle |= (uaddr & ~PAGE_MASK); dma_handle |= (uaddr & ~PAGE_MASK);
} }
mb();
return dma_handle; return dma_handle;
} }
...@@ -194,7 +188,6 @@ int pci_iommu_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelem ...@@ -194,7 +188,6 @@ int pci_iommu_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelem
int direction) int direction)
{ {
struct iommu_table * tbl; struct iommu_table * tbl;
unsigned long handle;
BUG_ON(direction == PCI_DMA_NONE); BUG_ON(direction == PCI_DMA_NONE);
...@@ -205,9 +198,7 @@ int pci_iommu_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelem ...@@ -205,9 +198,7 @@ int pci_iommu_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelem
if (!tbl) if (!tbl)
return 0; return 0;
handle = 0; return iommu_alloc_sg(tbl, &pdev->dev, sglist, nelems, direction);
return iommu_alloc_sg(tbl, sglist, nelems, direction, &handle);
} }
void pci_iommu_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, void pci_iommu_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
...@@ -221,7 +212,7 @@ void pci_iommu_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int ne ...@@ -221,7 +212,7 @@ void pci_iommu_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int ne
if (!tbl) if (!tbl)
return; return;
iommu_free_sg(tbl, sglist, nelems, direction); iommu_free_sg(tbl, sglist, nelems);
} }
/* We support DMA to/from any memory page via the iommu */ /* We support DMA to/from any memory page via the iommu */
......
...@@ -432,7 +432,7 @@ dma_addr_t vio_map_single(struct vio_dev *dev, void *vaddr, ...@@ -432,7 +432,7 @@ dma_addr_t vio_map_single(struct vio_dev *dev, void *vaddr,
tbl = dev->iommu_table; tbl = dev->iommu_table;
if (tbl) { if (tbl) {
dma_handle = iommu_alloc(tbl, vaddr, npages, direction, NULL); dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
dma_handle |= (uaddr & ~PAGE_MASK); dma_handle |= (uaddr & ~PAGE_MASK);
} }
...@@ -461,7 +461,6 @@ int vio_map_sg(struct vio_dev *vdev, struct scatterlist *sglist, int nelems, ...@@ -461,7 +461,6 @@ int vio_map_sg(struct vio_dev *vdev, struct scatterlist *sglist, int nelems,
int direction) int direction)
{ {
struct iommu_table *tbl; struct iommu_table *tbl;
unsigned long handle;
BUG_ON(direction == PCI_DMA_NONE); BUG_ON(direction == PCI_DMA_NONE);
...@@ -472,7 +471,7 @@ int vio_map_sg(struct vio_dev *vdev, struct scatterlist *sglist, int nelems, ...@@ -472,7 +471,7 @@ int vio_map_sg(struct vio_dev *vdev, struct scatterlist *sglist, int nelems,
if (!tbl) if (!tbl)
return 0; return 0;
return iommu_alloc_sg(tbl, sglist, nelems, direction, &handle); return iommu_alloc_sg(tbl, &vdev->dev, sglist, nelems, direction);
} }
EXPORT_SYMBOL(vio_map_sg); EXPORT_SYMBOL(vio_map_sg);
...@@ -485,7 +484,7 @@ void vio_unmap_sg(struct vio_dev *vdev, struct scatterlist *sglist, int nelems, ...@@ -485,7 +484,7 @@ void vio_unmap_sg(struct vio_dev *vdev, struct scatterlist *sglist, int nelems,
tbl = vdev->iommu_table; tbl = vdev->iommu_table;
if (tbl) if (tbl)
iommu_free_sg(tbl, sglist, nelems, direction); iommu_free_sg(tbl, sglist, nelems);
} }
EXPORT_SYMBOL(vio_unmap_sg); EXPORT_SYMBOL(vio_unmap_sg);
...@@ -517,7 +516,7 @@ void *vio_alloc_consistent(struct vio_dev *dev, size_t size, ...@@ -517,7 +516,7 @@ void *vio_alloc_consistent(struct vio_dev *dev, size_t size,
/* Page allocation succeeded */ /* Page allocation succeeded */
memset(ret, 0, npages << PAGE_SHIFT); memset(ret, 0, npages << PAGE_SHIFT);
/* Set up tces to cover the allocated range */ /* Set up tces to cover the allocated range */
tce = iommu_alloc(tbl, ret, npages, PCI_DMA_BIDIRECTIONAL, NULL); tce = iommu_alloc(tbl, ret, npages, PCI_DMA_BIDIRECTIONAL);
if (tce == NO_TCE) { if (tce == NO_TCE) {
PPCDBG(PPCDBG_TCE, "vio_alloc_consistent: iommu_alloc failed\n" ); PPCDBG(PPCDBG_TCE, "vio_alloc_consistent: iommu_alloc failed\n" );
free_pages((unsigned long)ret, order); free_pages((unsigned long)ret, order);
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <asm/types.h> #include <asm/types.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/device.h>
/* /*
* IOMAP_MAX_ORDER defines the largest contiguous block * IOMAP_MAX_ORDER defines the largest contiguous block
...@@ -78,6 +79,7 @@ struct iommu_table { ...@@ -78,6 +79,7 @@ struct iommu_table {
unsigned long it_blocksize; /* Entries in each block (cacheline) */ unsigned long it_blocksize; /* Entries in each block (cacheline) */
unsigned long it_hint; /* Hint for next alloc */ unsigned long it_hint; /* Hint for next alloc */
unsigned long it_largehint; /* Hint for large allocs */ unsigned long it_largehint; /* Hint for large allocs */
unsigned long it_halfpoint; /* Breaking point for small/large allocs */
spinlock_t it_lock; /* Protects it_map */ spinlock_t it_lock; /* Protects it_map */
unsigned long it_mapsize; /* Size of map in # of entries (bits) */ unsigned long it_mapsize; /* Size of map in # of entries (bits) */
unsigned long *it_map; /* A simple allocation bitmap for now */ unsigned long *it_map; /* A simple allocation bitmap for now */
...@@ -132,16 +134,16 @@ extern struct iommu_table *iommu_init_table(struct iommu_table * tbl); ...@@ -132,16 +134,16 @@ extern struct iommu_table *iommu_init_table(struct iommu_table * tbl);
/* allocates a range of tces and sets them to the pages */ /* allocates a range of tces and sets them to the pages */
extern dma_addr_t iommu_alloc(struct iommu_table *, void *page, extern dma_addr_t iommu_alloc(struct iommu_table *, void *page,
unsigned int numPages, int direction, unsigned int numPages, int direction);
unsigned long *handle);
extern void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, extern void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned int npages); unsigned int npages);
/* same with sg lists */ /* same with sg lists */
extern int iommu_alloc_sg(struct iommu_table *table, struct scatterlist *sglist, extern int iommu_alloc_sg(struct iommu_table *table, struct device *dev,
int nelems, int direction, unsigned long *handle); struct scatterlist *sglist, int nelems,
int direction);
extern void iommu_free_sg(struct iommu_table *tbl, struct scatterlist *sglist, extern void iommu_free_sg(struct iommu_table *tbl, struct scatterlist *sglist,
int nelems, int direction); int nelems);
extern void tce_init_pSeries(void); extern void tce_init_pSeries(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment