Commit 007658d4 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] ppc64: create dma_mapping_error

From: Anton Blanchard <anton@samba.org>

From: Stephen Rothwell <sfr@canb.auug.org.au>

This creates DMA_ERROR_CODE and uses it everywhere instead of
PCI_DMA_ERROR_CODE, since we really want the three DMA mapping APIs to
return a single error code.  We also now have dma_mapping_error() and
vio_dma_mapping_error(); the latter and pci_dma_mapping_error() both just
call the former.
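
For context, the pattern this buys drivers is a single predicate for any
mapping, regardless of which of the three APIs produced it.  A minimal
sketch (hypothetical driver fragment, not part of this patch; dev, buf
and len are assumed to exist):

    dma_addr_t handle;

    /* All three mapping families now yield DMA_ERROR_CODE on failure,
     * so one test covers them all. */
    handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(handle))
            return -ENOMEM;         /* no TCE entries were allocated */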

There is also a small fix in the vscsi driver, since dma_map_sg() returns 0
to indicate an error.
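
That fix matters because dma_map_sg() does not hand back an error cookie
at all; it reports failure through its segment count.  A sketch of the
correct check (hypothetical caller; dev, sglist and nelems are assumed):

    int nseg;

    /* dma_map_sg() returns the number of mapped segments, or 0 on
     * failure -- comparing against DMA_ERROR_CODE here would be wrong. */
    nseg = dma_map_sg(dev, sglist, nelems, DMA_FROM_DEVICE);
    if (nseg == 0)
            return -EIO;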
Parent: a460c410
@@ -32,6 +32,7 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/pci.h>
+#include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <asm/io.h>
 #include <asm/prom.h>
@@ -73,7 +74,7 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl, unsigned long np
 	if (unlikely(npages) == 0) {
 		if (printk_ratelimit())
 			WARN_ON(1);
-		return PCI_DMA_ERROR_CODE;
+		return DMA_ERROR_CODE;
 	}

 	if (handle && *handle)
@@ -110,7 +111,7 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl, unsigned long np
 			goto again;
 		} else {
 			/* Third failure, give up */
-			return PCI_DMA_ERROR_CODE;
+			return DMA_ERROR_CODE;
 		}
 	}
@@ -144,15 +145,15 @@ dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
 		       unsigned int npages, int direction)
 {
 	unsigned long entry, flags;
-	dma_addr_t ret = PCI_DMA_ERROR_CODE;
+	dma_addr_t ret = DMA_ERROR_CODE;

 	spin_lock_irqsave(&(tbl->it_lock), flags);

 	entry = iommu_range_alloc(tbl, npages, NULL);

-	if (unlikely(entry == PCI_DMA_ERROR_CODE)) {
+	if (unlikely(entry == DMA_ERROR_CODE)) {
 		spin_unlock_irqrestore(&(tbl->it_lock), flags);
-		return PCI_DMA_ERROR_CODE;
+		return DMA_ERROR_CODE;
 	}

 	entry += tbl->it_offset;	/* Offset into real TCE table */
@@ -263,7 +264,7 @@ int iommu_alloc_sg(struct iommu_table *tbl, struct device *dev,
 		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);

 		/* Handle failure */
-		if (unlikely(entry == PCI_DMA_ERROR_CODE)) {
+		if (unlikely(entry == DMA_ERROR_CODE)) {
 			if (printk_ratelimit())
 				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
 				       " npages %lx\n", tbl, vaddr, npages);
@@ -327,7 +328,7 @@ int iommu_alloc_sg(struct iommu_table *tbl, struct device *dev,
 	 */
 	if (outcount < nelems) {
 		outs++;
-		outs->dma_address = PCI_DMA_ERROR_CODE;
+		outs->dma_address = DMA_ERROR_CODE;
 		outs->dma_length = 0;
 	}

 	return outcount;
...
@@ -31,6 +31,7 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/pci.h>
+#include <linux/dma-mapping.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/iommu.h>
@@ -82,7 +83,7 @@ void *pci_iommu_alloc_consistent(struct pci_dev *hwdev, size_t size,
 	if (order >= IOMAP_MAX_ORDER) {
 		printk("PCI_DMA: pci_alloc_consistent size too large: 0x%lx\n",
 		       size);
-		return (void *)PCI_DMA_ERROR_CODE;
+		return (void *)DMA_ERROR_CODE;
 	}

 	tbl = devnode_table(hwdev);
@@ -101,7 +102,7 @@ void *pci_iommu_alloc_consistent(struct pci_dev *hwdev, size_t size,
 		/* Set up tces to cover the allocated range */
 		mapping = iommu_alloc(tbl, ret, npages, PCI_DMA_BIDIRECTIONAL);
-		if (mapping == PCI_DMA_ERROR_CODE) {
+		if (mapping == DMA_ERROR_CODE) {
 			free_pages((unsigned long)ret, order);
 			ret = NULL;
 		} else
@@ -139,7 +140,7 @@ dma_addr_t pci_iommu_map_single(struct pci_dev *hwdev, void *vaddr,
 			       size_t size, int direction)
 {
 	struct iommu_table * tbl;
-	dma_addr_t dma_handle = PCI_DMA_ERROR_CODE;
+	dma_addr_t dma_handle = DMA_ERROR_CODE;
 	unsigned long uaddr;
 	unsigned int npages;
@@ -153,7 +154,7 @@ dma_addr_t pci_iommu_map_single(struct pci_dev *hwdev, void *vaddr,
 	if (tbl) {
 		dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
-		if (dma_handle == PCI_DMA_ERROR_CODE) {
+		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit()) {
 				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %p npages %d\n",
 				       tbl, vaddr, npages);
...
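
On the PCI side nothing changes for callers; the sentinel they compare
against is simply shared now.  A sketch of the unchanged driver-visible
idiom (hypothetical caller; pdev, buf, len and the out_free label are
assumed):

    dma_addr_t bus;

    bus = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
    if (pci_dma_mapping_error(bus))     /* now just dma_mapping_error(bus) */
            goto out_free;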
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/kobject.h>
 #include <linux/mm.h>
+#include <linux/dma-mapping.h>
 #include <asm/rtas.h>
 #include <asm/iommu.h>
 #include <asm/dma.h>
@@ -419,7 +420,7 @@ dma_addr_t vio_map_single(struct vio_dev *dev, void *vaddr,
 			  size_t size, int direction )
 {
 	struct iommu_table *tbl;
-	dma_addr_t dma_handle = PCI_DMA_ERROR_CODE;
+	dma_addr_t dma_handle = DMA_ERROR_CODE;
 	unsigned long uaddr;
 	unsigned int npages;
@@ -504,7 +505,7 @@ void *vio_alloc_consistent(struct vio_dev *dev, size_t size,
 	/* It is easier to debug here for the drivers than in the tce tables.*/
 	if(order >= IOMAP_MAX_ORDER) {
 		printk("VIO_DMA: vio_alloc_consistent size to large: 0x%lx \n", size);
-		return (void *)PCI_DMA_ERROR_CODE;
+		return (void *)DMA_ERROR_CODE;
 	}

 	tbl = dev->iommu_table;
@@ -517,7 +518,7 @@ void *vio_alloc_consistent(struct vio_dev *dev, size_t size,
 		memset(ret, 0, npages << PAGE_SHIFT);

 		/* Set up tces to cover the allocated range */
 		tce = iommu_alloc(tbl, ret, npages, PCI_DMA_BIDIRECTIONAL);
-		if (tce == PCI_DMA_ERROR_CODE) {
+		if (tce == DMA_ERROR_CODE) {
 			PPCDBG(PPCDBG_TCE, "vio_alloc_consistent: iommu_alloc failed\n" );
 			free_pages((unsigned long)ret, order);
 			ret = NULL;
...
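
The VIO path gains a matching predicate, so virtual-bus drivers can stop
comparing against a private constant.  A sketch (hypothetical caller;
vdev, buf and len are assumed):

    dma_addr_t handle;

    handle = vio_map_single(vdev, buf, len, PCI_DMA_BIDIRECTIONAL);
    if (vio_dma_mapping_error(handle))  /* replaces the old NO_TCE test */
            return -ENOMEM;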
@@ -40,6 +40,7 @@
 #include <linux/errno.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
+#include <linux/dma-mapping.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -60,9 +61,6 @@
 #include "ibmveth.h"

-#warning remove NO_TCE usage from ibmveth.c
-#define NO_TCE PCI_DMA_ERROR_CODE
-
 #define DEBUG 1

 #define ibmveth_printk(fmt, args...) \
@@ -407,27 +405,27 @@ static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 {
 	if(adapter->buffer_list_addr != NULL) {
-		if(adapter->buffer_list_dma != NO_TCE) {
+		if(!vio_dma_mapping_error(adapter->buffer_list_dma)) {
 			vio_unmap_single(adapter->vdev, adapter->buffer_list_dma, 4096, PCI_DMA_BIDIRECTIONAL);
-			adapter->buffer_list_dma = NO_TCE;
+			adapter->buffer_list_dma = DMA_ERROR_CODE;
 		}
 		free_page((unsigned long)adapter->buffer_list_addr);
 		adapter->buffer_list_addr = NULL;
 	}

 	if(adapter->filter_list_addr != NULL) {
-		if(adapter->filter_list_dma != NO_TCE) {
+		if(!vio_dma_mapping_error(adapter->filter_list_dma)) {
 			vio_unmap_single(adapter->vdev, adapter->filter_list_dma, 4096, PCI_DMA_BIDIRECTIONAL);
-			adapter->filter_list_dma = NO_TCE;
+			adapter->filter_list_dma = DMA_ERROR_CODE;
 		}
 		free_page((unsigned long)adapter->filter_list_addr);
 		adapter->filter_list_addr = NULL;
 	}

 	if(adapter->rx_queue.queue_addr != NULL) {
-		if(adapter->rx_queue.queue_dma != NO_TCE) {
+		if(!vio_dma_mapping_error(adapter->rx_queue.queue_dma)) {
 			vio_unmap_single(adapter->vdev, adapter->rx_queue.queue_dma, adapter->rx_queue.queue_len, PCI_DMA_BIDIRECTIONAL);
-			adapter->rx_queue.queue_dma = NO_TCE;
+			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
 		}
 		kfree(adapter->rx_queue.queue_addr);
 		adapter->rx_queue.queue_addr = NULL;
@@ -476,9 +474,9 @@ static int ibmveth_open(struct net_device *netdev)
 	adapter->filter_list_dma = vio_map_single(adapter->vdev, adapter->filter_list_addr, 4096, PCI_DMA_BIDIRECTIONAL);
 	adapter->rx_queue.queue_dma = vio_map_single(adapter->vdev, adapter->rx_queue.queue_addr, adapter->rx_queue.queue_len, PCI_DMA_BIDIRECTIONAL);

-	if((adapter->buffer_list_dma == NO_TCE) ||
-	   (adapter->filter_list_dma == NO_TCE) ||
-	   (adapter->rx_queue.queue_dma == NO_TCE)) {
+	if((vio_dma_mapping_error(adapter->buffer_list_dma)) ||
+	   (vio_dma_mapping_error(adapter->filter_list_dma)) ||
+	   (vio_dma_mapping_error(adapter->rx_queue.queue_dma))) {
 		ibmveth_error_printk("unable to map filter or buffer list pages\n");
 		ibmveth_cleanup(adapter);
 		return -ENOMEM;
@@ -647,7 +645,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	desc[0].fields.address = vio_map_single(adapter->vdev, skb->data, desc[0].fields.length, PCI_DMA_TODEVICE);
 	desc[0].fields.valid = 1;

-	if(desc[0].fields.address == NO_TCE) {
+	if(vio_dma_mapping_error(desc[0].fields.address)) {
 		ibmveth_error_printk("tx: unable to map initial fragment\n");
 		adapter->tx_map_failed++;
 		adapter->stats.tx_dropped++;
@@ -666,7 +664,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		desc[curfrag+1].fields.length = frag->size;
 		desc[curfrag+1].fields.valid = 1;

-		if(desc[curfrag+1].fields.address == NO_TCE) {
+		if(vio_dma_mapping_error(desc[curfrag+1].fields.address)) {
 			ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
 			adapter->tx_map_failed++;
 			adapter->stats.tx_dropped++;
@@ -947,9 +945,9 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 	INIT_WORK(&adapter->replenish_task, (void*)ibmveth_replenish_task, (void*)adapter);

-	adapter->buffer_list_dma = NO_TCE;
-	adapter->filter_list_dma = NO_TCE;
-	adapter->rx_queue.queue_dma = NO_TCE;
+	adapter->buffer_list_dma = DMA_ERROR_CODE;
+	adapter->filter_list_dma = DMA_ERROR_CODE;
+	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

 	atomic_set(&adapter->not_replenishing, 1);
...
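
The whole driver-side conversion reduces to a mechanical swap of a
magic-constant comparison for the new predicate; schematically (both
lines taken from the hunks above):

    if (adapter->buffer_list_dma != NO_TCE)                   /* before */
    if (!vio_dma_mapping_error(adapter->buffer_list_dma))     /* after  */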
@@ -15,6 +15,8 @@
 #include <asm/scatterlist.h>
 #include <asm/bug.h>

+#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
+
 extern int dma_supported(struct device *dev, u64 mask);
 extern int dma_set_mask(struct device *dev, u64 dma_mask);
 extern void *dma_alloc_coherent(struct device *dev, size_t size,
@@ -72,4 +74,9 @@ dma_cache_sync(void *vaddr, size_t size,
 	BUG();
 }

+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+	return (dma_addr == DMA_ERROR_CODE);
+}
+
 #endif	/* _ASM_DMA_MAPPING_H */
@@ -12,6 +12,7 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/dma-mapping.h>
 #include <asm/scatterlist.h>
 #include <asm/io.h>
 #include <asm/prom.h>
@@ -169,10 +170,9 @@ static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask)
 	return 0;
 }

-#define PCI_DMA_ERROR_CODE	(~(dma_addr_t)0x0)
-
 static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
 {
-	return (dma_addr == PCI_DMA_ERROR_CODE);
+	return dma_mapping_error(dma_addr);
 }

 extern int pci_domain_nr(struct pci_bus *bus);
...
@@ -11,13 +11,14 @@
  * 2 of the License, or (at your option) any later version.
  */

-#ifndef _VIO_H
-#define _VIO_H
+#ifndef _ASM_VIO_H
+#define _ASM_VIO_H

 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/device.h>
 #include <linux/pci.h>
+#include <linux/dma-mapping.h>
 #include <asm/hvcall.h>
 #include <asm/prom.h>
 #include <asm/scatterlist.h>
@@ -137,4 +138,9 @@ static inline struct vio_dev *to_vio_dev(struct device *dev)
 	return container_of(dev, struct vio_dev, dev);
 }

-#endif /* _PHYP_H */
+static inline int vio_dma_mapping_error(dma_addr_t dma_addr)
+{
+	return dma_mapping_error(dma_addr);
+}
+
+#endif /* _ASM_VIO_H */