Commit 266439c9 authored by Tushar Dave, committed by David S. Miller

sunqe: Fix compiler warnings

sunqe uses '__u32' for the DMA handle when invoking the kernel DMA APIs,
instead of dma_addr_t. This has not caused any 'incompatible pointer
type' warning on SPARC because, until now, dma_addr_t was of type u32.
However, recent changes in the SPARC ATU (iommu) enable 64-bit DMA, and
therefore dma_addr_t becomes u64. This makes 'incompatible pointer type'
warnings inevitable.

e.g.
drivers/net/ethernet/sun/sunqe.c: In function ‘qec_ether_init’:
drivers/net/ethernet/sun/sunqe.c:883: warning: passing argument 3 of ‘dma_alloc_coherent’ from incompatible pointer type
./include/linux/dma-mapping.h:445: note: expected ‘dma_addr_t *’ but argument is of type ‘__u32 *’
drivers/net/ethernet/sun/sunqe.c:885: warning: passing argument 3 of ‘dma_alloc_coherent’ from incompatible pointer type
./include/linux/dma-mapping.h:445: note: expected ‘dma_addr_t *’ but argument is of type ‘__u32 *’

This patch resolves the above compiler warnings.
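
For reference, the calling pattern that dma_addr_t makes correct can be
sketched as follows (illustrative only, not code from this patch; the helper
name, device pointer, and allocation size are placeholders):

	#include <linux/dma-mapping.h>
	#include <linux/platform_device.h>

	/* Sketch: dma_alloc_coherent() returns the bus address through a
	 * dma_addr_t *, which may be 64 bits wide once the SPARC ATU is in
	 * use.  Passing a __u32 * instead is what produced the
	 * incompatible-pointer-type warnings quoted above.
	 */
	static void *alloc_desc_block(struct platform_device *op,
				      dma_addr_t *dvma_out)
	{
		return dma_alloc_coherent(&op->dev, PAGE_SIZE,
					  dvma_out, GFP_KERNEL);
	}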
Signed-off-by: Tushar Dave <tushar.n.dave@oracle.com>
Reviewed-by: chris hyser <chris.hyser@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 49cc0c43
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -124,7 +124,7 @@ static void qe_init_rings(struct sunqe *qep)
 {
 	struct qe_init_block *qb = qep->qe_block;
 	struct sunqe_buffers *qbufs = qep->buffers;
-	__u32 qbufs_dvma = qep->buffers_dvma;
+	__u32 qbufs_dvma = (__u32)qep->buffers_dvma;
 	int i;
 
 	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
@@ -144,6 +144,7 @@ static int qe_init(struct sunqe *qep, int from_irq)
 	void __iomem *mregs = qep->mregs;
 	void __iomem *gregs = qecp->gregs;
 	unsigned char *e = &qep->dev->dev_addr[0];
+	__u32 qblk_dvma = (__u32)qep->qblock_dvma;
 	u32 tmp;
 	int i;
 
@@ -152,8 +153,8 @@ static int qe_init(struct sunqe *qep, int from_irq)
 		return -EAGAIN;
 
 	/* Setup initial rx/tx init block pointers. */
-	sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
-	sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
+	sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
+	sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
 
 	/* Enable/mask the various irq's. */
 	sbus_writel(0, cregs + CREG_RIMASK);
@@ -413,7 +414,7 @@ static void qe_rx(struct sunqe *qep)
 	struct net_device *dev = qep->dev;
 	struct qe_rxd *this;
 	struct sunqe_buffers *qbufs = qep->buffers;
-	__u32 qbufs_dvma = qep->buffers_dvma;
+	__u32 qbufs_dvma = (__u32)qep->buffers_dvma;
 	int elem = qep->rx_new;
 	u32 flags;
 
@@ -572,7 +573,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct sunqe *qep = netdev_priv(dev);
 	struct sunqe_buffers *qbufs = qep->buffers;
-	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
+	__u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma;
 	unsigned char *txbuf;
 	int len, entry;
 
--- a/drivers/net/ethernet/sun/sunqe.h
+++ b/drivers/net/ethernet/sun/sunqe.h
@@ -334,12 +334,12 @@ struct sunqe {
 	void __iomem		*qcregs;	/* QEC per-channel Registers   */
 	void __iomem		*mregs;		/* Per-channel MACE Registers  */
 	struct qe_init_block	*qe_block;	/* RX and TX descriptors       */
-	__u32			qblock_dvma;	/* RX and TX descriptors       */
+	dma_addr_t		qblock_dvma;	/* RX and TX descriptors       */
 	spinlock_t		lock;		/* Protects txfull state       */
 	int			rx_new, rx_old;	/* RX ring extents             */
 	int			tx_new, tx_old;	/* TX ring extents             */
 	struct sunqe_buffers	*buffers;	/* CPU visible address.        */
-	__u32			buffers_dvma;	/* DVMA visible address.       */
+	dma_addr_t		buffers_dvma;	/* DVMA visible address.       */
 	struct sunqec		*parent;
 	u8			mconfig;	/* Base MACE mconfig value     */
 	struct platform_device	*op;		/* QE's OF device struct       */