Commit 945350d1 authored by Benjamin Herrenschmidt, committed by Linus Torvalds

[PATCH] Be careful about memory ordering in sungem driver

Some barriers between setting up the DMA regions and writing the
descriptor addresses would be most useful.

I had some in my 2.4 version but they got lost somewhere along the way,
probably from me not merging properly with davem at some point.  The 970 is
definitely more aggressive at re-ordering stores than previous CPUs...

Here is a patch adding some (probably too many, but better safe than
sorry).
parent 18d1ca4f
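
The ordering rule the patch enforces is: the descriptor's buffer address must be visible in memory before the control word that hands the descriptor to the chip, and all descriptor stores must be visible before the MMIO write that kicks the DMA engine. Below is a minimal sketch of that pattern, assuming the driver's struct gem / struct gem_txd layout and the kernel's wmb()/mb()/writel() primitives; queue_one_txd() is an illustrative helper, not code from the patch.

/* Sketch only: assumes struct gem and struct gem_txd from sungem.h. */
static void queue_one_txd(struct gem *gp, int entry,
			  dma_addr_t mapping, u64 ctrl)
{
	struct gem_txd *txd = &gp->init_block->txd[entry];

	txd->buffer = cpu_to_le64(mapping);
	wmb();	/* buffer address must reach memory before the control
		 * word marks the descriptor as owned by the chip */
	txd->control_word = cpu_to_le64(ctrl);

	mb();	/* all descriptor stores must complete before the MMIO
		 * kick tells the DMA engine to fetch them */
	writel(gp->tx_new, gp->regs + TXDMA_KICK);
}

wmb() only orders stores against stores, which is enough between the two descriptor fields; the heavier mb() before writel() matches what the patch does before each kick, since the kick is an MMIO store that must not be reordered ahead of the cacheable descriptor writes.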
@@ -654,6 +654,7 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
 	cluster_start = curr = (gp->rx_new & ~(4 - 1));
 	count = 0;
 	kick = -1;
+	wmb();
 	while (curr != limit) {
 		curr = NEXT_RX(curr);
 		if (++count == 4) {
@@ -670,8 +671,10 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
 			count = 0;
 		}
 	}
-	if (kick >= 0)
+	if (kick >= 0) {
+		mb();
 		writel(kick, gp->regs + RXDMA_KICK);
+	}
 }
 
 static void gem_rx(struct gem *gp)
@@ -884,6 +887,7 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (gem_intme(entry))
 			ctrl |= TXDCTRL_INTME;
 		txd->buffer = cpu_to_le64(mapping);
+		wmb();
 		txd->control_word = cpu_to_le64(ctrl);
 		entry = NEXT_TX(entry);
 	} else {
@@ -923,6 +927,7 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 			txd = &gp->init_block->txd[entry];
 			txd->buffer = cpu_to_le64(mapping);
+			wmb();
 			txd->control_word = cpu_to_le64(this_ctrl | len);
 
 			if (gem_intme(entry))
@@ -932,6 +937,7 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 		txd = &gp->init_block->txd[first_entry];
 		txd->buffer = cpu_to_le64(first_mapping);
+		wmb();
 		txd->control_word =
 			cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
 	}
@@ -943,6 +949,7 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (netif_msg_tx_queued(gp))
 		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
 		       dev->name, entry, skb->len);
+	mb();
 	writel(gp->tx_new, gp->regs + TXDMA_KICK);
 	spin_unlock_irq(&gp->lock);
 
@@ -1418,6 +1425,7 @@ static void gem_clean_rings(struct gem *gp)
 			gp->rx_skbs[i] = NULL;
 		}
 		rxd->status_word = 0;
+		wmb();
 		rxd->buffer = 0;
 	}
 
@@ -1478,6 +1486,7 @@ static void gem_init_rings(struct gem *gp)
 					RX_BUF_ALLOC_SIZE(gp),
 					PCI_DMA_FROMDEVICE);
 		rxd->buffer = cpu_to_le64(dma_addr);
+		wmb();
 		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
 		skb_reserve(skb, RX_OFFSET);
 	}
@@ -1486,8 +1495,10 @@ static void gem_init_rings(struct gem *gp)
 		struct gem_txd *txd = &gb->txd[i];
 
 		txd->control_word = 0;
+		wmb();
 		txd->buffer = 0;
 	}
+	wmb();
 }
 
 /* Must be invoked under gp->lock. */