Commit c7b57274 authored by Thomas Bogendoerfer, committed by David S. Miller

net: sgi: ioc3-eth: allocate space for desc rings only once

Memory for the descriptor rings is allocated and freed every time the
interface is brought up or down. Since the size of the rings cannot be
changed by the hardware, allocate the rings once during probe and free
them when the device is removed.
Signed-off-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 141a7dbb
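Before the diff, here is a minimal userspace sketch (not part of the patch, and not kernel code) of the lifetime change the commit makes: the descriptor rings are allocated once when the device is probed, reused across open/close cycles, and only freed when the device is removed. All names here (fake_ring, fake_probe, fake_open, fake_close, fake_remove) are illustrative only.

/* Sketch of "allocate once at probe, free at remove" under the
 * assumption of fixed-size rings, mirroring the patch's intent.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_ring {
	void *rxr;	/* rx descriptor ring, fixed size */
	void *txr;	/* tx descriptor ring, fixed size */
};

static int fake_probe(struct fake_ring *r)
{
	/* Allocate once; the ring size never changes, so this is not
	 * redone on every interface up/down cycle.
	 */
	r->rxr = calloc(1, 4096);	/* 4kb rx ring */
	r->txr = calloc(1, 16384);	/* 16kb tx ring */
	if (!r->rxr || !r->txr) {
		free(r->rxr);
		free(r->txr);
		return -1;
	}
	return 0;
}

static void fake_open(struct fake_ring *r)
{
	printf("open: reusing rings %p / %p\n", r->rxr, r->txr);
}

static void fake_close(struct fake_ring *r)
{
	(void)r;	/* rings stay allocated across ifdown */
}

static void fake_remove(struct fake_ring *r)
{
	free(r->rxr);
	free(r->txr);
}

int main(void)
{
	struct fake_ring r;

	if (fake_probe(&r))
		return 1;
	fake_open(&r);		/* ifup */
	fake_close(&r);		/* ifdown: rings survive */
	fake_open(&r);		/* ifup again: same rings */
	fake_remove(&r);	/* device removal frees them */
	return 0;
}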
@@ -800,28 +800,17 @@ static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
 
 static void ioc3_free_rings(struct ioc3_private *ip)
 {
-	struct sk_buff *skb;
 	int rx_entry, n_entry;
 
-	if (ip->txr) {
-		ioc3_clean_tx_ring(ip);
-		free_pages((unsigned long)ip->txr, 2);
-		ip->txr = NULL;
-	}
+	ioc3_clean_tx_ring(ip);
 
-	if (ip->rxr) {
-		n_entry = ip->rx_ci;
-		rx_entry = ip->rx_pi;
+	n_entry = ip->rx_ci;
+	rx_entry = ip->rx_pi;
 
-		while (n_entry != rx_entry) {
-			skb = ip->rx_skbs[n_entry];
-			if (skb)
-				dev_kfree_skb_any(skb);
+	while (n_entry != rx_entry) {
+		dev_kfree_skb_any(ip->rx_skbs[n_entry]);
 
-			n_entry = (n_entry + 1) & RX_RING_MASK;
-		}
-		free_page((unsigned long)ip->rxr);
-		ip->rxr = NULL;
-	}
+		n_entry = (n_entry + 1) & RX_RING_MASK;
+	}
 }
@@ -829,49 +818,34 @@ static void ioc3_alloc_rings(struct net_device *dev)
 {
 	struct ioc3_private *ip = netdev_priv(dev);
 	struct ioc3_erxbuf *rxb;
-	unsigned long *rxr;
 	int i;
 
-	if (!ip->rxr) {
-		/* Allocate and initialize rx ring. 4kb = 512 entries */
-		ip->rxr = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
-		rxr = ip->rxr;
-		if (!rxr)
-			pr_err("%s: get_zeroed_page() failed!\n", __func__);
-
-		/* Now the rx buffers. The RX ring may be larger but
-		 * we only allocate 16 buffers for now. Need to tune
-		 * this for performance and memory later.
-		 */
-		for (i = 0; i < RX_BUFFS; i++) {
-			struct sk_buff *skb;
+	/* Now the rx buffers. The RX ring may be larger but
+	 * we only allocate 16 buffers for now. Need to tune
+	 * this for performance and memory later.
+	 */
+	for (i = 0; i < RX_BUFFS; i++) {
+		struct sk_buff *skb;
 
-			skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
-			if (!skb) {
-				show_free_areas(0, NULL);
-				continue;
-			}
+		skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
+		if (!skb) {
+			show_free_areas(0, NULL);
+			continue;
+		}
 
-			ip->rx_skbs[i] = skb;
+		ip->rx_skbs[i] = skb;
 
-			/* Because we reserve afterwards. */
-			skb_put(skb, (1664 + RX_OFFSET));
-			rxb = (struct ioc3_erxbuf *)skb->data;
-			rxr[i] = cpu_to_be64(ioc3_map(rxb, 1));
-			skb_reserve(skb, RX_OFFSET);
-		}
-		ip->rx_ci = 0;
-		ip->rx_pi = RX_BUFFS;
-	}
+		/* Because we reserve afterwards. */
+		skb_put(skb, (1664 + RX_OFFSET));
+		rxb = (struct ioc3_erxbuf *)skb->data;
+		ip->rxr[i] = cpu_to_be64(ioc3_map(rxb, 1));
+		skb_reserve(skb, RX_OFFSET);
+	}
 
-	if (!ip->txr) {
-		/* Allocate and initialize tx rings. 16kb = 128 bufs. */
-		ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
-		if (!ip->txr)
-			pr_err("%s: __get_free_pages() failed!\n", __func__);
-		ip->tx_pi = 0;
-		ip->tx_ci = 0;
-	}
+	ip->rx_ci = 0;
+	ip->rx_pi = RX_BUFFS;
+
+	ip->tx_pi = 0;
+	ip->tx_ci = 0;
 }
 
 static void ioc3_init_rings(struct net_device *dev)
@@ -1239,6 +1213,23 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	timer_setup(&ip->ioc3_timer, ioc3_timer, 0);
 
 	ioc3_stop(ip);
+
+	/* Allocate rx ring. 4kb = 512 entries, must be 4kb aligned */
+	ip->rxr = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+	if (!ip->rxr) {
+		pr_err("ioc3-eth: rx ring allocation failed\n");
+		err = -ENOMEM;
+		goto out_stop;
+	}
+
+	/* Allocate tx rings. 16kb = 128 bufs, must be 16kb aligned */
+	ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
+	if (!ip->txr) {
+		pr_err("ioc3-eth: tx ring allocation failed\n");
+		err = -ENOMEM;
+		goto out_stop;
+	}
+
 	ioc3_init(dev);
 
 	ip->pdev = pdev;
@@ -1293,6 +1284,11 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	ioc3_stop(ip);
 	del_timer_sync(&ip->ioc3_timer);
 	ioc3_free_rings(ip);
+	if (ip->rxr)
+		free_page((unsigned long)ip->rxr);
+	if (ip->txr)
+		free_pages((unsigned long)ip->txr, 2);
+	kfree(ip->txr);
 out_res:
 	pci_release_regions(pdev);
 out_free:
@@ -1310,6 +1306,9 @@ static void ioc3_remove_one(struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct ioc3_private *ip = netdev_priv(dev);
 
+	free_page((unsigned long)ip->rxr);
+	free_pages((unsigned long)ip->txr, 2);
+
 	unregister_netdev(dev);
 	del_timer_sync(&ip->ioc3_timer);