Commit f7b261bf authored by Esben Haabendal's avatar Esben Haabendal Committed by David S. Miller

net: ll_temac: Make RX/TX ring sizes configurable

Add support for setting the RX and TX ring sizes for this driver using
ethtool. Also increase the default RX ring size as the previous default
was far too low for good performance in some configurations.
Signed-off-by: Esben Haabendal <esben@geanix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7c462a0c
...@@ -369,8 +369,10 @@ struct temac_local { ...@@ -369,8 +369,10 @@ struct temac_local {
/* Buffer descriptors */ /* Buffer descriptors */
struct cdmac_bd *tx_bd_v; struct cdmac_bd *tx_bd_v;
dma_addr_t tx_bd_p; dma_addr_t tx_bd_p;
u32 tx_bd_num;
struct cdmac_bd *rx_bd_v; struct cdmac_bd *rx_bd_v;
dma_addr_t rx_bd_p; dma_addr_t rx_bd_p;
u32 rx_bd_num;
int tx_bd_ci; int tx_bd_ci;
int tx_bd_tail; int tx_bd_tail;
int rx_bd_ci; int rx_bd_ci;
......
...@@ -58,8 +58,11 @@ ...@@ -58,8 +58,11 @@
#include "ll_temac.h" #include "ll_temac.h"
#define TX_BD_NUM 64 /* Descriptors defines for Tx and Rx DMA */
#define RX_BD_NUM 128 #define TX_BD_NUM_DEFAULT 64
#define RX_BD_NUM_DEFAULT 1024
#define TX_BD_NUM_MAX 4096
#define RX_BD_NUM_MAX 4096
/* --------------------------------------------------------------------- /* ---------------------------------------------------------------------
* Low level register access functions * Low level register access functions
...@@ -301,7 +304,7 @@ static void temac_dma_bd_release(struct net_device *ndev) ...@@ -301,7 +304,7 @@ static void temac_dma_bd_release(struct net_device *ndev)
/* Reset Local Link (DMA) */ /* Reset Local Link (DMA) */
lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST); lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
for (i = 0; i < RX_BD_NUM; i++) { for (i = 0; i < lp->rx_bd_num; i++) {
if (!lp->rx_skb[i]) if (!lp->rx_skb[i])
break; break;
else { else {
...@@ -312,11 +315,11 @@ static void temac_dma_bd_release(struct net_device *ndev) ...@@ -312,11 +315,11 @@ static void temac_dma_bd_release(struct net_device *ndev)
} }
if (lp->rx_bd_v) if (lp->rx_bd_v)
dma_free_coherent(ndev->dev.parent, dma_free_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * RX_BD_NUM, sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
lp->rx_bd_v, lp->rx_bd_p); lp->rx_bd_v, lp->rx_bd_p);
if (lp->tx_bd_v) if (lp->tx_bd_v)
dma_free_coherent(ndev->dev.parent, dma_free_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM, sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
lp->tx_bd_v, lp->tx_bd_p); lp->tx_bd_v, lp->tx_bd_p);
} }
...@@ -330,33 +333,33 @@ static int temac_dma_bd_init(struct net_device *ndev) ...@@ -330,33 +333,33 @@ static int temac_dma_bd_init(struct net_device *ndev)
dma_addr_t skb_dma_addr; dma_addr_t skb_dma_addr;
int i; int i;
lp->rx_skb = devm_kcalloc(&ndev->dev, RX_BD_NUM, sizeof(*lp->rx_skb), lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
GFP_KERNEL); sizeof(*lp->rx_skb), GFP_KERNEL);
if (!lp->rx_skb) if (!lp->rx_skb)
goto out; goto out;
/* allocate the tx and rx ring buffer descriptors. */ /* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual address and a physical address. */ /* returns a virtual address and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM, sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL); &lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v) if (!lp->tx_bd_v)
goto out; goto out;
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * RX_BD_NUM, sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
&lp->rx_bd_p, GFP_KERNEL); &lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v) if (!lp->rx_bd_v)
goto out; goto out;
for (i = 0; i < TX_BD_NUM; i++) { for (i = 0; i < lp->tx_bd_num; i++) {
lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
+ sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM)); + sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
} }
for (i = 0; i < RX_BD_NUM; i++) { for (i = 0; i < lp->rx_bd_num; i++) {
lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
+ sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM)); + sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));
skb = netdev_alloc_skb_ip_align(ndev, skb = netdev_alloc_skb_ip_align(ndev,
XTE_MAX_JUMBO_FRAME_SIZE); XTE_MAX_JUMBO_FRAME_SIZE);
...@@ -389,7 +392,7 @@ static int temac_dma_bd_init(struct net_device *ndev) ...@@ -389,7 +392,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->tx_bd_ci = 0; lp->tx_bd_ci = 0;
lp->tx_bd_tail = 0; lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0; lp->rx_bd_ci = 0;
lp->rx_bd_tail = RX_BD_NUM - 1; lp->rx_bd_tail = lp->rx_bd_num - 1;
/* Enable RX DMA transfers */ /* Enable RX DMA transfers */
wmb(); wmb();
...@@ -784,7 +787,7 @@ static void temac_start_xmit_done(struct net_device *ndev) ...@@ -784,7 +787,7 @@ static void temac_start_xmit_done(struct net_device *ndev)
ndev->stats.tx_bytes += be32_to_cpu(cur_p->len); ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
lp->tx_bd_ci++; lp->tx_bd_ci++;
if (lp->tx_bd_ci >= TX_BD_NUM) if (lp->tx_bd_ci >= lp->tx_bd_num)
lp->tx_bd_ci = 0; lp->tx_bd_ci = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
...@@ -810,7 +813,7 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag) ...@@ -810,7 +813,7 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
tail++; tail++;
if (tail >= TX_BD_NUM) if (tail >= lp->tx_bd_num)
tail = 0; tail = 0;
cur_p = &lp->tx_bd_v[tail]; cur_p = &lp->tx_bd_v[tail];
...@@ -874,7 +877,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) ...@@ -874,7 +877,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ptr_to_txbd((void *)skb, cur_p); ptr_to_txbd((void *)skb, cur_p);
for (ii = 0; ii < num_frag; ii++) { for (ii = 0; ii < num_frag; ii++) {
if (++lp->tx_bd_tail >= TX_BD_NUM) if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0; lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
...@@ -884,7 +887,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) ...@@ -884,7 +887,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) { if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
if (--lp->tx_bd_tail < 0) if (--lp->tx_bd_tail < 0)
lp->tx_bd_tail = TX_BD_NUM - 1; lp->tx_bd_tail = lp->tx_bd_num - 1;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
while (--ii >= 0) { while (--ii >= 0) {
--frag; --frag;
...@@ -893,7 +896,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) ...@@ -893,7 +896,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_frag_size(frag), skb_frag_size(frag),
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (--lp->tx_bd_tail < 0) if (--lp->tx_bd_tail < 0)
lp->tx_bd_tail = TX_BD_NUM - 1; lp->tx_bd_tail = lp->tx_bd_num - 1;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
} }
dma_unmap_single(ndev->dev.parent, dma_unmap_single(ndev->dev.parent,
...@@ -912,7 +915,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) ...@@ -912,7 +915,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
lp->tx_bd_tail++; lp->tx_bd_tail++;
if (lp->tx_bd_tail >= TX_BD_NUM) if (lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0; lp->tx_bd_tail = 0;
skb_tx_timestamp(skb); skb_tx_timestamp(skb);
...@@ -932,7 +935,7 @@ static int ll_temac_recv_buffers_available(struct temac_local *lp) ...@@ -932,7 +935,7 @@ static int ll_temac_recv_buffers_available(struct temac_local *lp)
return 0; return 0;
available = 1 + lp->rx_bd_tail - lp->rx_bd_ci; available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
if (available <= 0) if (available <= 0)
available += RX_BD_NUM; available += lp->rx_bd_num;
return available; return available;
} }
...@@ -1001,7 +1004,7 @@ static void ll_temac_recv(struct net_device *ndev) ...@@ -1001,7 +1004,7 @@ static void ll_temac_recv(struct net_device *ndev)
ndev->stats.rx_bytes += length; ndev->stats.rx_bytes += length;
rx_bd = lp->rx_bd_ci; rx_bd = lp->rx_bd_ci;
if (++lp->rx_bd_ci >= RX_BD_NUM) if (++lp->rx_bd_ci >= lp->rx_bd_num)
lp->rx_bd_ci = 0; lp->rx_bd_ci = 0;
} while (rx_bd != lp->rx_bd_tail); } while (rx_bd != lp->rx_bd_tail);
...@@ -1032,7 +1035,7 @@ static void ll_temac_recv(struct net_device *ndev) ...@@ -1032,7 +1035,7 @@ static void ll_temac_recv(struct net_device *ndev)
dma_addr_t skb_dma_addr; dma_addr_t skb_dma_addr;
rx_bd = lp->rx_bd_tail + 1; rx_bd = lp->rx_bd_tail + 1;
if (rx_bd >= RX_BD_NUM) if (rx_bd >= lp->rx_bd_num)
rx_bd = 0; rx_bd = 0;
bd = &lp->rx_bd_v[rx_bd]; bd = &lp->rx_bd_v[rx_bd];
...@@ -1248,13 +1251,52 @@ static const struct attribute_group temac_attr_group = { ...@@ -1248,13 +1251,52 @@ static const struct attribute_group temac_attr_group = {
.attrs = temac_device_attrs, .attrs = temac_device_attrs,
}; };
/* ethtool support */ /* ---------------------------------------------------------------------
* ethtool support
*/
/* ethtool get_ringparam: report the current and maximum RX/TX ring
 * sizes.  Mini and jumbo rings are not implemented by this driver, so
 * both their current and maximum counts are reported as zero.
 */
static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
					    struct ethtool_ringparam *ering)
{
	struct temac_local *lp = netdev_priv(ndev);

	/* Supported ring classes: current sizes and driver limits. */
	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->tx_pending = lp->tx_bd_num;

	/* Unsupported ring classes. */
	ering->rx_mini_max_pending = 0;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->rx_jumbo_pending = 0;
}
/* ethtool set_ringparam: validate and store new RX/TX ring sizes.
 *
 * The new sizes take effect the next time the descriptor rings are
 * allocated (temac_dma_bd_init() on ifup), so the interface must be
 * down when resizing.
 *
 * Returns 0 on success, -EINVAL for out-of-range or unsupported ring
 * parameters, -EBUSY if the interface is running.
 */
static int ll_temac_ethtools_set_ringparam(struct net_device *ndev,
					   struct ethtool_ringparam *ering)
{
	struct temac_local *lp = netdev_priv(ndev);

	/* Mini/jumbo rings are unsupported; cap each ring at its own
	 * maximum.  Fix: the original compared rx_pending against
	 * TX_BD_NUM_MAX a second time, leaving tx_pending completely
	 * unvalidated -- it must be tx_pending that is checked here.
	 */
	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}
static const struct ethtool_ops temac_ethtool_ops = { static const struct ethtool_ops temac_ethtool_ops = {
.nway_reset = phy_ethtool_nway_reset, .nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link, .get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info, .get_ts_info = ethtool_op_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings, .get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ringparam = ll_temac_ethtools_get_ringparam,
.set_ringparam = ll_temac_ethtools_set_ringparam,
}; };
static int temac_probe(struct platform_device *pdev) static int temac_probe(struct platform_device *pdev)
...@@ -1298,6 +1340,8 @@ static int temac_probe(struct platform_device *pdev) ...@@ -1298,6 +1340,8 @@ static int temac_probe(struct platform_device *pdev)
lp->ndev = ndev; lp->ndev = ndev;
lp->dev = &pdev->dev; lp->dev = &pdev->dev;
lp->options = XTE_OPTION_DEFAULTS; lp->options = XTE_OPTION_DEFAULTS;
lp->rx_bd_num = RX_BD_NUM_DEFAULT;
lp->tx_bd_num = TX_BD_NUM_DEFAULT;
spin_lock_init(&lp->rx_lock); spin_lock_init(&lp->rx_lock);
INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func); INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment