Commit 8ec2cd48 authored by Willy Tarreau, committed by David S. Miller

net: mvneta: convert to build_skb()

Make use of build_skb() to allocate frags on the RX path. When the frag
size does not exceed a page, we use netdev_alloc_frag(); for larger sizes
we fall back to kmalloc(). The frag size is stored in the mvneta_port
struct, and the alloc/free helpers check it to pick the matching
allocation and freeing method. MTU changes are safe because the MTU
change function stops the device and clears the queues before applying
the change.
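
For illustration, here is a minimal, self-contained sketch of that policy,
mirroring the mvneta_frag_alloc()/mvneta_frag_free() helpers added in the
diff below (the standalone helper names and the bare frag_size parameter
are just for the example). Note that build_skb() treats a size argument of
0 as "the buffer was kmalloc()'d, take its size from ksize()", which is
what makes the kmalloc() fallback work:

#include <linux/skbuff.h>       /* netdev_alloc_frag() */
#include <linux/slab.h>         /* kmalloc(), kfree() */
#include <linux/mm.h>           /* put_page(), virt_to_head_page() */

/* Sketch only: pick the allocator based on the precomputed frag size. */
static void *rx_frag_alloc(unsigned int frag_size)
{
        if (likely(frag_size <= PAGE_SIZE))
                /* refcounted page fragment, cheap to allocate and free */
                return netdev_alloc_frag(frag_size);
        /* frag larger than a page (jumbo MTU): plain kmalloc() */
        return kmalloc(frag_size, GFP_ATOMIC);
}

/* Sketch only: free with the method matching rx_frag_alloc(). */
static void rx_frag_free(unsigned int frag_size, void *data)
{
        if (likely(frag_size <= PAGE_SIZE))
                /* page-frag memory is released via the page refcount */
                put_page(virt_to_head_page(data));
        else
                kfree(data);
}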

With this patch, I observed a reproducible 2% performance improvement on
HTTP-based benchmarks, and a 5% improvement in the small-packet RX rate.

Cc: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Cc: Gregory CLEMENT <gregory.clement@free-electrons.com>
Tested-by: Arnaud Ebalard <arno@natisbad.org>
Signed-off-by: Willy Tarreau <w@1wt.eu>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 34e4179d
@@ -268,6 +268,7 @@ struct mvneta_pcpu_stats {
 struct mvneta_port {
         int pkt_size;
+        unsigned int frag_size;
         void __iomem *base;
         struct mvneta_rx_queue *rxqs;
         struct mvneta_tx_queue *txqs;
@@ -1332,28 +1333,43 @@ static int mvneta_txq_done(struct mvneta_port *pp,
         return tx_done;
 }
 
+static void *mvneta_frag_alloc(const struct mvneta_port *pp)
+{
+        if (likely(pp->frag_size <= PAGE_SIZE))
+                return netdev_alloc_frag(pp->frag_size);
+        else
+                return kmalloc(pp->frag_size, GFP_ATOMIC);
+}
+
+static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
+{
+        if (likely(pp->frag_size <= PAGE_SIZE))
+                put_page(virt_to_head_page(data));
+        else
+                kfree(data);
+}
+
 /* Refill processing */
 static int mvneta_rx_refill(struct mvneta_port *pp,
                             struct mvneta_rx_desc *rx_desc)
 {
         dma_addr_t phys_addr;
-        struct sk_buff *skb;
+        void *data;
 
-        skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
-        if (!skb)
+        data = mvneta_frag_alloc(pp);
+        if (!data)
                 return -ENOMEM;
 
-        phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
+        phys_addr = dma_map_single(pp->dev->dev.parent, data,
                                    MVNETA_RX_BUF_SIZE(pp->pkt_size),
                                    DMA_FROM_DEVICE);
         if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
-                dev_kfree_skb(skb);
+                mvneta_frag_free(pp, data);
                 return -ENOMEM;
         }
 
-        mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
+        mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
         return 0;
 }
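
The important consequence of this hunk is that the descriptor's buf_cookie
now stores a bare buffer pointer rather than a pre-built skb, so skb
construction is deferred until a frame actually lands in the buffer. A
hypothetical helper (not part of the patch; the real call is made inline
in the mvneta_rx() hunk further down) capturing the receive-side pairing:

/* Hypothetical helper: rebuild an skb around the raw buffer that the
 * refill path stashed in buf_cookie. frag_size > PAGE_SIZE means the
 * buffer came from kmalloc(), and build_skb() expects 0 in that case
 * so it can size the skb via ksize(data).
 */
static struct sk_buff *mvneta_build_rx_skb(struct mvneta_port *pp,
                                           struct mvneta_rx_desc *rx_desc)
{
        unsigned char *data = (unsigned char *)rx_desc->buf_cookie;

        return build_skb(data,
                         pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
}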
@@ -1407,9 +1423,9 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
         rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
         for (i = 0; i < rxq->size; i++) {
                 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
-                struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie;
+                void *data = (void *)rx_desc->buf_cookie;
 
-                dev_kfree_skb_any(skb);
+                mvneta_frag_free(pp, data);
                 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
                                  MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
         }
@@ -1440,20 +1456,21 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
         while (rx_done < rx_todo) {
                 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
                 struct sk_buff *skb;
+                unsigned char *data;
                 u32 rx_status;
                 int rx_bytes, err;
 
                 rx_done++;
                 rx_filled++;
                 rx_status = rx_desc->status;
-                skb = (struct sk_buff *)rx_desc->buf_cookie;
+                data = (unsigned char *)rx_desc->buf_cookie;
 
                 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
-                    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+                    (rx_status & MVNETA_RXD_ERR_SUMMARY) ||
+                    !(skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size))) {
                         dev->stats.rx_errors++;
                         mvneta_rx_error(pp, rx_desc);
-                        mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr,
-                                            (u32)skb);
+                        /* leave the descriptor untouched */
                         continue;
                 }
@@ -1466,7 +1483,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
                 rcvd_bytes += rx_bytes;
 
                 /* Linux processing */
-                skb_reserve(skb, MVNETA_MH_SIZE);
+                skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
                 skb_put(skb, rx_bytes);
 
                 skb->protocol = eth_type_trans(skb, dev);
@@ -2276,6 +2293,8 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
         mvneta_cleanup_rxqs(pp);
 
         pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+        pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
+                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
         ret = mvneta_setup_rxqs(pp);
         if (ret) {
@@ -2423,6 +2442,8 @@ static int mvneta_open(struct net_device *dev)
         mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
 
         pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+        pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
+                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
         ret = mvneta_setup_rxqs(pp);
         if (ret)
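
A design note on the two identical frag_size assignments above: the buffer
must cover the DMA-visible area, MVNETA_RX_BUF_SIZE(pp->pkt_size), plus
the struct skb_shared_info that build_skb() places at the tail, each
rounded with SKB_DATA_ALIGN() as build_skb() expects. The buffer sizing
also accounts for NET_SKB_PAD headroom, which is consistent with the
skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD) change in the RX hunk. The
computation appears verbatim in both mvneta_change_mtu() and mvneta_open();
a hypothetical helper (not in this patch) factoring it out:

/* Hypothetical helper: truesize of one RX frag for a given packet size. */
static unsigned int mvneta_frag_size(int pkt_size)
{
        return SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pkt_size)) +
               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

Both call sites would then reduce to:

        pp->frag_size = mvneta_frag_size(pp->pkt_size);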