Commit 5e46631f authored by Rasesh Mody's avatar Rasesh Mody Committed by David S. Miller

bna: Code Cleanup and Enhancements

Change details:
 -      Remove unnecessary prefetch
 -      Simplify checking & comparison of CQ flags
 -      Dereference & store unmap_array, unmap_cons & current unmap_array
        element only once
 -      Make structures tx_config & rx_config cache line aligned.
Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f9c4d420
...@@ -210,7 +210,6 @@ bnad_txcmpl_process(struct bnad *bnad, ...@@ -210,7 +210,6 @@ bnad_txcmpl_process(struct bnad *bnad,
unmap_array = unmap_q->unmap_array; unmap_array = unmap_q->unmap_array;
unmap_cons = unmap_q->consumer_index; unmap_cons = unmap_q->consumer_index;
prefetch(&unmap_array[unmap_cons + 1]);
while (wis) { while (wis) {
skb = unmap_array[unmap_cons].skb; skb = unmap_array[unmap_cons].skb;
...@@ -383,6 +382,20 @@ bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb) ...@@ -383,6 +382,20 @@ bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
} }
} }
#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
BNA_CQ_EF_IPV6 | \
BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
BNA_CQ_EF_L4_CKSUM_OK)
#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp6 (BNA_CQ_EF_IPV6 | \
BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
static u32 static u32
bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{ {
...@@ -390,15 +403,12 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) ...@@ -390,15 +403,12 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
struct bna_rcb *rcb = NULL; struct bna_rcb *rcb = NULL;
unsigned int wi_range, packets = 0, wis = 0; unsigned int wi_range, packets = 0, wis = 0;
struct bnad_unmap_q *unmap_q; struct bnad_unmap_q *unmap_q;
struct bnad_skb_unmap *unmap_array; struct bnad_skb_unmap *unmap_array, *curr_ua;
struct sk_buff *skb; struct sk_buff *skb;
u32 flags, unmap_cons; u32 flags, unmap_cons, masked_flags;
struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
return 0;
prefetch(bnad->netdev); prefetch(bnad->netdev);
BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl, BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
wi_range); wi_range);
...@@ -416,12 +426,13 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) ...@@ -416,12 +426,13 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
unmap_array = unmap_q->unmap_array; unmap_array = unmap_q->unmap_array;
unmap_cons = unmap_q->consumer_index; unmap_cons = unmap_q->consumer_index;
skb = unmap_array[unmap_cons].skb; curr_ua = &unmap_array[unmap_cons];
skb = curr_ua->skb;
BUG_ON(!(skb)); BUG_ON(!(skb));
unmap_array[unmap_cons].skb = NULL; curr_ua->skb = NULL;
dma_unmap_single(&bnad->pcidev->dev, dma_unmap_single(&bnad->pcidev->dev,
dma_unmap_addr(&unmap_array[unmap_cons], dma_unmap_addr(curr_ua, dma_addr),
dma_addr),
rcb->rxq->buffer_size, rcb->rxq->buffer_size,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth); BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
...@@ -452,13 +463,15 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) ...@@ -452,13 +463,15 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
} }
skb_put(skb, ntohs(cmpl->length)); skb_put(skb, ntohs(cmpl->length));
masked_flags = flags & flags_cksum_prot_mask;
if (likely if (likely
((bnad->netdev->features & NETIF_F_RXCSUM) && ((bnad->netdev->features & NETIF_F_RXCSUM) &&
(((flags & BNA_CQ_EF_IPV4) && ((masked_flags == flags_tcp4) ||
(flags & BNA_CQ_EF_L3_CKSUM_OK)) || (masked_flags == flags_udp4) ||
(flags & BNA_CQ_EF_IPV6)) && (masked_flags == flags_tcp6) ||
(flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) && (masked_flags == flags_udp6))))
(flags & BNA_CQ_EF_L4_CKSUM_OK)))
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
else else
skb_checksum_none_assert(skb); skb_checksum_none_assert(skb);
......
...@@ -284,8 +284,8 @@ struct bnad { ...@@ -284,8 +284,8 @@ struct bnad {
u8 tx_coalescing_timeo; u8 tx_coalescing_timeo;
u8 rx_coalescing_timeo; u8 rx_coalescing_timeo;
struct bna_rx_config rx_config[BNAD_MAX_RX]; struct bna_rx_config rx_config[BNAD_MAX_RX] ____cacheline_aligned;
struct bna_tx_config tx_config[BNAD_MAX_TX]; struct bna_tx_config tx_config[BNAD_MAX_TX] ____cacheline_aligned;
void __iomem *bar0; /* BAR0 address */ void __iomem *bar0; /* BAR0 address */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment