Commit 588c7e5c authored by Heiner Kallweit; committed by David S. Miller

r8169: make rtl_rx better readable

Avoid the goto from the rx error handling branch into the else branch,
and in general avoid having the main rx work in the else branch.
In addition ensure proper reverse xmas tree order of variables in the
for loop.

No functional change intended.
Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 35e43c39
...@@ -4413,15 +4413,17 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) ...@@ -4413,15 +4413,17 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget) static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{ {
unsigned int cur_rx, rx_left; unsigned int cur_rx, rx_left, count;
unsigned int count; struct device *d = tp_to_dev(tp);
cur_rx = tp->cur_rx; cur_rx = tp->cur_rx;
for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) { for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
unsigned int entry = cur_rx % NUM_RX_DESC; unsigned int pkt_size, entry = cur_rx % NUM_RX_DESC;
const void *rx_buf = page_address(tp->Rx_databuff[entry]);
struct RxDesc *desc = tp->RxDescArray + entry; struct RxDesc *desc = tp->RxDescArray + entry;
struct sk_buff *skb;
const void *rx_buf;
dma_addr_t addr;
u32 status; u32 status;
status = le32_to_cpu(desc->opts1); status = le32_to_cpu(desc->opts1);
...@@ -4443,62 +4445,57 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget ...@@ -4443,62 +4445,57 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
dev->stats.rx_length_errors++; dev->stats.rx_length_errors++;
if (status & RxCRC) if (status & RxCRC)
dev->stats.rx_crc_errors++; dev->stats.rx_crc_errors++;
if (status & (RxRUNT | RxCRC) && !(status & RxRWT) &&
dev->features & NETIF_F_RXALL) {
goto process_pkt;
}
} else {
unsigned int pkt_size;
struct sk_buff *skb;
process_pkt:
pkt_size = status & GENMASK(13, 0);
if (likely(!(dev->features & NETIF_F_RXFCS)))
pkt_size -= ETH_FCS_LEN;
/*
* The driver does not support incoming fragmented
* frames. They are seen as a symptom of over-mtu
* sized frames.
*/
if (unlikely(rtl8169_fragmented_frame(status))) {
dev->stats.rx_dropped++;
dev->stats.rx_length_errors++;
goto release_descriptor;
}
skb = napi_alloc_skb(&tp->napi, pkt_size); if (!(dev->features & NETIF_F_RXALL))
if (unlikely(!skb)) {
dev->stats.rx_dropped++;
goto release_descriptor; goto release_descriptor;
} else if (status & RxRWT || !(status & (RxRUNT | RxCRC)))
goto release_descriptor;
}
dma_sync_single_for_cpu(tp_to_dev(tp), pkt_size = status & GENMASK(13, 0);
le64_to_cpu(desc->addr), if (likely(!(dev->features & NETIF_F_RXFCS)))
pkt_size, DMA_FROM_DEVICE); pkt_size -= ETH_FCS_LEN;
prefetch(rx_buf);
skb_copy_to_linear_data(skb, rx_buf, pkt_size);
skb->tail += pkt_size;
skb->len = pkt_size;
dma_sync_single_for_device(tp_to_dev(tp), /* The driver does not support incoming fragmented frames.
le64_to_cpu(desc->addr), * They are seen as a symptom of over-mtu sized frames.
pkt_size, DMA_FROM_DEVICE); */
if (unlikely(rtl8169_fragmented_frame(status))) {
dev->stats.rx_dropped++;
dev->stats.rx_length_errors++;
goto release_descriptor;
}
rtl8169_rx_csum(skb, status); skb = napi_alloc_skb(&tp->napi, pkt_size);
skb->protocol = eth_type_trans(skb, dev); if (unlikely(!skb)) {
dev->stats.rx_dropped++;
goto release_descriptor;
}
rtl8169_rx_vlan_tag(desc, skb); addr = le64_to_cpu(desc->addr);
rx_buf = page_address(tp->Rx_databuff[entry]);
if (skb->pkt_type == PACKET_MULTICAST) dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
dev->stats.multicast++; prefetch(rx_buf);
skb_copy_to_linear_data(skb, rx_buf, pkt_size);
skb->tail += pkt_size;
skb->len = pkt_size;
dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
napi_gro_receive(&tp->napi, skb); rtl8169_rx_csum(skb, status);
skb->protocol = eth_type_trans(skb, dev);
rtl8169_rx_vlan_tag(desc, skb);
if (skb->pkt_type == PACKET_MULTICAST)
dev->stats.multicast++;
napi_gro_receive(&tp->napi, skb);
u64_stats_update_begin(&tp->rx_stats.syncp);
tp->rx_stats.packets++;
tp->rx_stats.bytes += pkt_size;
u64_stats_update_end(&tp->rx_stats.syncp);
u64_stats_update_begin(&tp->rx_stats.syncp);
tp->rx_stats.packets++;
tp->rx_stats.bytes += pkt_size;
u64_stats_update_end(&tp->rx_stats.syncp);
}
release_descriptor: release_descriptor:
rtl8169_mark_to_asic(desc); rtl8169_mark_to_asic(desc);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment