Commit a7ae7b0b authored by Eric Dumazet, committed by David S. Miller

net: make softnet_data.dropped an atomic_t

If, under extreme cpu backlog pressure, enqueue_to_backlog() has to
drop a packet, it can do so without dirtying a cache line and thus
without potentially slowing down the target cpu.

Move sd->dropped into a separate cache line, and make it atomic.

When there is no pressure, this field is not touched, so there is no
need for it to consume valuable space in a hot cache line.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 95e48d86
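The gain described above comes from avoiding false sharing on the remote cpu's hot data. Below is a hedged userspace sketch of that idea (plain C11 with pthreads; every name is hypothetical and a 64-byte cache line is assumed, so this is an illustration, not the kernel code): one thread keeps reading a hot field while another occasionally bumps a drop counter, and because the counter sits on its own cache line the drop path never invalidates the line the hot reader holds.

/*
 * Userspace sketch of the false-sharing concern: "processed" stands in
 * for the target cpu's hot softnet_data fields, "dropped" for the
 * counter bumped by another cpu on overload.  Aligning "dropped" to its
 * own cache line keeps the drop path from dirtying the hot line.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdalign.h>

#define CACHELINE 64    /* assumed line size for this sketch */

struct per_cpu_stats {
        /* Hot, frequently read fields share one cache line. */
        unsigned long processed;
        unsigned long time_squeeze;

        /* Rarely written drop counter on a line of its own, the
         * userspace analogue of ____cacheline_aligned_in_smp. */
        alignas(CACHELINE) atomic_ulong dropped;
};

static struct per_cpu_stats stats;

static void *hot_reader(void *arg)      /* "target cpu" */
{
        unsigned long sink = 0;

        (void)arg;
        for (long i = 0; i < 100000000L; i++)
                sink += stats.processed;        /* this line stays clean in the reader's cache */
        return (void *)sink;
}

static void *drop_path(void *arg)       /* "remote cpu" under backlog pressure */
{
        (void)arg;
        for (long i = 0; i < 1000000L; i++)
                atomic_fetch_add(&stats.dropped, 1);    /* dirties only its own line */
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, hot_reader, NULL);
        pthread_create(&b, NULL, drop_path, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}

Build with cc -O2 -pthread. Removing the alignas() so that dropped shares a line with processed would make every increment bounce that line between the two threads, which is exactly what the patch avoids for the real per-cpu struct.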
@@ -3237,10 +3237,11 @@ struct softnet_data {
         unsigned int            input_queue_tail;
 #endif
         unsigned int            received_rps;
-        unsigned int            dropped;
         struct sk_buff_head     input_pkt_queue;
         struct napi_struct      backlog;
 
+        atomic_t                dropped ____cacheline_aligned_in_smp;
+
         /* Another possibly contended cache line */
         spinlock_t              defer_lock ____cacheline_aligned_in_smp;
         int                     defer_count;
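As a quick way to sanity-check this kind of layout, here is a small hypothetical userspace analogue of the struct change above (not the real softnet_data, and a 64-byte line size is assumed): static_assert() plus offsetof() confirm that the aligned, rarely written counter starts a new cache line and does not share one with the hot members placed before it.

/*
 * Hypothetical stand-in for the layout above; ____cacheline_aligned_in_smp
 * in the kernel expands to an alignment attribute of the L1 cache line
 * size, modelled here with _Alignas(64).
 */
#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

#define CACHELINE 64

struct backlog_like {
        unsigned int received_rps;      /* hot line */
        void *input_pkt_queue;          /* stand-in for struct sk_buff_head */
        void *backlog;                  /* stand-in for struct napi_struct */

        _Alignas(CACHELINE) atomic_int dropped; /* own cache line */
};

/* The aligned member must start exactly on a cache-line boundary... */
static_assert(offsetof(struct backlog_like, dropped) % CACHELINE == 0,
              "dropped must start a new cache line");
/* ...so it cannot share a line with the hot members before it. */
static_assert(offsetof(struct backlog_like, backlog) / CACHELINE !=
              offsetof(struct backlog_like, dropped) / CACHELINE,
              "dropped must not share a line with hot fields");

int main(void) { return 0; }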
@@ -4800,17 +4800,22 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
         struct softnet_data *sd;
         unsigned long flags;
         unsigned int qlen;
+        int max_backlog;
 
         reason = SKB_DROP_REASON_DEV_READY;
         if (!netif_running(skb->dev))
                 goto bad_dev;
 
+        reason = SKB_DROP_REASON_CPU_BACKLOG;
         sd = &per_cpu(softnet_data, cpu);
+        qlen = skb_queue_len_lockless(&sd->input_pkt_queue);
+        max_backlog = READ_ONCE(net_hotdata.max_backlog);
+        if (unlikely(qlen > max_backlog))
+                goto cpu_backlog_drop;
 
         backlog_lock_irq_save(sd, &flags);
         qlen = skb_queue_len(&sd->input_pkt_queue);
-        if (qlen <= READ_ONCE(net_hotdata.max_backlog) &&
-            !skb_flow_limit(skb, qlen)) {
+        if (qlen <= max_backlog && !skb_flow_limit(skb, qlen)) {
                 if (qlen) {
 enqueue:
                         __skb_queue_tail(&sd->input_pkt_queue, skb);
@@ -4826,11 +4831,11 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
                 napi_schedule_rps(sd);
                 goto enqueue;
         }
 
-        reason = SKB_DROP_REASON_CPU_BACKLOG;
-        sd->dropped++;
         backlog_unlock_irq_restore(sd, &flags);
 
+cpu_backlog_drop:
+        atomic_inc(&sd->dropped);
 bad_dev:
         dev_core_stats_rx_dropped_inc(skb->dev);
         kfree_skb_reason(skb, reason);
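Summarized outside the kernel, the new flow in enqueue_to_backlog() is: peek at the queue length without the lock, and if the backlog is already over the limit, account the drop with a single atomic increment and return without touching the lock or the queue; only when there appears to be room is the lock taken, the limit re-checked, and the packet enqueued. The sketch below is a simplified userspace model with hypothetical names (a mutex stands in for backlog_lock_irq_save(), relaxed C11 atomics stand in for READ_ONCE() and skb_queue_len_lockless()); it illustrates the pattern, not the kernel implementation.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define MAX_BACKLOG 1000

struct backlog {
        pthread_mutex_t lock;
        atomic_uint qlen;       /* updated under lock; relaxed reads mimic READ_ONCE() */
        void *head;             /* stand-in for the real packet list */
        atomic_uint dropped;    /* written only on the drop path */
};

static bool backlog_enqueue(struct backlog *b, void *pkt)
{
        (void)pkt;

        /* Lockless peek, analogous to skb_queue_len_lockless(). */
        if (atomic_load_explicit(&b->qlen, memory_order_relaxed) > MAX_BACKLOG) {
                /* Overload: count the drop without touching the lock or the queue. */
                atomic_fetch_add_explicit(&b->dropped, 1, memory_order_relaxed);
                return false;
        }

        pthread_mutex_lock(&b->lock);
        /* Re-check under the lock; the lockless read may have been stale. */
        if (atomic_load_explicit(&b->qlen, memory_order_relaxed) > MAX_BACKLOG) {
                pthread_mutex_unlock(&b->lock);
                atomic_fetch_add_explicit(&b->dropped, 1, memory_order_relaxed);
                return false;
        }
        /* ... link pkt onto the queue here ... */
        atomic_fetch_add_explicit(&b->qlen, 1, memory_order_relaxed);
        pthread_mutex_unlock(&b->lock);
        return true;
}

int main(void)
{
        struct backlog b = { .lock = PTHREAD_MUTEX_INITIALIZER };
        int pkt;

        backlog_enqueue(&b, &pkt);
        return (int)atomic_load(&b.dropped);    /* 0 here: nothing was dropped */
}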
@@ -144,7 +144,8 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
         seq_printf(seq,
                    "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x "
                    "%08x %08x\n",
-                   sd->processed, sd->dropped, sd->time_squeeze, 0,
+                   sd->processed, atomic_read(&sd->dropped),
+                   sd->time_squeeze, 0,
                    0, 0, 0, 0,      /* was fastroute */
                    0,       /* was cpu_collision */
                    sd->received_rps, flow_limit_count,
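Because the column order in /proc/net/softnet_stat is fixed by the seq_printf() above (processed first, dropped second, each field printed as %08x hex, one line per cpu reported), a minimal reader for the counter touched by this patch can look like the sketch below; the path and column meaning come from this hunk, everything else is illustrative.

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/net/softnet_stat", "r");
        unsigned int processed, dropped;
        char line[512];
        int row = 0;

        if (!f) {
                perror("/proc/net/softnet_stat");
                return 1;
        }
        /* One line per cpu, in the order the kernel walks them. */
        while (fgets(line, sizeof(line), f)) {
                if (sscanf(line, "%x %x", &processed, &dropped) == 2)
                        printf("row %d: processed=%u dropped=%u\n",
                               row, processed, dropped);
                row++;
        }
        fclose(f);
        return 0;
}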