Commit ecefbc09 authored by Sebastian Andrzej Siewior's avatar Sebastian Andrzej Siewior Committed by Jakub Kicinski

net: softnet_data: Make xmit per task.

Softirq is preemptible on PREEMPT_RT. Without a per-CPU lock in
local_bh_disable() there is no guarantee that only one device is
transmitting at a time.
With preemption and multiple senders it is possible that the per-CPU
`recursion' counter gets incremented by different threads and exceeds
XMIT_RECURSION_LIMIT leading to a false positive recursion alert.
The `more' member is subject to similar problems if set by one thread
for one driver and wrongly used by another driver within another thread.

Instead of adding a lock to protect the per-CPU variable it is simpler
to make xmit per-task. Sending and receiving skbs happens always
in thread context anyway.

Having a lock to protect the per-CPU counter would needlessly block/serialize
two sending threads. It would also require a recursive lock to
ensure that the owner can increment the counter further.

Make the softnet_data.xmit a task_struct member on PREEMPT_RT. Add
needed wrapper.

Cc: Ben Segall <bsegall@google.com>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://patch.msgid.link/20240620132727.660738-9-bigeasy@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent c67ef53a
...@@ -43,6 +43,7 @@ ...@@ -43,6 +43,7 @@
#include <linux/netdev_features.h> #include <linux/netdev_features.h>
#include <linux/neighbour.h> #include <linux/neighbour.h>
#include <linux/netdevice_xmit.h>
#include <uapi/linux/netdevice.h> #include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h> #include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h> #include <uapi/linux/pkt_cls.h>
...@@ -3223,13 +3224,7 @@ struct softnet_data { ...@@ -3223,13 +3224,7 @@ struct softnet_data {
struct sk_buff_head xfrm_backlog; struct sk_buff_head xfrm_backlog;
#endif #endif
/* written and read only by owning cpu: */ /* written and read only by owning cpu: */
struct { struct netdev_xmit xmit;
u16 recursion;
u8 more;
#ifdef CONFIG_NET_EGRESS
u8 skip_txqueue;
#endif
} xmit;
#ifdef CONFIG_RPS #ifdef CONFIG_RPS
/* input_queue_head should be written by cpu owning this struct, /* input_queue_head should be written by cpu owning this struct,
* and only read by other cpus. Worth using a cache line. * and only read by other cpus. Worth using a cache line.
...@@ -3257,10 +3252,18 @@ struct softnet_data { ...@@ -3257,10 +3252,18 @@ struct softnet_data {
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
#ifndef CONFIG_PREEMPT_RT
static inline int dev_recursion_level(void) static inline int dev_recursion_level(void)
{ {
return this_cpu_read(softnet_data.xmit.recursion); return this_cpu_read(softnet_data.xmit.recursion);
} }
#else
static inline int dev_recursion_level(void)
{
return current->net_xmit.recursion;
}
#endif
void __netif_schedule(struct Qdisc *q); void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq); void netif_schedule_queue(struct netdev_queue *txq);
...@@ -4872,18 +4875,35 @@ static inline ktime_t netdev_get_tstamp(struct net_device *dev, ...@@ -4872,18 +4875,35 @@ static inline ktime_t netdev_get_tstamp(struct net_device *dev,
return hwtstamps->hwtstamp; return hwtstamps->hwtstamp;
} }
static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, #ifndef CONFIG_PREEMPT_RT
struct sk_buff *skb, struct net_device *dev, static inline void netdev_xmit_set_more(bool more)
bool more)
{ {
__this_cpu_write(softnet_data.xmit.more, more); __this_cpu_write(softnet_data.xmit.more, more);
return ops->ndo_start_xmit(skb, dev);
} }
static inline bool netdev_xmit_more(void) static inline bool netdev_xmit_more(void)
{ {
return __this_cpu_read(softnet_data.xmit.more); return __this_cpu_read(softnet_data.xmit.more);
} }
#else
static inline void netdev_xmit_set_more(bool more)
{
current->net_xmit.more = more;
}
static inline bool netdev_xmit_more(void)
{
return current->net_xmit.more;
}
#endif
static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
struct sk_buff *skb, struct net_device *dev,
bool more)
{
netdev_xmit_set_more(more);
return ops->ndo_start_xmit(skb, dev);
}
static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, bool more) struct netdev_queue *txq, bool more)
......
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_NETDEVICE_XMIT_H
#define _LINUX_NETDEVICE_XMIT_H

/*
 * Per-context transmit state.  Embedded in per-CPU softnet_data on
 * !PREEMPT_RT and in task_struct on PREEMPT_RT, where softirq is
 * preemptible and a shared per-CPU copy would need locking.
 */
struct netdev_xmit {
	/* dev_queue_xmit() recursion depth, checked against XMIT_RECURSION_LIMIT */
	u16 recursion;
	/* xmit_more hint: non-zero if more packets follow the current one */
	u8 more;
#ifdef CONFIG_NET_EGRESS
	/* non-zero if the egress hook already selected the tx queue */
	u8 skip_txqueue;
#endif
};

#endif
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#include <linux/signal_types.h> #include <linux/signal_types.h>
#include <linux/syscall_user_dispatch_types.h> #include <linux/syscall_user_dispatch_types.h>
#include <linux/mm_types_task.h> #include <linux/mm_types_task.h>
#include <linux/netdevice_xmit.h>
#include <linux/task_io_accounting.h> #include <linux/task_io_accounting.h>
#include <linux/posix-timers_types.h> #include <linux/posix-timers_types.h>
#include <linux/restart_block.h> #include <linux/restart_block.h>
...@@ -975,7 +976,9 @@ struct task_struct { ...@@ -975,7 +976,9 @@ struct task_struct {
/* delay due to memory thrashing */ /* delay due to memory thrashing */
unsigned in_thrashing:1; unsigned in_thrashing:1;
#endif #endif
#ifdef CONFIG_PREEMPT_RT
struct netdev_xmit net_xmit;
#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */ unsigned long atomic_flags; /* Flags requiring atomic access. */
struct restart_block restart_block; struct restart_block restart_block;
......
...@@ -3940,6 +3940,7 @@ netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb) ...@@ -3940,6 +3940,7 @@ netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm)); return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
} }
#ifndef CONFIG_PREEMPT_RT
static bool netdev_xmit_txqueue_skipped(void) static bool netdev_xmit_txqueue_skipped(void)
{ {
return __this_cpu_read(softnet_data.xmit.skip_txqueue); return __this_cpu_read(softnet_data.xmit.skip_txqueue);
...@@ -3950,6 +3951,19 @@ void netdev_xmit_skip_txqueue(bool skip) ...@@ -3950,6 +3951,19 @@ void netdev_xmit_skip_txqueue(bool skip)
__this_cpu_write(softnet_data.xmit.skip_txqueue, skip); __this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
} }
EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue); EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
#else
static bool netdev_xmit_txqueue_skipped(void)
{
return current->net_xmit.skip_txqueue;
}
void netdev_xmit_skip_txqueue(bool skip)
{
current->net_xmit.skip_txqueue = skip;
}
EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
#endif
#endif /* CONFIG_NET_EGRESS */ #endif /* CONFIG_NET_EGRESS */
#ifdef CONFIG_NET_XGRESS #ifdef CONFIG_NET_XGRESS
......
...@@ -150,6 +150,8 @@ struct napi_struct *napi_by_id(unsigned int napi_id); ...@@ -150,6 +150,8 @@ struct napi_struct *napi_by_id(unsigned int napi_id);
void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu); void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
#define XMIT_RECURSION_LIMIT 8 #define XMIT_RECURSION_LIMIT 8
#ifndef CONFIG_PREEMPT_RT
static inline bool dev_xmit_recursion(void) static inline bool dev_xmit_recursion(void)
{ {
return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
...@@ -165,6 +167,22 @@ static inline void dev_xmit_recursion_dec(void) ...@@ -165,6 +167,22 @@ static inline void dev_xmit_recursion_dec(void)
{ {
__this_cpu_dec(softnet_data.xmit.recursion); __this_cpu_dec(softnet_data.xmit.recursion);
} }
#else
/*
 * PREEMPT_RT variants: softirq is preemptible, so the recursion counter
 * is kept per-task (current->net_xmit) rather than per-CPU, where
 * concurrent preempted senders could corrupt a shared count and trip a
 * false recursion alert.
 */
static inline bool dev_xmit_recursion(void)
{
	/* True once this task has nested past XMIT_RECURSION_LIMIT. */
	return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
}

static inline void dev_xmit_recursion_inc(void)
{
	/* Only current touches its own counter — no atomics needed. */
	current->net_xmit.recursion++;
}

static inline void dev_xmit_recursion_dec(void)
{
	current->net_xmit.recursion--;
}
#endif
int dev_set_hwtstamp_phylib(struct net_device *dev, int dev_set_hwtstamp_phylib(struct net_device *dev,
struct kernel_hwtstamp_config *cfg, struct kernel_hwtstamp_config *cfg,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment