Commit 37056719 authored by Alexander Duyck, committed by David S. Miller

net: Track start of busy loop instead of when it should end

This patch flips the logic we were using to determine if the busy polling
has timed out.  The main motivation for this is that we will need to
support two different possible timeout values in the future and by
recording the start time rather than when we would want to end we can focus
on making the end_time specific to the task be it epoll or socket based
polling.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2b5cd0df
...@@ -409,7 +409,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time) ...@@ -409,7 +409,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
int retval, i, timed_out = 0; int retval, i, timed_out = 0;
u64 slack = 0; u64 slack = 0;
unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0; unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
unsigned long busy_end = 0; unsigned long busy_start = 0;
rcu_read_lock(); rcu_read_lock();
retval = max_select_fd(n, fds); retval = max_select_fd(n, fds);
...@@ -512,11 +512,11 @@ int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time) ...@@ -512,11 +512,11 @@ int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
/* only if found POLL_BUSY_LOOP sockets && not out of time */ /* only if found POLL_BUSY_LOOP sockets && not out of time */
if (can_busy_loop && !need_resched()) { if (can_busy_loop && !need_resched()) {
if (!busy_end) { if (!busy_start) {
busy_end = busy_loop_end_time(); busy_start = busy_loop_current_time();
continue; continue;
} }
if (!busy_loop_timeout(busy_end)) if (!busy_loop_timeout(busy_start))
continue; continue;
} }
busy_flag = 0; busy_flag = 0;
...@@ -800,7 +800,7 @@ static int do_poll(struct poll_list *list, struct poll_wqueues *wait, ...@@ -800,7 +800,7 @@ static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
int timed_out = 0, count = 0; int timed_out = 0, count = 0;
u64 slack = 0; u64 slack = 0;
unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0; unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
unsigned long busy_end = 0; unsigned long busy_start = 0;
/* Optimise the no-wait case */ /* Optimise the no-wait case */
if (end_time && !end_time->tv_sec && !end_time->tv_nsec) { if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
...@@ -853,11 +853,11 @@ static int do_poll(struct poll_list *list, struct poll_wqueues *wait, ...@@ -853,11 +853,11 @@ static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
/* only if found POLL_BUSY_LOOP sockets && not out of time */ /* only if found POLL_BUSY_LOOP sockets && not out of time */
if (can_busy_loop && !need_resched()) { if (can_busy_loop && !need_resched()) {
if (!busy_end) { if (!busy_start) {
busy_end = busy_loop_end_time(); busy_start = busy_loop_current_time();
continue; continue;
} }
if (!busy_loop_timeout(busy_end)) if (!busy_loop_timeout(busy_start))
continue; continue;
} }
busy_flag = 0; busy_flag = 0;
......
...@@ -46,62 +46,70 @@ static inline bool net_busy_loop_on(void) ...@@ -46,62 +46,70 @@ static inline bool net_busy_loop_on(void)
return sysctl_net_busy_poll; return sysctl_net_busy_poll;
} }
static inline u64 busy_loop_us_clock(void) static inline bool sk_can_busy_loop(const struct sock *sk)
{ {
return local_clock() >> 10; return sk->sk_ll_usec && !signal_pending(current);
} }
static inline unsigned long sk_busy_loop_end_time(struct sock *sk) void sk_busy_loop(struct sock *sk, int nonblock);
{
return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec);
}
/* in poll/select we use the global sysctl_net_ll_poll value */ #else /* CONFIG_NET_RX_BUSY_POLL */
static inline unsigned long busy_loop_end_time(void) static inline unsigned long net_busy_loop_on(void)
{ {
return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_busy_poll); return 0;
} }
static inline bool sk_can_busy_loop(const struct sock *sk) static inline bool sk_can_busy_loop(struct sock *sk)
{ {
return sk->sk_ll_usec && !signal_pending(current); return false;
} }
static inline bool busy_loop_timeout(unsigned long end_time) static inline void sk_busy_loop(struct sock *sk, int nonblock)
{ {
unsigned long now = busy_loop_us_clock();
return time_after(now, end_time);
} }
void sk_busy_loop(struct sock *sk, int nonblock); #endif /* CONFIG_NET_RX_BUSY_POLL */
#else /* CONFIG_NET_RX_BUSY_POLL */ static inline unsigned long busy_loop_current_time(void)
static inline unsigned long net_busy_loop_on(void)
{ {
#ifdef CONFIG_NET_RX_BUSY_POLL
return (unsigned long)(local_clock() >> 10);
#else
return 0; return 0;
#endif
} }
static inline unsigned long busy_loop_end_time(void) /* in poll/select we use the global sysctl_net_ll_poll value */
static inline bool busy_loop_timeout(unsigned long start_time)
{ {
return 0; #ifdef CONFIG_NET_RX_BUSY_POLL
} unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);
static inline bool sk_can_busy_loop(struct sock *sk) if (bp_usec) {
{ unsigned long end_time = start_time + bp_usec;
return false; unsigned long now = busy_loop_current_time();
}
static inline bool busy_loop_timeout(unsigned long end_time) return time_after(now, end_time);
{ }
#endif
return true; return true;
} }
static inline void sk_busy_loop(struct sock *sk, int nonblock) static inline bool sk_busy_loop_timeout(struct sock *sk,
unsigned long start_time)
{ {
} #ifdef CONFIG_NET_RX_BUSY_POLL
unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);
#endif /* CONFIG_NET_RX_BUSY_POLL */ if (bp_usec) {
unsigned long end_time = start_time + bp_usec;
unsigned long now = busy_loop_current_time();
return time_after(now, end_time);
}
#endif
return true;
}
/* used in the NIC receive handler to mark the skb */ /* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb, static inline void skb_mark_napi_id(struct sk_buff *skb,
......
...@@ -5062,7 +5062,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock) ...@@ -5062,7 +5062,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
void sk_busy_loop(struct sock *sk, int nonblock) void sk_busy_loop(struct sock *sk, int nonblock)
{ {
unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0; unsigned long start_time = nonblock ? 0 : busy_loop_current_time();
int (*napi_poll)(struct napi_struct *napi, int budget); int (*napi_poll)(struct napi_struct *napi, int budget);
void *have_poll_lock = NULL; void *have_poll_lock = NULL;
struct napi_struct *napi; struct napi_struct *napi;
...@@ -5111,7 +5111,7 @@ void sk_busy_loop(struct sock *sk, int nonblock) ...@@ -5111,7 +5111,7 @@ void sk_busy_loop(struct sock *sk, int nonblock)
local_bh_enable(); local_bh_enable();
if (nonblock || !skb_queue_empty(&sk->sk_receive_queue) || if (nonblock || !skb_queue_empty(&sk->sk_receive_queue) ||
busy_loop_timeout(end_time)) sk_busy_loop_timeout(sk, start_time))
break; break;
if (unlikely(need_resched())) { if (unlikely(need_resched())) {
...@@ -5121,7 +5121,7 @@ void sk_busy_loop(struct sock *sk, int nonblock) ...@@ -5121,7 +5121,7 @@ void sk_busy_loop(struct sock *sk, int nonblock)
rcu_read_unlock(); rcu_read_unlock();
cond_resched(); cond_resched();
if (!skb_queue_empty(&sk->sk_receive_queue) || if (!skb_queue_empty(&sk->sk_receive_queue) ||
busy_loop_timeout(end_time)) sk_busy_loop_timeout(sk, start_time))
return; return;
goto restart; goto restart;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment