Commit 585aa621 authored by Sebastian Andrzej Siewior, committed by Jakub Kicinski

net/tcp_sigpool: Use nested-BH locking for sigpool_scratch.

sigpool_scratch is a per-CPU variable and relies on disabled BH for its
locking. Without per-CPU locking in local_bh_disable() on PREEMPT_RT
this data structure requires explicit locking.

Make a struct with a pad member (original sigpool_scratch) and a
local_lock_t and use local_lock_nested_bh() for locking. This change
adds only lockdep coverage and does not alter the functional behaviour
for !PREEMPT_RT.

Cc: David Ahern <dsahern@kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://patch.msgid.link/20240620132727.660738-6-bigeasy@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent bdacf3e3
...@@ -10,7 +10,14 @@ ...@@ -10,7 +10,14 @@
#include <net/tcp.h> #include <net/tcp.h>
static size_t __scratch_size; static size_t __scratch_size;
static DEFINE_PER_CPU(void __rcu *, sigpool_scratch); struct sigpool_scratch {
local_lock_t bh_lock;
void __rcu *pad;
};
static DEFINE_PER_CPU(struct sigpool_scratch, sigpool_scratch) = {
.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};
struct sigpool_entry { struct sigpool_entry {
struct crypto_ahash *hash; struct crypto_ahash *hash;
...@@ -72,7 +79,7 @@ static int sigpool_reserve_scratch(size_t size) ...@@ -72,7 +79,7 @@ static int sigpool_reserve_scratch(size_t size)
break; break;
} }
old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch, cpu), old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
scratch, lockdep_is_held(&cpool_mutex)); scratch, lockdep_is_held(&cpool_mutex));
if (!cpu_online(cpu) || !old_scratch) { if (!cpu_online(cpu) || !old_scratch) {
kfree(old_scratch); kfree(old_scratch);
...@@ -93,7 +100,7 @@ static void sigpool_scratch_free(void) ...@@ -93,7 +100,7 @@ static void sigpool_scratch_free(void)
int cpu; int cpu;
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
kfree(rcu_replace_pointer(per_cpu(sigpool_scratch, cpu), kfree(rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
NULL, lockdep_is_held(&cpool_mutex))); NULL, lockdep_is_held(&cpool_mutex)));
__scratch_size = 0; __scratch_size = 0;
} }
...@@ -277,7 +284,8 @@ int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c) __cond_acquires(RC ...@@ -277,7 +284,8 @@ int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c) __cond_acquires(RC
/* Pairs with tcp_sigpool_reserve_scratch(), scratch area is /* Pairs with tcp_sigpool_reserve_scratch(), scratch area is
* valid (allocated) until tcp_sigpool_end(). * valid (allocated) until tcp_sigpool_end().
*/ */
c->scratch = rcu_dereference_bh(*this_cpu_ptr(&sigpool_scratch)); local_lock_nested_bh(&sigpool_scratch.bh_lock);
c->scratch = rcu_dereference_bh(*this_cpu_ptr(&sigpool_scratch.pad));
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(tcp_sigpool_start); EXPORT_SYMBOL_GPL(tcp_sigpool_start);
...@@ -286,6 +294,7 @@ void tcp_sigpool_end(struct tcp_sigpool *c) __releases(RCU_BH) ...@@ -286,6 +294,7 @@ void tcp_sigpool_end(struct tcp_sigpool *c) __releases(RCU_BH)
{ {
struct crypto_ahash *hash = crypto_ahash_reqtfm(c->req); struct crypto_ahash *hash = crypto_ahash_reqtfm(c->req);
local_unlock_nested_bh(&sigpool_scratch.bh_lock);
rcu_read_unlock_bh(); rcu_read_unlock_bh();
ahash_request_free(c->req); ahash_request_free(c->req);
crypto_free_ahash(hash); crypto_free_ahash(hash);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.