Commit 788199b5 authored by Eric Dumazet, committed by Luis Henriques

tcp: md5: do not use alloc_percpu()

commit 349ce993 upstream.

percpu tcp_md5sig_pool contains memory blobs that ultimately
go through sg_set_buf().

-> sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));

This requires that the whole area be in a physically contiguous
portion of memory, and that @buf is not backed by vmalloc().

Given that alloc_percpu() can use vmalloc() areas, this does not
fit the requirements.
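
For context on why contiguity matters here: sg_set_buf() converts the
buffer's virtual address into a struct page with virt_to_page(), which
is pure address arithmetic against the kernel's linear mapping. A
simplified sketch of the definitions involved (flat-memory forms; real
kernels pick per-architecture variants, so treat this as illustrative
only):

/* include/linux/scatterlist.h: sg_set_buf() assumes @buf has a struct
 * page that can be found by address arithmetic alone. */
static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
                              unsigned int buflen)
{
        sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
}

/* Simplified flat-memory virt_to_page(): valid only for linear-mapped
 * addresses.  A vmalloc() address lies outside the linear map, so the
 * subtraction yields a bogus pfn and an unrelated struct page. */
#define __pa(x)             ((unsigned long)(x) - PAGE_OFFSET)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)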

Replace alloc_percpu() with a static DEFINE_PER_CPU(): tcp_md5sig_pool
is small anyway, so there is no gain in allocating it dynamically.
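
A minimal side-by-side sketch of the two idioms being swapped (the cpu
variable and error handling elided): the dynamic allocation may resolve
into a vmalloc()-backed percpu chunk, while the static variable lives
in the kernel image's per-CPU area, which is normally covered by the
linear mapping:

/* Before: dynamic per-CPU storage.  The __percpu cookie may point into
 * a vmalloc()-backed chunk, which sg_set_buf()/virt_to_page() cannot
 * handle. */
struct tcp_md5sig_pool __percpu *pool = alloc_percpu(struct tcp_md5sig_pool);
struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);

/* After: static per-CPU storage, emitted into the kernel image's
 * .data..percpu section and normally linear-mapped, hence safe to feed
 * to sg_set_buf(). */
static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
struct tcp_md5sig_pool *p = per_cpu_ptr(&tcp_md5sig_pool, cpu);
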
Signed-off-by: Eric Dumazet <edumazet@google.com>
Fixes: 765cf997 ("tcp: md5: remove one indirection level in tcp_md5sig_pool")
Reported-by: Crestez Dan Leonard <cdleonard@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Luis Henriques <luis.henriques@canonical.com>
parent 766e24b4
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2967,61 +2967,42 @@ EXPORT_SYMBOL(compat_tcp_getsockopt);
 #endif
 
 #ifdef CONFIG_TCP_MD5SIG
-static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
+static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
 static DEFINE_MUTEX(tcp_md5sig_mutex);
-
-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
-{
-        int cpu;
-
-        for_each_possible_cpu(cpu) {
-                struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
-
-                if (p->md5_desc.tfm)
-                        crypto_free_hash(p->md5_desc.tfm);
-        }
-        free_percpu(pool);
-}
+static bool tcp_md5sig_pool_populated = false;
 
 static void __tcp_alloc_md5sig_pool(void)
 {
         int cpu;
-        struct tcp_md5sig_pool __percpu *pool;
-
-        pool = alloc_percpu(struct tcp_md5sig_pool);
-        if (!pool)
-                return;
 
         for_each_possible_cpu(cpu) {
-                struct crypto_hash *hash;
-
-                hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
-                if (IS_ERR_OR_NULL(hash))
-                        goto out_free;
+                if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) {
+                        struct crypto_hash *hash;
 
-                per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
+                        hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+                        if (IS_ERR_OR_NULL(hash))
+                                return;
+                        per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
+                }
         }
-        /* before setting tcp_md5sig_pool, we must commit all writes
-         * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
+        /* before setting tcp_md5sig_pool_populated, we must commit all writes
+         * to memory. See smp_rmb() in tcp_get_md5sig_pool()
          */
         smp_wmb();
-        tcp_md5sig_pool = pool;
-        return;
-out_free:
-        __tcp_free_md5sig_pool(pool);
+        tcp_md5sig_pool_populated = true;
 }
 
 bool tcp_alloc_md5sig_pool(void)
 {
-        if (unlikely(!tcp_md5sig_pool)) {
+        if (unlikely(!tcp_md5sig_pool_populated)) {
                 mutex_lock(&tcp_md5sig_mutex);
 
-                if (!tcp_md5sig_pool)
+                if (!tcp_md5sig_pool_populated)
                         __tcp_alloc_md5sig_pool();
 
                 mutex_unlock(&tcp_md5sig_mutex);
         }
-        return tcp_md5sig_pool != NULL;
+        return tcp_md5sig_pool_populated;
 }
 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
@@ -3035,13 +3016,13 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
  */
 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 {
-        struct tcp_md5sig_pool __percpu *p;
-
         local_bh_disable();
-
-        p = ACCESS_ONCE(tcp_md5sig_pool);
-        if (p)
-                return __this_cpu_ptr(p);
-
+        if (tcp_md5sig_pool_populated) {
+                /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
+                smp_rmb();
+                return this_cpu_ptr(&tcp_md5sig_pool);
+        }
         local_bh_enable();
         return NULL;
 }
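
Beyond the allocation change, the patch preserves a lock-free publish
pattern worth spelling out: the initializer fills every per-CPU pool
under tcp_md5sig_mutex, issues smp_wmb(), and only then sets
tcp_md5sig_pool_populated; readers that see the flag pair it with
smp_rmb() before dereferencing the pools. Below is a self-contained
userspace analogy, a sketch using C11 acquire/release atomics in place
of the kernel barriers; all names (pool_alloc, pool_get, populated)
are illustrative, not kernel APIs:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static int pool_data;                  /* stands in for the per-CPU pools */
static atomic_bool populated;          /* ~ tcp_md5sig_pool_populated     */
static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

static bool pool_alloc(void)           /* ~ tcp_alloc_md5sig_pool()       */
{
        if (!atomic_load_explicit(&populated, memory_order_acquire)) {
                pthread_mutex_lock(&init_lock);
                /* re-check under the lock: another thread may have won */
                if (!atomic_load_explicit(&populated, memory_order_relaxed)) {
                        pool_data = 42;        /* one-time expensive setup */
                        /* release store ~ smp_wmb() before setting the flag */
                        atomic_store_explicit(&populated, true,
                                              memory_order_release);
                }
                pthread_mutex_unlock(&init_lock);
        }
        return atomic_load_explicit(&populated, memory_order_acquire);
}

static int *pool_get(void)             /* ~ tcp_get_md5sig_pool()         */
{
        /* acquire load ~ reading the flag, then smp_rmb() */
        if (atomic_load_explicit(&populated, memory_order_acquire))
                return &pool_data;     /* initialized data now visible */
        return NULL;
}

int main(void)
{
        if (pool_alloc())
                printf("pool ready: %d\n", *pool_get());
        return 0;
}

The mutex serializes only initializers; readers never take it, which is
why the barrier pairing, not the lock, is what makes the initialized
pools visible to them.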