Commit 07fd5515 authored by Steven Rostedt, committed by Steven Rostedt

ftrace: Free hash with call_rcu_sched()

When a hash is modified and might be in use, we need to perform
a schedule RCU operation on it, as the hashes will soon be used
directly in the function tracer callback.

Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 2b499381
...@@ -913,6 +913,7 @@ struct ftrace_hash { ...@@ -913,6 +913,7 @@ struct ftrace_hash {
unsigned long size_bits; unsigned long size_bits;
struct hlist_head *buckets; struct hlist_head *buckets;
unsigned long count; unsigned long count;
struct rcu_head rcu;
}; };
/* /*
...@@ -1058,6 +1059,21 @@ static void free_ftrace_hash(struct ftrace_hash *hash) ...@@ -1058,6 +1059,21 @@ static void free_ftrace_hash(struct ftrace_hash *hash)
kfree(hash); kfree(hash);
} }
/*
 * RCU callback: runs after a grace period has elapsed, when no
 * reader can still hold a reference to the hash.
 */
static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	/* The rcu_head is embedded in the hash; recover the container and free it. */
	free_ftrace_hash(container_of(rcu, struct ftrace_hash, rcu));
}
/*
 * Schedule an RCU-deferred free of @hash. A NULL hash is a no-op,
 * and the shared EMPTY_HASH placeholder must never be freed.
 */
static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (hash && hash != EMPTY_HASH)
		call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}
static struct ftrace_hash *alloc_ftrace_hash(int size_bits) static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{ {
struct ftrace_hash *hash; struct ftrace_hash *hash;
...@@ -1122,7 +1138,8 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src) ...@@ -1122,7 +1138,8 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
struct ftrace_func_entry *entry; struct ftrace_func_entry *entry;
struct hlist_node *tp, *tn; struct hlist_node *tp, *tn;
struct hlist_head *hhd; struct hlist_head *hhd;
struct ftrace_hash *hash = *dst; struct ftrace_hash *old_hash;
struct ftrace_hash *new_hash;
unsigned long key; unsigned long key;
int size = src->count; int size = src->count;
int bits = 0; int bits = 0;
...@@ -1133,13 +1150,11 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src) ...@@ -1133,13 +1150,11 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
* the empty_hash. * the empty_hash.
*/ */
if (!src->count) { if (!src->count) {
free_ftrace_hash(*dst); free_ftrace_hash_rcu(*dst);
*dst = EMPTY_HASH; rcu_assign_pointer(*dst, EMPTY_HASH);
return 0; return 0;
} }
ftrace_hash_clear(hash);
/* /*
* Make the hash size about 1/2 the # found * Make the hash size about 1/2 the # found
*/ */
...@@ -1150,27 +1165,9 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src) ...@@ -1150,27 +1165,9 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
if (bits > FTRACE_HASH_MAX_BITS) if (bits > FTRACE_HASH_MAX_BITS)
bits = FTRACE_HASH_MAX_BITS; bits = FTRACE_HASH_MAX_BITS;
/* We can't modify the empty_hash */ new_hash = alloc_ftrace_hash(bits);
if (hash == EMPTY_HASH) { if (!new_hash)
/* Create a new hash */ return -ENOMEM;
*dst = alloc_ftrace_hash(bits);
if (!*dst) {
*dst = EMPTY_HASH;
return -ENOMEM;
}
hash = *dst;
} else {
size = 1 << bits;
/* Use the old hash, but create new buckets */
hhd = kzalloc(sizeof(*hhd) * size, GFP_KERNEL);
if (!hhd)
return -ENOMEM;
kfree(hash->buckets);
hash->buckets = hhd;
hash->size_bits = bits;
}
size = 1 << src->size_bits; size = 1 << src->size_bits;
for (i = 0; i < size; i++) { for (i = 0; i < size; i++) {
...@@ -1181,10 +1178,14 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src) ...@@ -1181,10 +1178,14 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
else else
key = 0; key = 0;
remove_hash_entry(src, entry); remove_hash_entry(src, entry);
__add_hash_entry(hash, entry); __add_hash_entry(new_hash, entry);
} }
} }
old_hash = *dst;
rcu_assign_pointer(*dst, new_hash);
free_ftrace_hash_rcu(old_hash);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment