Commit cb5dc5b0 authored by Alexei Starovoitov

Merge branch 'bpf: safeguard hashtab locking in NMI context'

Song Liu says:

====================
A LOCKDEP NMI warning highlighted a potential deadlock of hashtab in NMI
context:

[   74.828971] ================================
[   74.828972] WARNING: inconsistent lock state
[   74.828973] 5.9.0-rc8+ #275 Not tainted
[   74.828974] --------------------------------
[   74.828975] inconsistent {INITIAL USE} -> {IN-NMI} usage.
[   74.828976] taskset/1174 [HC2[2]:SC0[0]:HE0:SE1] takes:
[...]
[   74.828999]  Possible unsafe locking scenario:
[   74.828999]
[   74.829000]        CPU0
[   74.829001]        ----
[   74.829001]   lock(&htab->buckets[i].raw_lock);
[   74.829003]   <Interrupt>
[   74.829004]     lock(&htab->buckets[i].raw_lock);

Please refer to patch 1/2 for the full trace.

This warning is a false alert: the "INITIAL USE" and "IN-NMI" usages in the
test come from two different hashtabs. On the other hand, it is in theory
possible to deadlock when a hashtab is accessed from both non-NMI and NMI
context. Patch 1/2 fixes the false alert by assigning a separate lockdep
class to each hashtab. Patch 2/2 introduces map_locked counters, similar to
the bpf_prog_active counter, to avoid hashtab deadlocks in NMI context.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents cf83b2d2 20b6cc34
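Before the diff, a rough userspace sketch of the map_locked idea described in
the cover letter above. This is an illustration only, not the kernel code: the
real implementation below uses per-CPU counters, migrate_disable() and the
bucket's raw spinlock. Here a thread-local counter stands in for the per-CPU
map_locked[hash] counter, a pthread mutex stands in for the bucket lock, and
the names bucket_update(), bucket_lock and map_locked are made up for the
example. A re-entrant call on the same thread (the userspace stand-in for an
NMI hitting the CPU that already holds the bucket lock) sees the counter
already raised and backs off with -EBUSY instead of deadlocking:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the kernel objects: one bucket lock and a per-"CPU"
 * (here: per-thread) nesting counter that guards it.
 */
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static __thread int map_locked;

static int bucket_update(int key, int value)
{
    /* Counterpart of __this_cpu_inc_return(): if the counter is already
     * raised we are re-entering the critical section on the same "CPU",
     * so back off instead of deadlocking on bucket_lock.
     */
    if (++map_locked != 1) {
        --map_locked;
        return -EBUSY;
    }

    pthread_mutex_lock(&bucket_lock);
    printf("update key %d -> %d\n", key, value); /* modify the bucket */
    pthread_mutex_unlock(&bucket_lock);

    --map_locked;
    return 0;
}

int main(void)
{
    return bucket_update(1, 42) ? 1 : 0;
}

In the kernel patch the same effect comes from migrate_disable() plus the
per-CPU counter, so the guard is per CPU rather than per thread, and the
re-entrant (NMI) path simply returns -EBUSY from htab_lock_bucket().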
kernel/bpf/hashtab.c

@@ -86,6 +86,9 @@ struct bucket {
     };
 };
 
+#define HASHTAB_MAP_LOCK_COUNT 8
+#define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
+
 struct bpf_htab {
     struct bpf_map map;
     struct bucket *buckets;
@@ -99,6 +102,8 @@ struct bpf_htab {
     u32 n_buckets;    /* number of hash buckets */
     u32 elem_size;    /* size of each element in bytes */
     u32 hashrnd;
+    struct lock_class_key lockdep_key;
+    int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
 };
 
 /* each htab element is struct htab_elem + key + value */
@@ -136,35 +141,56 @@ static void htab_init_buckets(struct bpf_htab *htab)
 {
     unsigned i;
 
+    lockdep_register_key(&htab->lockdep_key);
     for (i = 0; i < htab->n_buckets; i++) {
         INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
-        if (htab_use_raw_lock(htab))
+        if (htab_use_raw_lock(htab)) {
             raw_spin_lock_init(&htab->buckets[i].raw_lock);
-        else
+            lockdep_set_class(&htab->buckets[i].raw_lock,
+                              &htab->lockdep_key);
+        } else {
             spin_lock_init(&htab->buckets[i].lock);
+            lockdep_set_class(&htab->buckets[i].lock,
+                              &htab->lockdep_key);
+        }
     }
 }
 
-static inline unsigned long htab_lock_bucket(const struct bpf_htab *htab,
-                                             struct bucket *b)
+static inline int htab_lock_bucket(const struct bpf_htab *htab,
+                                   struct bucket *b, u32 hash,
+                                   unsigned long *pflags)
 {
     unsigned long flags;
 
+    hash = hash & HASHTAB_MAP_LOCK_MASK;
+
+    migrate_disable();
+    if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
+        __this_cpu_dec(*(htab->map_locked[hash]));
+        migrate_enable();
+        return -EBUSY;
+    }
+
     if (htab_use_raw_lock(htab))
         raw_spin_lock_irqsave(&b->raw_lock, flags);
     else
         spin_lock_irqsave(&b->lock, flags);
-    return flags;
+    *pflags = flags;
+
+    return 0;
 }
 
 static inline void htab_unlock_bucket(const struct bpf_htab *htab,
-                                      struct bucket *b,
+                                      struct bucket *b, u32 hash,
                                       unsigned long flags)
 {
+    hash = hash & HASHTAB_MAP_LOCK_MASK;
     if (htab_use_raw_lock(htab))
         raw_spin_unlock_irqrestore(&b->raw_lock, flags);
     else
         spin_unlock_irqrestore(&b->lock, flags);
+    __this_cpu_dec(*(htab->map_locked[hash]));
+    migrate_enable();
 }
 
 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
@@ -422,8 +448,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
     bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
     bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
     struct bpf_htab *htab;
+    int err, i;
     u64 cost;
-    int err;
 
     htab = kzalloc(sizeof(*htab), GFP_USER);
     if (!htab)
@@ -480,6 +506,13 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
     if (!htab->buckets)
         goto free_charge;
 
+    for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
+        htab->map_locked[i] = __alloc_percpu_gfp(sizeof(int),
+                                                 sizeof(int), GFP_USER);
+        if (!htab->map_locked[i])
+            goto free_map_locked;
+    }
+
     if (htab->map.map_flags & BPF_F_ZERO_SEED)
         htab->hashrnd = 0;
     else
@@ -490,7 +523,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
     if (prealloc) {
         err = prealloc_init(htab);
         if (err)
-            goto free_buckets;
+            goto free_map_locked;
 
         if (!percpu && !lru) {
             /* lru itself can remove the least used element, so
@@ -506,7 +539,9 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 
 free_prealloc:
     prealloc_destroy(htab);
-free_buckets:
+free_map_locked:
+    for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
+        free_percpu(htab->map_locked[i]);
     bpf_map_area_free(htab->buckets);
 free_charge:
     bpf_map_charge_finish(&htab->map.memory);
@@ -687,12 +722,15 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
     struct hlist_nulls_node *n;
     unsigned long flags;
     struct bucket *b;
+    int ret;
 
     tgt_l = container_of(node, struct htab_elem, lru_node);
     b = __select_bucket(htab, tgt_l->hash);
     head = &b->head;
 
-    flags = htab_lock_bucket(htab, b);
+    ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
+    if (ret)
+        return false;
 
     hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
         if (l == tgt_l) {
@@ -700,7 +738,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
             break;
         }
 
-    htab_unlock_bucket(htab, b, flags);
+    htab_unlock_bucket(htab, b, tgt_l->hash, flags);
 
     return l == tgt_l;
 }
@@ -972,7 +1010,9 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
          */
     }
 
-    flags = htab_lock_bucket(htab, b);
+    ret = htab_lock_bucket(htab, b, hash, &flags);
+    if (ret)
+        return ret;
 
     l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1013,7 +1053,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
     }
     ret = 0;
 err:
-    htab_unlock_bucket(htab, b, flags);
+    htab_unlock_bucket(htab, b, hash, flags);
     return ret;
 }
 
@@ -1051,7 +1091,9 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
         return -ENOMEM;
     memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);
 
-    flags = htab_lock_bucket(htab, b);
+    ret = htab_lock_bucket(htab, b, hash, &flags);
+    if (ret)
+        return ret;
 
     l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1070,7 +1112,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
     ret = 0;
 
 err:
-    htab_unlock_bucket(htab, b, flags);
+    htab_unlock_bucket(htab, b, hash, flags);
 
     if (ret)
         bpf_lru_push_free(&htab->lru, &l_new->lru_node);
@@ -1105,7 +1147,9 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
     b = __select_bucket(htab, hash);
     head = &b->head;
 
-    flags = htab_lock_bucket(htab, b);
+    ret = htab_lock_bucket(htab, b, hash, &flags);
+    if (ret)
+        return ret;
 
     l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1128,7 +1172,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
     }
     ret = 0;
 err:
-    htab_unlock_bucket(htab, b, flags);
+    htab_unlock_bucket(htab, b, hash, flags);
     return ret;
 }
 
@@ -1168,7 +1212,9 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
         return -ENOMEM;
     }
 
-    flags = htab_lock_bucket(htab, b);
+    ret = htab_lock_bucket(htab, b, hash, &flags);
+    if (ret)
+        return ret;
 
     l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1190,7 +1236,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
     }
     ret = 0;
 err:
-    htab_unlock_bucket(htab, b, flags);
+    htab_unlock_bucket(htab, b, hash, flags);
     if (l_new)
         bpf_lru_push_free(&htab->lru, &l_new->lru_node);
     return ret;
@@ -1218,7 +1264,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
     struct htab_elem *l;
     unsigned long flags;
     u32 hash, key_size;
-    int ret = -ENOENT;
+    int ret;
 
     WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
 
@@ -1228,17 +1274,20 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
     b = __select_bucket(htab, hash);
     head = &b->head;
 
-    flags = htab_lock_bucket(htab, b);
+    ret = htab_lock_bucket(htab, b, hash, &flags);
+    if (ret)
+        return ret;
 
     l = lookup_elem_raw(head, hash, key, key_size);
 
     if (l) {
         hlist_nulls_del_rcu(&l->hash_node);
         free_htab_elem(htab, l);
-        ret = 0;
+    } else {
+        ret = -ENOENT;
     }
 
-    htab_unlock_bucket(htab, b, flags);
+    htab_unlock_bucket(htab, b, hash, flags);
     return ret;
 }
 
@@ -1250,7 +1299,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
     struct htab_elem *l;
     unsigned long flags;
     u32 hash, key_size;
-    int ret = -ENOENT;
+    int ret;
 
     WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
 
@@ -1260,16 +1309,18 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
     b = __select_bucket(htab, hash);
     head = &b->head;
 
-    flags = htab_lock_bucket(htab, b);
+    ret = htab_lock_bucket(htab, b, hash, &flags);
+    if (ret)
+        return ret;
 
     l = lookup_elem_raw(head, hash, key, key_size);
 
-    if (l) {
+    if (l)
         hlist_nulls_del_rcu(&l->hash_node);
-        ret = 0;
-    }
+    else
+        ret = -ENOENT;
 
-    htab_unlock_bucket(htab, b, flags);
+    htab_unlock_bucket(htab, b, hash, flags);
     if (l)
         bpf_lru_push_free(&htab->lru, &l->lru_node);
     return ret;
@@ -1295,6 +1346,7 @@ static void delete_all_elements(struct bpf_htab *htab)
 static void htab_map_free(struct bpf_map *map)
 {
     struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+    int i;
 
     /* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
      * bpf_free_used_maps() is called after bpf prog is no longer executing.
@@ -1312,6 +1364,9 @@ static void htab_map_free(struct bpf_map *map)
 
     free_percpu(htab->extra_elems);
     bpf_map_area_free(htab->buckets);
+    lockdep_unregister_key(&htab->lockdep_key);
+    for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
+        free_percpu(htab->map_locked[i]);
     kfree(htab);
 }
 
@@ -1415,8 +1470,11 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
     b = &htab->buckets[batch];
     head = &b->head;
     /* do not grab the lock unless need it (bucket_cnt > 0). */
-    if (locked)
-        flags = htab_lock_bucket(htab, b);
+    if (locked) {
+        ret = htab_lock_bucket(htab, b, batch, &flags);
+        if (ret)
+            goto next_batch;
+    }
 
     bucket_cnt = 0;
     hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
@@ -1433,7 +1491,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
         /* Note that since bucket_cnt > 0 here, it is implicit
          * that the locked was grabbed, so release it.
          */
-        htab_unlock_bucket(htab, b, flags);
+        htab_unlock_bucket(htab, b, batch, flags);
         rcu_read_unlock();
         bpf_enable_instrumentation();
         goto after_loop;
@@ -1444,7 +1502,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
         /* Note that since bucket_cnt > 0 here, it is implicit
          * that the locked was grabbed, so release it.
          */
-        htab_unlock_bucket(htab, b, flags);
+        htab_unlock_bucket(htab, b, batch, flags);
         rcu_read_unlock();
         bpf_enable_instrumentation();
         kvfree(keys);
@@ -1497,7 +1555,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
         dst_val += value_size;
     }
 
-    htab_unlock_bucket(htab, b, flags);
+    htab_unlock_bucket(htab, b, batch, flags);
     locked = false;
 
     while (node_to_free) {