Commit 026d1eaf authored by Clark Williams's avatar Clark Williams Committed by Linus Torvalds

mm/kasan/quarantine.c: make quarantine_lock a raw_spinlock_t

The static lock quarantine_lock is used in quarantine.c to protect the
quarantine queue data structures.  It is taken inside quarantine queue
manipulation routines (quarantine_put(), quarantine_reduce() and
quarantine_remove_cache()), with IRQs disabled.  This is not a problem on
a stock kernel but is problematic on an RT kernel where spin locks are
sleeping spinlocks, which can sleep and can not be acquired with disabled
interrupts.

Convert the quarantine_lock to a raw_spinlock_t.  The usage of
quarantine_lock is confined to quarantine.c and the work performed while
the lock is held is used for debug purpose.

[bigeasy@linutronix.de: slightly altered the commit message]
Link: http://lkml.kernel.org/r/20181010214945.5owshc3mlrh74z4b@linutronix.de
Signed-off-by: Clark Williams <williams@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent df06b37f
...@@ -103,7 +103,7 @@ static int quarantine_head; ...@@ -103,7 +103,7 @@ static int quarantine_head;
static int quarantine_tail; static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */ /* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size; static unsigned long quarantine_size;
static DEFINE_SPINLOCK(quarantine_lock); static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu); DEFINE_STATIC_SRCU(remove_cache_srcu);
/* Maximum size of the global queue. */ /* Maximum size of the global queue. */
...@@ -190,7 +190,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache) ...@@ -190,7 +190,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) { if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
qlist_move_all(q, &temp); qlist_move_all(q, &temp);
spin_lock(&quarantine_lock); raw_spin_lock(&quarantine_lock);
WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes); WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
qlist_move_all(&temp, &global_quarantine[quarantine_tail]); qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
if (global_quarantine[quarantine_tail].bytes >= if (global_quarantine[quarantine_tail].bytes >=
...@@ -203,7 +203,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache) ...@@ -203,7 +203,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
if (new_tail != quarantine_head) if (new_tail != quarantine_head)
quarantine_tail = new_tail; quarantine_tail = new_tail;
} }
spin_unlock(&quarantine_lock); raw_spin_unlock(&quarantine_lock);
} }
local_irq_restore(flags); local_irq_restore(flags);
...@@ -230,7 +230,7 @@ void quarantine_reduce(void) ...@@ -230,7 +230,7 @@ void quarantine_reduce(void)
* expected case). * expected case).
*/ */
srcu_idx = srcu_read_lock(&remove_cache_srcu); srcu_idx = srcu_read_lock(&remove_cache_srcu);
spin_lock_irqsave(&quarantine_lock, flags); raw_spin_lock_irqsave(&quarantine_lock, flags);
/* /*
* Update quarantine size in case of hotplug. Allocate a fraction of * Update quarantine size in case of hotplug. Allocate a fraction of
...@@ -254,7 +254,7 @@ void quarantine_reduce(void) ...@@ -254,7 +254,7 @@ void quarantine_reduce(void)
quarantine_head = 0; quarantine_head = 0;
} }
spin_unlock_irqrestore(&quarantine_lock, flags); raw_spin_unlock_irqrestore(&quarantine_lock, flags);
qlist_free_all(&to_free, NULL); qlist_free_all(&to_free, NULL);
srcu_read_unlock(&remove_cache_srcu, srcu_idx); srcu_read_unlock(&remove_cache_srcu, srcu_idx);
...@@ -310,17 +310,17 @@ void quarantine_remove_cache(struct kmem_cache *cache) ...@@ -310,17 +310,17 @@ void quarantine_remove_cache(struct kmem_cache *cache)
*/ */
on_each_cpu(per_cpu_remove_cache, cache, 1); on_each_cpu(per_cpu_remove_cache, cache, 1);
spin_lock_irqsave(&quarantine_lock, flags); raw_spin_lock_irqsave(&quarantine_lock, flags);
for (i = 0; i < QUARANTINE_BATCHES; i++) { for (i = 0; i < QUARANTINE_BATCHES; i++) {
if (qlist_empty(&global_quarantine[i])) if (qlist_empty(&global_quarantine[i]))
continue; continue;
qlist_move_cache(&global_quarantine[i], &to_free, cache); qlist_move_cache(&global_quarantine[i], &to_free, cache);
/* Scanning whole quarantine can take a while. */ /* Scanning whole quarantine can take a while. */
spin_unlock_irqrestore(&quarantine_lock, flags); raw_spin_unlock_irqrestore(&quarantine_lock, flags);
cond_resched(); cond_resched();
spin_lock_irqsave(&quarantine_lock, flags); raw_spin_lock_irqsave(&quarantine_lock, flags);
} }
spin_unlock_irqrestore(&quarantine_lock, flags); raw_spin_unlock_irqrestore(&quarantine_lock, flags);
qlist_free_all(&to_free, cache); qlist_free_all(&to_free, cache);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment