Commit a6cd9570 authored by Andrey Konovalov, committed by Andrew Morton

lib/stackdepot: use read/write lock

Currently, stack depot uses the following locking scheme:

1. Lock-free accesses when looking up a stack record, which allows to
   have multiple users to look up records in parallel;
2. Spinlock for protecting the stack depot pools and the hash table
   when adding a new record.

For implementing the eviction of stack traces from stack depot, the
  lock-free approach is not going to work anymore, as we will need to be
  able to also remove records from the hash table.

Convert the spinlock into a read/write lock, and drop the atomic
  accesses, as they are no longer required.

Looking up stack traces is now protected by the read lock and adding new
  records - by the write lock.  One of the following patches will add a
  new function for evicting stack records, which will be protected by the
  write lock as well.

With this change, multiple users can still look up records in parallel.

This is preparatory patch for implementing the eviction of stack records
  from the stack depot.

Link: https://lkml.kernel.org/r/9f81ffcc4bb422ebb6326a65a770bf1918634cbb.1700502145.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent b29d3188
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#include <linux/percpu.h> #include <linux/percpu.h>
#include <linux/printk.h> #include <linux/printk.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stacktrace.h> #include <linux/stacktrace.h>
#include <linux/stackdepot.h> #include <linux/stackdepot.h>
#include <linux/string.h> #include <linux/string.h>
...@@ -91,15 +92,15 @@ static void *new_pool; ...@@ -91,15 +92,15 @@ static void *new_pool;
static int pools_num; static int pools_num;
/* Next stack in the freelist of stack records within stack_pools. */ /* Next stack in the freelist of stack records within stack_pools. */
static struct stack_record *next_stack; static struct stack_record *next_stack;
/* Lock that protects the variables above. */
static DEFINE_RAW_SPINLOCK(pool_lock);
/* /*
* Stack depot tries to keep an extra pool allocated even before it runs out * Stack depot tries to keep an extra pool allocated even before it runs out
* of space in the currently used pool. This flag marks whether this extra pool * of space in the currently used pool. This flag marks whether this extra pool
* needs to be allocated. It has the value 0 when either an extra pool is not * needs to be allocated. It has the value 0 when either an extra pool is not
* yet allocated or if the limit on the number of pools is reached. * yet allocated or if the limit on the number of pools is reached.
*/ */
static int new_pool_required = 1; static bool new_pool_required = true;
/* Lock that protects the variables above. */
static DEFINE_RWLOCK(pool_rwlock);
static int __init disable_stack_depot(char *str) static int __init disable_stack_depot(char *str)
{ {
...@@ -232,6 +233,8 @@ static void depot_init_pool(void *pool) ...@@ -232,6 +233,8 @@ static void depot_init_pool(void *pool)
const int records_in_pool = DEPOT_POOL_SIZE / DEPOT_STACK_RECORD_SIZE; const int records_in_pool = DEPOT_POOL_SIZE / DEPOT_STACK_RECORD_SIZE;
int i, offset; int i, offset;
lockdep_assert_held_write(&pool_rwlock);
/* Initialize handles and link stack records to each other. */ /* Initialize handles and link stack records to each other. */
for (i = 0, offset = 0; for (i = 0, offset = 0;
offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE; offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE;
...@@ -254,22 +257,17 @@ static void depot_init_pool(void *pool) ...@@ -254,22 +257,17 @@ static void depot_init_pool(void *pool)
/* Save reference to the pool to be used by depot_fetch_stack(). */ /* Save reference to the pool to be used by depot_fetch_stack(). */
stack_pools[pools_num] = pool; stack_pools[pools_num] = pool;
pools_num++;
/*
* WRITE_ONCE() pairs with potential concurrent read in
* depot_fetch_stack().
*/
WRITE_ONCE(pools_num, pools_num + 1);
} }
/* Keeps the preallocated memory to be used for a new stack depot pool. */ /* Keeps the preallocated memory to be used for a new stack depot pool. */
static void depot_keep_new_pool(void **prealloc) static void depot_keep_new_pool(void **prealloc)
{ {
lockdep_assert_held_write(&pool_rwlock);
/* /*
* If a new pool is already saved or the maximum number of * If a new pool is already saved or the maximum number of
* pools is reached, do not use the preallocated memory. * pools is reached, do not use the preallocated memory.
* Access new_pool_required non-atomically, as there are no concurrent
* write accesses to this variable.
*/ */
if (!new_pool_required) if (!new_pool_required)
return; return;
...@@ -287,15 +285,15 @@ static void depot_keep_new_pool(void **prealloc) ...@@ -287,15 +285,15 @@ static void depot_keep_new_pool(void **prealloc)
* At this point, either a new pool is kept or the maximum * At this point, either a new pool is kept or the maximum
* number of pools is reached. In either case, take note that * number of pools is reached. In either case, take note that
* keeping another pool is not required. * keeping another pool is not required.
* smp_store_release() pairs with smp_load_acquire() in
* stack_depot_save().
*/ */
smp_store_release(&new_pool_required, 0); new_pool_required = false;
} }
/* Updates references to the current and the next stack depot pools. */ /* Updates references to the current and the next stack depot pools. */
static bool depot_update_pools(void **prealloc) static bool depot_update_pools(void **prealloc)
{ {
lockdep_assert_held_write(&pool_rwlock);
/* Check if we still have objects in the freelist. */ /* Check if we still have objects in the freelist. */
if (next_stack) if (next_stack)
goto out_keep_prealloc; goto out_keep_prealloc;
...@@ -307,7 +305,7 @@ static bool depot_update_pools(void **prealloc) ...@@ -307,7 +305,7 @@ static bool depot_update_pools(void **prealloc)
/* Take note that we might need a new new_pool. */ /* Take note that we might need a new new_pool. */
if (pools_num < DEPOT_MAX_POOLS) if (pools_num < DEPOT_MAX_POOLS)
smp_store_release(&new_pool_required, 1); new_pool_required = true;
/* Try keeping the preallocated memory for new_pool. */ /* Try keeping the preallocated memory for new_pool. */
goto out_keep_prealloc; goto out_keep_prealloc;
...@@ -341,6 +339,8 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc) ...@@ -341,6 +339,8 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{ {
struct stack_record *stack; struct stack_record *stack;
lockdep_assert_held_write(&pool_rwlock);
/* Update current and new pools if required and possible. */ /* Update current and new pools if required and possible. */
if (!depot_update_pools(prealloc)) if (!depot_update_pools(prealloc))
return NULL; return NULL;
...@@ -376,18 +376,15 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc) ...@@ -376,18 +376,15 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle) static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
{ {
union handle_parts parts = { .handle = handle }; union handle_parts parts = { .handle = handle };
/*
* READ_ONCE() pairs with potential concurrent write in
* depot_init_pool().
*/
int pools_num_cached = READ_ONCE(pools_num);
void *pool; void *pool;
size_t offset = parts.offset << DEPOT_STACK_ALIGN; size_t offset = parts.offset << DEPOT_STACK_ALIGN;
struct stack_record *stack; struct stack_record *stack;
if (parts.pool_index > pools_num_cached) { lockdep_assert_held_read(&pool_rwlock);
if (parts.pool_index > pools_num) {
WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n", WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
parts.pool_index, pools_num_cached, handle); parts.pool_index, pools_num, handle);
return NULL; return NULL;
} }
...@@ -429,6 +426,8 @@ static inline struct stack_record *find_stack(struct stack_record *bucket, ...@@ -429,6 +426,8 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
{ {
struct stack_record *found; struct stack_record *found;
lockdep_assert_held(&pool_rwlock);
for (found = bucket; found; found = found->next) { for (found = bucket; found; found = found->next) {
if (found->hash == hash && if (found->hash == hash &&
found->size == size && found->size == size &&
...@@ -446,6 +445,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries, ...@@ -446,6 +445,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
depot_stack_handle_t handle = 0; depot_stack_handle_t handle = 0;
struct page *page = NULL; struct page *page = NULL;
void *prealloc = NULL; void *prealloc = NULL;
bool need_alloc = false;
unsigned long flags; unsigned long flags;
u32 hash; u32 hash;
...@@ -465,22 +465,26 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries, ...@@ -465,22 +465,26 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
hash = hash_stack(entries, nr_entries); hash = hash_stack(entries, nr_entries);
bucket = &stack_table[hash & stack_hash_mask]; bucket = &stack_table[hash & stack_hash_mask];
/* read_lock_irqsave(&pool_rwlock, flags);
* Fast path: look the stack trace up without locking.
* smp_load_acquire() pairs with smp_store_release() to |bucket| below. /* Fast path: look the stack trace up without full locking. */
*/ found = find_stack(*bucket, entries, nr_entries, hash);
found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash); if (found) {
if (found) read_unlock_irqrestore(&pool_rwlock, flags);
goto exit; goto exit;
}
/* Take note if another stack pool needs to be allocated. */
if (new_pool_required)
need_alloc = true;
read_unlock_irqrestore(&pool_rwlock, flags);
/* /*
* Check if another stack pool needs to be allocated. If so, allocate * Allocate memory for a new pool if required now:
* the memory now: we won't be able to do that under the lock. * we won't be able to do that under the lock.
*
* smp_load_acquire() pairs with smp_store_release() in
* depot_update_pools() and depot_keep_new_pool().
*/ */
if (unlikely(can_alloc && smp_load_acquire(&new_pool_required))) { if (unlikely(can_alloc && need_alloc)) {
/* /*
* Zero out zone modifiers, as we don't have specific zone * Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic * requirements. Keep the flags related to allocation in atomic
...@@ -494,7 +498,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries, ...@@ -494,7 +498,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
prealloc = page_address(page); prealloc = page_address(page);
} }
raw_spin_lock_irqsave(&pool_lock, flags); write_lock_irqsave(&pool_rwlock, flags);
found = find_stack(*bucket, entries, nr_entries, hash); found = find_stack(*bucket, entries, nr_entries, hash);
if (!found) { if (!found) {
...@@ -503,11 +507,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries, ...@@ -503,11 +507,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
if (new) { if (new) {
new->next = *bucket; new->next = *bucket;
/* *bucket = new;
* smp_store_release() pairs with smp_load_acquire()
* from |bucket| above.
*/
smp_store_release(bucket, new);
found = new; found = new;
} }
} else if (prealloc) { } else if (prealloc) {
...@@ -518,7 +518,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries, ...@@ -518,7 +518,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
depot_keep_new_pool(&prealloc); depot_keep_new_pool(&prealloc);
} }
raw_spin_unlock_irqrestore(&pool_lock, flags); write_unlock_irqrestore(&pool_rwlock, flags);
exit: exit:
if (prealloc) { if (prealloc) {
/* Stack depot didn't use this memory, free it. */ /* Stack depot didn't use this memory, free it. */
...@@ -542,6 +542,7 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle, ...@@ -542,6 +542,7 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries) unsigned long **entries)
{ {
struct stack_record *stack; struct stack_record *stack;
unsigned long flags;
*entries = NULL; *entries = NULL;
/* /*
...@@ -553,8 +554,12 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle, ...@@ -553,8 +554,12 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
if (!handle || stack_depot_disabled) if (!handle || stack_depot_disabled)
return 0; return 0;
read_lock_irqsave(&pool_rwlock, flags);
stack = depot_fetch_stack(handle); stack = depot_fetch_stack(handle);
read_unlock_irqrestore(&pool_rwlock, flags);
*entries = stack->entries; *entries = stack->entries;
return stack->size; return stack->size;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment