Commit ee41d11d authored by Ian Munsie, committed by Michael Ellerman

cxl: Change contexts_lock to a mutex to fix sleep while atomic bug

We had a known sleep while atomic bug if a CXL device was forcefully
unbound while it was in use. This could occur as a result of EEH, or
manually induced with something like this while the device was in use:

echo 0000:01:00.0 > /sys/bus/pci/drivers/cxl-pci/unbind

The issue was that in this code path we iterated over each context and
forcefully detached it with the contexts_lock spin lock held, however
the detach also needed to take the spu_mutex, and call schedule.

This patch changes the contexts_lock to a mutex so that we are not in
atomic context while doing the detach, thereby avoiding the sleep while
atomic.

Also delete the related TODO comment, which suggested an alternate
solution which turned out to not be workable.

Cc: stable@vger.kernel.org
Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 7c5c92ed
...@@ -82,12 +82,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master) ...@@ -82,12 +82,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
* Allocating IDR! We better make sure everything's setup that * Allocating IDR! We better make sure everything's setup that
* dereferences from it. * dereferences from it.
*/ */
mutex_lock(&afu->contexts_lock);
idr_preload(GFP_KERNEL); idr_preload(GFP_KERNEL);
spin_lock(&afu->contexts_lock);
i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0, i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
ctx->afu->num_procs, GFP_NOWAIT); ctx->afu->num_procs, GFP_NOWAIT);
spin_unlock(&afu->contexts_lock);
idr_preload_end(); idr_preload_end();
mutex_unlock(&afu->contexts_lock);
if (i < 0) if (i < 0)
return i; return i;
...@@ -168,21 +168,22 @@ void cxl_context_detach_all(struct cxl_afu *afu) ...@@ -168,21 +168,22 @@ void cxl_context_detach_all(struct cxl_afu *afu)
struct cxl_context *ctx; struct cxl_context *ctx;
int tmp; int tmp;
rcu_read_lock(); mutex_lock(&afu->contexts_lock);
idr_for_each_entry(&afu->contexts_idr, ctx, tmp) idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
/* /*
* Anything done in here needs to be setup before the IDR is * Anything done in here needs to be setup before the IDR is
* created and torn down after the IDR removed * created and torn down after the IDR removed
*/ */
__detach_context(ctx); __detach_context(ctx);
rcu_read_unlock(); }
mutex_unlock(&afu->contexts_lock);
} }
void cxl_context_free(struct cxl_context *ctx) void cxl_context_free(struct cxl_context *ctx)
{ {
spin_lock(&ctx->afu->contexts_lock); mutex_lock(&ctx->afu->contexts_lock);
idr_remove(&ctx->afu->contexts_idr, ctx->pe); idr_remove(&ctx->afu->contexts_idr, ctx->pe);
spin_unlock(&ctx->afu->contexts_lock); mutex_unlock(&ctx->afu->contexts_lock);
synchronize_rcu(); synchronize_rcu();
free_page((u64)ctx->sstp); free_page((u64)ctx->sstp);
......
...@@ -351,7 +351,7 @@ struct cxl_afu { ...@@ -351,7 +351,7 @@ struct cxl_afu {
struct device *chardev_s, *chardev_m, *chardev_d; struct device *chardev_s, *chardev_m, *chardev_d;
struct idr contexts_idr; struct idr contexts_idr;
struct dentry *debugfs; struct dentry *debugfs;
spinlock_t contexts_lock; struct mutex contexts_lock;
struct mutex spa_mutex; struct mutex spa_mutex;
spinlock_t afu_cntl_lock; spinlock_t afu_cntl_lock;
......
...@@ -610,13 +610,6 @@ static inline int detach_process_native_dedicated(struct cxl_context *ctx) ...@@ -610,13 +610,6 @@ static inline int detach_process_native_dedicated(struct cxl_context *ctx)
return 0; return 0;
} }
/*
* TODO: handle case when this is called inside a rcu_read_lock() which may
* happen when we unbind the driver (ie. cxl_context_detach_all()) . Terminate
* & remove use a mutex lock and schedule which will not good with lock held.
* May need to write do_process_element_cmd() that handles outstanding page
* faults synchronously.
*/
static inline int detach_process_native_afu_directed(struct cxl_context *ctx) static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{ {
if (!ctx->pe_inserted) if (!ctx->pe_inserted)
......
...@@ -502,7 +502,7 @@ static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice) ...@@ -502,7 +502,7 @@ static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
afu->dev.release = cxl_release_afu; afu->dev.release = cxl_release_afu;
afu->slice = slice; afu->slice = slice;
idr_init(&afu->contexts_idr); idr_init(&afu->contexts_idr);
spin_lock_init(&afu->contexts_lock); mutex_init(&afu->contexts_lock);
spin_lock_init(&afu->afu_cntl_lock); spin_lock_init(&afu->afu_cntl_lock);
mutex_init(&afu->spa_mutex); mutex_init(&afu->spa_mutex);
......
...@@ -121,7 +121,7 @@ static ssize_t reset_store_afu(struct device *device, ...@@ -121,7 +121,7 @@ static ssize_t reset_store_afu(struct device *device,
int rc; int rc;
/* Not safe to reset if it is currently in use */ /* Not safe to reset if it is currently in use */
spin_lock(&afu->contexts_lock); mutex_lock(&afu->contexts_lock);
if (!idr_is_empty(&afu->contexts_idr)) { if (!idr_is_empty(&afu->contexts_idr)) {
rc = -EBUSY; rc = -EBUSY;
goto err; goto err;
...@@ -132,7 +132,7 @@ static ssize_t reset_store_afu(struct device *device, ...@@ -132,7 +132,7 @@ static ssize_t reset_store_afu(struct device *device,
rc = count; rc = count;
err: err:
spin_unlock(&afu->contexts_lock); mutex_unlock(&afu->contexts_lock);
return rc; return rc;
} }
...@@ -247,7 +247,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr, ...@@ -247,7 +247,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
int rc = -EBUSY; int rc = -EBUSY;
/* can't change this if we have a user */ /* can't change this if we have a user */
spin_lock(&afu->contexts_lock); mutex_lock(&afu->contexts_lock);
if (!idr_is_empty(&afu->contexts_idr)) if (!idr_is_empty(&afu->contexts_idr))
goto err; goto err;
...@@ -271,7 +271,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr, ...@@ -271,7 +271,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
afu->current_mode = 0; afu->current_mode = 0;
afu->num_procs = 0; afu->num_procs = 0;
spin_unlock(&afu->contexts_lock); mutex_unlock(&afu->contexts_lock);
if ((rc = _cxl_afu_deactivate_mode(afu, old_mode))) if ((rc = _cxl_afu_deactivate_mode(afu, old_mode)))
return rc; return rc;
...@@ -280,7 +280,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr, ...@@ -280,7 +280,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
return count; return count;
err: err:
spin_unlock(&afu->contexts_lock); mutex_unlock(&afu->contexts_lock);
return rc; return rc;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment