Commit 00ba0215 authored by Ian Munsie, committed by Greg Kroah-Hartman

cxl: Unmap MMIO regions when detaching a context

commit b123429e upstream.

If we need to force detach a context (e.g. due to EEH or simply force
unbinding the driver), we should prevent the userspace contexts from
any further access to the Problem State Area MMIO region, which they
may have mapped with mmap().

This patch unmaps any mapped MMIO regions when detaching a userspace
context.
Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 3db65dba
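
For readers unfamiliar with the technique, here is a minimal sketch of
the pattern this change applies, written against a hypothetical "foo"
character driver; everything except unmap_mapping_range(),
inode->i_mapping and the mutex API is invented for illustration, and
allocation and error handling are elided:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mutex.h>

struct foo_context {
	struct address_space *mapping;	/* i_mapping of the open file */
	struct mutex mapping_lock;	/* protects ->mapping */
};

/* open(): remember where userspace mmap()s of this file will live */
static void foo_context_init(struct foo_context *ctx, struct inode *inode)
{
	mutex_init(&ctx->mapping_lock);
	ctx->mapping = inode->i_mapping;
}

/* force detach (EEH, driver unbind): revoke every existing mmap() */
static void foo_force_detach(struct foo_context *ctx)
{
	mutex_lock(&ctx->mapping_lock);
	if (ctx->mapping)
		/* start 0, len 0 => whole file; 1 => zap COW copies too */
		unmap_mapping_range(ctx->mapping, 0, 0, 1);
	mutex_unlock(&ctx->mapping_lock);
}

/* release(): the inode may go away, so stop referring to its mapping */
static void foo_context_release(struct foo_context *ctx)
{
	mutex_lock(&ctx->mapping_lock);
	ctx->mapping = NULL;
	mutex_unlock(&ctx->mapping_lock);
}

foo_context_init(), foo_force_detach() and foo_context_release()
correspond to cxl_context_init(), __detach_context() and afu_release()
in the hunks below.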
@@ -34,7 +34,8 @@ struct cxl_context *cxl_context_alloc(void)
 /*
  * Initialises a CXL context.
  */
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
+		     struct address_space *mapping)
 {
 	int i;
@@ -42,6 +43,8 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
 	ctx->afu = afu;
 	ctx->master = master;
 	ctx->pid = NULL; /* Set in start work ioctl */
+	mutex_init(&ctx->mapping_lock);
+	ctx->mapping = mapping;

 	/*
 	 * Allocate the segment table before we put it in the IDR so that we
@@ -147,6 +150,12 @@ static void __detach_context(struct cxl_context *ctx)
 	afu_release_irqs(ctx);
 	flush_work(&ctx->fault_work); /* Only needed for dedicated process */
 	wake_up_all(&ctx->wq);
+
+	/* Release Problem State Area mapping */
+	mutex_lock(&ctx->mapping_lock);
+	if (ctx->mapping)
+		unmap_mapping_range(ctx->mapping, 0, 0, 1);
+	mutex_unlock(&ctx->mapping_lock);
 }

 /*
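A note on the unmap_mapping_range() call added above: holebegin = 0
with holelen = 0 means "from offset 0 to the end of the file", and
even_cows = 1 also zaps private copy-on-write copies, so every PTE
userspace holds on the Problem State Area is torn down in one call.
Any later access takes a fresh page fault, which the driver is free to
refuse.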
@@ -390,6 +390,10 @@ struct cxl_context {
 	phys_addr_t psn_phys;
 	u64 psn_size;

+	/* Used to unmap any mmaps when force detaching */
+	struct address_space *mapping;
+	struct mutex mapping_lock;
+
 	spinlock_t sste_lock; /* Protects segment table entries */
 	struct cxl_sste *sstp;
 	u64 sstp0, sstp1;
@@ -592,7 +596,8 @@ int cxl_alloc_sst(struct cxl_context *ctx);
 void init_cxl_native(void);

 struct cxl_context *cxl_context_alloc(void);
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master);
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
+		     struct address_space *mapping);
 void cxl_context_free(struct cxl_context *ctx);
 int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);
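These two fields in struct cxl_context carry all the state the pattern
needs: ->mapping caches the inode's address_space so the detach path
can reach it without holding a struct file, and ->mapping_lock
serialises that path against afu_release() clearing the pointer. It is
a mutex rather than a spinlock, presumably because
unmap_mapping_range() may sleep.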
@@ -77,7 +77,7 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
 		goto err_put_afu;
 	}

-	if ((rc = cxl_context_init(ctx, afu, master)))
+	if ((rc = cxl_context_init(ctx, afu, master, inode->i_mapping)))
 		goto err_put_afu;

 	pr_devel("afu_open pe: %i\n", ctx->pe);
@@ -113,6 +113,10 @@ static int afu_release(struct inode *inode, struct file *file)
 		 __func__, ctx->pe);
 	cxl_context_detach(ctx);

+	mutex_lock(&ctx->mapping_lock);
+	ctx->mapping = NULL;
+	mutex_unlock(&ctx->mapping_lock);
+
 	put_device(&ctx->afu->dev);

 	/*
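Clearing ctx->mapping under mapping_lock in afu_release() is what keeps
the forced-detach path safe: after the file is released the
address_space can be freed along with the inode, so a later
__detach_context() (from an EEH event, say) must observe NULL there
rather than a stale pointer.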