Commit 25b8e08e authored by Matthew R. Ochs, committed by Martin K. Petersen

scsi: cxlflash: Staging to support future accelerators

As staging to support future accelerator transports, add a shim layer
such that the underlying services the cxlflash driver requires can be
conditional upon the accelerator infrastructure.
Signed-off-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
Signed-off-by: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 0df69c60
obj-$(CONFIG_CXLFLASH) += cxlflash.o
cxlflash-y += main.o superpipe.o lunmgt.o vlun.o cxl_hw.o
/*
* CXL Flash Device Driver
*
* Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
* Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
*
* Copyright (C) 2018 IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/*
 * Operations vector that a backend accelerator transport must provide.
 * Generic void * cookies are used in place of backend-specific context
 * and AFU object pointers so the core driver stays transport-agnostic.
 */
struct cxlflash_backend_ops {
	struct module *module;	/* Backing module, for reference counting */
	void __iomem * (*psa_map)(void *ctx_cookie);
	void (*psa_unmap)(void __iomem *addr);
	int (*process_element)(void *ctx_cookie);
	int (*map_afu_irq)(void *ctx_cookie, int num, irq_handler_t handler,
			   void *cookie, char *name);
	void (*unmap_afu_irq)(void *ctx_cookie, int num, void *cookie);
	int (*start_context)(void *ctx_cookie);
	int (*stop_context)(void *ctx_cookie);
	int (*afu_reset)(void *ctx_cookie);
	void (*set_master)(void *ctx_cookie);
	void * (*get_context)(struct pci_dev *dev, void *afu_cookie);
	void * (*dev_context_init)(struct pci_dev *dev, void *afu_cookie);
	int (*release_context)(void *ctx_cookie);
	void (*perst_reloads_same_image)(void *afu_cookie, bool image);
	ssize_t (*read_adapter_vpd)(struct pci_dev *dev, void *buf,
				    size_t count);
	int (*allocate_afu_irqs)(void *ctx_cookie, int num);
	void (*free_afu_irqs)(void *ctx_cookie);
	void * (*create_afu)(struct pci_dev *dev);
	struct file * (*get_fd)(void *ctx_cookie, struct file_operations *fops,
				int *fd);
	void * (*fops_get_context)(struct file *file);
	int (*start_work)(void *ctx_cookie, u64 irqs);
	int (*fd_mmap)(struct file *file, struct vm_area_struct *vm);
	int (*fd_release)(struct inode *inode, struct file *file);
};

/* Backend implementation provided by the CXL transport (cxl_hw.c) */
extern const struct cxlflash_backend_ops cxlflash_cxl_ops;
...@@ -25,6 +25,8 @@ ...@@ -25,6 +25,8 @@
#include <scsi/scsi_cmnd.h> #include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h> #include <scsi/scsi_device.h>
#include "backend.h"
extern const struct file_operations cxlflash_cxl_fops; extern const struct file_operations cxlflash_cxl_fops;
#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */ #define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */
...@@ -114,6 +116,7 @@ enum cxlflash_hwq_mode { ...@@ -114,6 +116,7 @@ enum cxlflash_hwq_mode {
struct cxlflash_cfg { struct cxlflash_cfg {
struct afu *afu; struct afu *afu;
const struct cxlflash_backend_ops *ops;
struct pci_dev *dev; struct pci_dev *dev;
struct pci_device_id *dev_id; struct pci_device_id *dev_id;
struct Scsi_Host *host; struct Scsi_Host *host;
......
/*
* CXL Flash Device Driver
*
* Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
* Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
*
* Copyright (C) 2018 IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <misc/cxl.h>
#include "backend.h"
/*
 * The following routines map the cxlflash backend operations to existing CXL
 * kernel API functions and are largely simple shims that provide an
 * abstraction for converting generic context and AFU cookies into cxl_context
 * or cxl_afu pointers.
 */
/* Shim: map the context's MMIO space by forwarding to cxl_psa_map(). */
static void __iomem *cxlflash_psa_map(void *ctx_cookie)
{
	return cxl_psa_map(ctx_cookie);
}
/* Shim: unmap an MMIO region obtained from the psa_map operation. */
static void cxlflash_psa_unmap(void __iomem *addr)
{
	cxl_psa_unmap(addr);
}
/* Shim: return the context's process element via cxl_process_element(). */
static int cxlflash_process_element(void *ctx_cookie)
{
	return cxl_process_element(ctx_cookie);
}
/* Shim: register @handler for AFU interrupt @num on the given context. */
static int cxlflash_map_afu_irq(void *ctx_cookie, int num,
				irq_handler_t handler, void *cookie, char *name)
{
	return cxl_map_afu_irq(ctx_cookie, num, handler, cookie, name);
}
/* Shim: unregister the handler for AFU interrupt @num on the given context. */
static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
{
	cxl_unmap_afu_irq(ctx_cookie, num, cookie);
}
/* Shim: start the context, forwarding fixed (0, NULL) extra arguments. */
static int cxlflash_start_context(void *ctx_cookie)
{
	return cxl_start_context(ctx_cookie, 0, NULL);
}
/* Shim: stop the context via cxl_stop_context(). */
static int cxlflash_stop_context(void *ctx_cookie)
{
	return cxl_stop_context(ctx_cookie);
}
/* Shim: reset the AFU associated with the context via cxl_afu_reset(). */
static int cxlflash_afu_reset(void *ctx_cookie)
{
	return cxl_afu_reset(ctx_cookie);
}
/* Shim: mark the context as a master context via cxl_set_master(). */
static void cxlflash_set_master(void *ctx_cookie)
{
	cxl_set_master(ctx_cookie);
}
/*
 * Shim: fetch the default context for the PCI device. The afu_cookie
 * parameter exists for the generic backend interface and is unused here.
 */
static void *cxlflash_get_context(struct pci_dev *dev, void *afu_cookie)
{
	return cxl_get_context(dev);
}
/*
 * Shim: initialize a new context for the PCI device. The afu_cookie
 * parameter exists for the generic backend interface and is unused here.
 */
static void *cxlflash_dev_context_init(struct pci_dev *dev, void *afu_cookie)
{
	return cxl_dev_context_init(dev);
}
/* Shim: release the context via cxl_release_context(). */
static int cxlflash_release_context(void *ctx_cookie)
{
	return cxl_release_context(ctx_cookie);
}
/* Shim: forward the PERST same-image setting for the AFU to the CXL core. */
static void cxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
{
	cxl_perst_reloads_same_image(afu_cookie, image);
}
/* Shim: read up to @count bytes of adapter VPD into @buf. */
static ssize_t cxlflash_read_adapter_vpd(struct pci_dev *dev,
					 void *buf, size_t count)
{
	return cxl_read_adapter_vpd(dev, buf, count);
}
/* Shim: allocate @num AFU interrupts for the context. */
static int cxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
{
	return cxl_allocate_afu_irqs(ctx_cookie, num);
}
/* Shim: free the AFU interrupts previously allocated for the context. */
static void cxlflash_free_afu_irqs(void *ctx_cookie)
{
	cxl_free_afu_irqs(ctx_cookie);
}
/* Shim: derive the AFU cookie from the PCI device via cxl_pci_to_afu(). */
static void *cxlflash_create_afu(struct pci_dev *dev)
{
	return cxl_pci_to_afu(dev);
}
/* Shim: obtain a file and file descriptor (via @fd) for the context. */
static struct file *cxlflash_get_fd(void *ctx_cookie,
				    struct file_operations *fops, int *fd)
{
	return cxl_get_fd(ctx_cookie, fops, fd);
}
/* Shim: recover the context cookie associated with @file. */
static void *cxlflash_fops_get_context(struct file *file)
{
	return cxl_fops_get_context(file);
}
/*
 * Start the context with @irqs interrupts by building the equivalent
 * CXL start-work request and submitting it via cxl_start_work().
 */
static int cxlflash_start_work(void *ctx_cookie, u64 irqs)
{
	struct cxl_ioctl_start_work work = {
		.flags		= CXL_START_WORK_NUM_IRQS,
		.num_interrupts	= irqs,
	};

	return cxl_start_work(ctx_cookie, &work);
}
/* Shim: mmap handler, forwarding to cxl_fd_mmap(). */
static int cxlflash_fd_mmap(struct file *file, struct vm_area_struct *vm)
{
	return cxl_fd_mmap(file, vm);
}
/* Shim: release handler, forwarding to cxl_fd_release(). */
static int cxlflash_fd_release(struct inode *inode, struct file *file)
{
	return cxl_fd_release(inode, file);
}
/* CXL implementation of the cxlflash backend operations vector. */
const struct cxlflash_backend_ops cxlflash_cxl_ops = {
	.module = THIS_MODULE,
	.psa_map = cxlflash_psa_map,
	.psa_unmap = cxlflash_psa_unmap,
	.process_element = cxlflash_process_element,
	.map_afu_irq = cxlflash_map_afu_irq,
	.unmap_afu_irq = cxlflash_unmap_afu_irq,
	.start_context = cxlflash_start_context,
	.stop_context = cxlflash_stop_context,
	.afu_reset = cxlflash_afu_reset,
	.set_master = cxlflash_set_master,
	.get_context = cxlflash_get_context,
	.dev_context_init = cxlflash_dev_context_init,
	.release_context = cxlflash_release_context,
	.perst_reloads_same_image = cxlflash_perst_reloads_same_image,
	.read_adapter_vpd = cxlflash_read_adapter_vpd,
	.allocate_afu_irqs = cxlflash_allocate_afu_irqs,
	.free_afu_irqs = cxlflash_free_afu_irqs,
	.create_afu = cxlflash_create_afu,
	.get_fd = cxlflash_get_fd,
	.fops_get_context = cxlflash_fops_get_context,
	.start_work = cxlflash_start_work,
	.fd_mmap = cxlflash_fd_mmap,
	.fd_release = cxlflash_fd_release,
};
...@@ -711,7 +711,7 @@ static void stop_afu(struct cxlflash_cfg *cfg) ...@@ -711,7 +711,7 @@ static void stop_afu(struct cxlflash_cfg *cfg)
} }
if (likely(afu->afu_map)) { if (likely(afu->afu_map)) {
cxl_psa_unmap((void __iomem *)afu->afu_map); cfg->ops->psa_unmap(afu->afu_map);
afu->afu_map = NULL; afu->afu_map = NULL;
} }
} }
...@@ -748,13 +748,13 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level, ...@@ -748,13 +748,13 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
case UNMAP_THREE: case UNMAP_THREE:
/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */ /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
if (index == PRIMARY_HWQ) if (index == PRIMARY_HWQ)
cxl_unmap_afu_irq(hwq->ctx_cookie, 3, hwq); cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
case UNMAP_TWO: case UNMAP_TWO:
cxl_unmap_afu_irq(hwq->ctx_cookie, 2, hwq); cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
case UNMAP_ONE: case UNMAP_ONE:
cxl_unmap_afu_irq(hwq->ctx_cookie, 1, hwq); cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
case FREE_IRQ: case FREE_IRQ:
cxl_free_afu_irqs(hwq->ctx_cookie); cfg->ops->free_afu_irqs(hwq->ctx_cookie);
/* fall through */ /* fall through */
case UNDO_NOOP: case UNDO_NOOP:
/* No action required */ /* No action required */
...@@ -788,9 +788,9 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index) ...@@ -788,9 +788,9 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)
return; return;
} }
WARN_ON(cxl_stop_context(hwq->ctx_cookie)); WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
if (index != PRIMARY_HWQ) if (index != PRIMARY_HWQ)
WARN_ON(cxl_release_context(hwq->ctx_cookie)); WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
hwq->ctx_cookie = NULL; hwq->ctx_cookie = NULL;
spin_lock_irqsave(&hwq->hsq_slock, lock_flags); spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
...@@ -1598,25 +1598,6 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data) ...@@ -1598,25 +1598,6 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
/**
* start_context() - starts the master context
* @cfg: Internal structure associated with the host.
* @index: Index of the hardware queue.
*
* Return: A success or failure value from CXL services.
*/
static int start_context(struct cxlflash_cfg *cfg, u32 index)
{
struct device *dev = &cfg->dev->dev;
struct hwq *hwq = get_hwq(cfg->afu, index);
int rc = 0;
rc = cxl_start_context(hwq->ctx_cookie, 0, NULL);
dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
}
/** /**
* read_vpd() - obtains the WWPNs from VPD * read_vpd() - obtains the WWPNs from VPD
* @cfg: Internal structure associated with the host. * @cfg: Internal structure associated with the host.
...@@ -1639,7 +1620,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) ...@@ -1639,7 +1620,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" }; const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
/* Get the VPD data from the device */ /* Get the VPD data from the device */
vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data)); vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
if (unlikely(vpd_size <= 0)) { if (unlikely(vpd_size <= 0)) {
dev_err(dev, "%s: Unable to read VPD (size = %ld)\n", dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
__func__, vpd_size); __func__, vpd_size);
...@@ -1731,6 +1712,7 @@ static void init_pcr(struct cxlflash_cfg *cfg) ...@@ -1731,6 +1712,7 @@ static void init_pcr(struct cxlflash_cfg *cfg)
struct afu *afu = cfg->afu; struct afu *afu = cfg->afu;
struct sisl_ctrl_map __iomem *ctrl_map; struct sisl_ctrl_map __iomem *ctrl_map;
struct hwq *hwq; struct hwq *hwq;
void *cookie;
int i; int i;
for (i = 0; i < MAX_CONTEXT; i++) { for (i = 0; i < MAX_CONTEXT; i++) {
...@@ -1745,8 +1727,9 @@ static void init_pcr(struct cxlflash_cfg *cfg) ...@@ -1745,8 +1727,9 @@ static void init_pcr(struct cxlflash_cfg *cfg)
/* Copy frequently used fields into hwq */ /* Copy frequently used fields into hwq */
for (i = 0; i < afu->num_hwqs; i++) { for (i = 0; i < afu->num_hwqs; i++) {
hwq = get_hwq(afu, i); hwq = get_hwq(afu, i);
cookie = hwq->ctx_cookie;
hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx_cookie); hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host; hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl; hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
...@@ -1930,7 +1913,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg, ...@@ -1930,7 +1913,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
bool is_primary_hwq = (hwq->index == PRIMARY_HWQ); bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
int num_irqs = is_primary_hwq ? 3 : 2; int num_irqs = is_primary_hwq ? 3 : 2;
rc = cxl_allocate_afu_irqs(ctx, num_irqs); rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
if (unlikely(rc)) { if (unlikely(rc)) {
dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n", dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
__func__, rc); __func__, rc);
...@@ -1938,7 +1921,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg, ...@@ -1938,7 +1921,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
goto out; goto out;
} }
rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq, rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
"SISL_MSI_SYNC_ERROR"); "SISL_MSI_SYNC_ERROR");
if (unlikely(rc <= 0)) { if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__); dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
...@@ -1946,7 +1929,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg, ...@@ -1946,7 +1929,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
goto out; goto out;
} }
rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq, rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
"SISL_MSI_RRQ_UPDATED"); "SISL_MSI_RRQ_UPDATED");
if (unlikely(rc <= 0)) { if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__); dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
...@@ -1958,7 +1941,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg, ...@@ -1958,7 +1941,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
if (!is_primary_hwq) if (!is_primary_hwq)
goto out; goto out;
rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq, rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
"SISL_MSI_ASYNC_ERROR"); "SISL_MSI_ASYNC_ERROR");
if (unlikely(rc <= 0)) { if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__); dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
...@@ -1989,9 +1972,9 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index) ...@@ -1989,9 +1972,9 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
INIT_LIST_HEAD(&hwq->pending_cmds); INIT_LIST_HEAD(&hwq->pending_cmds);
if (index == PRIMARY_HWQ) if (index == PRIMARY_HWQ)
ctx = cxl_get_context(cfg->dev); ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
else else
ctx = cxl_dev_context_init(cfg->dev); ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
if (IS_ERR_OR_NULL(ctx)) { if (IS_ERR_OR_NULL(ctx)) {
rc = -ENOMEM; rc = -ENOMEM;
goto err1; goto err1;
...@@ -2001,11 +1984,11 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index) ...@@ -2001,11 +1984,11 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
hwq->ctx_cookie = ctx; hwq->ctx_cookie = ctx;
/* Set it up as a master with the CXL */ /* Set it up as a master with the CXL */
cxl_set_master(ctx); cfg->ops->set_master(ctx);
/* Reset AFU when initializing primary context */ /* Reset AFU when initializing primary context */
if (index == PRIMARY_HWQ) { if (index == PRIMARY_HWQ) {
rc = cxl_afu_reset(ctx); rc = cfg->ops->afu_reset(ctx);
if (unlikely(rc)) { if (unlikely(rc)) {
dev_err(dev, "%s: AFU reset failed rc=%d\n", dev_err(dev, "%s: AFU reset failed rc=%d\n",
__func__, rc); __func__, rc);
...@@ -2019,11 +2002,8 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index) ...@@ -2019,11 +2002,8 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
goto err2; goto err2;
} }
/* This performs the equivalent of the CXL_IOCTL_START_WORK. /* Finally, activate the context by starting it */
* The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process rc = cfg->ops->start_context(hwq->ctx_cookie);
* element (pe) that is embedded in the context (ctx)
*/
rc = start_context(cfg, index);
if (unlikely(rc)) { if (unlikely(rc)) {
dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc); dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
level = UNMAP_THREE; level = UNMAP_THREE;
...@@ -2036,7 +2016,7 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index) ...@@ -2036,7 +2016,7 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
err2: err2:
term_intr(cfg, level, index); term_intr(cfg, level, index);
if (index != PRIMARY_HWQ) if (index != PRIMARY_HWQ)
cxl_release_context(ctx); cfg->ops->release_context(ctx);
err1: err1:
hwq->ctx_cookie = NULL; hwq->ctx_cookie = NULL;
goto out; goto out;
...@@ -2093,7 +2073,7 @@ static int init_afu(struct cxlflash_cfg *cfg) ...@@ -2093,7 +2073,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
struct hwq *hwq; struct hwq *hwq;
int i; int i;
cxl_perst_reloads_same_image(cfg->afu_cookie, true); cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
afu->num_hwqs = afu->desired_hwqs; afu->num_hwqs = afu->desired_hwqs;
for (i = 0; i < afu->num_hwqs; i++) { for (i = 0; i < afu->num_hwqs; i++) {
...@@ -2107,9 +2087,9 @@ static int init_afu(struct cxlflash_cfg *cfg) ...@@ -2107,9 +2087,9 @@ static int init_afu(struct cxlflash_cfg *cfg)
/* Map the entire MMIO space of the AFU using the first context */ /* Map the entire MMIO space of the AFU using the first context */
hwq = get_hwq(afu, PRIMARY_HWQ); hwq = get_hwq(afu, PRIMARY_HWQ);
afu->afu_map = cxl_psa_map(hwq->ctx_cookie); afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
if (!afu->afu_map) { if (!afu->afu_map) {
dev_err(dev, "%s: cxl_psa_map failed\n", __func__); dev_err(dev, "%s: psa_map failed\n", __func__);
rc = -ENOMEM; rc = -ENOMEM;
goto err1; goto err1;
} }
...@@ -3669,6 +3649,7 @@ static int cxlflash_probe(struct pci_dev *pdev, ...@@ -3669,6 +3649,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
cfg->init_state = INIT_STATE_NONE; cfg->init_state = INIT_STATE_NONE;
cfg->dev = pdev; cfg->dev = pdev;
cfg->ops = &cxlflash_cxl_ops;
cfg->cxl_fops = cxlflash_cxl_fops; cfg->cxl_fops = cxlflash_cxl_fops;
/* /*
...@@ -3700,7 +3681,7 @@ static int cxlflash_probe(struct pci_dev *pdev, ...@@ -3700,7 +3681,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, cfg); pci_set_drvdata(pdev, cfg);
cfg->afu_cookie = cxl_pci_to_afu(pdev); cfg->afu_cookie = cfg->ops->create_afu(pdev);
rc = init_pci(cfg); rc = init_pci(cfg);
if (rc) { if (rc) {
......
...@@ -978,9 +978,9 @@ static int cxlflash_disk_detach(struct scsi_device *sdev, ...@@ -978,9 +978,9 @@ static int cxlflash_disk_detach(struct scsi_device *sdev,
*/ */
static int cxlflash_cxl_release(struct inode *inode, struct file *file) static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{ {
void *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops); cxl_fops);
void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev; struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL; struct ctx_info *ctxi = NULL;
struct dk_cxlflash_detach detach = { { 0 }, 0 }; struct dk_cxlflash_detach detach = { { 0 }, 0 };
...@@ -988,7 +988,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file) ...@@ -988,7 +988,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE; enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
int ctxid; int ctxid;
ctxid = cxl_process_element(ctx); ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) { if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n", dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid); __func__, ctx, ctxid);
...@@ -1016,7 +1016,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file) ...@@ -1016,7 +1016,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
list_for_each_entry_safe(lun_access, t, &ctxi->luns, list) list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach); _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
out_release: out_release:
cxl_fd_release(inode, file); cfg->ops->fd_release(inode, file);
out: out:
dev_dbg(dev, "%s: returning\n", __func__); dev_dbg(dev, "%s: returning\n", __func__);
return 0; return 0;
...@@ -1091,9 +1091,9 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf) ...@@ -1091,9 +1091,9 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
{ {
struct vm_area_struct *vma = vmf->vma; struct vm_area_struct *vma = vmf->vma;
struct file *file = vma->vm_file; struct file *file = vma->vm_file;
void *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops); cxl_fops);
void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev; struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL; struct ctx_info *ctxi = NULL;
struct page *err_page = NULL; struct page *err_page = NULL;
...@@ -1101,7 +1101,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf) ...@@ -1101,7 +1101,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
int rc = 0; int rc = 0;
int ctxid; int ctxid;
ctxid = cxl_process_element(ctx); ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) { if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n", dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid); __func__, ctx, ctxid);
...@@ -1164,16 +1164,16 @@ static const struct vm_operations_struct cxlflash_mmap_vmops = { ...@@ -1164,16 +1164,16 @@ static const struct vm_operations_struct cxlflash_mmap_vmops = {
*/ */
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma) static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{ {
void *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops); cxl_fops);
void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev; struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL; struct ctx_info *ctxi = NULL;
enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE; enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
int ctxid; int ctxid;
int rc = 0; int rc = 0;
ctxid = cxl_process_element(ctx); ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) { if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n", dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid); __func__, ctx, ctxid);
...@@ -1190,7 +1190,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -1190,7 +1190,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid); dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);
rc = cxl_fd_mmap(file, vma); rc = cfg->ops->fd_mmap(file, vma);
if (likely(!rc)) { if (likely(!rc)) {
/* Insert ourself in the mmap fault handler path */ /* Insert ourself in the mmap fault handler path */
ctxi->cxl_mmap_vmops = vma->vm_ops; ctxi->cxl_mmap_vmops = vma->vm_ops;
...@@ -1309,7 +1309,6 @@ static int cxlflash_disk_attach(struct scsi_device *sdev, ...@@ -1309,7 +1309,6 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
struct afu *afu = cfg->afu; struct afu *afu = cfg->afu;
struct llun_info *lli = sdev->hostdata; struct llun_info *lli = sdev->hostdata;
struct glun_info *gli = lli->parent; struct glun_info *gli = lli->parent;
struct cxl_ioctl_start_work work = { 0 };
struct ctx_info *ctxi = NULL; struct ctx_info *ctxi = NULL;
struct lun_access *lun_access = NULL; struct lun_access *lun_access = NULL;
int rc = 0; int rc = 0;
...@@ -1397,7 +1396,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev, ...@@ -1397,7 +1396,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
goto err; goto err;
} }
ctx = cxl_dev_context_init(cfg->dev); ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
if (IS_ERR_OR_NULL(ctx)) { if (IS_ERR_OR_NULL(ctx)) {
dev_err(dev, "%s: Could not initialize context %p\n", dev_err(dev, "%s: Could not initialize context %p\n",
__func__, ctx); __func__, ctx);
...@@ -1405,24 +1404,21 @@ static int cxlflash_disk_attach(struct scsi_device *sdev, ...@@ -1405,24 +1404,21 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
goto err; goto err;
} }
work.num_interrupts = irqs; rc = cfg->ops->start_work(ctx, irqs);
work.flags = CXL_START_WORK_NUM_IRQS;
rc = cxl_start_work(ctx, &work);
if (unlikely(rc)) { if (unlikely(rc)) {
dev_dbg(dev, "%s: Could not start context rc=%d\n", dev_dbg(dev, "%s: Could not start context rc=%d\n",
__func__, rc); __func__, rc);
goto err; goto err;
} }
ctxid = cxl_process_element(ctx); ctxid = cfg->ops->process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) { if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid); dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
rc = -EPERM; rc = -EPERM;
goto err; goto err;
} }
file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd); file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) { if (unlikely(fd < 0)) {
rc = -ENODEV; rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__); dev_err(dev, "%s: Could not get file descriptor\n", __func__);
...@@ -1481,8 +1477,8 @@ static int cxlflash_disk_attach(struct scsi_device *sdev, ...@@ -1481,8 +1477,8 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
err: err:
/* Cleanup CXL context; okay to 'stop' even if it was not started */ /* Cleanup CXL context; okay to 'stop' even if it was not started */
if (!IS_ERR_OR_NULL(ctx)) { if (!IS_ERR_OR_NULL(ctx)) {
cxl_stop_context(ctx); cfg->ops->stop_context(ctx);
cxl_release_context(ctx); cfg->ops->release_context(ctx);
ctx = NULL; ctx = NULL;
} }
...@@ -1533,9 +1529,8 @@ static int recover_context(struct cxlflash_cfg *cfg, ...@@ -1533,9 +1529,8 @@ static int recover_context(struct cxlflash_cfg *cfg,
struct file *file; struct file *file;
void *ctx; void *ctx;
struct afu *afu = cfg->afu; struct afu *afu = cfg->afu;
struct cxl_ioctl_start_work work = { 0 };
ctx = cxl_dev_context_init(cfg->dev); ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
if (IS_ERR_OR_NULL(ctx)) { if (IS_ERR_OR_NULL(ctx)) {
dev_err(dev, "%s: Could not initialize context %p\n", dev_err(dev, "%s: Could not initialize context %p\n",
__func__, ctx); __func__, ctx);
...@@ -1543,24 +1538,21 @@ static int recover_context(struct cxlflash_cfg *cfg, ...@@ -1543,24 +1538,21 @@ static int recover_context(struct cxlflash_cfg *cfg,
goto out; goto out;
} }
work.num_interrupts = ctxi->irqs; rc = cfg->ops->start_work(ctx, ctxi->irqs);
work.flags = CXL_START_WORK_NUM_IRQS;
rc = cxl_start_work(ctx, &work);
if (unlikely(rc)) { if (unlikely(rc)) {
dev_dbg(dev, "%s: Could not start context rc=%d\n", dev_dbg(dev, "%s: Could not start context rc=%d\n",
__func__, rc); __func__, rc);
goto err1; goto err1;
} }
ctxid = cxl_process_element(ctx); ctxid = cfg->ops->process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) { if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid); dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
rc = -EPERM; rc = -EPERM;
goto err2; goto err2;
} }
file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd); file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) { if (unlikely(fd < 0)) {
rc = -ENODEV; rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__); dev_err(dev, "%s: Could not get file descriptor\n", __func__);
...@@ -1607,9 +1599,9 @@ static int recover_context(struct cxlflash_cfg *cfg, ...@@ -1607,9 +1599,9 @@ static int recover_context(struct cxlflash_cfg *cfg,
fput(file); fput(file);
put_unused_fd(fd); put_unused_fd(fd);
err2: err2:
cxl_stop_context(ctx); cfg->ops->stop_context(ctx);
err1: err1:
cxl_release_context(ctx); cfg->ops->release_context(ctx);
goto out; goto out;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment