Commit cbce0917 authored by Ian Munsie's avatar Ian Munsie Committed by Michael Ellerman

cxl: Add preliminary workaround for CX4 interrupt limitation

The Mellanox CX4 has a hardware limitation where only 4 bits of the
AFU interrupt number can be passed to the XSL when sending an interrupt,
limiting it to only 15 interrupts per context (AFU interrupt number 0 is
invalid).

In order to overcome this, we will allocate additional contexts linked
to the default context as extra address space for the extra interrupts -
this will be implemented in the next patch.

This patch adds the preliminary support to allow this, by way of adding
a linked list in the context structure that we use to keep track of the
contexts dedicated to interrupts, and an API to simultaneously iterate
over the related context structures, AFU interrupt numbers and hardware
interrupt numbers. The point of using a single API to iterate these is
to hide some of the details of the iteration from external code, and to
reduce the number of APIs that need to be exported via base.c to allow
built in code to call.
Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Reviewed-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
Reviewed-by: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 79384e4b
...@@ -97,6 +97,21 @@ static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num) ...@@ -97,6 +97,21 @@ static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
return 0; return 0;
} }
/*
 * Advance the (context, AFU interrupt) iteration by one step and return the
 * hardware IRQ number for the new position.
 *
 * A caller-supplied *ctx of NULL (or *afu_irq of 0) restarts the walk at the
 * device's default context with AFU interrupt 1 (interrupt 0 is invalid).
 * Once the per-process interrupt limit is exhausted, the walk moves on to the
 * next context chained on the extra_irq_contexts list.
 */
int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
{
	/* Fresh iteration: begin at the default context, AFU interrupt 1 */
	if (!*ctx || !*afu_irq) {
		*ctx = cxl_get_context(pdev);
		*afu_irq = 1;
		return cxl_find_afu_irq(*ctx, *afu_irq);
	}

	/* Step forward; roll over into the next linked context when full */
	if (++(*afu_irq) > cxl_get_max_irqs_per_process(pdev)) {
		*ctx = list_next_entry(*ctx, extra_irq_contexts);
		*afu_irq = 1;
	}

	return cxl_find_afu_irq(*ctx, *afu_irq);
}
/* Exported via cxl_base */
int cxl_set_priv(struct cxl_context *ctx, void *priv) int cxl_set_priv(struct cxl_context *ctx, void *priv)
{ {
......
...@@ -141,6 +141,23 @@ void cxl_pci_disable_device(struct pci_dev *dev) ...@@ -141,6 +141,23 @@ void cxl_pci_disable_device(struct pci_dev *dev)
} }
EXPORT_SYMBOL_GPL(cxl_pci_disable_device); EXPORT_SYMBOL_GPL(cxl_pci_disable_device);
/*
 * Exported wrapper around _cxl_next_msi_hwirq() for built-in callers.
 *
 * Takes a reference on the cxl module via cxl_calls_get() for the duration of
 * the call so the module cannot be unloaded underneath us; returns -EBUSY if
 * the cxl calls are unavailable.
 */
int cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
{
	struct cxl_calls *calls = cxl_calls_get();
	int hwirq;

	if (!calls)
		return -EBUSY;

	hwirq = calls->cxl_next_msi_hwirq(pdev, ctx, afu_irq);
	cxl_calls_put(calls);

	return hwirq;
}
EXPORT_SYMBOL_GPL(cxl_next_msi_hwirq);
static int __init cxl_base_init(void) static int __init cxl_base_init(void)
{ {
struct device_node *np; struct device_node *np;
......
...@@ -68,6 +68,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, ...@@ -68,6 +68,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
ctx->pending_afu_err = false; ctx->pending_afu_err = false;
INIT_LIST_HEAD(&ctx->irq_names); INIT_LIST_HEAD(&ctx->irq_names);
INIT_LIST_HEAD(&ctx->extra_irq_contexts);
/* /*
* When we have to destroy all contexts in cxl_context_detach_all() we * When we have to destroy all contexts in cxl_context_detach_all() we
......
...@@ -537,6 +537,14 @@ struct cxl_context { ...@@ -537,6 +537,14 @@ struct cxl_context {
atomic_t afu_driver_events; atomic_t afu_driver_events;
struct rcu_head rcu; struct rcu_head rcu;
/*
* Only used when more interrupts are allocated via
* pci_enable_msix_range than are supported in the default context, to
* use additional contexts to overcome the limitation. i.e. Mellanox
* CX4 only:
*/
struct list_head extra_irq_contexts;
}; };
struct cxl_service_layer_ops { struct cxl_service_layer_ops {
...@@ -722,11 +730,13 @@ ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, ...@@ -722,11 +730,13 @@ ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
/* Internal functions wrapped in cxl_base to allow PHB to call them */ /* Internal functions wrapped in cxl_base to allow PHB to call them */
bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu); bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu);
void _cxl_pci_disable_device(struct pci_dev *dev); void _cxl_pci_disable_device(struct pci_dev *dev);
int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq);
struct cxl_calls { struct cxl_calls {
void (*cxl_slbia)(struct mm_struct *mm); void (*cxl_slbia)(struct mm_struct *mm);
bool (*cxl_pci_associate_default_context)(struct pci_dev *dev, struct cxl_afu *afu); bool (*cxl_pci_associate_default_context)(struct pci_dev *dev, struct cxl_afu *afu);
void (*cxl_pci_disable_device)(struct pci_dev *dev); void (*cxl_pci_disable_device)(struct pci_dev *dev);
int (*cxl_next_msi_hwirq)(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq);
struct module *owner; struct module *owner;
}; };
......
...@@ -112,6 +112,7 @@ static struct cxl_calls cxl_calls = { ...@@ -112,6 +112,7 @@ static struct cxl_calls cxl_calls = {
.cxl_slbia = cxl_slbia_core, .cxl_slbia = cxl_slbia_core,
.cxl_pci_associate_default_context = _cxl_pci_associate_default_context, .cxl_pci_associate_default_context = _cxl_pci_associate_default_context,
.cxl_pci_disable_device = _cxl_pci_disable_device, .cxl_pci_disable_device = _cxl_pci_disable_device,
.cxl_next_msi_hwirq = _cxl_next_msi_hwirq,
.owner = THIS_MODULE, .owner = THIS_MODULE,
}; };
......
...@@ -177,6 +177,15 @@ int cxl_process_element(struct cxl_context *ctx); ...@@ -177,6 +177,15 @@ int cxl_process_element(struct cxl_context *ctx);
int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs); int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs);
int cxl_get_max_irqs_per_process(struct pci_dev *dev); int cxl_get_max_irqs_per_process(struct pci_dev *dev);
/*
 * Used to simultaneously iterate over the hardware interrupt numbers,
 * contexts and AFU interrupt numbers allocated for the device via
 * pci_enable_msix_range. This is a convenience function for hardware that
 * limits the number of interrupts per process. *ctx and *afu_irq should be
 * NULL and 0 to start the iteration.
 */
int cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq);
/* /*
* These calls allow drivers to create their own file descriptors and make them * These calls allow drivers to create their own file descriptors and make them
* identical to the cxl file descriptor user API. An example use case: * identical to the cxl file descriptor user API. An example use case:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment