Commit d458d4b4 authored by Brett Creeley, committed by Jakub Kicinski

ionic: Keep interrupt affinity up to date

Currently the driver either sets the initial interrupt affinity for its
adminq and tx/rx queues on probe or resets it on various
down/up/reconfigure flows. If a user or a user-space process
(e.g. irqbalance) changes the IRQ affinity for any of the driver's
interrupts, that setting is reset to the driver defaults whenever any
down/up/reconfigure operation happens. This is incorrect and is fixed
by making two changes:

1. Allocate an array of cpumasks once on probe and destroy it only on
   remove.
2. Keep the cpumask of each in-use interrupt up to date by registering
   for affinity notifiers, as sketched below.
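
In outline, change 2 uses the kernel's generic IRQ affinity-notifier
API (irq_set_affinity_notifier()). Here is a minimal sketch of that
pattern with hypothetical my_* names; the real ionic implementation is
in the diff below:

#include <linux/interrupt.h>
#include <linux/cpumask.h>

struct my_intr {
	unsigned int vector;		/* Linux IRQ number */
	cpumask_var_t *affinity_mask;	/* points into a per-device array */
	struct irq_affinity_notify aff_notify;
};

/* Called by the IRQ core whenever anyone (e.g. irqbalance writing
 * /proc/irq/<n>/smp_affinity) changes this vector's affinity; keeps the
 * driver's cached mask in sync so later down/up flows reuse it instead
 * of resetting to driver defaults.
 */
static void my_irq_aff_notify(struct irq_affinity_notify *notify,
			      const cpumask_t *mask)
{
	struct my_intr *intr = container_of(notify, struct my_intr, aff_notify);

	cpumask_copy(*intr->affinity_mask, mask);
}

/* The notifier is embedded in a longer-lived structure, so there is
 * nothing to free when the IRQ core drops its reference.
 */
static void my_irq_aff_release(struct kref *ref)
{
}

static void my_intr_start(struct my_intr *intr)
{
	intr->aff_notify.notify = my_irq_aff_notify;
	intr->aff_notify.release = my_irq_aff_release;
	irq_set_affinity_notifier(intr->vector, &intr->aff_notify);
	irq_set_affinity_hint(intr->vector, *intr->affinity_mask);
}

static void my_intr_stop(struct my_intr *intr)
{
	/* stop receiving notifications, then drop the hint */
	irq_set_affinity_notifier(intr->vector, NULL);
	irq_set_affinity_hint(intr->vector, NULL);
}

Because the cached masks live in a per-device array allocated once on
probe, they survive down/up/reconfigure and the user's choice is
preserved.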
Signed-off-by: Brett Creeley <brett.creeley@amd.com>
Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
Link: https://lore.kernel.org/r/20240619003257.6138-3-shannon.nelson@amd.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 4aaa49a2
drivers/net/ethernet/pensando/ionic/ionic.h
@@ -54,6 +54,7 @@ struct ionic {
 	unsigned int nrxqs_per_lif;
 	unsigned int nintrs;
 	DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
+	cpumask_var_t *affinity_masks;
 	struct work_struct nb_work;
 	struct notifier_block nb;
 	struct rw_semaphore vf_op_lock;	/* lock for VF operations */
drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -280,9 +280,9 @@ struct ionic_intr_info {
 	u64 rearm_count;
 	unsigned int index;
 	unsigned int vector;
-	unsigned int cpu;
 	u32 dim_coal_hw;
-	cpumask_t affinity_mask;
+	cpumask_var_t *affinity_mask;
+	struct irq_affinity_notify aff_notify;
 };
 
 struct ionic_cq {
drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -265,6 +265,18 @@ static void ionic_intr_free(struct ionic *ionic, int index)
 	clear_bit(index, ionic->intrs);
 }
 
+static void ionic_irq_aff_notify(struct irq_affinity_notify *notify,
+				 const cpumask_t *mask)
+{
+	struct ionic_intr_info *intr = container_of(notify, struct ionic_intr_info, aff_notify);
+
+	cpumask_copy(*intr->affinity_mask, mask);
+}
+
+static void ionic_irq_aff_release(struct kref __always_unused *ref)
+{
+}
+
 static int ionic_qcq_enable(struct ionic_qcq *qcq)
 {
 	struct ionic_queue *q = &qcq->q;
@@ -299,8 +311,10 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
 	if (qcq->flags & IONIC_QCQ_F_INTR) {
 		napi_enable(&qcq->napi);
+		irq_set_affinity_notifier(qcq->intr.vector,
+					  &qcq->intr.aff_notify);
 		irq_set_affinity_hint(qcq->intr.vector,
-				      &qcq->intr.affinity_mask);
+				      *qcq->intr.affinity_mask);
 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
 				IONIC_INTR_MASK_CLEAR);
 	}
@@ -334,6 +348,7 @@ static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int f
 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
 				IONIC_INTR_MASK_SET);
 		synchronize_irq(qcq->intr.vector);
+		irq_set_affinity_notifier(qcq->intr.vector, NULL);
 		irq_set_affinity_hint(qcq->intr.vector, NULL);
 		napi_disable(&qcq->napi);
 	}
@@ -474,6 +489,7 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
 static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
 {
+	cpumask_var_t *affinity_mask;
 	int err;
 
 	if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
@@ -505,10 +521,19 @@ static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qc
 	}
 
 	/* try to get the irq on the local numa node first */
-	qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
-					     dev_to_node(lif->ionic->dev));
-	if (qcq->intr.cpu != -1)
-		cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);
+	affinity_mask = &lif->ionic->affinity_masks[qcq->intr.index];
+	if (cpumask_empty(*affinity_mask)) {
+		unsigned int cpu;
+
+		cpu = cpumask_local_spread(qcq->intr.index,
+					   dev_to_node(lif->ionic->dev));
+		if (cpu != -1)
+			cpumask_set_cpu(cpu, *affinity_mask);
+	}
+
+	qcq->intr.affinity_mask = affinity_mask;
+	qcq->intr.aff_notify.notify = ionic_irq_aff_notify;
+	qcq->intr.aff_notify.release = ionic_irq_aff_release;
 
 	netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
 	return 0;
@@ -3120,6 +3145,44 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
 	return err;
 }
 
+static int ionic_affinity_masks_alloc(struct ionic *ionic)
+{
+	cpumask_var_t *affinity_masks;
+	int nintrs = ionic->nintrs;
+	int i;
+
+	affinity_masks = kcalloc(nintrs, sizeof(cpumask_var_t), GFP_KERNEL);
+	if (!affinity_masks)
+		return -ENOMEM;
+
+	for (i = 0; i < nintrs; i++) {
+		if (!zalloc_cpumask_var_node(&affinity_masks[i], GFP_KERNEL,
+					     dev_to_node(ionic->dev)))
+			goto err_out;
+	}
+
+	ionic->affinity_masks = affinity_masks;
+
+	return 0;
+
+err_out:
+	for (--i; i >= 0; i--)
+		free_cpumask_var(affinity_masks[i]);
+	kfree(affinity_masks);
+
+	return -ENOMEM;
+}
+
+static void ionic_affinity_masks_free(struct ionic *ionic)
+{
+	int i;
+
+	for (i = 0; i < ionic->nintrs; i++)
+		free_cpumask_var(ionic->affinity_masks[i]);
+	kfree(ionic->affinity_masks);
+	ionic->affinity_masks = NULL;
+}
+
 int ionic_lif_alloc(struct ionic *ionic)
 {
 	struct device *dev = ionic->dev;
@@ -3211,11 +3274,15 @@ int ionic_lif_alloc(struct ionic *ionic)
 	ionic_debugfs_add_lif(lif);
 
+	err = ionic_affinity_masks_alloc(ionic);
+	if (err)
+		goto err_out_free_lif_info;
+
 	/* allocate control queues and txrx queue arrays */
 	ionic_lif_queue_identify(lif);
 	err = ionic_qcqs_alloc(lif);
 	if (err)
-		goto err_out_free_lif_info;
+		goto err_out_free_affinity_masks;
 
 	/* allocate rss indirection table */
 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
@@ -3237,6 +3304,8 @@ int ionic_lif_alloc(struct ionic *ionic)
 err_out_free_qcqs:
 	ionic_qcqs_free(lif);
+err_out_free_affinity_masks:
+	ionic_affinity_masks_free(lif->ionic);
 err_out_free_lif_info:
 	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
 	lif->info = NULL;
@@ -3410,6 +3479,8 @@ void ionic_lif_free(struct ionic_lif *lif)
 	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
 		ionic_lif_reset(lif);
 
+	ionic_affinity_masks_free(lif->ionic);
+
 	/* free lif info */
 	kfree(lif->identity);
 	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
@@ -3487,7 +3558,7 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
 	if (qcq->flags & IONIC_QCQ_F_INTR) {
 		irq_set_affinity_hint(qcq->intr.vector,
-				      &qcq->intr.affinity_mask);
+				      *qcq->intr.affinity_mask);
 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
 				IONIC_INTR_MASK_CLEAR);
 	}