Commit bf9bf704 authored by Alexander Lobakin, committed by Tony Nguyen

idpf: avoid bloating &idpf_q_vector with big %NR_CPUS

With CONFIG_MAXSMP, sizeof(cpumask_t) is 1 Kb. The queue vector
structure has one embedded, which means 1 additional Kb of not really
hotpath data per vector.
We have cpumask_var_t, which is either an embedded cpumask or a pointer
for allocating it dynamically when it's big. Use it instead of plain
cpumasks and put &idpf_q_vector on a good diet.
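
For reference, a minimal sketch of the cpumask_var_t pattern relied on here (the my_* names are purely illustrative, not part of idpf): zalloc_cpumask_var() only really allocates when CONFIG_CPUMASK_OFFSTACK=y, and free_cpumask_var() is the matching no-op/kfree on teardown.

/* Hypothetical example; my_q_vector and my_cpu are illustrative names. */
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct my_q_vector {
	/* pointer with CONFIG_CPUMASK_OFFSTACK=y, embedded mask otherwise */
	cpumask_var_t affinity_mask;
};

static int my_q_vector_init(struct my_q_vector *qv, unsigned int my_cpu)
{
	/* allocates (and zeroes) the mask only in the off-stack case */
	if (!zalloc_cpumask_var(&qv->affinity_mask, GFP_KERNEL))
		return -ENOMEM;

	if (cpu_online(my_cpu))
		cpumask_set_cpu(my_cpu, qv->affinity_mask);

	return 0;
}

static void my_q_vector_rel(struct my_q_vector *qv)
{
	/* no-op unless the mask was actually allocated */
	free_cpumask_var(qv->affinity_mask);
}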
Also remove the redundant pointer to the interrupt name from the
structure. request_irq() saves it and free_irq() returns it on deinit,
so the memory can be freed there.
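
A minimal sketch of that free_irq() property (the my_* names are hypothetical stand-ins for the driver's handler and cookie): the kasprintf()'d name is stored by request_irq() and handed back by free_irq(), so it can be kfree()'d right at the release site.

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static int my_req_irq(unsigned int irq, irq_handler_t my_handler, void *my_cookie)
{
	char *name = kasprintf(GFP_KERNEL, "my-driver-%u", irq);
	int err;

	if (!name)
		return -ENOMEM;

	/* request_irq() keeps the 'name' pointer (shown in /proc/interrupts) */
	err = request_irq(irq, my_handler, 0, name, my_cookie);
	if (err)
		kfree(name);

	return err;
}

static void my_rel_irq(unsigned int irq, void *my_cookie)
{
	/* free_irq() returns the devname string passed to request_irq() */
	kfree(free_irq(irq, my_cookie));
}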
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent e4891e46
@@ -69,7 +69,7 @@ static void idpf_deinit_vector_stack(struct idpf_adapter *adapter)
 static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
 {
 	clear_bit(IDPF_MB_INTR_MODE, adapter->flags);
-	free_irq(adapter->msix_entries[0].vector, adapter);
+	kfree(free_irq(adapter->msix_entries[0].vector, adapter));
 	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
 }
@@ -124,15 +124,14 @@ static void idpf_mb_irq_enable(struct idpf_adapter *adapter)
  */
 static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
 {
-	struct idpf_q_vector *mb_vector = &adapter->mb_vector;
 	int irq_num, mb_vidx = 0, err;
+	char *name;
 
 	irq_num = adapter->msix_entries[mb_vidx].vector;
-	mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
-				    dev_driver_string(&adapter->pdev->dev),
-				    "Mailbox", mb_vidx);
+	name = kasprintf(GFP_KERNEL, "%s-%s-%d",
+			 dev_driver_string(&adapter->pdev->dev),
+			 "Mailbox", mb_vidx);
 
-	err = request_irq(irq_num, adapter->irq_mb_handler, 0,
-			  mb_vector->name, adapter);
+	err = request_irq(irq_num, adapter->irq_mb_handler, 0, name, adapter);
 	if (err) {
 		dev_err(&adapter->pdev->dev,
 			"IRQ request for mailbox failed, error: %d\n", err);
@@ -3613,6 +3613,8 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
 		q_vector->tx = NULL;
 		kfree(q_vector->rx);
 		q_vector->rx = NULL;
+
+		free_cpumask_var(q_vector->affinity_mask);
 	}
 
 	/* Clean up the mapping of queues to vectors */
@@ -3661,7 +3663,7 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
 		/* clear the affinity_mask in the IRQ descriptor */
 		irq_set_affinity_hint(irq_num, NULL);
-		free_irq(irq_num, q_vector);
+		kfree(free_irq(irq_num, q_vector));
 	}
 }
@@ -3812,6 +3814,7 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename)
 	for (vector = 0; vector < vport->num_q_vectors; vector++) {
 		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
+		char *name;
 
 		vidx = vport->q_vector_idxs[vector];
 		irq_num = adapter->msix_entries[vidx].vector;
@@ -3825,18 +3828,18 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename)
 		else
 			continue;
 
-		q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
-					   basename, vec_name, vidx);
+		name = kasprintf(GFP_KERNEL, "%s-%s-%d", basename, vec_name,
+				 vidx);
 
 		err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
-				  q_vector->name, q_vector);
+				  name, q_vector);
 		if (err) {
 			netdev_err(vport->netdev,
 				   "Request_irq failed, error: %d\n", err);
 			goto free_q_irqs;
 		}
 
 		/* assign the mask for this irq */
-		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
+		irq_set_affinity_hint(irq_num, q_vector->affinity_mask);
 	}
 
 	return 0;
@@ -3845,7 +3848,7 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename)
 	while (--vector >= 0) {
 		vidx = vport->q_vector_idxs[vector];
 		irq_num = adapter->msix_entries[vidx].vector;
-		free_irq(irq_num, &vport->q_vectors[vector]);
+		kfree(free_irq(irq_num, &vport->q_vectors[vector]));
 	}
 
 	return err;
@@ -4255,7 +4258,7 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
 		/* only set affinity_mask if the CPU is online */
 		if (cpu_online(v_idx))
-			cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+			cpumask_set_cpu(v_idx, q_vector->affinity_mask);
 	}
 }
@@ -4299,6 +4302,9 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
 		q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
 		q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
 
+		if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
+			goto error;
+
 		q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
 				       GFP_KERNEL);
 		if (!q_vector->tx)
@@ -505,7 +505,6 @@ struct idpf_intr_reg {
 /**
  * struct idpf_q_vector
  * @vport: Vport back pointer
- * @affinity_mask: CPU affinity mask
  * @napi: napi handler
  * @v_idx: Vector index
  * @intr_reg: See struct idpf_intr_reg
@@ -526,11 +525,10 @@ struct idpf_intr_reg {
  * @num_bufq: Number of buffer queues
  * @bufq: Array of buffer queues to service
  * @total_events: Number of interrupts processed
- * @name: Queue vector name
+ * @affinity_mask: CPU affinity mask
  */
 struct idpf_q_vector {
 	struct idpf_vport *vport;
-	cpumask_t affinity_mask;
 	struct napi_struct napi;
 	u16 v_idx;
 	struct idpf_intr_reg intr_reg;
@@ -556,7 +554,8 @@ struct idpf_q_vector {
 	struct idpf_buf_queue **bufq;
 
 	u16 total_events;
-	char *name;
+
+	cpumask_var_t affinity_mask;
 };
 
 struct idpf_rx_queue_stats {