Commit 44fbc1b6 authored by Nick Child, committed by David S. Miller

ibmvnic: Assign IRQ affinity hints to device queues

Assign affinity hints to ibmvnic device queue interrupts.
Affinity hints are assigned and removed during sub-CRQ init and
teardown, respectively. This update should improve latency when the
hints are honored, since interrupt lines and their processing are
distributed more evenly among CPUs. The implementation is based on
the virtio_net driver.
Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>
Signed-off-by: Dany Madden <drt@linux.ibm.com>
Signed-off-by: Nick Child <nnac123@linux.ibm.com>
Reviewed-by: Rick Lindsley <ricklind@linux.ibm.com>
Reviewed-by: Haren Myneni <haren@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e384cf35
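For intuition before the diff: the patch divides the online CPUs evenly across all tx and rx queue interrupts. Each IRQ gets stride = max(num_cpu / total_queues, 1) CPUs, and any remainder ("stragglers") is handed out one extra CPU per queue from the front of the list. The sketch below mimics that arithmetic in standalone userspace C with made-up counts (16 CPUs, 8 tx + 4 rx queues); it is an illustration only, not driver code. The real driver walks cpu_online_mask with cpumask_next_wrap() instead of a plain modular counter.

	#include <stdio.h>

	int main(void)
	{
		int num_cpu = 16, num_txqs = 8, num_rxqs = 4;	/* made-up example values */
		int total_queues = num_txqs + num_rxqs;
		/* number of CPUs assigned per IRQ, at least one */
		int stride = (num_cpu / total_queues > 0) ? num_cpu / total_queues : 1;
		/* leftover CPUs, handed out one per queue until exhausted */
		int stragglers = (num_cpu >= total_queues) ? num_cpu % total_queues : 0;
		int cpu = 0;

		for (int q = 0; q < total_queues; q++) {
			int span = stride;

			/* while we have extra CPUs, give one extra to this IRQ */
			if (stragglers) {
				span++;
				stragglers--;
			}
			printf("queue %2d -> CPUs", q);
			for (int i = 0; i < span; i++) {
				printf(" %d", cpu);
				cpu = (cpu + 1) % num_cpu;	/* wrap around the CPU list */
			}
			printf("\n");
		}
		return 0;
	}

With these numbers the first four queues get two CPUs each and the remaining eight get one, so every online CPU backs exactly one queue interrupt.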
@@ -68,6 +68,7 @@
 #include <linux/workqueue.h>
 #include <linux/if_vlan.h>
 #include <linux/utsname.h>
+#include <linux/cpu.h>
 #include "ibmvnic.h"
@@ -171,6 +172,132 @@ static int send_version_xchg(struct ibmvnic_adapter *adapter)
 	return ibmvnic_send_crq(adapter, &crq);
 }
 
+static void ibmvnic_clean_queue_affinity(struct ibmvnic_adapter *adapter,
+					 struct ibmvnic_sub_crq_queue *queue)
+{
+	if (!(queue && queue->irq))
+		return;
+
+	cpumask_clear(queue->affinity_mask);
+
+	if (irq_set_affinity_and_hint(queue->irq, NULL))
+		netdev_warn(adapter->netdev,
+			    "%s: Clear affinity failed, queue addr = %p, IRQ = %d\n",
+			    __func__, queue, queue->irq);
+}
+
+static void ibmvnic_clean_affinity(struct ibmvnic_adapter *adapter)
+{
+	struct ibmvnic_sub_crq_queue **rxqs;
+	struct ibmvnic_sub_crq_queue **txqs;
+	int num_rxqs, num_txqs;
+	int rc, i;
+
+	rc = 0;
+	rxqs = adapter->rx_scrq;
+	txqs = adapter->tx_scrq;
+	num_txqs = adapter->num_active_tx_scrqs;
+	num_rxqs = adapter->num_active_rx_scrqs;
+
+	netdev_dbg(adapter->netdev, "%s: Cleaning irq affinity hints", __func__);
+	if (txqs) {
+		for (i = 0; i < num_txqs; i++)
+			ibmvnic_clean_queue_affinity(adapter, txqs[i]);
+	}
+	if (rxqs) {
+		for (i = 0; i < num_rxqs; i++)
+			ibmvnic_clean_queue_affinity(adapter, rxqs[i]);
+	}
+}
+
+static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
+				      unsigned int *cpu, int *stragglers,
+				      int stride)
+{
+	cpumask_var_t mask;
+	int i;
+	int rc = 0;
+
+	if (!(queue && queue->irq))
+		return rc;
+
+	/* cpumask_var_t is either a pointer or array, allocation works here */
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	/* while we have extra cpu give one extra to this irq */
+	if (*stragglers) {
+		stride++;
+		(*stragglers)--;
+	}
+	/* atomic write is safer than writing bit by bit directly */
+	for (i = 0; i < stride; i++) {
+		cpumask_set_cpu(*cpu, mask);
+		*cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
+					 nr_cpu_ids, false);
+	}
+	/* set queue affinity mask */
+	cpumask_copy(queue->affinity_mask, mask);
+	rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
+	free_cpumask_var(mask);
+
+	return rc;
+}
+
+/* assumes cpu read lock is held */
+static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
+{
+	struct ibmvnic_sub_crq_queue **rxqs = adapter->rx_scrq;
+	struct ibmvnic_sub_crq_queue **txqs = adapter->tx_scrq;
+	struct ibmvnic_sub_crq_queue *queue;
+	int num_rxqs = adapter->num_active_rx_scrqs;
+	int num_txqs = adapter->num_active_tx_scrqs;
+	int total_queues, stride, stragglers, i;
+	unsigned int num_cpu, cpu;
+	int rc = 0;
+
+	netdev_dbg(adapter->netdev, "%s: Setting irq affinity hints", __func__);
+	if (!(adapter->rx_scrq && adapter->tx_scrq)) {
+		netdev_warn(adapter->netdev,
+			    "%s: Set affinity failed, queues not allocated\n",
+			    __func__);
+		return;
+	}
+
+	total_queues = num_rxqs + num_txqs;
+	num_cpu = num_online_cpus();
+	/* number of cpu's assigned per irq */
+	stride = max_t(int, num_cpu / total_queues, 1);
+	/* number of leftover cpu's */
+	stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
+	/* next available cpu to assign irq to */
+	cpu = cpumask_next(-1, cpu_online_mask);
+
+	for (i = 0; i < num_txqs; i++) {
+		queue = txqs[i];
+		rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
+						stride);
+		if (rc)
+			goto out;
+	}
+
+	for (i = 0; i < num_rxqs; i++) {
+		queue = rxqs[i];
+		rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
+						stride);
+		if (rc)
+			goto out;
+	}
+
+out:
+	if (rc) {
+		netdev_warn(adapter->netdev,
+			    "%s: Set affinity failed, queue addr = %p, IRQ = %d, rc = %d.\n",
+			    __func__, queue, queue->irq, rc);
+		ibmvnic_clean_affinity(adapter);
+	}
+}
+
 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
 			  unsigned long length, unsigned long *number,
 			  unsigned long *irq)
@@ -3626,6 +3753,8 @@ static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
 	if (!adapter->tx_scrq || !adapter->rx_scrq)
 		return -EINVAL;
 
+	ibmvnic_clean_affinity(adapter);
+
 	for (i = 0; i < adapter->req_tx_queues; i++) {
 		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
 		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
@@ -3675,6 +3804,7 @@ static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
 			 DMA_BIDIRECTIONAL);
 	free_pages((unsigned long)scrq->msgs, 2);
+	free_cpumask_var(scrq->affinity_mask);
 	kfree(scrq);
 }
@@ -3695,6 +3825,8 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
 		goto zero_page_failed;
 	}
+	if (!zalloc_cpumask_var(&scrq->affinity_mask, GFP_KERNEL))
+		goto cpumask_alloc_failed;
 
 	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
 					 DMA_BIDIRECTIONAL);
@@ -3747,6 +3879,8 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
 			 DMA_BIDIRECTIONAL);
 map_failed:
+	free_cpumask_var(scrq->affinity_mask);
+cpumask_alloc_failed:
 	free_pages((unsigned long)scrq->msgs, 2);
 zero_page_failed:
 	kfree(scrq);
@@ -3758,6 +3892,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
 {
 	int i;
 
+	ibmvnic_clean_affinity(adapter);
 	if (adapter->tx_scrq) {
 		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
 			if (!adapter->tx_scrq[i])
@@ -4035,6 +4170,11 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
 			goto req_rx_irq_failed;
 		}
 	}
+
+	cpus_read_lock();
+	ibmvnic_set_affinity(adapter);
+	cpus_read_unlock();
+
 	return rc;
 
 req_rx_irq_failed:
...
@@ -825,6 +825,7 @@ struct ibmvnic_sub_crq_queue {
 	atomic_t used;
 	char name[32];
 	u64 handle;
+	cpumask_var_t affinity_mask;
 } ____cacheline_aligned;
 
 struct ibmvnic_long_term_buff {
...
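One practical consequence of using irq_set_affinity_and_hint(): it both applies the mask and publishes it as the IRQ's affinity hint, so once the queues are up the chosen mask for each sub-CRQ interrupt should be readable from /proc/irq/<n>/affinity_hint, and userspace balancers such as irqbalance treat the hint as a placement suggestion rather than a hard binding.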