Commit af061a64 authored by Mike Marciniszyn's avatar Mike Marciniszyn Committed by Roland Dreier

IB/qib: Use RCU for qpn lookup

The heavyweight spinlock in qib_lookup_qpn() is replaced with RCU.
The hash list itself is now accessed via jhash functions instead of mod.

The changes should benefit multiple receive contexts in different
processors by not contending for the lock just to read the hash
structures.

The patch also adds a lookaside_qp (pointer) and a lookaside_qpn in
the context.  The interrupt handler will test the current packet's qpn
against lookaside_qpn if the lookaside_qp pointer is non-NULL.  The
pointer is NULL'ed when the interrupt handler exits.
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@qlogic.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent 9e1c0e43
...@@ -223,6 +223,9 @@ struct qib_ctxtdata { ...@@ -223,6 +223,9 @@ struct qib_ctxtdata {
/* ctxt rcvhdrq head offset */ /* ctxt rcvhdrq head offset */
u32 head; u32 head;
u32 pkt_count; u32 pkt_count;
/* lookaside fields */
struct qib_qp *lookaside_qp;
u32 lookaside_qpn;
/* QPs waiting for context processing */ /* QPs waiting for context processing */
struct list_head qp_wait_list; struct list_head qp_wait_list;
}; };
......
...@@ -547,6 +547,15 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts) ...@@ -547,6 +547,15 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
updegr = 0; updegr = 0;
} }
} }
/*
* Notify qib_destroy_qp() if it is waiting
* for lookaside_qp to finish.
*/
if (rcd->lookaside_qp) {
if (atomic_dec_and_test(&rcd->lookaside_qp->refcount))
wake_up(&rcd->lookaside_qp->wait);
rcd->lookaside_qp = NULL;
}
rcd->head = l; rcd->head = l;
rcd->pkt_count += i; rcd->pkt_count += i;
......
...@@ -34,6 +34,7 @@ ...@@ -34,6 +34,7 @@
#include <linux/err.h> #include <linux/err.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/jhash.h>
#include "qib.h" #include "qib.h"
...@@ -204,6 +205,13 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn) ...@@ -204,6 +205,13 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
clear_bit(qpn & BITS_PER_PAGE_MASK, map->page); clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
} }
static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
return jhash_1word(qpn, dev->qp_rnd) &
(dev->qp_table_size - 1);
}
/* /*
* Put the QP into the hash table. * Put the QP into the hash table.
* The hash table holds a reference to the QP. * The hash table holds a reference to the QP.
...@@ -211,22 +219,23 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn) ...@@ -211,22 +219,23 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp) static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{ {
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
unsigned n = qp->ibqp.qp_num % dev->qp_table_size;
unsigned long flags; unsigned long flags;
unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
spin_lock_irqsave(&dev->qpt_lock, flags); spin_lock_irqsave(&dev->qpt_lock, flags);
atomic_inc(&qp->refcount);
if (qp->ibqp.qp_num == 0) if (qp->ibqp.qp_num == 0)
ibp->qp0 = qp; rcu_assign_pointer(ibp->qp0, qp);
else if (qp->ibqp.qp_num == 1) else if (qp->ibqp.qp_num == 1)
ibp->qp1 = qp; rcu_assign_pointer(ibp->qp1, qp);
else { else {
qp->next = dev->qp_table[n]; qp->next = dev->qp_table[n];
dev->qp_table[n] = qp; rcu_assign_pointer(dev->qp_table[n], qp);
} }
atomic_inc(&qp->refcount);
spin_unlock_irqrestore(&dev->qpt_lock, flags); spin_unlock_irqrestore(&dev->qpt_lock, flags);
synchronize_rcu();
} }
/* /*
...@@ -236,29 +245,32 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp) ...@@ -236,29 +245,32 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{ {
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
struct qib_qp *q, **qpp; unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
unsigned long flags; unsigned long flags;
qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size];
spin_lock_irqsave(&dev->qpt_lock, flags); spin_lock_irqsave(&dev->qpt_lock, flags);
if (ibp->qp0 == qp) { if (ibp->qp0 == qp) {
ibp->qp0 = NULL;
atomic_dec(&qp->refcount); atomic_dec(&qp->refcount);
rcu_assign_pointer(ibp->qp0, NULL);
} else if (ibp->qp1 == qp) { } else if (ibp->qp1 == qp) {
ibp->qp1 = NULL;
atomic_dec(&qp->refcount); atomic_dec(&qp->refcount);
} else rcu_assign_pointer(ibp->qp1, NULL);
} else {
struct qib_qp *q, **qpp;
qpp = &dev->qp_table[n];
for (; (q = *qpp) != NULL; qpp = &q->next) for (; (q = *qpp) != NULL; qpp = &q->next)
if (q == qp) { if (q == qp) {
*qpp = qp->next;
qp->next = NULL;
atomic_dec(&qp->refcount); atomic_dec(&qp->refcount);
rcu_assign_pointer(*qpp, qp->next);
qp->next = NULL;
break; break;
} }
}
spin_unlock_irqrestore(&dev->qpt_lock, flags); spin_unlock_irqrestore(&dev->qpt_lock, flags);
synchronize_rcu();
} }
/** /**
...@@ -280,21 +292,24 @@ unsigned qib_free_all_qps(struct qib_devdata *dd) ...@@ -280,21 +292,24 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
if (!qib_mcast_tree_empty(ibp)) if (!qib_mcast_tree_empty(ibp))
qp_inuse++; qp_inuse++;
if (ibp->qp0) rcu_read_lock();
if (rcu_dereference(ibp->qp0))
qp_inuse++; qp_inuse++;
if (ibp->qp1) if (rcu_dereference(ibp->qp1))
qp_inuse++; qp_inuse++;
rcu_read_unlock();
} }
spin_lock_irqsave(&dev->qpt_lock, flags); spin_lock_irqsave(&dev->qpt_lock, flags);
for (n = 0; n < dev->qp_table_size; n++) { for (n = 0; n < dev->qp_table_size; n++) {
qp = dev->qp_table[n]; qp = dev->qp_table[n];
dev->qp_table[n] = NULL; rcu_assign_pointer(dev->qp_table[n], NULL);
for (; qp; qp = qp->next) for (; qp; qp = qp->next)
qp_inuse++; qp_inuse++;
} }
spin_unlock_irqrestore(&dev->qpt_lock, flags); spin_unlock_irqrestore(&dev->qpt_lock, flags);
synchronize_rcu();
return qp_inuse; return qp_inuse;
} }
...@@ -309,25 +324,28 @@ unsigned qib_free_all_qps(struct qib_devdata *dd) ...@@ -309,25 +324,28 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
*/ */
struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn) struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{ {
struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev; struct qib_qp *qp = NULL;
unsigned long flags;
struct qib_qp *qp;
spin_lock_irqsave(&dev->qpt_lock, flags);
if (unlikely(qpn <= 1)) {
rcu_read_lock();
if (qpn == 0) if (qpn == 0)
qp = ibp->qp0; qp = rcu_dereference(ibp->qp0);
else if (qpn == 1)
qp = ibp->qp1;
else else
for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp; qp = rcu_dereference(ibp->qp1);
qp = qp->next) } else {
struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
unsigned n = qpn_hash(dev, qpn);
rcu_read_lock();
for (qp = dev->qp_table[n]; rcu_dereference(qp); qp = qp->next)
if (qp->ibqp.qp_num == qpn) if (qp->ibqp.qp_num == qpn)
break; break;
}
if (qp) if (qp)
atomic_inc(&qp->refcount); if (unlikely(!atomic_inc_not_zero(&qp->refcount)))
qp = NULL;
spin_unlock_irqrestore(&dev->qpt_lock, flags); rcu_read_unlock();
return qp; return qp;
} }
...@@ -1015,6 +1033,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd, ...@@ -1015,6 +1033,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
ret = ERR_PTR(-ENOMEM); ret = ERR_PTR(-ENOMEM);
goto bail_swq; goto bail_swq;
} }
RCU_INIT_POINTER(qp->next, NULL);
if (init_attr->srq) if (init_attr->srq)
sz = 0; sz = 0;
else { else {
......
...@@ -38,11 +38,12 @@ ...@@ -38,11 +38,12 @@
#include <linux/utsname.h> #include <linux/utsname.h>
#include <linux/rculist.h> #include <linux/rculist.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/random.h>
#include "qib.h" #include "qib.h"
#include "qib_common.h" #include "qib_common.h"
static unsigned int ib_qib_qp_table_size = 251; static unsigned int ib_qib_qp_table_size = 256;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO); module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size"); MODULE_PARM_DESC(qp_table_size, "QP table size");
...@@ -659,17 +660,25 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) ...@@ -659,17 +660,25 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
if (atomic_dec_return(&mcast->refcount) <= 1) if (atomic_dec_return(&mcast->refcount) <= 1)
wake_up(&mcast->wait); wake_up(&mcast->wait);
} else { } else {
if (rcd->lookaside_qp) {
if (rcd->lookaside_qpn != qp_num) {
if (atomic_dec_and_test(
&rcd->lookaside_qp->refcount))
wake_up(
&rcd->lookaside_qp->wait);
rcd->lookaside_qp = NULL;
}
}
if (!rcd->lookaside_qp) {
qp = qib_lookup_qpn(ibp, qp_num); qp = qib_lookup_qpn(ibp, qp_num);
if (!qp) if (!qp)
goto drop; goto drop;
rcd->lookaside_qp = qp;
rcd->lookaside_qpn = qp_num;
} else
qp = rcd->lookaside_qp;
ibp->n_unicast_rcv++; ibp->n_unicast_rcv++;
qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp); qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
/*
* Notify qib_destroy_qp() if it is waiting
* for us to finish.
*/
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
} }
return; return;
...@@ -1974,6 +1983,8 @@ static void init_ibport(struct qib_pportdata *ppd) ...@@ -1974,6 +1983,8 @@ static void init_ibport(struct qib_pportdata *ppd)
ibp->z_excessive_buffer_overrun_errors = ibp->z_excessive_buffer_overrun_errors =
cntrs.excessive_buffer_overrun_errors; cntrs.excessive_buffer_overrun_errors;
ibp->z_vl15_dropped = cntrs.vl15_dropped; ibp->z_vl15_dropped = cntrs.vl15_dropped;
RCU_INIT_POINTER(ibp->qp0, NULL);
RCU_INIT_POINTER(ibp->qp1, NULL);
} }
/** /**
...@@ -1990,12 +2001,15 @@ int qib_register_ib_device(struct qib_devdata *dd) ...@@ -1990,12 +2001,15 @@ int qib_register_ib_device(struct qib_devdata *dd)
int ret; int ret;
dev->qp_table_size = ib_qib_qp_table_size; dev->qp_table_size = ib_qib_qp_table_size;
dev->qp_table = kzalloc(dev->qp_table_size * sizeof *dev->qp_table, get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table,
GFP_KERNEL); GFP_KERNEL);
if (!dev->qp_table) { if (!dev->qp_table) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_qpt; goto err_qpt;
} }
for (i = 0; i < dev->qp_table_size; i++)
RCU_INIT_POINTER(dev->qp_table[i], NULL);
for (i = 0; i < dd->num_pports; i++) for (i = 0; i < dd->num_pports; i++)
init_ibport(ppd + i); init_ibport(ppd + i);
......
...@@ -724,7 +724,8 @@ struct qib_ibdev { ...@@ -724,7 +724,8 @@ struct qib_ibdev {
dma_addr_t pio_hdrs_phys; dma_addr_t pio_hdrs_phys;
/* list of QPs waiting for RNR timer */ /* list of QPs waiting for RNR timer */
spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */ spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
unsigned qp_table_size; /* size of the hash table */ u32 qp_table_size; /* size of the hash table */
u32 qp_rnd; /* random bytes for hash */
spinlock_t qpt_lock; spinlock_t qpt_lock;
u32 n_piowait; u32 n_piowait;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment