Commit 5136bfea authored by Kamenee Arumugam, committed by Jason Gunthorpe

IB/{hfi1, qib, rdmavt}: Put qp in error state when cq is full

When a completion queue is full, the associated queue pairs are not put
into the error state. According to the IBTA specification, this is a
violation.

Quote from IBTA spec:
C9-218: A Requester Class F error occurs when the CQ is inaccessible or
full and an attempt is made to complete a WQE.  The Affected QP shall be
moved to the error state and affiliated asynchronous errors generated as
described in 11.6.3.1 Affiliated Asynchronous Events on page 678. The
current WQE and any subsequent WQEs are left in an unknown state.

C11-37: The CI shall generate a CQ Error when a CQ overrun is
detected. This condition will result in an Affiliated Asynchronous Error
for any associated Work Queues when they attempt to use that
CQ. Completions can no longer be added to the CQ. It is not guaranteed
that completions present in the CQ at the time the error occurred can be
retrieved. Possible causes include a CQ overrun or a CQ protection error.

Put the QP into the error state when the CQ is full. Implement a "full" state
on the CQ so that subsequent completion attempts continue to put the other
associated QPs into the error state.
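
The core of the change is a pair of inline wrappers around rvt_cq_enter() that
every driver call site switches to. A condensed sketch of the receive-side
wrapper (the send-side rvt_send_cq() is identical except that it uses
qp->ibqp.send_cq):

	/* Sketch, condensed from the rvt_recv_cq() helper added below. */
	static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
				       bool solicited)
	{
		struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);

		/* rvt_cq_enter() now returns false when the CQ has overrun. */
		if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
			rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
	}
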
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Kamenee Arumugam <kamenee.arumugam@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: default avatarJason Gunthorpe <jgg@mellanox.com>
parent f592ae3c
@@ -3008,8 +3008,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
 		wc.dlid_path_bits = 0;
 		wc.port_num = 0;
 		/* Signal completion event if the solicited bit is set. */
-		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
-			     ib_bth_is_solicited(ohdr));
+		rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
 		break;
 	case OP(RDMA_WRITE_ONLY):
......
@@ -476,8 +476,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
 		wc.dlid_path_bits = 0;
 		wc.port_num = 0;
 		/* Signal completion event if the solicited bit is set. */
-		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
-			     ib_bth_is_solicited(ohdr));
+		rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
 		break;
 	case OP(RDMA_WRITE_FIRST):
......
@@ -255,8 +255,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 	wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
 	wc.port_num = qp->port_num;
 	/* Signal completion event if the solicited bit is set. */
-	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
-		     swqe->wr.send_flags & IB_SEND_SOLICITED);
+	rvt_recv_cq(qp, &wc, swqe->wr.send_flags & IB_SEND_SOLICITED);
 	ibp->rvp.n_loop_pkts++;
 bail_unlock:
 	spin_unlock_irqrestore(&qp->r_lock, flags);
@@ -1061,7 +1060,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
 			dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
 	wc.port_num = qp->port_num;
 	/* Signal completion event if the solicited bit is set. */
-	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, solicited);
+	rvt_recv_cq(qp, &wc, solicited);
 	return;
 drop:
......
@@ -1891,8 +1891,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
 		wc.dlid_path_bits = 0;
 		wc.port_num = 0;
 		/* Signal completion event if the solicited bit is set. */
-		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
-			     ib_bth_is_solicited(ohdr));
+		rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
 		break;
 	case OP(RDMA_WRITE_FIRST):
......
@@ -400,8 +400,7 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
 		wc.dlid_path_bits = 0;
 		wc.port_num = 0;
 		/* Signal completion event if the solicited bit is set. */
-		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
-			     ib_bth_is_solicited(ohdr));
+		rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
 		break;
 	case OP(RDMA_WRITE_FIRST):
......
@@ -210,8 +210,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 	wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
 	wc.port_num = qp->port_num;
 	/* Signal completion event if the solicited bit is set. */
-	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
-		     swqe->wr.send_flags & IB_SEND_SOLICITED);
+	rvt_recv_cq(qp, &wc, swqe->wr.send_flags & IB_SEND_SOLICITED);
 	ibp->rvp.n_loop_pkts++;
 bail_unlock:
 	spin_unlock_irqrestore(&qp->r_lock, flags);
@@ -573,8 +572,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
 			dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
 	wc.port_num = qp->port_num;
 	/* Signal completion event if the solicited bit is set. */
-	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
-		     ib_bth_is_solicited(ohdr));
+	rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
 	return;
 drop:
......
@@ -60,8 +60,11 @@ static struct workqueue_struct *comp_vector_wq;
  * @solicited: true if @entry is solicited
  *
  * This may be called with qp->s_lock held.
+ *
+ * Return: return true on success, else return
+ * false if cq is full.
  */
-void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
+bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
 {
 	struct ib_uverbs_wc *uqueue = NULL;
 	struct ib_wc *kqueue = NULL;
@@ -97,7 +100,12 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
 		next = head + 1;
 	}
-	if (unlikely(next == tail)) {
+	if (unlikely(next == tail || cq->cq_full)) {
+		struct rvt_dev_info *rdi = cq->rdi;
+
+		if (!cq->cq_full)
+			rvt_pr_err_ratelimited(rdi, "CQ is full!\n");
+		cq->cq_full = true;
 		spin_unlock_irqrestore(&cq->lock, flags);
 		if (cq->ibcq.event_handler) {
 			struct ib_event ev;
@@ -107,7 +115,7 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
 			ev.event = IB_EVENT_CQ_ERR;
 			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
 		}
-		return;
+		return false;
 	}
 	trace_rvt_cq_enter(cq, entry, head);
 	if (uqueue) {
@@ -146,6 +154,7 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
 	}
 	spin_unlock_irqrestore(&cq->lock, flags);
+	return true;
 }
 EXPORT_SYMBOL(rvt_cq_enter);
......
@@ -3103,8 +3103,7 @@ void rvt_ruc_loopback(struct rvt_qp *sqp)
 	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
 	wc.port_num = 1;
 	/* Signal completion event if the solicited bit is set. */
-	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
-		     wqe->wr.send_flags & IB_SEND_SOLICITED);
+	rvt_recv_cq(qp, &wc, wqe->wr.send_flags & IB_SEND_SOLICITED);
 send_comp:
 	spin_unlock_irqrestore(&qp->r_lock, flags);
......
@@ -78,6 +78,12 @@
 			     fmt, \
 			     ##__VA_ARGS__)
 
+#define rvt_pr_err_ratelimited(rdi, fmt, ...) \
+	__rvt_pr_err_ratelimited((rdi)->driver_f.get_pci_dev(rdi), \
+				 rvt_get_ibdev_name(rdi), \
+				 fmt, \
+				 ##__VA_ARGS__)
+
 #define __rvt_pr_info(pdev, name, fmt, ...) \
 	dev_info(&pdev->dev, "%s: " fmt, name, ##__VA_ARGS__)
@@ -87,6 +93,9 @@
 #define __rvt_pr_err(pdev, name, fmt, ...) \
 	dev_err(&pdev->dev, "%s: " fmt, name, ##__VA_ARGS__)
 
+#define __rvt_pr_err_ratelimited(pdev, name, fmt, ...) \
+	dev_err_ratelimited(&(pdev)->dev, "%s: " fmt, name, ##__VA_ARGS__)
+
 static inline int ibport_num_to_idx(struct ib_device *ibdev, u8 port_num)
 {
 	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
......
@@ -93,6 +93,7 @@ struct rvt_cq {
 	spinlock_t lock; /* protect changes in this struct */
 	u8 notify;
 	u8 triggered;
+	u8 cq_full;
 	int comp_vector_cpu;
 	struct rvt_dev_info *rdi;
 	struct rvt_cq_wc *queue;
@@ -105,6 +106,6 @@ static inline struct rvt_cq *ibcq_to_rvtcq(struct ib_cq *ibcq)
 	return container_of(ibcq, struct rvt_cq, ibcq);
 }
 
-void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited);
+bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited);
 
 #endif /* DEF_RDMAVT_INCCQH */
@@ -718,6 +718,48 @@ rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
 	return val;
 }
 
+int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
+
+/**
+ * rvt_recv_cq - add a new entry to completion queue
+ *	by receive queue
+ * @qp: receive queue
+ * @wc: work completion entry to add
+ * @solicited: true if @entry is solicited
+ *
+ * This is wrapper function for rvt_enter_cq function call by
+ * receive queue. If rvt_cq_enter return false, it means cq is
+ * full and the qp is put into error state.
+ */
+static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
+			       bool solicited)
+{
+	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);
+
+	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
+		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
+}
+
+/**
+ * rvt_send_cq - add a new entry to completion queue
+ *	by send queue
+ * @qp: send queue
+ * @wc: work completion entry to add
+ * @solicited: true if @entry is solicited
+ *
+ * This is wrapper function for rvt_enter_cq function call by
+ * send queue. If rvt_cq_enter return false, it means cq is
+ * full and the qp is put into error state.
+ */
+static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
+			       bool solicited)
+{
+	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);
+
+	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
+		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
+}
+
 /**
  * rvt_qp_complete_swqe - insert send completion
  * @qp - the qp
@@ -768,9 +810,7 @@ rvt_qp_complete_swqe(struct rvt_qp *qp,
 			.qp = &qp->ibqp,
 			.byte_len = byte_len,
 		};
-
-		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &w,
-			     status != IB_WC_SUCCESS);
+		rvt_send_cq(qp, &w, status != IB_WC_SUCCESS);
 	}
 	return last;
 }
@@ -780,7 +820,6 @@ extern const int ib_rvt_state_ops[];
 struct rvt_dev_info;
 int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
 void rvt_comm_est(struct rvt_qp *qp);
-int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
 void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
 unsigned long rvt_rnr_tbl_to_usec(u32 index);
 enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
......
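
From a consumer's point of view, a kernel ULP that registered a CQ event
handler now receives IB_EVENT_CQ_ERR when a rdmavt CQ overruns, and the
affected QPs are moved to the error state so their outstanding work requests
complete with flush errors. A minimal, hypothetical handler (not part of this
patch) might look like:

	/* Hypothetical ULP-side handler; names are illustrative only. */
	static void my_cq_event_handler(struct ib_event *event, void *context)
	{
		if (event->event == IB_EVENT_CQ_ERR)
			pr_err("CQ %p overran; associated QPs go to error state\n",
			       event->element.cq);
	}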