Commit a2f3bde8 authored by Doug Ledford

Merge branch 'tid-read' into hfi1-tid

This is the series for adding TID RDMA read. Kaike put a lot of effort
into making this more consumable for review, so special thanks to him.

Allocating resources and tracing are separated out, followed by patches
which build up the read request. Then we have the patches to receive
incoming TID RDMA read requests and handle integration with the RC
protocol.

See the cover letter of the original posting for a more detailed
overview of TID RDMA.

https://www.spinics.net/lists/linux-rdma/msg66611.html

* tid-read:
  IB/hfi1: Add static trace for TID RDMA READ protocol
  IB/hfi1: Enable TID RDMA READ protocol
  IB/hfi1: Add interlock between a TID RDMA request and other requests
  IB/hfi1: Integrate TID RDMA READ protocol into RC protocol
  IB/hfi1: Increment the retry timeout value for TID RDMA READ request
  IB/hfi1: Add functions for restarting TID RDMA READ request
  IB/hfi1: Add TID RDMA handlers
  IB/hfi1: Add functions to receive TID RDMA READ response
  IB/hfi1: Add a function to build TID RDMA READ response
  IB/hfi1: Add functions to receive TID RDMA READ request
  IB/hfi1: Set PbcInsertHcrc for TID RDMA packets
  IB/hfi1: Add functions to build TID RDMA READ request
  IB/hfi1: Add static trace for flow and TID management functions
  IB/hfi1: Add the counter n_tidwait
  IB/hfi1: TID RDMA RcvArray programming and TID allocation
  IB/hfi1: TID RDMA flow allocation
  IB/hfi: Move RC functions into a header file
Signed-off-by: Doug Ledford <dledford@redhat.com>
parents 2a642396 3ce5daa2
@@ -4253,6 +4253,8 @@ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
access_sw_pio_drain),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
access_sw_kmem_wait),
[C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL,
hfi1_access_sw_tid_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
access_sw_send_schedule),
[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
...
@@ -927,6 +927,7 @@ enum {
C_SW_PIO_WAIT,
C_SW_PIO_DRAIN,
C_SW_KMEM_WAIT,
C_SW_TID_WAIT,
C_SW_SEND_SCHED,
C_SDMA_DESC_FETCHED_CNT,
C_SDMA_INT_CNT,
...
@@ -340,6 +340,10 @@ struct diag_pkt {
#define HFI1_PSM_IOC_BASE_SEQ 0x0
/* Number of BTH.PSN bits used for sequence number in expected rcvs */
#define HFI1_KDETH_BTH_SEQ_SHIFT 11
#define HFI1_KDETH_BTH_SEQ_MASK (BIT(HFI1_KDETH_BTH_SEQ_SHIFT) - 1)
static inline __u64 rhf_to_cpu(const __le32 *rbuf)
{
return __le64_to_cpu(*((__le64 *)rbuf));
...
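Note on the two defines added above: a KDETH BTH PSN is treated as an 11-bit
per-flow sequence number with the flow generation in the bits above it (the
same split __full_flow_psn() uses later in this series). A standalone
illustration of the decomposition, not part of the patch:

/* Illustration only: decompose a KDETH PSN using the new shift/mask. */
#include <stdint.h>
#include <stdio.h>

#define HFI1_KDETH_BTH_SEQ_SHIFT 11
#define HFI1_KDETH_BTH_SEQ_MASK ((1u << HFI1_KDETH_BTH_SEQ_SHIFT) - 1)

int main(void)
{
	uint32_t psn = 0x123456;                        /* example PSN */
	uint32_t seq = psn & HFI1_KDETH_BTH_SEQ_MASK;   /* low 11 bits: 0x456 */
	uint32_t gen = psn >> HFI1_KDETH_BTH_SEQ_SHIFT; /* upper bits: 0x246 */

	printf("seq 0x%x gen 0x%x\n", seq, gen);
	return 0;
}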
@@ -1575,25 +1575,32 @@ static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
return -EINVAL;
}
void handle_eflags(struct hfi1_packet *packet)
static void show_eflags_errs(struct hfi1_packet *packet)
{
struct hfi1_ctxtdata *rcd = packet->rcd;
u32 rte = rhf_rcv_type_err(packet->rhf);
dd_dev_err(rcd->dd,
"receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s%s] rte 0x%x\n",
rcd->ctxt, packet->rhf,
packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
packet->rhf & RHF_DC_ERR ? "dc " : "",
packet->rhf & RHF_TID_ERR ? "tid " : "",
packet->rhf & RHF_LEN_ERR ? "len " : "",
packet->rhf & RHF_ECC_ERR ? "ecc " : "",
packet->rhf & RHF_VCRC_ERR ? "vcrc " : "",
packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
rte);
}
void handle_eflags(struct hfi1_packet *packet)
{
struct hfi1_ctxtdata *rcd = packet->rcd;
rcv_hdrerr(rcd, rcd->ppd, packet);
if (rhf_err_flags(packet->rhf))
dd_dev_err(rcd->dd,
show_eflags_errs(packet);
"receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s%s] rte 0x%x\n",
rcd->ctxt, packet->rhf,
packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
packet->rhf & RHF_DC_ERR ? "dc " : "",
packet->rhf & RHF_TID_ERR ? "tid " : "",
packet->rhf & RHF_LEN_ERR ? "len " : "",
packet->rhf & RHF_ECC_ERR ? "ecc " : "",
packet->rhf & RHF_VCRC_ERR ? "vcrc " : "",
packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
rte);
}
/*
@@ -1699,11 +1706,14 @@ static int kdeth_process_expected(struct hfi1_packet *packet)
if (unlikely(hfi1_dbg_should_fault_rx(packet)))
return RHF_RCV_CONTINUE;
if (unlikely(rhf_err_flags(packet->rhf)))
if (unlikely(rhf_err_flags(packet->rhf))) {
handle_eflags(packet);
struct hfi1_ctxtdata *rcd = packet->rcd;
dd_dev_err(packet->rcd->dd,
if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
"Unhandled expected packet received. Dropping.\n");
return RHF_RCV_CONTINUE;
}
hfi1_kdeth_expected_rcv(packet);
return RHF_RCV_CONTINUE;
}
@@ -1712,11 +1722,17 @@ static int kdeth_process_eager(struct hfi1_packet *packet)
hfi1_setup_9B_packet(packet);
if (unlikely(hfi1_dbg_should_fault_rx(packet)))
return RHF_RCV_CONTINUE;
if (unlikely(rhf_err_flags(packet->rhf)))
handle_eflags(packet);
dd_dev_err(packet->rcd->dd,
trace_hfi1_rcvhdr(packet);
"Unhandled eager packet received. Dropping.\n");
if (unlikely(rhf_err_flags(packet->rhf))) {
struct hfi1_ctxtdata *rcd = packet->rcd;
show_eflags_errs(packet);
if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
return RHF_RCV_CONTINUE;
}
hfi1_kdeth_eager_rcv(packet);
return RHF_RCV_CONTINUE;
}
...
@@ -198,6 +198,14 @@ struct exp_tid_set {
};
typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
struct tid_queue {
struct list_head queue_head;
/* queue head for QP TID resource waiters */
u32 enqueue; /* count of tid enqueues */
u32 dequeue; /* count of tid dequeues */
};
struct hfi1_ctxtdata {
/* rcvhdrq base, needs mmap before useful */
void *rcvhdrq;
@@ -291,6 +299,12 @@ struct hfi1_ctxtdata {
/* PSM Specific fields */
/* lock protecting all Expected TID data */
struct mutex exp_mutex;
/* lock protecting all Expected TID data of kernel contexts */
spinlock_t exp_lock;
/* Queue for QP's waiting for HW TID flows */
struct tid_queue flow_queue;
/* Queue for QP's waiting for HW receive array entries */
struct tid_queue rarr_queue;
/* when waiting for rcv or pioavail */
wait_queue_head_t wait;
/* uuid from PSM */
@@ -323,6 +337,9 @@ struct hfi1_ctxtdata {
*/
u8 subctxt_cnt;
/* Bit mask to track free TID RDMA HW flows */
unsigned long flow_mask;
struct tid_flow_state flows[RXE_NUM_TID_FLOWS];
};
/**
@@ -2103,7 +2120,7 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK |
#endif
HFI1_PKT_USER_SC_INTEGRITY;
else
else if (ctxt_type != SC_KERNEL)
base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
/* turn on send-side job key checks if !A0 */
...
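The new flow_mask and flows[] members above track which hardware TID flows of
a receive context are free. The actual allocation is done by
hfi1_kern_setup_hw_flow() (declared further down in the series); the sketch
below only shows the bitmask idea, with an assumed flow count and without the
generation and wait-queue handling:

/* Sketch only: claim a free flow index from a mask where a set bit = free. */
#include <stdio.h>

#define NUM_TID_FLOWS 8	/* assumed count for the illustration */

static int claim_flow(unsigned long *mask)
{
	for (int i = 0; i < NUM_TID_FLOWS; i++) {
		if (*mask & (1UL << i)) {
			*mask &= ~(1UL << i);	/* mark the flow busy */
			return i;		/* hardware flow index */
		}
	}
	return -1;				/* caller must queue and wait */
}

int main(void)
{
	unsigned long flow_mask = (1UL << NUM_TID_FLOWS) - 1;	/* all free */

	printf("claimed flow %d\n", claim_flow(&flow_mask));
	printf("claimed flow %d\n", claim_flow(&flow_mask));
	return 0;
}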
@@ -370,6 +370,9 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
mutex_init(&rcd->exp_mutex);
spin_lock_init(&rcd->exp_lock);
INIT_LIST_HEAD(&rcd->flow_queue.queue_head);
INIT_LIST_HEAD(&rcd->rarr_queue.queue_head);
hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);
@@ -472,6 +475,9 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
GFP_KERNEL, numa);
if (!rcd->opstats)
goto bail;
/* Initialize TID flow generations for the context */
hfi1_kern_init_ctxt_generations(rcd);
}
*context = rcd;
@@ -771,6 +777,8 @@ static void enable_chip(struct hfi1_devdata *dd)
rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
if (HFI1_CAP_IS_KSET(TID_RDMA))
rcvmask |= HFI1_RCVCTRL_TIDFLOW_ENB;
hfi1_rcvctrl(dd, rcvmask, rcd);
sc_enable(rcd->sc);
hfi1_rcd_put(rcd);
@@ -1589,7 +1597,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
if (rcd) {
hfi1_clear_tids(rcd);
hfi1_free_ctxt_rcv_groups(rcd);
hfi1_free_ctxt(rcd);
}
}
...
@@ -319,6 +319,7 @@ int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
switch (qp->ibqp.qp_type) {
case IB_QPT_RC:
hfi1_setup_tid_rdma_wqe(qp, wqe);
case IB_QPT_UC:
if (wqe->length > 0x80000000U)
return -EINVAL;
@@ -738,6 +739,7 @@ void flush_qp_waiters(struct rvt_qp *qp)
{
lockdep_assert_held(&qp->s_lock);
flush_iowait(qp);
hfi1_tid_rdma_flush_wait(qp);
}
void stop_send_queue(struct rvt_qp *qp)
@@ -745,6 +747,8 @@ void stop_send_queue(struct rvt_qp *qp)
struct hfi1_qp_priv *priv = qp->priv;
iowait_cancel_work(&priv->s_iowait);
if (cancel_work_sync(&priv->tid_rdma.trigger_work))
rvt_put_qp(qp);
}
void quiesce_qp(struct rvt_qp *qp)
@@ -758,6 +762,7 @@ void quiesce_qp(struct rvt_qp *qp)
void notify_qp_reset(struct rvt_qp *qp)
{
hfi1_qp_kern_exp_rcv_clear_all(qp);
qp->r_adefered = 0;
clear_ahg(qp);
...
@@ -63,11 +63,13 @@ extern const struct rvt_operation_params hfi1_post_parms[];
* HFI1_S_AHG_VALID - ahg header valid on chip
* HFI1_S_AHG_CLEAR - have send engine clear ahg state
* HFI1_S_WAIT_PIO_DRAIN - qp waiting for PIOs to drain
* HFI1_S_WAIT_TID_SPACE - a QP is waiting for TID resource
* HFI1_S_MIN_BIT_MASK - the lowest bit that can be used by hfi1
*/
#define HFI1_S_AHG_VALID 0x80000000
#define HFI1_S_AHG_CLEAR 0x40000000
#define HFI1_S_WAIT_PIO_DRAIN 0x20000000
#define HFI1_S_WAIT_TID_SPACE 0x10000000
#define HFI1_S_MIN_BIT_MASK 0x01000000
/*
...
This diff is collapsed.
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* Copyright(c) 2018 Intel Corporation.
*
*/
#ifndef HFI1_RC_H
#define HFI1_RC_H
/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x
static inline void update_ack_queue(struct rvt_qp *qp, unsigned int n)
{
unsigned int next;
next = n + 1;
if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
next = 0;
qp->s_tail_ack_queue = next;
qp->s_ack_state = OP(ACKNOWLEDGE);
}
static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
struct rvt_qp *qp)
{
if (list_empty(&qp->rspwait)) {
qp->r_flags |= RVT_R_RSP_NAK;
rvt_get_qp(qp);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
}
static inline u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
u32 psn, u32 pmtu)
{
u32 len;
len = delta_psn(psn, wqe->psn) * pmtu;
return rvt_restart_sge(ss, wqe, len);
}
struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
u8 *prev_ack, bool *scheduled);
int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, u64 val,
struct hfi1_ctxtdata *rcd);
struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, struct rvt_swqe *wqe,
struct hfi1_ibport *ibp);
#endif /* HFI1_RC_H */
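The restart_sge() helper above rewinds a WQE by delta_psn(psn, wqe->psn) * pmtu
bytes before handing off to rvt_restart_sge(). A tiny worked example of that
arithmetic (the values are made up):

/* Illustration only: resuming three packets past the WQE's first PSN with a
 * 4 KB MTU means skipping 3 * 4096 bytes of the SGE state. */
#include <stdio.h>

int main(void)
{
	unsigned int first_psn = 0x100, restart_psn = 0x103, pmtu = 4096;
	unsigned int offset = (restart_psn - first_psn) * pmtu;

	printf("restart byte offset = %u\n", offset);	/* 12288 */
	return 0;
}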
This diff is collapsed.
@@ -6,7 +6,27 @@
#ifndef HFI1_TID_RDMA_H
#define HFI1_TID_RDMA_H
#include <linux/circ_buf.h>
#include "common.h"
/* Add a convenience helper */
#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
#define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size)
#define CIRC_PREV(val, size) CIRC_ADD(val, -1, size)
#define TID_RDMA_MIN_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
#define TID_RDMA_MAX_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
#define TID_RDMA_MAX_PAGES (BIT(18) >> PAGE_SHIFT)
/*
* Bit definitions for priv->s_flags.
* These bit flags overload the bit flags defined for the QP's s_flags.
* Due to the fact that these bit fields are used only for the QP priv
* s_flags, there are no collisions.
*
* HFI1_S_TID_WAIT_INTERLCK - QP is waiting for requester interlock
*/
#define HFI1_S_TID_WAIT_INTERLCK BIT(5)
struct tid_rdma_params {
struct rcu_head rcu_head;
@@ -21,10 +41,128 @@ struct tid_rdma_params {
};
struct tid_rdma_qp_params {
struct work_struct trigger_work;
struct tid_rdma_params local;
struct tid_rdma_params __rcu *remote;
};
/* Track state for each hardware flow */
struct tid_flow_state {
u32 generation;
u32 psn;
u32 r_next_psn; /* next PSN to be received (in TID space) */
u8 index;
u8 last_index;
u8 flags;
};
enum tid_rdma_req_state {
TID_REQUEST_INACTIVE = 0,
TID_REQUEST_INIT,
TID_REQUEST_INIT_RESEND,
TID_REQUEST_ACTIVE,
TID_REQUEST_RESEND,
TID_REQUEST_RESEND_ACTIVE,
TID_REQUEST_QUEUED,
TID_REQUEST_SYNC,
TID_REQUEST_RNR_NAK,
TID_REQUEST_COMPLETE,
};
struct tid_rdma_request {
struct rvt_qp *qp;
struct hfi1_ctxtdata *rcd;
union {
struct rvt_swqe *swqe;
struct rvt_ack_entry *ack;
} e;
struct tid_rdma_flow *flows; /* array of tid flows */
u16 n_flows; /* size of the flow buffer window */
u16 setup_head; /* flow index we are setting up */
u16 clear_tail; /* flow index we are clearing */
u16 flow_idx; /* flow index most recently set up */
u32 seg_len;
u32 total_len;
u32 r_flow_psn; /* IB PSN of next segment start */
u32 s_next_psn; /* IB PSN of next segment start for read */
u32 total_segs; /* segments required to complete a request */
u32 cur_seg; /* index of current segment */
u32 comp_seg; /* index of last completed segment */
u32 ack_seg; /* index of last ack'ed segment */
u32 isge; /* index of "current" sge */
u32 ack_pending; /* num acks pending for this request */
enum tid_rdma_req_state state;
};
/*
* When header suppression is used, PSNs associated with a "flow" are
* relevant (and not the PSNs maintained by verbs). Track per-flow
* PSNs here for a TID RDMA segment.
*
*/
struct flow_state {
u32 flags;
u32 resp_ib_psn; /* The IB PSN of the response for this flow */
u32 generation; /* generation of flow */
u32 spsn; /* starting PSN in TID space */
u32 lpsn; /* last PSN in TID space */
u32 r_next_psn; /* next PSN to be received (in TID space) */
/* For tid rdma read */
u32 ib_spsn; /* starting PSN in Verbs space */
u32 ib_lpsn; /* last PSN in Verbs space */
};
struct tid_rdma_pageset {
dma_addr_t addr : 48; /* Only needed for the first page */
u8 idx: 8;
u8 count : 7;
u8 mapped: 1;
};
/**
* kern_tid_node - used for managing TID's in TID groups
*
* @grp_idx: rcd relative index to tid_group
* @map: grp->map captured prior to programming this TID group in HW
* @cnt: Only @cnt of available group entries are actually programmed
*/
struct kern_tid_node {
struct tid_group *grp;
u8 map;
u8 cnt;
};
/* Overall info for a TID RDMA segment */
struct tid_rdma_flow {
/*
* While a TID RDMA segment is being transferred, it uses a QP number
* from the "KDETH section of QP numbers" (which is different from the
* QP number that originated the request). Bits 11-15 of these QP
* numbers identify the "TID flow" for the segment.
*/
struct flow_state flow_state;
struct tid_rdma_request *req;
u32 tid_qpn;
u32 tid_offset;
u32 length;
u32 sent;
u8 tnode_cnt;
u8 tidcnt;
u8 tid_idx;
u8 idx;
u8 npagesets;
u8 npkts;
u8 pkt;
struct kern_tid_node tnode[TID_RDMA_MAX_PAGES];
struct tid_rdma_pageset pagesets[TID_RDMA_MAX_PAGES];
u32 tid_entry[TID_RDMA_MAX_PAGES];
};
bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data);
bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data);
bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data);
@@ -32,9 +170,67 @@ void tid_rdma_conn_error(struct rvt_qp *qp);
void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p);
int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit);
int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
struct rvt_sge_state *ss, bool *last);
int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req);
void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req);
void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
/**
* trdma_clean_swqe - clean flows for swqe if large send queue
* @qp: the qp
* @wqe: the send wqe
*/
static inline void trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
if (!wqe->priv)
return;
__trdma_clean_swqe(qp, wqe);
}
void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp);
int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp, int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
struct ib_qp_init_attr *init_attr); struct ib_qp_init_attr *init_attr);
void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp); void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp);
int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd);
struct cntr_entry;
u64 hfi1_access_sw_tid_wait(const struct cntr_entry *entry,
void *context, int vl, int mode, u64 data);
u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
struct ib_other_headers *ohdr,
u32 *bth1, u32 *bth2, u32 *len);
u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
struct ib_other_headers *ohdr, u32 *bth1,
u32 *bth2, u32 *len);
void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet);
u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
struct ib_other_headers *ohdr, u32 *bth0,
u32 *bth1, u32 *bth2, u32 *len, bool *last);
void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet);
bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
struct hfi1_pportdata *ppd,
struct hfi1_packet *packet);
void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
u32 *bth2);
void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp);
bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe);
void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
static inline void hfi1_setup_tid_rdma_wqe(struct rvt_qp *qp,
struct rvt_swqe *wqe)
{
if (wqe->priv &&
wqe->wr.opcode == IB_WR_RDMA_READ &&
wqe->length >= TID_RDMA_MIN_SEGMENT_SIZE)
setup_tid_rdma_wqe(qp, wqe);
}
#endif /* HFI1_TID_RDMA_H */
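setup_head, flow_idx and clear_tail in struct tid_rdma_request index the
per-request flows[] array as a ring, which is why the CIRC_* helpers near the
top of this header require a power-of-two ring size. A standalone illustration
with an assumed ring size (not driver code):

/* Illustration of the CIRC_ADD()/CIRC_NEXT() wraparound used for the ring. */
#include <stdio.h>

#define CIRC_ADD(val, add, size)	(((val) + (add)) & ((size) - 1))
#define CIRC_NEXT(val, size)		CIRC_ADD(val, 1, size)

#define MAX_FLOWS 4	/* assumed; must be a power of two for the mask */

int main(void)
{
	unsigned int idx = 3;

	printf("next after 3: %u\n", CIRC_NEXT(idx, MAX_FLOWS));	/* 0 */
	printf("two past 3:   %u\n", CIRC_ADD(idx, 2, MAX_FLOWS));	/* 1 */
	return 0;
}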
@@ -46,6 +46,7 @@
*/
#define CREATE_TRACE_POINTS
#include "trace.h"
#include "exp_rcv.h"
static u8 __get_ib_hdr_len(struct ib_header *hdr)
{
@@ -128,6 +129,10 @@ const char *hfi1_trace_get_packet_l2_str(u8 l2)
#define IETH_PRN "ieth rkey:0x%.8x"
#define ATOMICACKETH_PRN "origdata:%llx"
#define ATOMICETH_PRN "vaddr:0x%llx rkey:0x%.8x sdata:%llx cdata:%llx"
#define TID_RDMA_KDETH "kdeth0 0x%x kdeth1 0x%x"
#define TID_RDMA_KDETH_DATA "kdeth0 0x%x: kver %u sh %u intr %u tidctrl %u tid %x offset %x kdeth1 0x%x: jkey %x"
#define TID_READ_REQ_PRN "tid_flow_psn 0x%x tid_flow_qp 0x%x verbs_qp 0x%x"
#define TID_READ_RSP_PRN "verbs_qp 0x%x"
#define OP(transport, op) IB_OPCODE_## transport ## _ ## op
@@ -322,6 +327,38 @@ const char *parse_everbs_hdrs(
parse_syndrome(be32_to_cpu(eh->aeth) >> 24),
be32_to_cpu(eh->aeth) & IB_MSN_MASK);
break;
case OP(TID_RDMA, READ_REQ):
trace_seq_printf(p, TID_RDMA_KDETH " " RETH_PRN " "
TID_READ_REQ_PRN,
le32_to_cpu(eh->tid_rdma.r_req.kdeth0),
le32_to_cpu(eh->tid_rdma.r_req.kdeth1),
ib_u64_get(&eh->tid_rdma.r_req.reth.vaddr),
be32_to_cpu(eh->tid_rdma.r_req.reth.rkey),
be32_to_cpu(eh->tid_rdma.r_req.reth.length),
be32_to_cpu(eh->tid_rdma.r_req.tid_flow_psn),
be32_to_cpu(eh->tid_rdma.r_req.tid_flow_qp),
be32_to_cpu(eh->tid_rdma.r_req.verbs_qp));
break;
case OP(TID_RDMA, READ_RESP):
trace_seq_printf(p, TID_RDMA_KDETH_DATA " " AETH_PRN " "
TID_READ_RSP_PRN,
le32_to_cpu(eh->tid_rdma.r_rsp.kdeth0),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, KVER),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, SH),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, INTR),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, TIDCTRL),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, TID),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, OFFSET),
le32_to_cpu(eh->tid_rdma.r_rsp.kdeth1),
KDETH_GET(eh->tid_rdma.r_rsp.kdeth1, JKEY),
be32_to_cpu(eh->tid_rdma.r_rsp.aeth) >> 24,
parse_syndrome(/* aeth */
be32_to_cpu(eh->tid_rdma.r_rsp.aeth)
>> 24),
(be32_to_cpu(eh->tid_rdma.r_rsp.aeth) &
IB_MSN_MASK),
be32_to_cpu(eh->tid_rdma.r_rsp.verbs_qp));
break;
/* aeth + atomicacketh */
case OP(RC, ATOMIC_ACKNOWLEDGE):
trace_seq_printf(p, AETH_PRN " " ATOMICACKETH_PRN,
@@ -394,6 +431,21 @@ const char *print_u32_array(
return ret;
}
u8 hfi1_trace_get_tid_ctrl(u32 ent)
{
return EXP_TID_GET(ent, CTRL);
}
u16 hfi1_trace_get_tid_len(u32 ent)
{
return EXP_TID_GET(ent, LEN);
}
u16 hfi1_trace_get_tid_idx(u32 ent)
{
return EXP_TID_GET(ent, IDX);
}
__hfi1_trace_fn(AFFINITY);
__hfi1_trace_fn(PKT);
__hfi1_trace_fn(PROC);
...
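The three hfi1_trace_get_tid_*() helpers added above unpack fields of a packed
32-bit RcvArray TID entry with EXP_TID_GET(); the authoritative shifts and
masks live in hfi.h. The snippet below only shows the unpacking pattern, with
assumed field positions rather than the real layout:

/* Pattern illustration only; the field positions here are assumptions. */
#include <stdint.h>
#include <stdio.h>

#define FIELD_GET(ent, shift, mask)	(((ent) >> (shift)) & (mask))

int main(void)
{
	uint32_t ent = 0x00a4c805;	/* arbitrary example entry */

	printf("len  %u\n", FIELD_GET(ent, 0, 0x7ff));	/* assumed LEN bits */
	printf("ctrl %u\n", FIELD_GET(ent, 20, 0x3));	/* assumed CTRL bits */
	printf("idx  %u\n", FIELD_GET(ent, 22, 0x3ff));	/* assumed IDX bits */
	return 0;
}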
@@ -79,6 +79,8 @@ __print_symbolic(opcode, \
ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
ib_opcode_name(RC_COMPARE_SWAP), \
ib_opcode_name(RC_FETCH_ADD), \
ib_opcode_name(TID_RDMA_READ_REQ), \
ib_opcode_name(TID_RDMA_READ_RESP), \
ib_opcode_name(UC_SEND_FIRST), \
ib_opcode_name(UC_SEND_MIDDLE), \
ib_opcode_name(UC_SEND_LAST), \
...
@@ -109,6 +109,54 @@ DEFINE_EVENT(hfi1_rc_template, hfi1_rcv_error,
TP_ARGS(qp, psn)
);
DEFINE_EVENT(/* event */
hfi1_rc_template, hfi1_rc_completion,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DECLARE_EVENT_CLASS(/* rc_ack */
hfi1_rc_ack_template,
TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
struct rvt_swqe *wqe),
TP_ARGS(qp, aeth, psn, wqe),
TP_STRUCT__entry(/* entry */
DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
__field(u32, qpn)
__field(u32, aeth)
__field(u32, psn)
__field(u8, opcode)
__field(u32, spsn)
__field(u32, lpsn)
),
TP_fast_assign(/* assign */
DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
__entry->qpn = qp->ibqp.qp_num;
__entry->aeth = aeth;
__entry->psn = psn;
__entry->opcode = wqe->wr.opcode;
__entry->spsn = wqe->psn;
__entry->lpsn = wqe->lpsn;
),
TP_printk(/* print */
"[%s] qpn 0x%x aeth 0x%x psn 0x%x opcode 0x%x spsn 0x%x lpsn 0x%x",
__get_str(dev),
__entry->qpn,
__entry->aeth,
__entry->psn,
__entry->opcode,
__entry->spsn,
__entry->lpsn
)
);
DEFINE_EVENT(/* do_rc_ack */
hfi1_rc_ack_template, hfi1_rc_ack_do,
TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
struct rvt_swqe *wqe),
TP_ARGS(qp, aeth, psn, wqe)
);
#endif /* __HFI1_TRACE_RC_H */
#undef TRACE_INCLUDE_PATH
...
This diff is collapsed.
@@ -114,19 +114,27 @@ DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
__field(u32, qpn)
__field(u32, flags)
__field(u32, s_flags)
__field(u32, ps_flags)
__field(unsigned long, iow_flags)
),
TP_fast_assign(
DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
__entry->flags = flags;
__entry->qpn = qp->ibqp.qp_num;
__entry->s_flags = qp->s_flags;
__entry->ps_flags =
((struct hfi1_qp_priv *)qp->priv)->s_flags;
__entry->iow_flags =
((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
),
TP_printk(
"[%s] qpn 0x%x flags 0x%x s_flags 0x%x",
"[%s] qpn 0x%x flags 0x%x s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx",
__get_str(dev),
__entry->qpn,
__entry->flags,
__entry->s_flags
__entry->s_flags,
__entry->ps_flags,
__entry->iow_flags
)
);
...
@@ -48,7 +48,6 @@
*/
#include "hfi.h"
#include "exp_rcv.h"
struct tid_pageset {
...
@@ -165,6 +165,7 @@ const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
[IB_WR_SEND] = IB_WC_SEND,
[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
[IB_WR_TID_RDMA_READ] = IB_WC_RDMA_READ,
[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
[IB_WR_SEND_WITH_INV] = IB_WC_SEND,
@@ -200,6 +201,8 @@ const u8 hdr_len_by_opcode[256] = {
[IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28,
[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = 12 + 8 + 4,
[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = 12 + 8 + 4,
[IB_OPCODE_TID_RDMA_READ_REQ] = 12 + 8 + 36,
[IB_OPCODE_TID_RDMA_READ_RESP] = 12 + 8 + 36,
/* UC */
[IB_OPCODE_UC_SEND_FIRST] = 12 + 8,
[IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8,
@@ -243,6 +246,11 @@ static const opcode_handler opcode_handler_tbl[256] = {
[IB_OPCODE_RC_FETCH_ADD] = &hfi1_rc_rcv,
[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = &hfi1_rc_rcv,
[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = &hfi1_rc_rcv,
/* TID RDMA has separate handlers for different opcodes.*/
[IB_OPCODE_TID_RDMA_READ_REQ] = &hfi1_rc_rcv_tid_rdma_read_req,
[IB_OPCODE_TID_RDMA_READ_RESP] = &hfi1_rc_rcv_tid_rdma_read_resp,
/* UC */
[IB_OPCODE_UC_SEND_FIRST] = &hfi1_uc_rcv,
[IB_OPCODE_UC_SEND_MIDDLE] = &hfi1_uc_rcv,
@@ -308,7 +316,7 @@ static inline opcode_handler qp_ok(struct hfi1_packet *packet)
static u64 hfi1_fault_tx(struct rvt_qp *qp, u8 opcode, u64 pbc)
{
#ifdef CONFIG_FAULT_INJECTION
if ((opcode & IB_OPCODE_MSP) == IB_OPCODE_MSP)
if ((opcode & IB_OPCODE_MSP) == IB_OPCODE_MSP) {
/*
* In order to drop non-IB traffic we
* set PbcInsertHrc to NONE (0x2).
@@ -319,8 +327,9 @@ static u64 hfi1_fault_tx(struct rvt_qp *qp, u8 opcode, u64 pbc)
* packet will not be delivered to the
* correct context.
*/
pbc &= ~PBC_INSERT_HCRC_SMASK;
pbc |= (u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT;
else
} else {
/*
* In order to drop regular verbs
* traffic we set the PbcTestEbp
@@ -330,10 +339,129 @@ static u64 hfi1_fault_tx(struct rvt_qp *qp, u8 opcode, u64 pbc)
* triggered and will be dropped.
*/
pbc |= PBC_TEST_EBP;
}
#endif
return pbc;
}
static opcode_handler tid_qp_ok(int opcode, struct hfi1_packet *packet)
{
if (packet->qp->ibqp.qp_type != IB_QPT_RC ||
!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
return NULL;
if ((opcode & RVT_OPCODE_QP_MASK) == IB_OPCODE_TID_RDMA)
return opcode_handler_tbl[opcode];
return NULL;
}
void hfi1_kdeth_eager_rcv(struct hfi1_packet *packet)
{
struct hfi1_ctxtdata *rcd = packet->rcd;
struct ib_header *hdr = packet->hdr;
u32 tlen = packet->tlen;
struct hfi1_pportdata *ppd = rcd->ppd;
struct hfi1_ibport *ibp = &ppd->ibport_data;
struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
opcode_handler opcode_handler;
unsigned long flags;
u32 qp_num;
int lnh;
u8 opcode;
/* DW == LRH (2) + BTH (3) + KDETH (9) + CRC (1) */
if (unlikely(tlen < 15 * sizeof(u32)))
goto drop;
lnh = be16_to_cpu(hdr->lrh[0]) & 3;
if (lnh != HFI1_LRH_BTH)
goto drop;
packet->ohdr = &hdr->u.oth;
trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
inc_opstats(tlen, &rcd->opstats->stats[opcode]);
/* verbs_qp can be picked up from any tid_rdma header struct */
qp_num = be32_to_cpu(packet->ohdr->u.tid_rdma.r_req.verbs_qp) &
RVT_QPN_MASK;
rcu_read_lock();
packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
if (!packet->qp)
goto drop_rcu;
spin_lock_irqsave(&packet->qp->r_lock, flags);
opcode_handler = tid_qp_ok(opcode, packet);
if (likely(opcode_handler))
opcode_handler(packet);
else
goto drop_unlock;
spin_unlock_irqrestore(&packet->qp->r_lock, flags);
rcu_read_unlock();
return;
drop_unlock:
spin_unlock_irqrestore(&packet->qp->r_lock, flags);
drop_rcu:
rcu_read_unlock();
drop:
ibp->rvp.n_pkt_drops++;
}
void hfi1_kdeth_expected_rcv(struct hfi1_packet *packet)
{
struct hfi1_ctxtdata *rcd = packet->rcd;
struct ib_header *hdr = packet->hdr;
u32 tlen = packet->tlen;
struct hfi1_pportdata *ppd = rcd->ppd;
struct hfi1_ibport *ibp = &ppd->ibport_data;
struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
opcode_handler opcode_handler;
unsigned long flags;
u32 qp_num;
int lnh;
u8 opcode;
/* DW == LRH (2) + BTH (3) + KDETH (9) + CRC (1) */
if (unlikely(tlen < 15 * sizeof(u32)))
goto drop;
lnh = be16_to_cpu(hdr->lrh[0]) & 3;
if (lnh != HFI1_LRH_BTH)
goto drop;
packet->ohdr = &hdr->u.oth;
trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
inc_opstats(tlen, &rcd->opstats->stats[opcode]);
/* verbs_qp can be picked up from any tid_rdma header struct */
qp_num = be32_to_cpu(packet->ohdr->u.tid_rdma.r_rsp.verbs_qp) &
RVT_QPN_MASK;
rcu_read_lock();
packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
if (!packet->qp)
goto drop_rcu;
spin_lock_irqsave(&packet->qp->r_lock, flags);
opcode_handler = tid_qp_ok(opcode, packet);
if (likely(opcode_handler))
opcode_handler(packet);
else
goto drop_unlock;
spin_unlock_irqrestore(&packet->qp->r_lock, flags);
rcu_read_unlock();
return;
drop_unlock:
spin_unlock_irqrestore(&packet->qp->r_lock, flags);
drop_rcu:
rcu_read_unlock();
drop:
ibp->rvp.n_pkt_drops++;
}
static int hfi1_do_pkey_check(struct hfi1_packet *packet)
{
struct hfi1_ctxtdata *rcd = packet->rcd;
@@ -504,11 +632,28 @@ static void verbs_sdma_complete(
hfi1_put_txreq(tx);
}
void hfi1_wait_kmem(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
struct ib_qp *ibqp = &qp->ibqp;
struct ib_device *ibdev = ibqp->device;
struct hfi1_ibdev *dev = to_idev(ibdev);
if (list_empty(&priv->s_iowait.list)) {
if (list_empty(&dev->memwait))
mod_timer(&dev->mem_timer, jiffies + 1);
qp->s_flags |= RVT_S_WAIT_KMEM;
list_add_tail(&priv->s_iowait.list, &dev->memwait);
priv->s_iowait.lock = &dev->iowait_lock;
trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
rvt_get_qp(qp);
}
}
static int wait_kmem(struct hfi1_ibdev *dev,
struct rvt_qp *qp,
struct hfi1_pkt_state *ps)
{
struct hfi1_qp_priv *priv = qp->priv;
unsigned long flags;
int ret = 0;
@@ -517,15 +662,7 @@ static int wait_kmem(struct hfi1_ibdev *dev,
write_seqlock(&dev->iowait_lock);
list_add_tail(&ps->s_txreq->txreq.list,
&ps->wait->tx_head);
if (list_empty(&priv->s_iowait.list)) {
hfi1_wait_kmem(qp);
if (list_empty(&dev->memwait))
mod_timer(&dev->mem_timer, jiffies + 1);
qp->s_flags |= RVT_S_WAIT_KMEM;
list_add_tail(&priv->s_iowait.list, &dev->memwait);
priv->s_iowait.lock = &dev->iowait_lock;
trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
rvt_get_qp(qp);
}
write_sequnlock(&dev->iowait_lock);
hfi1_qp_unbusy(qp, ps->wait);
ret = -EBUSY;
@@ -674,6 +811,15 @@ static int build_verbs_tx_desc(
return ret;
}
static u64 update_hcrc(u8 opcode, u64 pbc)
{
if ((opcode & IB_OPCODE_TID_RDMA) == IB_OPCODE_TID_RDMA) {
pbc &= ~PBC_INSERT_HCRC_SMASK;
pbc |= (u64)PBC_IHCRC_LKDETH << PBC_INSERT_HCRC_SHIFT;
}
return pbc;
}
int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc)
{
@@ -719,6 +865,9 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
qp->srate_mbps,
vl,
plen);
/* Update HCRC based on packet opcode */
pbc = update_hcrc(ps->opcode, pbc);
}
tx->wqe = qp->s_wqe;
ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc);
@@ -867,6 +1016,9 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
/* Update HCRC based on packet opcode */
pbc = update_hcrc(ps->opcode, pbc);
}
if (cb)
iowait_pio_inc(&priv->s_iowait);
...
@@ -159,17 +159,38 @@ struct hfi1_qp_priv {
struct sdma_engine *s_sde; /* current sde */
struct send_context *s_sendcontext; /* current sendcontext */
struct hfi1_ctxtdata *rcd; /* QP's receive context */
struct page **pages; /* for TID page scan */
u32 tid_enqueue; /* saved when tid waited */
u8 s_sc; /* SC[0..4] for next packet */
struct iowait s_iowait;
struct list_head tid_wait; /* for queueing tid space */
struct hfi1_opfn_data opfn;
struct tid_flow_state flow_state;
struct tid_rdma_qp_params tid_rdma;
struct rvt_qp *owner;
u8 hdr_type; /* 9B or 16B */
unsigned long tid_timer_timeout_jiffies;
/* variables for the TID RDMA SE state machine */
u32 s_flags;
/* For TID RDMA READ */
u32 tid_r_reqs; /* Num of tid reads requested */
u32 tid_r_comp; /* Num of tid reads completed */
u32 pending_tid_r_segs; /* Num of pending tid read segments */
u16 pkts_ps; /* packets per segment */
u8 timeout_shift; /* account for number of packets per segment */
};
struct hfi1_swqe_priv {
struct tid_rdma_request tid_req;
struct rvt_sge_state ss; /* Used for TID RDMA READ Request */
};
struct hfi1_ack_priv {
struct tid_rdma_request tid_req;
};
/*
* This structure is used to hold commonly lookedup and computed values during
* the send engine progress.
@@ -231,6 +252,7 @@ struct hfi1_ibdev {
struct kmem_cache *verbs_txreq_cache;
u64 n_txwait;
u64 n_kmem_wait;
u64 n_tidwait;
/* protect iowait lists */
seqlock_t iowait_lock ____cacheline_aligned_in_smp;
@@ -318,6 +340,31 @@ static inline u32 delta_psn(u32 a, u32 b)
return (((int)a - (int)b) << PSN_SHIFT) >> PSN_SHIFT;
}
static inline struct tid_rdma_request *wqe_to_tid_req(struct rvt_swqe *wqe)
{
return &((struct hfi1_swqe_priv *)wqe->priv)->tid_req;
}
static inline struct tid_rdma_request *ack_to_tid_req(struct rvt_ack_entry *e)
{
return &((struct hfi1_ack_priv *)e->priv)->tid_req;
}
/*
* Look through all the active flows for a TID RDMA request and find
* the one (if it exists) that contains the specified PSN.
*/
static inline u32 __full_flow_psn(struct flow_state *state, u32 psn)
{
return mask_psn((state->generation << HFI1_KDETH_BTH_SEQ_SHIFT) |
(psn & HFI1_KDETH_BTH_SEQ_MASK));
}
static inline u32 full_flow_psn(struct tid_rdma_flow *flow, u32 psn)
{
return __full_flow_psn(&flow->flow_state, psn);
}
struct verbs_txreq;
void hfi1_put_txreq(struct verbs_txreq *tx);
@@ -383,6 +430,10 @@ int hfi1_register_ib_device(struct hfi1_devdata *);
void hfi1_unregister_ib_device(struct hfi1_devdata *);
void hfi1_kdeth_eager_rcv(struct hfi1_packet *packet);
void hfi1_kdeth_expected_rcv(struct hfi1_packet *packet);
void hfi1_ib_rcv(struct hfi1_packet *packet);
void hfi1_16B_rcv(struct hfi1_packet *packet);
@@ -400,6 +451,16 @@ static inline bool opa_bth_is_migration(struct ib_other_headers *ohdr)
return ohdr->bth[1] & cpu_to_be32(OPA_BTH_MIG_REQ);
}
void hfi1_wait_kmem(struct rvt_qp *qp);
static inline void hfi1_trdma_send_complete(struct rvt_qp *qp,
struct rvt_swqe *wqe,
enum ib_wc_status status)
{
trdma_clean_swqe(qp, wqe);
rvt_send_complete(qp, wqe, status);
}
extern const enum ib_wc_opcode ib_hfi1_wc_opcode[];
extern const u8 hdr_len_by_opcode[];
...
@@ -45,12 +45,7 @@ static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
u32 len;
len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
ss->sge = wqe->sg_list[0];
return rvt_restart_sge(ss, wqe, len);
ss->sg_list = wqe->sg_list + 1;
ss->num_sge = wqe->wr.num_sge;
ss->total_len = wqe->length;
rvt_skip_sge(ss, len, false);
return wqe->length - len;
}
/**
...
@@ -1642,11 +1642,11 @@ int rvt_destroy_qp(struct ib_qp *ibqp)
kref_put(&qp->ip->ref, rvt_release_mmap_info);
else
vfree(qp->r_rq.wq);
vfree(qp->s_wq);
rdi->driver_f.qp_priv_free(rdi, qp);
kfree(qp->s_ack_queue);
rdma_destroy_ah_attr(&qp->remote_ah_attr);
rdma_destroy_ah_attr(&qp->alt_ah_attr);
vfree(qp->s_wq);
kfree(qp);
return 0;
}
@@ -2393,11 +2393,12 @@ static inline unsigned long rvt_aeth_to_usec(u32 aeth)
}
/*
* rvt_add_retry_timer - add/start a retry timer
* rvt_add_retry_timer_ext - add/start a retry timer
* @qp - the QP
* @shift - timeout shift to wait for multiple packets
* add a retry timer on the QP
*/
void rvt_add_retry_timer(struct rvt_qp *qp)
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
struct ib_qp *ibqp = &qp->ibqp;
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
@@ -2405,11 +2406,11 @@ void rvt_add_retry_timer(struct rvt_qp *qp)
lockdep_assert_held(&qp->s_lock);
qp->s_flags |= RVT_S_TIMER;
/* 4.096 usec. * (1 << qp->timeout) */
qp->s_timer.expires = jiffies + qp->timeout_jiffies +
qp->s_timer.expires = jiffies + rdi->busy_jiffies +
rdi->busy_jiffies;
(qp->timeout_jiffies << shift);
add_timer(&qp->s_timer);
}
EXPORT_SYMBOL(rvt_add_retry_timer);
EXPORT_SYMBOL(rvt_add_retry_timer_ext);
/**
* rvt_add_rnr_timer - add/start an rnr timer
...
@@ -187,3 +187,16 @@ void rvt_get_credit(struct rvt_qp *qp, u32 aeth)
}
}
EXPORT_SYMBOL(rvt_get_credit);
/* rvt_restart_sge - rewind the sge state for a wqe */
u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len)
{
ss->sge = wqe->sg_list[0];
ss->sg_list = wqe->sg_list + 1;
ss->num_sge = wqe->wr.num_sge;
ss->total_len = wqe->length;
rvt_skip_sge(ss, len, false);
return wqe->length - len;
}
EXPORT_SYMBOL(rvt_restart_sge);
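rvt_restart_sge() is exported so drivers no longer open-code the SGE rewind
when retransmitting. A hedged sketch of how a caller might use it; qp context,
wqe, ss and pmtu are assumed to come from the driver's own retransmit path,
and the 24-bit PSN mask mirrors the hfi1/qib restart_sge() helpers shown
earlier:

/* Sketch only: rewind a WQE's SGE state to the packet being resent.
 * Assumes <rdma/rdmavt_qp.h> context; not the rdmavt implementation itself. */
static u32 rewind_for_retransmit(struct rvt_sge_state *ss,
				 struct rvt_swqe *wqe, u32 psn, u32 pmtu)
{
	u32 skip = ((psn - wqe->psn) & 0xffffff) * pmtu; /* 24-bit PSN delta */

	return rvt_restart_sge(ss, wqe, skip);	/* remaining bytes to send */
}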
/*
* Copyright(c) 2016 Intel Corporation.
* Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -100,6 +100,8 @@ struct ib_atomic_eth {
__be64 compare_data; /* potentially unaligned */
} __packed;
#include <rdma/tid_rdma_defs.h>
union ib_ehdrs {
struct {
__be32 deth[2];
@@ -117,6 +119,11 @@ union ib_ehdrs {
__be32 aeth;
__be32 ieth;
struct ib_atomic_eth atomic_eth;
/* TID RDMA headers */
union {
struct tid_rdma_read_req r_req;
struct tid_rdma_read_resp r_rsp;
} tid_rdma;
} __packed;
struct ib_other_headers {
...
@@ -574,9 +574,10 @@ static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
/**
* rvt_mod_retry_timer - mod a retry timer
* @qp - the QP
* @shift - timeout shift to wait for multiple packets
* Modify a potentially already running retry timer
*/
static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
struct ib_qp *ibqp = &qp->ibqp;
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
@@ -584,8 +585,13 @@ static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
lockdep_assert_held(&qp->s_lock);
qp->s_flags |= RVT_S_TIMER;
/* 4.096 usec. * (1 << qp->timeout) */
mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
rdi->busy_jiffies);
(qp->timeout_jiffies << shift));
}
static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
return rvt_mod_retry_timer_ext(qp, 0);
}
struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
...
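With the new shift argument the retry timeout scales by a power of two, so one
timer can cover all the packets of a multi-packet TID RDMA segment. A small
worked example of the expiry arithmetic used above (the jiffies values are
made up):

/* Illustration only: how the expiry grows with the shift argument. */
#include <stdio.h>

int main(void)
{
	unsigned long timeout_jiffies = 64, busy_jiffies = 2, jiffies = 1000;

	for (unsigned int shift = 0; shift <= 3; shift++)
		printf("shift %u -> expires at %lu\n", shift,
		       jiffies + busy_jiffies + (timeout_jiffies << shift));
	return 0;
}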
@@ -174,6 +174,7 @@ struct rvt_swqe {
u32 lpsn; /* last packet sequence number */
u32 ssn; /* send sequence number */
u32 length; /* total length of data in sg_list */
void *priv; /* driver dependent field */
struct rvt_sge sg_list[0];
};
@@ -235,6 +236,7 @@ struct rvt_ack_entry {
u32 lpsn;
u8 opcode;
u8 sent;
void *priv;
};
#define RC_QP_SCALING_INTERVAL 5
@@ -628,6 +630,16 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp);
*/
void rvt_get_credit(struct rvt_qp *qp, u32 aeth);
/**
* rvt_restart_sge - rewind the sge state for a wqe
* @ss: the sge state pointer
* @wqe: the wqe to rewind
* @len: the data length from the start of the wqe in bytes
*
* Returns the remaining data length.
*/
u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);
/**
* @qp - the qp pair
* @len - the length
@@ -676,7 +688,11 @@ enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer(struct rvt_qp *qp);
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
static inline void rvt_add_retry_timer(struct rvt_qp *qp)
{
rvt_add_retry_timer_ext(qp, 0);
}
void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
void *data, u32 length,
...
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* Copyright(c) 2018 Intel Corporation.
*
*/
#ifndef TID_RDMA_DEFS_H
#define TID_RDMA_DEFS_H
#include <rdma/ib_pack.h>
struct tid_rdma_read_req {
__le32 kdeth0;
__le32 kdeth1;
struct ib_reth reth;
__be32 tid_flow_psn;
__be32 tid_flow_qp;
__be32 verbs_qp;
};
struct tid_rdma_read_resp {
__le32 kdeth0;
__le32 kdeth1;
__be32 aeth;
__be32 reserved[4];
__be32 verbs_psn;
__be32 verbs_qp;
};
/*
* TID RDMA Opcodes
*/
#define IB_OPCODE_TID_RDMA 0xe0
enum {
IB_OPCODE_READ_REQ = 0x4,
IB_OPCODE_READ_RESP = 0x5,
IB_OPCODE(TID_RDMA, READ_REQ),
IB_OPCODE(TID_RDMA, READ_RESP),
};
#define TID_OP(x) IB_OPCODE_TID_RDMA_##x
/*
* Define TID RDMA specific WR opcodes. The ib_wr_opcode
* enum already provides some reserved values for use by
* low level drivers. Two of those are used but renamed
* to be more descriptive.
*/
#define IB_WR_TID_RDMA_READ IB_WR_RESERVED2
#endif /* TID_RDMA_DEFS_H */
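The IB_OPCODE() entries above compose the 0xe0 transport base with the
per-operation value, the same scheme ib_pack.h uses for the other transports,
so the wire opcodes come out as 0xe4 and 0xe5. A quick standalone check of
that arithmetic:

/* Check of the opcode composition used in this header. */
#include <stdio.h>

#define IB_OPCODE_TID_RDMA	0xe0
#define IB_OPCODE_READ_REQ	0x4
#define IB_OPCODE_READ_RESP	0x5

int main(void)
{
	printf("TID_RDMA_READ_REQ  = 0x%x\n",
	       IB_OPCODE_TID_RDMA + IB_OPCODE_READ_REQ);	/* 0xe4 */
	printf("TID_RDMA_READ_RESP = 0x%x\n",
	       IB_OPCODE_TID_RDMA + IB_OPCODE_READ_RESP);	/* 0xe5 */
	return 0;
}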