Commit 100d6de2 authored by Chien Tin Tung, committed by Jason Gunthorpe

i40iw: Allocate a sdbuf per CQP WQE

Currently there is only one sdbuf per Control QP (CQP) for
programming a Segment Descriptor (SD). If multiple SD work
requests are posted simultaneously, the sdbuf is reused by
all WQEs, and a new WQE can corrupt a previous WQE's sdbuf,
leading to incorrect SD programming.

Fix this by allocating one sdbuf per CQP SQ WQE. When an
SD command is posted, it will use the corresponding sdbuf
for the WQE.

Fixes: 86dbcd0f ("i40iw: add file to handle cqp calls")
Signed-off-by: Chien Tin Tung <chien.tin.tung@intel.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent db0acbc4
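Conceptually, the fix sizes a single DMA buffer for the whole CQP SQ and gives each
WQE slot its own 128-byte region inside it, selected by wqe_idx. The standalone C
sketch below illustrates only that indexing; SD_BUF_SIZE, CQP_SQ_SIZE and struct
sd_entry are hypothetical stand-ins (the driver itself uses I40IW_UPDATE_SD_BUF_SIZE,
cqp->sq_size and DMA memory), and only the offset arithmetic mirrors the
cqp_sds_wqe_fill() change in the diff below.

/*
 * Minimal userspace sketch of per-WQE SD buffer indexing.
 * The names below are illustrative stand-ins, not the driver's real types;
 * only the offset computation mirrors the fix.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SD_BUF_SIZE 128          /* mirrors I40IW_UPDATE_SD_BUF_SIZE */
#define CQP_SQ_SIZE 8            /* hypothetical CQP SQ depth */

struct sd_entry {                /* hypothetical 16-byte SD entry */
	uint64_t data;
	uint64_t cmd;
};

int main(void)
{
	/* One allocation sized for the whole SQ, as in i40iw_sc_cqp_create():
	 * I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size instead of a single 128B buffer. */
	char *sdbuf = calloc(CQP_SQ_SIZE, SD_BUF_SIZE);
	if (!sdbuf)
		return 1;

	/* Post two SD commands on different WQE slots; each gets its own region. */
	for (uint32_t wqe_idx = 0; wqe_idx < 2; wqe_idx++) {
		struct sd_entry entries[4] = { { .data = wqe_idx, .cmd = 0x1 } };
		size_t offset = (size_t)wqe_idx * SD_BUF_SIZE;

		/* Same pattern as the fixed cqp_sds_wqe_fill(): copy into
		 * sdbuf + offset rather than always into the start of sdbuf. */
		memcpy(sdbuf + offset, entries, sizeof(entries));
		printf("WQE %u uses sdbuf offset %zu\n", wqe_idx, offset);
	}

	free(sdbuf);
	return 0;
}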
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -513,7 +513,7 @@ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
 
 	ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
 					  &cqp->sdbuf,
-					  128,
+					  I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size,
 					  I40IW_SD_BUF_ALIGNMENT);
 
 	if (ret_code)
@@ -596,14 +596,15 @@ void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
 }
 
 /**
- * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
- * @cqp: struct for cqp hw
- * @wqe_idx: we index of cqp ring
+ * i40iw_sc_cqp_get_next_send_wqe_idx - get next WQE on CQP SQ and pass back the index
+ * @cqp: pointer to CQP structure
+ * @scratch: private data for CQP WQE
+ * @wqe_idx: WQE index for next WQE on CQP SQ
  */
-u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+static u64 *i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp *cqp,
+					       u64 scratch, u32 *wqe_idx)
 {
 	u64 *wqe = NULL;
-	u32 wqe_idx;
 	enum i40iw_status_code ret_code;
 
 	if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
@@ -616,20 +617,32 @@ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
 			    cqp->sq_ring.size);
 		return NULL;
 	}
-	I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
+	I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
 	cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
 	if (ret_code)
 		return NULL;
-	if (!wqe_idx)
+	if (!*wqe_idx)
 		cqp->polarity = !cqp->polarity;
 
-	wqe = cqp->sq_base[wqe_idx].elem;
-	cqp->scratch_array[wqe_idx] = scratch;
+	wqe = cqp->sq_base[*wqe_idx].elem;
+	cqp->scratch_array[*wqe_idx] = scratch;
 	I40IW_CQP_INIT_WQE(wqe);
 
 	return wqe;
 }
 
+/**
+ * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
+ * @cqp: struct for cqp hw
+ * @scratch: private data for CQP WQE
+ */
+u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+{
+	u32 wqe_idx;
+
+	return i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
+}
+
 /**
  * i40iw_sc_cqp_destroy - destroy cqp during close
  * @cqp: struct for cqp hw
@@ -3587,8 +3600,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
 	u64 *wqe;
 	int mem_entries, wqe_entries;
 	struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
+	u64 offset;
+	u32 wqe_idx;
 
-	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+	wqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
 	if (!wqe)
 		return I40IW_ERR_RING_FULL;
 
@@ -3601,8 +3616,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
 		 LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);
 
 	if (mem_entries) {
-		memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
-		data = sdbuf->pa;
+		offset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE;
+		memcpy((char *)sdbuf->va + offset, &info->entry[3],
+		       mem_entries << 4);
+		data = (u64)sdbuf->pa + offset;
 	} else {
 		data = 0;
 	}
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -1526,7 +1526,7 @@ enum i40iw_alignment {
 	I40IW_AEQ_ALIGNMENT =		0x100,
 	I40IW_CEQ_ALIGNMENT =		0x100,
 	I40IW_CQ0_ALIGNMENT =		0x100,
-	I40IW_SD_BUF_ALIGNMENT =	0x100
+	I40IW_SD_BUF_ALIGNMENT =	0x80
 };
 
 #define I40IW_WQE_SIZE_64	64
@@ -1534,6 +1534,8 @@ enum i40iw_alignment {
 #define I40IW_QP_WQE_MIN_SIZE	32
 #define I40IW_QP_WQE_MAX_SIZE	128
 
+#define I40IW_UPDATE_SD_BUF_SIZE 128
+
 #define I40IW_CQE_QTYPE_RQ	0
 #define I40IW_CQE_QTYPE_SQ	1