Commit 22bb1365 authored by Mike Marciniszyn, committed by Doug Ledford

IB/hfi1: Use a common pad buffer for 9B and 16B packets

There is no reason for a different pad buffer for the two
packet types.

Expand the current buffer allocation to allow for both
packet types.

Fixes: f8195f3b ("IB/hfi1: Eliminate allocation while atomic")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Kaike Wan <kaike.wan@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Link: https://lore.kernel.org/r/20191004204934.26838.13099.stgit@awfm-01.aw.intel.com
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 9ed5bd7d
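
Illustration (not part of the patch): a minimal, compilable sketch of the sizing idea behind the new SDMA_PAD constant. The MAX_16B_PADDING value and the max_t() macro below are stand-ins for the real definitions in the hfi1 headers and <linux/kernel.h>; the placeholder value is assumed, not the driver's actual constant.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for this sketch only: the real MAX_16B_PADDING comes from the
 * hfi1 headers and max_t() from <linux/kernel.h>. */
#define MAX_16B_PADDING 12	/* placeholder value, assumed */
#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

/* One pad buffer sized for the larger of the two requirements:
 * the 4-byte (sizeof(u32)) pad used for 9B packets and the longer
 * 16B trailer (ICRC, LT byte, and padding). */
#define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(uint32_t))

int main(void)
{
	printf("common pad buffer size: %zu bytes\n", (size_t)SDMA_PAD);
	return 0;
}

A single SDMA_PAD-sized buffer therefore covers both packet formats, which is how the diff below resizes the existing allocation instead of keeping a separate trail_buf.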
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -65,6 +65,7 @@
 #define SDMA_DESCQ_CNT 2048
 #define SDMA_DESC_INTR 64
 #define INVALID_TAIL 0xffff
+#define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32))
 
 static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
 module_param(sdma_descq_cnt, uint, S_IRUGO);
@@ -1296,7 +1297,7 @@ void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
 	struct sdma_engine *sde;
 
 	if (dd->sdma_pad_dma) {
-		dma_free_coherent(&dd->pcidev->dev, 4,
+		dma_free_coherent(&dd->pcidev->dev, SDMA_PAD,
 				  (void *)dd->sdma_pad_dma,
 				  dd->sdma_pad_phys);
 		dd->sdma_pad_dma = NULL;
@@ -1491,7 +1492,7 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
 	}
 
 	/* Allocate memory for pad */
-	dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
+	dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD,
 					      &dd->sdma_pad_phys, GFP_KERNEL);
 	if (!dd->sdma_pad_dma) {
 		dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
......@@ -147,9 +147,6 @@ static int pio_wait(struct rvt_qp *qp,
/* Length of buffer to create verbs txreq cache name */
#define TXREQ_NAME_LEN 24
/* 16B trailing buffer */
static const u8 trail_buf[MAX_16B_PADDING];
static uint wss_threshold = 80;
module_param(wss_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
......@@ -820,8 +817,8 @@ static int build_verbs_tx_desc(
/* add icrc, lt byte, and padding to flit */
if (extra_bytes)
ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
(void *)trail_buf, extra_bytes);
ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
sde->dd->sdma_pad_phys, extra_bytes);
bail_txadd:
return ret;
@@ -1089,7 +1086,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
 		}
 		/* add icrc, lt byte, and padding to flit */
 		if (extra_bytes)
-			seg_pio_copy_mid(pbuf, trail_buf, extra_bytes);
+			seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma,
+					 extra_bytes);
 
 		seg_pio_copy_end(pbuf);
 	}
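
Also for illustration only (simplified, assumed helper names rather than the driver's API): a single allocation can feed both transmit paths because dma_alloc_coherent() returns a CPU virtual address and a DMA address for the same memory. In the patch these two views are dd->sdma_pad_dma (copied from by seg_pio_copy_mid() in the PIO path) and dd->sdma_pad_phys (handed to sdma_txadd_daddr() in the SDMA path).

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical wrapper used only for this sketch. */
struct pad_buf {
	void *vaddr;		/* CPU view: source for PIO copies          */
	dma_addr_t dma;		/* device view: placed in SDMA descriptors  */
	size_t len;
};

static int pad_buf_init(struct device *dev, struct pad_buf *pb, size_t len)
{
	/* One coherent allocation yields both addresses for the same buffer. */
	pb->vaddr = dma_alloc_coherent(dev, len, &pb->dma, GFP_KERNEL);
	if (!pb->vaddr)
		return -ENOMEM;
	pb->len = len;
	return 0;
}

static void pad_buf_fini(struct device *dev, struct pad_buf *pb)
{
	if (pb->vaddr) {
		dma_free_coherent(dev, pb->len, pb->vaddr, pb->dma);
		pb->vaddr = NULL;
	}
}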