Commit 3ca633f1 authored by Michael J. Ruhl, committed by Jason Gunthorpe

IB/hfi1: Right size user_sdma sequence numbers and related variables

The hardware limits the maximum number of packets to a value that fits in a u16.

Match that size for all relevant sequence numbers in the user_sdma engine.
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 28a9a9e8
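Why u16 is enough: the hardware cap means no more than a u16's worth of packets can ever be outstanding, so 16-bit sequence counters cannot be lapped and unsigned wraparound keeps their differences exact. The following standalone sketch illustrates that property in userspace C; it is not driver code, and the counter names only mirror the driver's fields in spirit.

/*
 * Standalone illustration (not hfi1 driver code) of why u16 sequence
 * counters are sufficient when the hardware limits the packet count to
 * a u16: as long as fewer than 65536 packets are outstanding, the u16
 * difference "submitted - completed" is exact, even across wraparound.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical counters, loosely modeled on seqsubmitted/seqcomp. */
static uint16_t seqsubmitted;
static uint16_t seqcomp;

static uint16_t outstanding(void)
{
        /* Unsigned subtraction is wraparound-safe for 16-bit counters. */
        return (uint16_t)(seqsubmitted - seqcomp);
}

int main(void)
{
        /* Start near the top of the u16 range to force a wrap. */
        seqsubmitted = 65530;
        seqcomp = 65530;

        for (int i = 0; i < 100; i++) {         /* submit 100 packets */
                seqsubmitted++;
                assert(outstanding() == (uint16_t)(i + 1));
        }
        for (int i = 0; i < 100; i++)           /* complete them */
                seqcomp++;

        printf("outstanding after wrap: %u\n", outstanding()); /* prints 0 */
        return 0;
}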
@@ -2444,7 +2444,7 @@ int sdma_send_txreq(struct sdma_engine *sde,
  * @sde: sdma engine to use
  * @wait: wait structure to use when full (may be NULL)
  * @tx_list: list of sdma_txreqs to submit
- * @count: pointer to a u32 which, after return will contain the total number of
+ * @count: pointer to a u16 which, after return will contain the total number of
  * sdma_txreqs removed from the tx_list. This will include sdma_txreqs
  * whose SDMA descriptors are submitted to the ring and the sdma_txreqs
  * which are added to SDMA engine flush list if the SDMA engine state is
@@ -2468,7 +2468,7 @@ int sdma_send_txreq(struct sdma_engine *sde,
  * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
  */
 int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
-                     struct list_head *tx_list, u32 *count_out)
+                     struct list_head *tx_list, u16 *count_out)
 {
         struct sdma_txreq *tx, *tx_next;
         int ret = 0;
@@ -849,7 +849,7 @@ int sdma_send_txreq(struct sdma_engine *sde,
 int sdma_send_txlist(struct sdma_engine *sde,
                      struct iowait *wait,
                      struct list_head *tx_list,
-                     u32 *count);
+                     u16 *count_out);
 int sdma_ahg_alloc(struct sdma_engine *sde);
 void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);
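At the API level, the only visible change for callers of sdma_send_txlist() is the width of the out-parameter. A minimal caller sketch under kernel-build assumptions follows; example_submit(), the include paths, and the pr_debug() message are illustrative assumptions, not code from the driver.

/* Illustrative caller sketch; everything except sdma_send_txlist() is assumed. */
#include <linux/list.h>
#include <linux/printk.h>

#include "sdma.h"       /* assumed relative include for the hfi1 sdma API */

static int example_submit(struct sdma_engine *sde, struct iowait *wait,
                          struct list_head *tx_list)
{
        u16 count = 0;  /* was u32 before this commit; u16 matches the hw limit */
        int ret;

        /* On return, count holds the number of txreqs removed from tx_list. */
        ret = sdma_send_txlist(sde, wait, tx_list, &count);
        pr_debug("sdma_send_txlist() consumed %u txreqs, ret %d\n", count, ret);
        return ret;
}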
@@ -76,8 +76,7 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
 static unsigned initial_pkt_count = 8;
 
-static int user_sdma_send_pkts(struct user_sdma_request *req,
-                               unsigned maxpkts);
+static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
 static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
 static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
@@ -756,9 +755,10 @@ static int user_sdma_txadd(struct user_sdma_request *req,
         return ret;
 }
 
-static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
+static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
 {
-        int ret = 0, count;
+        int ret = 0;
+        u16 count;
         unsigned npkts = 0;
         struct user_sdma_txreq *tx = NULL;
         struct hfi1_user_sdma_pkt_q *pq = NULL;
@@ -204,12 +204,12 @@ struct user_sdma_request {
         s8 ahg_idx;
 
         /* Writeable fields shared with interrupt */
-        u64 seqcomp ____cacheline_aligned_in_smp;
-        u64 seqsubmitted;
+        u16 seqcomp ____cacheline_aligned_in_smp;
+        u16 seqsubmitted;
 
         /* Send side fields */
         struct list_head txps ____cacheline_aligned_in_smp;
-        u64 seqnum;
+        u16 seqnum;
         /*
          * KDETH.OFFSET (TID) field
          * The offset can cover multiple packets, depending on the
@@ -246,7 +246,7 @@ struct user_sdma_txreq {
         struct user_sdma_request *req;
         u16 flags;
         unsigned int busycount;
-        u64 seqnum;
+        u16 seqnum;
 };
 
 int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
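The struct changes in user_sdma.h are the core of the patch: seqnum, seqsubmitted and seqcomp in user_sdma_request, and seqnum in user_sdma_txreq, all narrow from u64 to u16, so every counter that takes part in a comparison has the same width. The standalone sketch below is a hypothetical predicate, not the driver's completion logic; it only shows the kind of check that stays consistent when the widths match.

/* Hypothetical completion predicate in standalone C; not hfi1 driver logic. */
#include <stdbool.h>
#include <stdint.h>

struct example_request {
        uint16_t seqnum;        /* sequence number of the last packet built */
        uint16_t seqsubmitted;  /* packets handed to the SDMA engine */
        uint16_t seqcomp;       /* packets the engine has completed */
};

/* True once everything that was built has been submitted and completed. */
bool example_request_done(const struct example_request *req)
{
        /*
         * With all three counters the same 16-bit width, equality checks
         * and differences between them behave uniformly even when the
         * values wrap around.
         */
        return req->seqcomp == req->seqsubmitted &&
               req->seqsubmitted == req->seqnum;
}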