Commit 9e10af47 authored by Ira Weiny, committed by Greg Kroah-Hartman

staging/rdma/hfi1: Remove file pointer macros

Remove the following macros in favor of explicit use of struct hfi1_filedata and
its various sub-structures (a short sketch of the new access pattern follows the
commit metadata below).

ctxt_fp
subctxt_fp
tidcursor_fp
user_sdma_pkt_fp
user_sdma_comp_fp
Reviewed-by: Mitko Haralanov <mitko.haralanov@intel.com>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 9062305b
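
For readers skimming the diff, the change is purely mechanical: instead of per-field
macros that cast file->private_data on every use, each function casts once into a
local struct hfi1_filedata pointer and then uses plain member access. Below is a
minimal, self-contained C sketch of that pattern (a userspace model only: the two
structs are pared-down stand-ins, not the driver's real definitions, and main() is
purely illustrative).

/* Userspace model of the access pattern; not kernel code. */
#include <stdio.h>

struct hfi1_filedata {          /* pared-down stand-in for the driver's per-open state */
	unsigned subctxt;
};

struct file {                   /* stand-in for the kernel's struct file */
	void *private_data;
};

/* Old style: every access hides the private_data cast inside a macro. */
#define subctxt_fp(fp) \
	(((struct hfi1_filedata *)(fp)->private_data)->subctxt)

int main(void)
{
	struct hfi1_filedata fdata = { .subctxt = 3 };
	struct file f = { .private_data = &fdata };

	/* Before: one macro per field. */
	printf("macro access:    %u\n", subctxt_fp(&f));

	/* After: cast once into a local pointer, then use plain member access. */
	struct hfi1_filedata *fd = f.private_data;
	printf("explicit access: %u\n", fd->subctxt);
	return 0;
}

One practical upside of the explicit form, visible in the hunks below, is that each
function now has a single fd local that names every piece of per-open state it touches.
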
@@ -1423,18 +1423,6 @@ int snoop_send_pio_handler(struct hfi1_qp *qp, struct ahg_ib_header *ibhdr,
 void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
			    u64 pbc, const void *from, size_t count);
-/* for use in system calls, where we want to know device type, etc. */
-#define ctxt_fp(fp) \
-	(((struct hfi1_filedata *)(fp)->private_data)->uctxt)
-#define subctxt_fp(fp) \
-	(((struct hfi1_filedata *)(fp)->private_data)->subctxt)
-#define tidcursor_fp(fp) \
-	(((struct hfi1_filedata *)(fp)->private_data)->tidcursor)
-#define user_sdma_pkt_fp(fp) \
-	(((struct hfi1_filedata *)(fp)->private_data)->pq)
-#define user_sdma_comp_fp(fp) \
-	(((struct hfi1_filedata *)(fp)->private_data)->cq)
 static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
 {
	return ppd->dd;
@@ -352,6 +352,7 @@ static void sdma_kmem_cache_ctor(void *obj)
 int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
 {
+	struct hfi1_filedata *fd;
	int ret = 0;
	unsigned memsize;
	char buf[64];
@@ -365,6 +366,8 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
		goto done;
	}
+	fd = fp->private_data;
	if (!hfi1_sdma_comp_ring_size) {
		ret = -EINVAL;
		goto done;
@@ -384,7 +387,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
	INIT_LIST_HEAD(&pq->list);
	pq->dd = dd;
	pq->ctxt = uctxt->ctxt;
-	pq->subctxt = subctxt_fp(fp);
+	pq->subctxt = fd->subctxt;
	pq->n_max_reqs = hfi1_sdma_comp_ring_size;
	pq->state = SDMA_PKT_Q_INACTIVE;
	atomic_set(&pq->n_reqs, 0);
@@ -393,7 +396,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
			    activate_packet_queue);
	pq->reqidx = 0;
	snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
-		 subctxt_fp(fp));
+		 fd->subctxt);
	pq->txreq_cache = kmem_cache_create(buf,
			    sizeof(struct user_sdma_txreq),
			    L1_CACHE_BYTES,
@@ -404,7 +407,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
			   uctxt->ctxt);
		goto pq_txreq_nomem;
	}
-	user_sdma_pkt_fp(fp) = pq;
+	fd->pq = pq;
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		goto cq_nomem;
@@ -416,7 +419,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
		goto cq_comps_nomem;
	cq->nentries = hfi1_sdma_comp_ring_size;
-	user_sdma_comp_fp(fp) = cq;
+	fd->cq = cq;
	spin_lock_irqsave(&uctxt->sdma_qlock, flags);
	list_add(&pq->list, &uctxt->sdma_queues);
@@ -431,7 +434,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
	kfree(pq->reqs);
 pq_reqs_nomem:
	kfree(pq);
-	user_sdma_pkt_fp(fp) = NULL;
+	fd->pq = NULL;
 pq_nomem:
	ret = -ENOMEM;
 done:
@@ -485,9 +488,10 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
				   unsigned long dim, unsigned long *count)
 {
	int ret = 0, i = 0, sent;
-	struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
-	struct hfi1_user_sdma_pkt_q *pq = user_sdma_pkt_fp(fp);
-	struct hfi1_user_sdma_comp_q *cq = user_sdma_comp_fp(fp);
+	struct hfi1_filedata *fd = fp->private_data;
+	struct hfi1_ctxtdata *uctxt = fd->uctxt;
+	struct hfi1_user_sdma_pkt_q *pq = fd->pq;
+	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	struct hfi1_devdata *dd = pq->dd;
	unsigned long idx = 0;
	u8 pcount = initial_pkt_count;
@@ -499,7 +503,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
		hfi1_cdbg(
			SDMA,
			"[%u:%u:%u] First vector not big enough for header %lu/%lu",
-			dd->unit, uctxt->ctxt, subctxt_fp(fp),
+			dd->unit, uctxt->ctxt, fd->subctxt,
			iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
		ret = -EINVAL;
		goto done;
@@ -507,15 +511,15 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
	ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
	if (ret) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
-			  dd->unit, uctxt->ctxt, subctxt_fp(fp), ret);
+			  dd->unit, uctxt->ctxt, fd->subctxt, ret);
		ret = -EFAULT;
		goto done;
	}
-	trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, subctxt_fp(fp),
+	trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
				     (u16 *)&info);
	if (cq->comps[info.comp_idx].status == QUEUED) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in QUEUED state",
-			  dd->unit, uctxt->ctxt, subctxt_fp(fp),
+			  dd->unit, uctxt->ctxt, fd->subctxt,
			  info.comp_idx);
		ret = -EBADSLT;
		goto done;
@@ -523,7 +527,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
	if (!info.fragsize) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Request does not specify fragsize",
-			  dd->unit, uctxt->ctxt, subctxt_fp(fp), info.comp_idx);
+			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
		ret = -EINVAL;
		goto done;
	}
@@ -532,7 +536,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
	 * "allocate" the request entry.
	 */
	hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
-		  uctxt->ctxt, subctxt_fp(fp), info.comp_idx);
+		  uctxt->ctxt, fd->subctxt, info.comp_idx);
	req = pq->reqs + info.comp_idx;
	memset(req, 0, sizeof(*req));
	/* Mark the request as IN_USE before we start filling it in. */
@@ -659,7 +663,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
			/* Have to select the engine */
			req->sde = sdma_select_engine_vl(dd,
-					(u32)(uctxt->ctxt + subctxt_fp(fp)),
+					(u32)(uctxt->ctxt + fd->subctxt),
					vl);
			if (!req->sde || !sdma_running(req->sde)) {
				ret = -ECOMM;