Commit 364e282c authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Split MEM into MR and MW

The original rxe implementation intended to use a common object to represent
both MRs and MWs, but the two are different enough to justify separate
objects.

This allows the mem name to be replaced with mr for MRs, which is more
consistent with the naming of the other objects and less likely to cause
confusion. This is a long patch that mostly renames mem to mr where it makes
sense and adds a new rxe_mw struct (a condensed sketch of the split is shown
below, after the commit header).

Link: https://lore.kernel.org/r/20210325212425.2792-1-rpearson@hpe.com
Signed-off-by: Bob Pearson <rpearson@hpe.com>
Acked-by: Zhu Yanjun <zyjzyj2000@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 7410c2d0
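
For orientation before reading the hunks, the following condensed sketch (editorial, abridged from the rxe_verbs.h hunk at the end of this diff; elided fields are marked by comments and are otherwise unchanged) shows the data-structure change the patch makes:

/* Before: a single struct rxe_mem backed both MRs and MWs, with a union
 * selecting which ib core object it wrapped.
 */
struct rxe_mem {
	struct rxe_pool_entry	pelem;
	union {
		struct ib_mr	ibmr;
		struct ib_mw	ibmw;
	};
	struct ib_umem		*umem;
	enum rxe_mem_state	state;
	enum rxe_mem_type	type;
	/* ... va, iova, length, map tables, etc. (elided) ... */
};

/* After: MRs keep the old layout under the new rxe_mr name, while MWs get
 * a minimal struct of their own. The embedded ib_mw is allocated by the ib
 * core, which is why the MW pool entry gains RXE_POOL_NO_ALLOC and the
 * device ops gain INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw) below.
 */
struct rxe_mr {
	struct rxe_pool_entry	pelem;
	struct ib_mr		ibmr;
	struct ib_umem		*umem;
	enum rxe_mr_state	state;	/* RXE_MR_STATE_* replaces RXE_MEM_STATE_* */
	enum rxe_mr_type	type;	/* RXE_MR_TYPE_* replaces RXE_MEM_TYPE_* */
	/* ... va, iova, length, map tables, etc. (elided) ... */
};

struct rxe_mw {
	struct ib_mw		ibmw;
	struct rxe_pool_entry	pelem;
};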
@@ -345,7 +345,7 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, payload_addr(pkt),
payload_size(pkt), to_mem_obj, NULL);
payload_size(pkt), to_mr_obj, NULL);
if (ret)
return COMPST_ERROR;
@@ -365,7 +365,7 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, &atomic_orig,
sizeof(u64), to_mem_obj, NULL);
sizeof(u64), to_mr_obj, NULL);
if (ret)
return COMPST_ERROR;
else
......
@@ -72,40 +72,37 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
/* rxe_mr.c */
enum copy_direction {
to_mem_obj,
from_mem_obj,
to_mr_obj,
from_mr_obj,
};
void rxe_mem_init_dma(struct rxe_pd *pd,
int access, struct rxe_mem *mem);
void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
u64 length, u64 iova, int access, struct ib_udata *udata,
struct rxe_mem *mr);
int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int access, struct ib_udata *udata, struct rxe_mr *mr);
int rxe_mem_init_fast(struct rxe_pd *pd,
int max_pages, struct rxe_mem *mem);
int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
int length, enum copy_direction dir, u32 *crcp);
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum copy_direction dir, u32 *crcp);
int copy_data(struct rxe_pd *pd, int access,
struct rxe_dma_info *dma, void *addr, int length,
enum copy_direction dir, u32 *crcp);
void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);
void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
enum lookup_type {
lookup_local,
lookup_remote,
};
struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
enum lookup_type type);
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
enum lookup_type type);
int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
void rxe_mem_cleanup(struct rxe_pool_entry *arg);
void rxe_mr_cleanup(struct rxe_pool_entry *arg);
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
......
This diff is collapsed.
@@ -8,8 +8,6 @@
#include "rxe_loc.h"
/* info about object pools
* note that mr and mw share a single index space
* so that one can map an lkey to the correct type of object
*/
struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_UC] = {
@@ -56,18 +54,18 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
},
[RXE_TYPE_MR] = {
.name = "rxe-mr",
.size = sizeof(struct rxe_mem),
.elem_offset = offsetof(struct rxe_mem, pelem),
.cleanup = rxe_mem_cleanup,
.size = sizeof(struct rxe_mr),
.elem_offset = offsetof(struct rxe_mr, pelem),
.cleanup = rxe_mr_cleanup,
.flags = RXE_POOL_INDEX,
.max_index = RXE_MAX_MR_INDEX,
.min_index = RXE_MIN_MR_INDEX,
},
[RXE_TYPE_MW] = {
.name = "rxe-mw",
.size = sizeof(struct rxe_mem),
.elem_offset = offsetof(struct rxe_mem, pelem),
.flags = RXE_POOL_INDEX,
.size = sizeof(struct rxe_mw),
.elem_offset = offsetof(struct rxe_mw, pelem),
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.max_index = RXE_MAX_MW_INDEX,
.min_index = RXE_MIN_MW_INDEX,
},
......
@@ -464,7 +464,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
} else {
err = copy_data(qp->pd, 0, &wqe->dma,
payload_addr(pkt), paylen,
from_mem_obj,
from_mr_obj,
&crc);
if (err)
return err;
@@ -596,7 +596,7 @@ int rxe_requester(void *arg)
if (wqe->mask & WR_REG_MASK) {
if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct rxe_mem *rmr;
struct rxe_mr *rmr;
rmr = rxe_pool_get_index(&rxe->mr_pool,
wqe->wr.ex.invalidate_rkey >> 8);
@@ -607,14 +607,14 @@
wqe->status = IB_WC_MW_BIND_ERR;
goto exit;
}
rmr->state = RXE_MEM_STATE_FREE;
rmr->state = RXE_MR_STATE_FREE;
rxe_drop_ref(rmr);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
} else if (wqe->wr.opcode == IB_WR_REG_MR) {
struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);
struct rxe_mr *rmr = to_rmr(wqe->wr.wr.reg.mr);
rmr->state = RXE_MEM_STATE_VALID;
rmr->state = RXE_MR_STATE_VALID;
rmr->access = wqe->wr.wr.reg.access;
rmr->ibmr.lkey = wqe->wr.wr.reg.key;
rmr->ibmr.rkey = wqe->wr.wr.reg.key;
......
@@ -391,7 +391,7 @@ static enum resp_states check_length(struct rxe_qp *qp,
static enum resp_states check_rkey(struct rxe_qp *qp,
struct rxe_pkt_info *pkt)
{
struct rxe_mem *mem = NULL;
struct rxe_mr *mr = NULL;
u64 va;
u32 rkey;
u32 resid;
@@ -430,18 +430,18 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
resid = qp->resp.resid;
pktlen = payload_size(pkt);
mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
if (!mem) {
mr = lookup_mr(qp->pd, access, rkey, lookup_remote);
if (!mr) {
state = RESPST_ERR_RKEY_VIOLATION;
goto err;
}
if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
if (unlikely(mr->state == RXE_MR_STATE_FREE)) {
state = RESPST_ERR_RKEY_VIOLATION;
goto err;
}
if (mem_check_range(mem, va, resid)) {
if (mr_check_range(mr, va, resid)) {
state = RESPST_ERR_RKEY_VIOLATION;
goto err;
}
@@ -469,12 +469,12 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
WARN_ON_ONCE(qp->resp.mr);
qp->resp.mr = mem;
qp->resp.mr = mr;
return RESPST_EXECUTE;
err:
if (mem)
rxe_drop_ref(mem);
if (mr)
rxe_drop_ref(mr);
return state;
}
@@ -484,7 +484,7 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
int err;
err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
data_addr, data_len, to_mem_obj, NULL);
data_addr, data_len, to_mr_obj, NULL);
if (unlikely(err))
return (err == -ENOSPC) ? RESPST_ERR_LENGTH
: RESPST_ERR_MALFORMED_WQE;
@@ -499,8 +499,8 @@ static enum resp_states write_data_in(struct rxe_qp *qp,
int err;
int data_len = payload_size(pkt);
err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
data_len, to_mem_obj, NULL);
err = rxe_mr_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt), data_len,
to_mr_obj, NULL);
if (err) {
rc = RESPST_ERR_RKEY_VIOLATION;
goto out;
@@ -522,9 +522,9 @@ static enum resp_states process_atomic(struct rxe_qp *qp,
u64 iova = atmeth_va(pkt);
u64 *vaddr;
enum resp_states ret;
struct rxe_mem *mr = qp->resp.mr;
struct rxe_mr *mr = qp->resp.mr;
if (mr->state != RXE_MEM_STATE_VALID) {
if (mr->state != RXE_MR_STATE_VALID) {
ret = RESPST_ERR_RKEY_VIOLATION;
goto out;
}
@@ -700,8 +700,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
if (!skb)
return RESPST_ERR_RNR;
err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
payload, from_mem_obj, &icrc);
err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
payload, from_mr_obj, &icrc);
if (err)
pr_err("Failed copying memory\n");
@@ -883,7 +883,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
}
if (pkt->mask & RXE_IETH_MASK) {
struct rxe_mem *rmr;
struct rxe_mr *rmr;
wc->wc_flags |= IB_WC_WITH_INVALIDATE;
wc->ex.invalidate_rkey = ieth_rkey(pkt);
@@ -895,7 +895,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
wc->ex.invalidate_rkey);
return RESPST_ERROR;
}
rmr->state = RXE_MEM_STATE_FREE;
rmr->state = RXE_MR_STATE_FREE;
rxe_drop_ref(rmr);
}
......
@@ -865,7 +865,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
struct rxe_mem *mr;
struct rxe_mr *mr;
mr = rxe_alloc(&rxe->mr_pool);
if (!mr)
@@ -873,7 +873,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
rxe_add_index(mr);
rxe_add_ref(pd);
rxe_mem_init_dma(pd, access, mr);
rxe_mr_init_dma(pd, access, mr);
return &mr->ibmr;
}
@@ -887,7 +887,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
int err;
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
struct rxe_mem *mr;
struct rxe_mr *mr;
mr = rxe_alloc(&rxe->mr_pool);
if (!mr) {
@@ -899,8 +899,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
rxe_add_ref(pd);
err = rxe_mem_init_user(pd, start, length, iova,
access, udata, mr);
err = rxe_mr_init_user(pd, start, length, iova, access, udata, mr);
if (err)
goto err3;
@@ -916,9 +915,9 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct rxe_mem *mr = to_rmr(ibmr);
struct rxe_mr *mr = to_rmr(ibmr);
mr->state = RXE_MEM_STATE_ZOMBIE;
mr->state = RXE_MR_STATE_ZOMBIE;
rxe_drop_ref(mr_pd(mr));
rxe_drop_index(mr);
rxe_drop_ref(mr);
@@ -930,7 +929,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
{
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
struct rxe_mem *mr;
struct rxe_mr *mr;
int err;
if (mr_type != IB_MR_TYPE_MEM_REG)
@@ -946,7 +945,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
rxe_add_ref(pd);
err = rxe_mem_init_fast(pd, max_num_sg, mr);
err = rxe_mr_init_fast(pd, max_num_sg, mr);
if (err)
goto err2;
@@ -962,7 +961,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
struct rxe_mem *mr = to_rmr(ibmr);
struct rxe_mr *mr = to_rmr(ibmr);
struct rxe_map *map;
struct rxe_phys_buf *buf;
@@ -982,7 +981,7 @@ static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset)
{
struct rxe_mem *mr = to_rmr(ibmr);
struct rxe_mr *mr = to_rmr(ibmr);
int n;
mr->nbuf = 0;
@@ -1110,6 +1109,7 @@ static const struct ib_device_ops rxe_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
};
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
......
@@ -156,7 +156,7 @@ struct resp_res {
struct sk_buff *skb;
} atomic;
struct {
struct rxe_mem *mr;
struct rxe_mr *mr;
u64 va_org;
u32 rkey;
u32 length;
@@ -183,7 +183,7 @@ struct rxe_resp_info {
/* RDMA read / atomic only */
u64 va;
struct rxe_mem *mr;
struct rxe_mr *mr;
u32 resid;
u32 rkey;
u32 length;
@@ -262,18 +262,18 @@ struct rxe_qp {
struct execute_work cleanup_work;
};
enum rxe_mem_state {
RXE_MEM_STATE_ZOMBIE,
RXE_MEM_STATE_INVALID,
RXE_MEM_STATE_FREE,
RXE_MEM_STATE_VALID,
enum rxe_mr_state {
RXE_MR_STATE_ZOMBIE,
RXE_MR_STATE_INVALID,
RXE_MR_STATE_FREE,
RXE_MR_STATE_VALID,
};
enum rxe_mem_type {
RXE_MEM_TYPE_NONE,
RXE_MEM_TYPE_DMA,
RXE_MEM_TYPE_MR,
RXE_MEM_TYPE_MW,
enum rxe_mr_type {
RXE_MR_TYPE_NONE,
RXE_MR_TYPE_DMA,
RXE_MR_TYPE_MR,
RXE_MR_TYPE_MW,
};
#define RXE_BUF_PER_MAP (PAGE_SIZE / sizeof(struct rxe_phys_buf))
@@ -287,17 +287,14 @@ struct rxe_map {
struct rxe_phys_buf buf[RXE_BUF_PER_MAP];
};
struct rxe_mem {
struct rxe_mr {
struct rxe_pool_entry pelem;
union {
struct ib_mr ibmr;
struct ib_mw ibmw;
};
struct ib_mr ibmr;
struct ib_umem *umem;
enum rxe_mem_state state;
enum rxe_mem_type type;
enum rxe_mr_state state;
enum rxe_mr_type type;
u64 va;
u64 iova;
size_t length;
@@ -318,6 +315,17 @@ struct rxe_mem {
struct rxe_map **map;
};
enum rxe_mw_state {
RXE_MW_STATE_INVALID = RXE_MR_STATE_INVALID,
RXE_MW_STATE_FREE = RXE_MR_STATE_FREE,
RXE_MW_STATE_VALID = RXE_MR_STATE_VALID,
};
struct rxe_mw {
struct ib_mw ibmw;
struct rxe_pool_entry pelem;
};
struct rxe_mc_grp {
struct rxe_pool_entry pelem;
spinlock_t mcg_lock; /* guard group */
@@ -422,27 +430,27 @@ static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
}
static inline struct rxe_mem *to_rmr(struct ib_mr *mr)
static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
{
return mr ? container_of(mr, struct rxe_mem, ibmr) : NULL;
return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
}
static inline struct rxe_mem *to_rmw(struct ib_mw *mw)
static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
{
return mw ? container_of(mw, struct rxe_mem, ibmw) : NULL;
return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
}
static inline struct rxe_pd *mr_pd(struct rxe_mem *mr)
static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
{
return to_rpd(mr->ibmr.pd);
}
static inline u32 mr_lkey(struct rxe_mem *mr)
static inline u32 mr_lkey(struct rxe_mr *mr)
{
return mr->ibmr.lkey;
}
static inline u32 mr_rkey(struct rxe_mem *mr)
static inline u32 mr_rkey(struct rxe_mr *mr)
{
return mr->ibmr.rkey;
}
......
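
As an editorial usage note, the inline converters and accessors in the final hunk (to_rmr(), to_rmw(), mr_pd(), mr_lkey(), mr_rkey()) are what the rest of the driver relies on after the split. A minimal hypothetical caller, mirroring the pattern visible in rxe_dereg_mr() above, might look like this:

/* Hypothetical example, not part of this patch: converting the ib core
 * object and reading the PD and keys through the new rxe_mr accessors.
 */
static void example_dump_mr(struct ib_mr *ibmr)
{
	struct rxe_mr *mr = to_rmr(ibmr);	/* container_of() over ibmr */
	struct rxe_pd *pd = mr_pd(mr);		/* PD is always reached via ibmr.pd */

	pr_debug("mr lkey 0x%x rkey 0x%x state %d pd %p\n",
		 mr_lkey(mr), mr_rkey(mr), mr->state, pd);
}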