Commit ef4e359d authored by Roland Dreier

Merge branches 'core', 'cxgb4', 'ipoib', 'iser', 'misc', 'mlx4', 'qib' and 'srp' into for-next

@@ -1238,15 +1238,4 @@ static struct pci_driver c2_pci_driver = {
 	.remove		= c2_remove,
 };
 
-static int __init c2_init_module(void)
-{
-	return pci_register_driver(&c2_pci_driver);
-}
-
-static void __exit c2_exit_module(void)
-{
-	pci_unregister_driver(&c2_pci_driver);
-}
-
-module_init(c2_init_module);
-module_exit(c2_exit_module);
+module_pci_driver(c2_pci_driver);
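The module_pci_driver() helper replaces exactly this init/exit boilerplate. Roughly, the macro expands to the same registration pattern the deleted code spelled out by hand (a sketch of the pattern, not the exact macro text from <linux/pci.h>):

	static int __init c2_pci_driver_init(void)
	{
		/* register with the PCI core on module load */
		return pci_register_driver(&c2_pci_driver);
	}
	module_init(c2_pci_driver_init);

	static void __exit c2_pci_driver_exit(void)
	{
		/* unregister on module unload */
		pci_unregister_driver(&c2_pci_driver);
	}
	module_exit(c2_pci_driver_exit);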
@@ -533,7 +533,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 	PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
 	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
 	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
-	     (void *)pci_resource_start(rdev->lldi.pdev, 2),
+	     (void *)(unsigned long)pci_resource_start(rdev->lldi.pdev, 2),
 	     rdev->lldi.db_reg,
 	     rdev->lldi.gts_reg,
 	     rdev->qpshift, rdev->qpmask,
@@ -797,7 +797,8 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
 	     "RSS %#llx, FL %#llx, len %u\n",
 	     pci_name(ctx->lldi.pdev), gl->va,
 	     (unsigned long long)be64_to_cpu(*rsp),
-	     (unsigned long long)be64_to_cpu(*(u64 *)gl->va),
+	     (unsigned long long)be64_to_cpu(
+		     *(__force __be64 *)gl->va),
 	     gl->tot_len);
 
 	return 0;
...
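The __force annotation here is for sparse, not the compiler: gl->va points at raw wire data whose leading 8 bytes are big-endian, and the cast documents the deliberate reinterpretation. A minimal sketch of the idiom:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	static inline u64 read_be64_raw(const void *p)
	{
		/* reinterpret the raw bytes as __be64, then convert to host
		 * order; __force silences the sparse endianness warning */
		return be64_to_cpu(*(__force const __be64 *)p);
	}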
@@ -46,9 +46,11 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
 	if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
 	    (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
-		PDBG("%s AE received after RTS - "
-		     "qp state %d qpid 0x%x status 0x%x\n", __func__,
-		     qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe));
+		pr_err("%s AE after RTS - qpid 0x%x opcode %d status 0x%x "
+		       "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+		       __func__, CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
+		       CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
+		       CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
 		return;
 	}
...
@@ -716,6 +716,8 @@ enum c4iw_ep_flags {
 	ABORT_REQ_IN_PROGRESS	= 1,
 	RELEASE_RESOURCES	= 2,
 	CLOSE_SENT		= 3,
+	TIMEOUT			= 4,
+	QP_REFERENCED		= 5,
 };
 
 enum c4iw_ep_history {
...
@@ -1383,6 +1383,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 			qhp->ep = NULL;
 			set_state(qhp, C4IW_QP_STATE_ERROR);
 			free = 1;
+			abort = 1;
 			wake_up(&qhp->wait);
 			BUG_ON(!ep);
 			flush_qp(qhp);
...
@@ -1999,16 +1999,17 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
 			goto demux_err;
 		err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
 		if (err)
-			goto demux_err;
+			goto free_pv;
 	}
 	mlx4_ib_master_tunnels(dev, 1);
 	return 0;
 
+free_pv:
+	free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
 demux_err:
-	while (i > 0) {
+	while (--i >= 0) {
 		free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
 		mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
-		--i;
 	}
 	mlx4_ib_device_unregister_sysfs(dev);
...
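The reworked unwind follows the usual partial-allocation pattern: release the half-initialized slot that failed (free_pv), then walk back over every fully initialized slot. A generic sketch of the pattern, with hypothetical alloc_slot()/free_slot() helpers:

	int init_slots(int n)
	{
		int i, err;

		for (i = 0; i < n; i++) {
			err = alloc_slot(i);	/* may fail part-way through */
			if (err)
				goto err_unwind;
		}
		return 0;

	err_unwind:
		while (--i >= 0)	/* frees slots i-1 .. 0, never slot i */
			free_slot(i);
		return err;
	}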
@@ -137,6 +137,14 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
 		props->device_cap_flags |= IB_DEVICE_XRC;
+	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
+		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
+	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
+		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
+			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
+		else
+			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
+	}
 
 	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
 		0xffffff;
@@ -1434,6 +1442,17 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;
 	}
 
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
+	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
+		ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
+		ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw;
+		ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
+
+		ibdev->ib_dev.uverbs_cmd_mask |=
+			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
+			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
+	}
+
 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
 		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
 		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
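With alloc_mw/bind_mw/dealloc_mw wired into the device, a kernel ULP can drive memory windows roughly as follows (a hedged sketch against the ib_verbs API of this series; pd, qp, a bind-enabled mr and its DMA address buf_dma are assumed to exist):

	struct ib_mw *mw;
	struct ib_mw_bind bind;
	int err;

	mw = ib_alloc_mw(pd, IB_MW_TYPE_1);
	if (IS_ERR(mw))
		return PTR_ERR(mw);

	memset(&bind, 0, sizeof(bind));
	bind.wr_id                     = 1;		/* hypothetical cookie */
	bind.send_flags                = IB_SEND_SIGNALED;
	bind.bind_info.mr              = mr;
	bind.bind_info.addr            = buf_dma;	/* window start (assumed) */
	bind.bind_info.length          = 4096;		/* window length */
	bind.bind_info.mw_access_flags = IB_ACCESS_REMOTE_WRITE;

	err = ib_bind_mw(qp, mw, &bind);	/* posts a bind WR on qp */
	if (err) {
		ib_dealloc_mw(mw);
		return err;
	}
	/* hand mw->rkey to the peer; rebind or invalidate later,
	 * then ib_dealloc_mw(mw) when done */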
@@ -1601,8 +1620,7 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
 		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
 	}
 out:
-	if (dm)
-		kfree(dm);
+	kfree(dm);
 	return;
 }
...
@@ -116,6 +116,11 @@ struct mlx4_ib_mr {
 	struct ib_umem	       *umem;
 };
 
+struct mlx4_ib_mw {
+	struct ib_mw		ibmw;
+	struct mlx4_mw		mmw;
+};
+
 struct mlx4_ib_fast_reg_page_list {
 	struct ib_fast_reg_page_list	ibfrpl;
 	__be64		       *mapped_page_list;
@@ -533,6 +538,11 @@ static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
 	return container_of(ibmr, struct mlx4_ib_mr, ibmr);
 }
 
+static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw)
+{
+	return container_of(ibmw, struct mlx4_ib_mw, ibmw);
+}
+
 static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
 {
 	return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl);
@@ -581,6 +591,10 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int access_flags,
 				  struct ib_udata *udata);
 int mlx4_ib_dereg_mr(struct ib_mr *mr);
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
+int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
+		    struct ib_mw_bind *mw_bind);
+int mlx4_ib_dealloc_mw(struct ib_mw *mw);
 struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
 					int max_page_list_len);
 struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
@@ -652,12 +666,12 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
 int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
 			u8 *mac, int *is_mcast, u8 port);
 
-static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
+static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
 {
 	u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
 
 	if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
-		return 1;
+		return true;
 
 	return !!(ah->av.ib.g_slid & 0x80);
 }
...
@@ -41,9 +41,19 @@ static u32 convert_access(int acc)
 	       (acc & IB_ACCESS_REMOTE_WRITE ? MLX4_PERM_REMOTE_WRITE : 0) |
 	       (acc & IB_ACCESS_REMOTE_READ  ? MLX4_PERM_REMOTE_READ  : 0) |
 	       (acc & IB_ACCESS_LOCAL_WRITE  ? MLX4_PERM_LOCAL_WRITE  : 0) |
+	       (acc & IB_ACCESS_MW_BIND	     ? MLX4_PERM_BIND_MW      : 0) |
 	       MLX4_PERM_LOCAL_READ;
 }
 
+static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
+{
+	switch (type) {
+	case IB_MW_TYPE_1:	return MLX4_MW_TYPE_1;
+	case IB_MW_TYPE_2:	return MLX4_MW_TYPE_2;
+	default:		return -1;
+	}
+}
+
 struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
 {
 	struct mlx4_ib_mr *mr;
@@ -68,7 +78,7 @@ struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
 	return &mr->ibmr;
 
 err_mr:
-	mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
+	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
 
 err_free:
 	kfree(mr);
@@ -163,7 +173,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	return &mr->ibmr;
 
 err_mr:
-	mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
+	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
 
 err_umem:
 	ib_umem_release(mr->umem);
@@ -177,8 +187,11 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
 {
 	struct mlx4_ib_mr *mr = to_mmr(ibmr);
+	int ret;
 
-	mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
+	ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
+	if (ret)
+		return ret;
 	if (mr->umem)
 		ib_umem_release(mr->umem);
 	kfree(mr);
@@ -186,6 +199,70 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
 	return 0;
 }
 
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
+{
+	struct mlx4_ib_dev *dev = to_mdev(pd->device);
+	struct mlx4_ib_mw *mw;
+	int err;
+
+	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
+	if (!mw)
+		return ERR_PTR(-ENOMEM);
+
+	err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
+			    to_mlx4_type(type), &mw->mmw);
+	if (err)
+		goto err_free;
+
+	err = mlx4_mw_enable(dev->dev, &mw->mmw);
+	if (err)
+		goto err_mw;
+
+	mw->ibmw.rkey = mw->mmw.key;
+
+	return &mw->ibmw;
+
+err_mw:
+	mlx4_mw_free(dev->dev, &mw->mmw);
+
+err_free:
+	kfree(mw);
+
+	return ERR_PTR(err);
+}
+
+int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
+		    struct ib_mw_bind *mw_bind)
+{
+	struct ib_send_wr  wr;
+	struct ib_send_wr *bad_wr;
+	int ret;
+
+	memset(&wr, 0, sizeof(wr));
+
+	wr.opcode               = IB_WR_BIND_MW;
+	wr.wr_id                = mw_bind->wr_id;
+	wr.send_flags           = mw_bind->send_flags;
+	wr.wr.bind_mw.mw        = mw;
+	wr.wr.bind_mw.bind_info = mw_bind->bind_info;
+	wr.wr.bind_mw.rkey      = ib_inc_rkey(mw->rkey);
+
+	ret = mlx4_ib_post_send(qp, &wr, &bad_wr);
+	if (!ret)
+		mw->rkey = wr.wr.bind_mw.rkey;
+
+	return ret;
+}
+
+int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
+{
+	struct mlx4_ib_mw *mw = to_mmw(ibmw);
+
+	mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
+	kfree(mw);
+
+	return 0;
+}
+
 struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
 					int max_page_list_len)
 {
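At bind time a fresh rkey is produced with ib_inc_rkey(), which only bumps the low-order tag byte, so the upper 24 bits keep indexing the same MPT entry while previously advertised rkeys become stale. A sketch of its semantics (not the authoritative definition):

	static inline u32 sketch_inc_rkey(u32 rkey)
	{
		const u32 mask = 0x000000ff;	/* variant/tag byte */

		return ((rkey + 1) & mask) | (rkey & ~mask);
	}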
@@ -212,7 +289,7 @@ struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
 	return &mr->ibmr;
 
 err_mr:
-	mlx4_mr_free(dev->dev, &mr->mmr);
+	(void) mlx4_mr_free(dev->dev, &mr->mmr);
 
 err_free:
 	kfree(mr);
@@ -291,7 +368,7 @@ struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
 	return &fmr->ibfmr;
 
 err_mr:
-	mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
+	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
 
 err_free:
 	kfree(fmr);
...
@@ -104,6 +104,7 @@ static const __be32 mlx4_ib_opcode[] = {
 	[IB_WR_FAST_REG_MR]			= cpu_to_be32(MLX4_OPCODE_FMR),
 	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
 	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
+	[IB_WR_BIND_MW]				= cpu_to_be32(MLX4_OPCODE_BIND_MW),
 };
 
 static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
@@ -1746,11 +1747,11 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 	int header_size;
 	int spc;
 	int i;
-	int is_eth;
-	int is_vlan = 0;
-	int is_grh;
-	u16 vlan;
 	int err = 0;
+	u16 vlan = 0xffff;
+	bool is_eth;
+	bool is_vlan = false;
+	bool is_grh;
 
 	send_size = 0;
 	for (i = 0; i < wr->num_sge; ++i)
@@ -1953,9 +1954,12 @@ static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq
 static __be32 convert_access(int acc)
 {
-	return (acc & IB_ACCESS_REMOTE_ATOMIC ? cpu_to_be32(MLX4_WQE_FMR_PERM_ATOMIC)       : 0) |
-	       (acc & IB_ACCESS_REMOTE_WRITE  ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_WRITE) : 0) |
-	       (acc & IB_ACCESS_REMOTE_READ   ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_READ)  : 0) |
+	return (acc & IB_ACCESS_REMOTE_ATOMIC ?
+		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC)       : 0) |
+	       (acc & IB_ACCESS_REMOTE_WRITE  ?
+		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
+	       (acc & IB_ACCESS_REMOTE_READ   ?
+		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ)  : 0) |
 	       (acc & IB_ACCESS_LOCAL_WRITE   ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE)  : 0) |
 	       cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
 }
@@ -1981,12 +1985,28 @@ static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
 	fseg->reserved[1]	= 0;
 }
 
+static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr)
+{
+	bseg->flags1 =
+		convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) &
+		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ  |
+			    MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE |
+			    MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC);
+	bseg->flags2 = 0;
+	if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2)
+		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2);
+	if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED)
+		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED);
+	bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey);
+	bseg->lkey = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey);
+	bseg->addr = cpu_to_be64(wr->wr.bind_mw.bind_info.addr);
+	bseg->length = cpu_to_be64(wr->wr.bind_mw.bind_info.length);
+}
+
 static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
 {
-	iseg->flags	= 0;
-	iseg->mem_key	= cpu_to_be32(rkey);
-	iseg->guest_id	= 0;
-	iseg->pa	= 0;
+	memset(iseg, 0, sizeof(*iseg));
+	iseg->mem_key = cpu_to_be32(rkey);
 }
 
 static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
@@ -2291,6 +2311,13 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
 				break;
 
+			case IB_WR_BIND_MW:
+				ctrl->srcrb_flags |=
+					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
+				set_bind_seg(wqe, wr);
+				wqe  += sizeof(struct mlx4_wqe_bind_seg);
+				size += sizeof(struct mlx4_wqe_bind_seg) / 16;
+				break;
+
 			default:
 				/* No extra segments required for sends */
 				break;
...
@@ -732,7 +732,7 @@ int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev)
 	dev->ports_parent =
 		kobject_create_and_add("ports",
 				       kobject_get(dev->iov_parent));
-	if (!dev->iov_parent) {
+	if (!dev->ports_parent) {
 		ret = -ENOMEM;
 		goto err_ports;
 	}
...
@@ -268,8 +268,9 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 		     qpp = &q->next)
 			if (q == qp) {
 				atomic_dec(&qp->refcount);
-				*qpp = qp->next;
-				rcu_assign_pointer(qp->next, NULL);
+				rcu_assign_pointer(*qpp,
+					rcu_dereference_protected(qp->next,
+					 lockdep_is_held(&dev->qpt_lock)));
 				break;
 			}
 	}
...
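This is the standard writer-side unlink for an RCU-protected singly linked list: publish the successor with rcu_assign_pointer() and fetch it with rcu_dereference_protected(), documenting the lock that makes the plain read safe. A generic sketch:

	struct node {
		struct node __rcu *next;
	};

	/* Caller holds 'lock', the writer-side lock for the list. */
	static void unlink_node(struct node __rcu **pprev, struct node *n,
				spinlock_t *lock)
	{
		rcu_assign_pointer(*pprev,
			rcu_dereference_protected(n->next, lockdep_is_held(lock)));
		/* n may only be freed after a grace period, e.g. via call_rcu() */
	}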
@@ -117,6 +117,8 @@ enum {
 #define	IPOIB_OP_CM	(0)
 #endif
 
+#define IPOIB_QPN_MASK ((__force u32) cpu_to_be32(0xFFFFFF))
+
 /* structs */
 
 struct ipoib_header {
@@ -760,4 +762,6 @@ extern int ipoib_debug_level;
 
 #define IPOIB_QPN(ha) (be32_to_cpup((__be32 *) ha) & 0xffffff)
 
+extern const char ipoib_driver_version[];
+
 #endif /* _IPOIB_H */
@@ -39,7 +39,24 @@
 static void ipoib_get_drvinfo(struct net_device *netdev,
 			      struct ethtool_drvinfo *drvinfo)
 {
-	strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1);
+	struct ipoib_dev_priv *priv = netdev_priv(netdev);
+	struct ib_device_attr *attr;
+
+	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
+	if (attr && !ib_query_device(priv->ca, attr))
+		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+			 "%d.%d.%d", (int)(attr->fw_ver >> 32),
+			 (int)(attr->fw_ver >> 16) & 0xffff,
+			 (int)attr->fw_ver & 0xffff);
+	kfree(attr);
+
+	strlcpy(drvinfo->bus_info, dev_name(priv->ca->dma_device),
+		sizeof(drvinfo->bus_info));
+
+	strlcpy(drvinfo->version, ipoib_driver_version,
+		sizeof(drvinfo->version));
+
+	strlcpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver));
 }
 
 static int ipoib_get_coalesce(struct net_device *dev,
...
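After this change, `ethtool -i` on an IPoIB interface should report real driver, version, firmware and bus information instead of a bare driver name; something like the following (values hypothetical, taken from the fields the code fills in):

	driver: ib_ipoib
	version: 1.0.0
	firmware-version: 2.11.500
	bus-info: 0000:07:00.0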
@@ -49,9 +49,14 @@
 #include <linux/jhash.h>
 #include <net/arp.h>
 
+#define DRV_VERSION "1.0.0"
+
+const char ipoib_driver_version[] = DRV_VERSION;
+
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
 
 int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
 int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;
@@ -505,6 +510,9 @@ static void path_rec_completion(int status,
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
+	if (IS_ERR_OR_NULL(ah))
+		ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
+
 	if (old_ah)
 		ipoib_put_ah(old_ah);
@@ -844,10 +852,10 @@ static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
 	 * different subnets.
 	 */
 	 /* qpn octets[1:4) & port GUID octets[12:20) */
-	u32 *daddr_32 = (u32 *) daddr;
+	u32 *d32 = (u32 *) daddr;
 	u32 hv;
 
-	hv = jhash_3words(daddr_32[3], daddr_32[4], 0xFFFFFF & daddr_32[0], 0);
+	hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
 
 	return hv & htbl->mask;
 }
@@ -1688,6 +1696,8 @@ static void ipoib_remove_one(struct ib_device *device)
 		return;
 
 	dev_list = ib_get_client_data(device, &ipoib_client);
+	if (!dev_list)
+		return;
 
 	list_for_each_entry_safe(priv, tmp, dev_list, list) {
 		ib_unregister_event_handler(&priv->event_handler);
...
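IPOIB_QPN_MASK works because the hardware address keeps the 24-bit QPN in its first four bytes in network order; loading those bytes as a host-endian u32 and masking with (__force u32) cpu_to_be32(0xFFFFFF) keeps exactly the QPN octets on either endianness. A worked example, written as a comment:

	/* hw addr bytes: { reserved, qpn[23:16], qpn[15:8], qpn[7:0], gid... }
	 *
	 * little endian: d32[0]         = b3<<24 | b2<<16 | b1<<8 | b0
	 *                IPOIB_QPN_MASK = swab32(0x00FFFFFF) = 0xFFFFFF00
	 *                d32[0] & mask  -> keeps b1..b3 (the QPN octets)
	 *
	 * big endian:    d32[0]         = b0<<24 | b1<<16 | b2<<8 | b3
	 *                IPOIB_QPN_MASK = 0x00FFFFFF
	 *                d32[0] & mask  -> again keeps b1..b3 only
	 */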
@@ -94,7 +94,7 @@
 /* support up to 512KB in one RDMA */
 #define ISCSI_ISER_SG_TABLESIZE         (0x80000 >> SHIFT_4K)
-#define ISER_DEF_CMD_PER_LUN		128
+#define ISER_DEF_CMD_PER_LUN		ISCSI_DEF_XMIT_CMDS_MAX
 
 /* QP settings */
 /* Maximal bounds on received asynchronous PDUs */
...
@@ -369,10 +369,11 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
 	regd_buf = &iser_task->rdma_regd[cmd_dir];
 
 	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
-	if (aligned_len != mem->dma_nents) {
+	if (aligned_len != mem->dma_nents ||
+	    (!ib_conn->fmr_pool && mem->dma_nents > 1)) {
 		iscsi_conn->fmr_unalign_cnt++;
-		iser_warn("rdma alignment violation %d/%d aligned\n",
+		iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
 			  aligned_len, mem->size);
 		iser_data_buf_dump(mem, ibdev);
 
 		/* unmap the command data before accessing it */
@@ -404,7 +405,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
 	} else { /* use FMR for multiple dma entries */
 		iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
 		err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
-		if (err) {
+		if (err && err != -EAGAIN) {
 			iser_data_buf_dump(mem, ibdev);
 			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
 				 mem->dma_nents,
...
@@ -242,10 +242,14 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 				    IB_ACCESS_REMOTE_READ);
 
 	ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
-	if (IS_ERR(ib_conn->fmr_pool)) {
-		ret = PTR_ERR(ib_conn->fmr_pool);
+	ret = PTR_ERR(ib_conn->fmr_pool);
+	if (IS_ERR(ib_conn->fmr_pool) && ret != -ENOSYS) {
 		ib_conn->fmr_pool = NULL;
 		goto out_err;
+	} else if (ret == -ENOSYS) {
+		ib_conn->fmr_pool = NULL;
+		iser_warn("FMRs are not supported, using unaligned mode\n");
+		ret = 0;
 	}
 
 	memset(&init_attr, 0, sizeof init_attr);
...
@@ -700,23 +700,24 @@ static int srp_reconnect_target(struct srp_target_port *target)
 	struct Scsi_Host *shost = target->scsi_host;
 	int i, ret;
 
-	if (target->state != SRP_TARGET_LIVE)
-		return -EAGAIN;
-
 	scsi_target_block(&shost->shost_gendev);
 
 	srp_disconnect_target(target);
 	/*
-	 * Now get a new local CM ID so that we avoid confusing the
-	 * target in case things are really fouled up.
+	 * Now get a new local CM ID so that we avoid confusing the target in
+	 * case things are really fouled up. Doing so also ensures that all CM
+	 * callbacks will have finished before a new QP is allocated.
 	 */
 	ret = srp_new_cm_id(target);
-	if (ret)
-		goto unblock;
-
-	ret = srp_create_target_ib(target);
-	if (ret)
-		goto unblock;
+	/*
+	 * Whether or not creating a new CM ID succeeded, create a new
+	 * QP. This guarantees that all completion callback function
+	 * invocations have finished before request resetting starts.
+	 */
+	if (ret == 0)
+		ret = srp_create_target_ib(target);
+	else
+		srp_create_target_ib(target);
 
 	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
 		struct srp_request *req = &target->req_ring[i];
@@ -728,11 +729,12 @@ static int srp_reconnect_target(struct srp_target_port *target)
 	for (i = 0; i < SRP_SQ_SIZE; ++i)
 		list_add(&target->tx_ring[i]->list, &target->free_tx);
 
-	ret = srp_connect_target(target);
+	if (ret == 0)
+		ret = srp_connect_target(target);
 
-unblock:
 	scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING :
 			    SDEV_TRANSPORT_OFFLINE);
+	target->transport_offline = !!ret;
 
 	if (ret)
 		goto err;
@@ -1352,6 +1354,12 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 	unsigned long flags;
 	int len;
 
+	if (unlikely(target->transport_offline)) {
+		scmnd->result = DID_NO_CONNECT << 16;
+		scmnd->scsi_done(scmnd);
+		return 0;
+	}
+
 	spin_lock_irqsave(&target->lock, flags);
 	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
 	if (!iu)
@@ -1695,6 +1703,9 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 	struct srp_iu *iu;
 	struct srp_tsk_mgmt *tsk_mgmt;
 
+	if (!target->connected || target->qp_in_error)
+		return -1;
+
 	init_completion(&target->tsk_mgmt_done);
 
 	spin_lock_irq(&target->lock);
shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd)) if (!req || !srp_claim_req(target, req, scmnd))
return FAILED; return FAILED;
srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
SRP_TSK_ABORT_TASK); SRP_TSK_ABORT_TASK);
...@@ -1754,8 +1765,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) ...@@ -1754,8 +1765,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
if (target->qp_in_error)
return FAILED;
if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun, if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
SRP_TSK_LUN_RESET)) SRP_TSK_LUN_RESET))
return FAILED; return FAILED;
...@@ -1972,7 +1981,6 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target) ...@@ -1972,7 +1981,6 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
spin_unlock(&host->target_lock); spin_unlock(&host->target_lock);
target->state = SRP_TARGET_LIVE; target->state = SRP_TARGET_LIVE;
target->connected = false;
scsi_scan_target(&target->scsi_host->shost_gendev, scsi_scan_target(&target->scsi_host->shost_gendev,
0, target->scsi_id, SCAN_WILD_CARD, 0); 0, target->scsi_id, SCAN_WILD_CARD, 0);
......
@@ -140,6 +140,7 @@ struct srp_target_port {
 	unsigned int		cmd_sg_cnt;
 	unsigned int		indirect_size;
 	bool			allow_ext_sg;
+	bool			transport_offline;
 
 	/* Everything above this point is used in the hot path of
 	 * command processing. Try to keep them packed into cachelines.
...
@@ -176,7 +176,7 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
 	flush_workqueue(mdev->workqueue);
 	destroy_workqueue(mdev->workqueue);
-	mlx4_mr_free(dev, &mdev->mr);
+	(void) mlx4_mr_free(dev, &mdev->mr);
 	iounmap(mdev->uar_map);
 	mlx4_uar_free(dev, &mdev->priv_uar);
 	mlx4_pd_free(dev, mdev->priv_pdn);
@@ -283,7 +283,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 	return mdev;
 
 err_mr:
-	mlx4_mr_free(dev, &mdev->mr);
+	(void) mlx4_mr_free(dev, &mdev->mr);
 err_map:
 	if (!mdev->uar_map)
 		iounmap(mdev->uar_map);
...
@@ -757,15 +757,19 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
 	u64	flags;
 	int	err = 0;
 	u8	field;
+	u32	bmme_flags;
 
 	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
 			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 	if (err)
 		return err;
 
-	/* add port mng change event capability unconditionally to slaves */
+	/* add port mng change event capability and disable mw type 1
+	 * unconditionally to slaves
+	 */
 	MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
 	flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
+	flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
 	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
 
 	/* For guests, report Blueflame disabled */
@@ -773,6 +777,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
 	field &= 0x7f;
 	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
 
+	/* For guests, disable mw type 2 */
+	MLX4_GET(bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+	bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
+	MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+
 	return 0;
 }
@@ -1198,6 +1207,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 #define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x26)
 #define INIT_HCA_TPT_OFFSET		 0x0f0
 #define	 INIT_HCA_DMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x00)
+#define	 INIT_HCA_TPT_MW_OFFSET		 (INIT_HCA_TPT_OFFSET + 0x08)
 #define	 INIT_HCA_LOG_MPT_SZ_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x0b)
 #define	 INIT_HCA_MTT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x10)
 #define	 INIT_HCA_CMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x18)
@@ -1314,6 +1324,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 	/* TPT attributes */
 
 	MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
+	MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
 	MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
 	MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
 	MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);
@@ -1410,6 +1421,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
 	/* TPT attributes */
 
 	MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
+	MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
 	MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
 	MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
 	MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);
...
@@ -170,6 +170,7 @@ struct mlx4_init_hca_param {
 	u8  log_mc_table_sz;
 	u8  log_mpt_sz;
 	u8  log_uar_sz;
+	u8  mw_enabled;  /* Enable memory windows */
 	u8  uar_page_sz; /* log pg sz in 4k chunks */
 	u8  fs_hash_enable_bits;
 	u8  steering_mode; /* for QUERY_HCA */
...
@@ -1447,6 +1447,10 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
 		init_hca.uar_page_sz = PAGE_SHIFT - 12;
+		init_hca.mw_enabled = 0;
+		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
+		    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
+			init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;
 
 		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
 		if (err)
...
@@ -60,6 +60,8 @@
 #define MLX4_FS_MGM_LOG_ENTRY_SIZE	7
 #define MLX4_FS_NUM_MCG			(1 << 17)
 
+#define INIT_HCA_TPT_MW_ENABLE		(1 << 7)
+
 enum {
 	MLX4_FS_L2_HASH = 0,
 	MLX4_FS_L2_L3_L4_HASH,
@@ -118,10 +120,10 @@ enum {
 	MLX4_NUM_CMPTS		= MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
 };
 
-enum mlx4_mr_state {
-	MLX4_MR_DISABLED = 0,
-	MLX4_MR_EN_HW,
-	MLX4_MR_EN_SW
+enum mlx4_mpt_state {
+	MLX4_MPT_DISABLED = 0,
+	MLX4_MPT_EN_HW,
+	MLX4_MPT_EN_SW
 };
 
 #define MLX4_COMM_TIME		10000
@@ -268,6 +270,22 @@ struct mlx4_icm_table {
 	struct mlx4_icm	      **icm;
 };
 
+#define MLX4_MPT_FLAG_SW_OWNS	    (0xfUL << 28)
+#define MLX4_MPT_FLAG_FREE	    (0x3UL << 28)
+#define MLX4_MPT_FLAG_MIO	    (1 << 17)
+#define MLX4_MPT_FLAG_BIND_ENABLE   (1 << 15)
+#define MLX4_MPT_FLAG_PHYSICAL	    (1 << 9)
+#define MLX4_MPT_FLAG_REGION	    (1 << 8)
+
+#define MLX4_MPT_PD_FLAG_FAST_REG   (1 << 27)
+#define MLX4_MPT_PD_FLAG_RAE	    (1 << 28)
+#define MLX4_MPT_PD_FLAG_EN_INV	    (3 << 24)
+
+#define MLX4_MPT_QP_FLAG_BOUND_QP   (1 << 7)
+
+#define MLX4_MPT_STATUS_SW		0xF0
+#define MLX4_MPT_STATUS_HW		0x00
+
 /*
  * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
  */
@@ -871,10 +889,10 @@ int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
 void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
 int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
 void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
-int __mlx4_mr_reserve(struct mlx4_dev *dev);
-void __mlx4_mr_release(struct mlx4_dev *dev, u32 index);
-int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index);
-void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index);
+int __mlx4_mpt_reserve(struct mlx4_dev *dev);
+void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index);
+int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index);
+void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index);
 u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
 void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
...
@@ -44,20 +44,6 @@
 #include "mlx4.h"
 #include "icm.h"
 
-#define MLX4_MPT_FLAG_SW_OWNS	    (0xfUL << 28)
-#define MLX4_MPT_FLAG_FREE	    (0x3UL << 28)
-#define MLX4_MPT_FLAG_MIO	    (1 << 17)
-#define MLX4_MPT_FLAG_BIND_ENABLE   (1 << 15)
-#define MLX4_MPT_FLAG_PHYSICAL	    (1 << 9)
-#define MLX4_MPT_FLAG_REGION	    (1 << 8)
-
-#define MLX4_MPT_PD_FLAG_FAST_REG   (1 << 27)
-#define MLX4_MPT_PD_FLAG_RAE	    (1 << 28)
-#define MLX4_MPT_PD_FLAG_EN_INV	    (3 << 24)
-
-#define MLX4_MPT_STATUS_SW		0xF0
-#define MLX4_MPT_STATUS_HW		0x00
-
 static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
 {
 	int o;
@@ -321,7 +307,7 @@ static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
 	mr->size       = size;
 	mr->pd	       = pd;
 	mr->access     = access;
-	mr->enabled    = MLX4_MR_DISABLED;
+	mr->enabled    = MLX4_MPT_DISABLED;
 	mr->key	       = hw_index_to_key(mridx);
 
 	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
@@ -335,14 +321,14 @@ static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
 			MLX4_CMD_TIME_CLASS_A,  MLX4_CMD_WRAPPED);
 }
 
-int __mlx4_mr_reserve(struct mlx4_dev *dev)
+int __mlx4_mpt_reserve(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
 	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
 }
 
-static int mlx4_mr_reserve(struct mlx4_dev *dev)
+static int mlx4_mpt_reserve(struct mlx4_dev *dev)
 {
 	u64 out_param;
 
@@ -353,17 +339,17 @@ static int mlx4_mpt_reserve(struct mlx4_dev *dev)
 			return -1;
 		return get_param_l(&out_param);
 	}
-	return __mlx4_mr_reserve(dev);
+	return __mlx4_mpt_reserve(dev);
 }
 
-void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
 	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
 }
 
-static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
 {
 	u64 in_param;
 
@@ -376,17 +362,17 @@ static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
 			  index);
 		return;
 	}
-	__mlx4_mr_release(dev, index);
+	__mlx4_mpt_release(dev, index);
 }
 
-int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
 {
 	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 
 	return mlx4_table_get(dev, &mr_table->dmpt_table, index);
 }
 
-static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
 {
 	u64 param;
 
@@ -397,17 +383,17 @@ static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
 				MLX4_CMD_TIME_CLASS_A,
 				MLX4_CMD_WRAPPED);
 	}
-	return __mlx4_mr_alloc_icm(dev, index);
+	return __mlx4_mpt_alloc_icm(dev, index);
 }
 
-void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
 {
 	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 
 	mlx4_table_put(dev, &mr_table->dmpt_table, index);
 }
 
-static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
 {
 	u64 in_param;
 
@@ -420,7 +406,7 @@ static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
 			  index);
 		return;
 	}
-	return __mlx4_mr_free_icm(dev, index);
+	return __mlx4_mpt_free_icm(dev, index);
 }
 
 int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
@@ -429,41 +415,52 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
 	u32 index;
 	int err;
 
-	index = mlx4_mr_reserve(dev);
+	index = mlx4_mpt_reserve(dev);
 	if (index == -1)
 		return -ENOMEM;
 
 	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
 				     access, npages, page_shift, mr);
 	if (err)
-		mlx4_mr_release(dev, index);
+		mlx4_mpt_release(dev, index);
 
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
 
-static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
+static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
 	int err;
 
-	if (mr->enabled == MLX4_MR_EN_HW) {
+	if (mr->enabled == MLX4_MPT_EN_HW) {
 		err = mlx4_HW2SW_MPT(dev, NULL,
 				     key_to_hw_index(mr->key) &
 				     (dev->caps.num_mpts - 1));
-		if (err)
-			mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
+		if (err) {
+			mlx4_warn(dev, "HW2SW_MPT failed (%d),", err);
+			mlx4_warn(dev, "MR has MWs bound to it.\n");
+			return err;
+		}
 
-		mr->enabled = MLX4_MR_EN_SW;
+		mr->enabled = MLX4_MPT_EN_SW;
 	}
 	mlx4_mtt_cleanup(dev, &mr->mtt);
+
+	return 0;
 }
 
-void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
-	mlx4_mr_free_reserved(dev, mr);
+	int ret;
+
+	ret = mlx4_mr_free_reserved(dev, mr);
+	if (ret)
+		return ret;
+
 	if (mr->enabled)
-		mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
-	mlx4_mr_release(dev, key_to_hw_index(mr->key));
+		mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
+	mlx4_mpt_release(dev, key_to_hw_index(mr->key));
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_free);
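Because HW2SW_MPT can now legitimately fail (for example when windows are still bound to the region), mlx4_mr_free() returns an error and callers must not treat freeing as infallible; regular paths check it, while error-unwind paths cast the result to (void) as seen throughout this commit. A sketch of a checking caller:

	int err;

	err = mlx4_mr_free(dev, &mr->mmr);
	if (err)
		return err;	/* MR still live in HW; do not kfree() it */
	kfree(mr);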
@@ -473,7 +470,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 	struct mlx4_mpt_entry *mpt_entry;
 	int err;
 
-	err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
+	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
 	if (err)
 		return err;
 
@@ -520,7 +517,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
 		goto err_cmd;
 	}
-	mr->enabled = MLX4_MR_EN_HW;
+	mr->enabled = MLX4_MPT_EN_HW;
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -530,7 +527,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 	mlx4_free_cmd_mailbox(dev, mailbox);
 
 err_table:
-	mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
+	mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_enable);
@@ -657,6 +654,101 @@ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 }
 EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
 
+int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
+		  struct mlx4_mw *mw)
+{
+	u32 index;
+
+	if ((type == MLX4_MW_TYPE_1 &&
+	     !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) ||
+	    (type == MLX4_MW_TYPE_2 &&
+	     !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)))
+		return -ENOTSUPP;
+
+	index = mlx4_mpt_reserve(dev);
+	if (index == -1)
+		return -ENOMEM;
+
+	mw->key	    = hw_index_to_key(index);
+	mw->pd      = pd;
+	mw->type    = type;
+	mw->enabled = MLX4_MPT_DISABLED;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_mw_alloc);
+
+int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_mpt_entry *mpt_entry;
+	int err;
+
+	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
+	if (err)
+		return err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox)) {
+		err = PTR_ERR(mailbox);
+		goto err_table;
+	}
+	mpt_entry = mailbox->buf;
+
+	memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+	/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
+	 * off, thus creating a memory window and not a memory region.
+	 */
+	mpt_entry->key	    = cpu_to_be32(key_to_hw_index(mw->key));
+	mpt_entry->pd_flags = cpu_to_be32(mw->pd);
+	if (mw->type == MLX4_MW_TYPE_2) {
+		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+		mpt_entry->qpn       = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP);
+		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV);
+	}
+
+	err = mlx4_SW2HW_MPT(dev, mailbox,
+			     key_to_hw_index(mw->key) &
+			     (dev->caps.num_mpts - 1));
+	if (err) {
+		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
+		goto err_cmd;
+	}
+	mw->enabled = MLX4_MPT_EN_HW;
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+
+	return 0;
+
+err_cmd:
+	mlx4_free_cmd_mailbox(dev, mailbox);
+
+err_table:
+	mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_mw_enable);
+
+void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw)
+{
+	int err;
+
+	if (mw->enabled == MLX4_MPT_EN_HW) {
+		err = mlx4_HW2SW_MPT(dev, NULL,
+				     key_to_hw_index(mw->key) &
+				     (dev->caps.num_mpts - 1));
+		if (err)
+			mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
+
+		mw->enabled = MLX4_MPT_EN_SW;
+	}
+	if (mw->enabled)
+		mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
+	mlx4_mpt_release(dev, key_to_hw_index(mw->key));
+}
+EXPORT_SYMBOL_GPL(mlx4_mw_free);
+
 int mlx4_init_mr_table(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -831,7 +923,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
 	return 0;
 
 err_free:
-	mlx4_mr_free(dev, &fmr->mr);
+	(void) mlx4_mr_free(dev, &fmr->mr);
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
@@ -882,17 +974,21 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
 			  err);
 		return;
 	}
-	fmr->mr.enabled = MLX4_MR_EN_SW;
+	fmr->mr.enabled = MLX4_MPT_EN_SW;
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
 
 int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
 {
+	int ret;
+
 	if (fmr->maps)
 		return -EBUSY;
 
-	mlx4_mr_free(dev, &fmr->mr);
-	fmr->mr.enabled = MLX4_MR_DISABLED;
+	ret = mlx4_mr_free(dev, &fmr->mr);
+	if (ret)
+		return ret;
+	fmr->mr.enabled = MLX4_MPT_DISABLED;
 
 	return 0;
 }
...
@@ -1231,14 +1231,14 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
 	switch (op) {
 	case RES_OP_RESERVE:
-		index = __mlx4_mr_reserve(dev);
+		index = __mlx4_mpt_reserve(dev);
 		if (index == -1)
 			break;
 		id = index & mpt_mask(dev);
 
 		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
 		if (err) {
-			__mlx4_mr_release(dev, index);
+			__mlx4_mpt_release(dev, index);
 			break;
 		}
 		set_param_l(out_param, index);
@@ -1251,7 +1251,7 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
 		if (err)
 			return err;
 
-		err = __mlx4_mr_alloc_icm(dev, mpt->key);
+		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
 		if (err) {
 			res_abort_move(dev, slave, RES_MPT, id);
 			return err;
@@ -1586,7 +1586,7 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
 		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
 		if (err)
 			break;
-		__mlx4_mr_release(dev, index);
+		__mlx4_mpt_release(dev, index);
 		break;
 	case RES_OP_MAP_ICM:
 		index = get_param_l(&in_param);
@@ -1596,7 +1596,7 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
 		if (err)
 			return err;
 
-		__mlx4_mr_free_icm(dev, mpt->key);
+		__mlx4_mpt_free_icm(dev, mpt->key);
 		res_end_move(dev, slave, RES_MPT, id);
 		return err;
 		break;
@@ -1796,6 +1796,26 @@ static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
 	return be32_to_cpu(mpt->mtt_sz);
 }
 
+static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
+{
+	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
+}
+
+static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
+{
+	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
+}
+
+static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
+{
+	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
+}
+
+static int mr_is_region(struct mlx4_mpt_entry *mpt)
+{
+	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
+}
+
 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
 {
 	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
@@ -1856,12 +1876,41 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
 	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
 	int phys;
 	int id;
+	u32 pd;
+	int pd_slave;
 
 	id = index & mpt_mask(dev);
 	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
 	if (err)
 		return err;
 
+	/* Disable memory windows for VFs. */
+	if (!mr_is_region(inbox->buf)) {
+		err = -EPERM;
+		goto ex_abort;
+	}
+
+	/* Make sure that the PD bits related to the slave id are zeros. */
+	pd = mr_get_pd(inbox->buf);
+	pd_slave = (pd >> 17) & 0x7f;
+	if (pd_slave != 0 && pd_slave != slave) {
+		err = -EPERM;
+		goto ex_abort;
+	}
+
+	if (mr_is_fmr(inbox->buf)) {
+		/* FMR and Bind Enable are forbidden in slave devices. */
+		if (mr_is_bind_enabled(inbox->buf)) {
+			err = -EPERM;
+			goto ex_abort;
+		}
+		/* FMR and Memory Windows are also forbidden. */
+		if (!mr_is_region(inbox->buf)) {
+			err = -EPERM;
+			goto ex_abort;
+		}
+	}
+
 	phys = mr_phys_mpt(inbox->buf);
 	if (!phys) {
 		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
@@ -3480,7 +3529,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
 			while (state != 0) {
 				switch (state) {
 				case RES_MPT_RESERVED:
-					__mlx4_mr_release(dev, mpt->key);
+					__mlx4_mpt_release(dev, mpt->key);
 					spin_lock_irq(mlx4_tlock(dev));
 					rb_erase(&mpt->com.node,
 						 &tracker->res_tree[RES_MPT]);
@@ -3491,7 +3540,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
 					break;
 
 				case RES_MPT_MAPPED:
-					__mlx4_mr_free_icm(dev, mpt->key);
+					__mlx4_mpt_free_icm(dev, mpt->key);
 					state = RES_MPT_RESERVED;
 					break;
...
@@ -170,6 +170,7 @@ enum {
 #define MLX4_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)
 
 enum {
+	MLX4_BMME_FLAG_WIN_TYPE_2B	= 1 << 1,
 	MLX4_BMME_FLAG_LOCAL_INV	= 1 << 6,
 	MLX4_BMME_FLAG_REMOTE_INV	= 1 << 7,
 	MLX4_BMME_FLAG_TYPE_2_WIN	= 1 << 9,
@@ -237,7 +238,8 @@ enum {
 	MLX4_PERM_LOCAL_WRITE	= 1 << 11,
 	MLX4_PERM_REMOTE_READ	= 1 << 12,
 	MLX4_PERM_REMOTE_WRITE	= 1 << 13,
-	MLX4_PERM_ATOMIC	= 1 << 14
+	MLX4_PERM_ATOMIC	= 1 << 14,
+	MLX4_PERM_BIND_MW	= 1 << 15,
 };
 
 enum {
@@ -503,6 +505,18 @@ struct mlx4_mr {
 	int			enabled;
 };
 
+enum mlx4_mw_type {
+	MLX4_MW_TYPE_1 = 1,
+	MLX4_MW_TYPE_2 = 2,
+};
+
+struct mlx4_mw {
+	u32			key;
+	u32			pd;
+	enum mlx4_mw_type	type;
+	int			enabled;
+};
+
 struct mlx4_fmr {
 	struct mlx4_mr		mr;
 	struct mlx4_mpt_entry  *mpt;
@@ -801,8 +815,12 @@ u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
 int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
 		  int npages, int page_shift, struct mlx4_mr *mr);
-void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
+int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
 int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
+int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
+		  struct mlx4_mw *mw);
+void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw);
+int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw);
 int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 		   int start_index, int npages, u64 *page_list);
 int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
...
@@ -265,6 +265,11 @@ struct mlx4_wqe_lso_seg {
 	__be32			header[0];
 };
 
+enum mlx4_wqe_bind_seg_flags2 {
+	MLX4_WQE_BIND_ZERO_BASED	= (1 << 30),
+	MLX4_WQE_BIND_TYPE_2		= (1 << 31),
+};
+
 struct mlx4_wqe_bind_seg {
 	__be32			flags1;
 	__be32			flags2;
@@ -277,9 +282,9 @@ struct mlx4_wqe_bind_seg {
 enum {
 	MLX4_WQE_FMR_PERM_LOCAL_READ		= 1 << 27,
 	MLX4_WQE_FMR_PERM_LOCAL_WRITE		= 1 << 28,
-	MLX4_WQE_FMR_PERM_REMOTE_READ		= 1 << 29,
-	MLX4_WQE_FMR_PERM_REMOTE_WRITE		= 1 << 30,
-	MLX4_WQE_FMR_PERM_ATOMIC		= 1 << 31
+	MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ	= 1 << 29,
+	MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE	= 1 << 30,
+	MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC	= 1 << 31
 };
 
 struct mlx4_wqe_fmr_seg {
@@ -304,12 +309,10 @@ struct mlx4_wqe_fmr_ext_seg {
 };
 
 struct mlx4_wqe_local_inval_seg {
-	__be32			flags;
-	u32			reserved1;
+	u64			reserved1;
 	__be32			mem_key;
-	u32			reserved2[2];
-	__be32			guest_id;
-	__be64			pa;
+	u32			reserved2;
+	u64			reserved3[2];
 };
 
 struct mlx4_wqe_raddr_seg {
...
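The re-laid-out local-invalidate segment must stay byte-for-byte compatible with the hardware WQE format: both the old and new definitions occupy 32 bytes, with mem_key still at offset 8. A compile-time check one could add (a sketch, assuming BUILD_BUG_ON from <linux/kernel.h>):

	static inline void mlx4_wqe_local_inval_seg_layout_check(void)
	{
		/* 8 + 4 + 4 + 16 == 32, same as the old flags/guest_id/pa layout */
		BUILD_BUG_ON(sizeof(struct mlx4_wqe_local_inval_seg) != 32);
		BUILD_BUG_ON(offsetof(struct mlx4_wqe_local_inval_seg, mem_key) != 8);
	}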