Commit 0266a177 authored by Long Li, committed by Leon Romanovsky

RDMA/mana_ib: Add a driver for Microsoft Azure Network Adapter

Add an RDMA VF driver for Microsoft Azure Network Adapter (MANA).
Co-developed-by: Ajay Sharma <sharmaajay@microsoft.com>
Signed-off-by: Ajay Sharma <sharmaajay@microsoft.com>
Reviewed-by: Dexuan Cui <decui@microsoft.com>
Signed-off-by: Long Li <longli@microsoft.com>
Link: https://lore.kernel.org/r/1667502990-2559-13-git-send-email-longli@linuxonhyperv.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent 1ec56174
@@ -13669,6 +13669,15 @@ F: drivers/scsi/smartpqi/smartpqi*.[ch]
F: include/linux/cciss*.h
F: include/uapi/linux/cciss*.h

MICROSOFT MANA RDMA DRIVER
M: Long Li <longli@microsoft.com>
M: Ajay Sharma <sharmaajay@microsoft.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/mana/
F: include/net/mana
F: include/uapi/rdma/mana-abi.h

MICROSOFT SURFACE AGGREGATOR TABLET-MODE SWITCH
M: Maximilian Luz <luzmaximilian@gmail.com>
L: platform-driver-x86@vger.kernel.org
@@ -85,6 +85,7 @@ source "drivers/infiniband/hw/erdma/Kconfig"
source "drivers/infiniband/hw/hfi1/Kconfig"
source "drivers/infiniband/hw/hns/Kconfig"
source "drivers/infiniband/hw/irdma/Kconfig"
source "drivers/infiniband/hw/mana/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/mlx5/Kconfig"
source "drivers/infiniband/hw/mthca/Kconfig"
@@ -4,6 +4,7 @@ obj-$(CONFIG_INFINIBAND_QIB) += qib/
obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/
obj-$(CONFIG_INFINIBAND_EFA) += efa/
obj-$(CONFIG_INFINIBAND_IRDMA) += irdma/
obj-$(CONFIG_MANA_INFINIBAND) += mana/
obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/
obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
# SPDX-License-Identifier: GPL-2.0-only
config MANA_INFINIBAND
tristate "Microsoft Azure Network Adapter support"
depends on NETDEVICES && ETHERNET && PCI && MICROSOFT_MANA
help
This driver provides low-level RDMA support for the Microsoft Azure
Network Adapter (MANA). MANA supports RDMA features that can be used
by workloads (e.g. DPDK, MPI) that use RDMA verbs to directly access
hardware from user-mode processes in the Microsoft Azure cloud
environment.
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MANA_INFINIBAND) += mana_ib.o
mana_ib-y := device.o main.o wq.o qp.o cq.o mr.o
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022, Microsoft Corporation. All rights reserved.
*/
#include "mana_ib.h"
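/*
 * Create a completion queue on top of a user-space buffer: copy the
 * mana_ib_create_cq ABI structure from udata, pin the buffer with
 * ib_umem_get(), and register it with the hardware as a GDMA DMA region.
 * The hardware CQ ID is assigned later, at create_qp time.
 */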
int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata)
{
struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
struct ib_device *ibdev = ibcq->device;
struct mana_ib_create_cq ucmd = {};
struct mana_ib_dev *mdev;
int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
if (udata->inlen < sizeof(ucmd))
return -EINVAL;
err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
if (err) {
ibdev_dbg(ibdev,
"Failed to copy from udata for create cq, %d\n", err);
return err;
}
if (attr->cqe > MAX_SEND_BUFFERS_PER_QUEUE) {
ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
return -EINVAL;
}
cq->cqe = attr->cqe;
cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->umem)) {
err = PTR_ERR(cq->umem);
ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n",
err);
return err;
}
err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq->gdma_region);
if (err) {
ibdev_dbg(ibdev,
"Failed to create dma region for create cq, %d\n",
err);
goto err_release_umem;
}
ibdev_dbg(ibdev,
"mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
err, cq->gdma_region);
/*
* The CQ ID is not known at this time. The ID is generated at create_qp
*/
return 0;
err_release_umem:
ib_umem_release(cq->umem);
return err;
}
int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
struct ib_device *ibdev = ibcq->device;
struct mana_ib_dev *mdev;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
ib_umem_release(cq->umem);
return 0;
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022, Microsoft Corporation. All rights reserved.
*/
#include "mana_ib.h"
#include <net/mana/mana_auxiliary.h>
MODULE_DESCRIPTION("Microsoft Azure Network Adapter IB driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(NET_MANA);
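/*
 * Verbs entry points implemented by this driver; the table is registered
 * with the RDMA core via ib_set_device_ops() in mana_ib_probe() below.
 */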
static const struct ib_device_ops mana_ib_dev_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_MANA,
.uverbs_abi_ver = MANA_IB_UVERBS_ABI_VERSION,
.alloc_pd = mana_ib_alloc_pd,
.alloc_ucontext = mana_ib_alloc_ucontext,
.create_cq = mana_ib_create_cq,
.create_qp = mana_ib_create_qp,
.create_rwq_ind_table = mana_ib_create_rwq_ind_table,
.create_wq = mana_ib_create_wq,
.dealloc_pd = mana_ib_dealloc_pd,
.dealloc_ucontext = mana_ib_dealloc_ucontext,
.dereg_mr = mana_ib_dereg_mr,
.destroy_cq = mana_ib_destroy_cq,
.destroy_qp = mana_ib_destroy_qp,
.destroy_rwq_ind_table = mana_ib_destroy_rwq_ind_table,
.destroy_wq = mana_ib_destroy_wq,
.disassociate_ucontext = mana_ib_disassociate_ucontext,
.get_port_immutable = mana_ib_get_port_immutable,
.mmap = mana_ib_mmap,
.modify_qp = mana_ib_modify_qp,
.modify_wq = mana_ib_modify_wq,
.query_device = mana_ib_query_device,
.query_gid = mana_ib_query_gid,
.query_port = mana_ib_query_port,
.reg_user_mr = mana_ib_reg_user_mr,
INIT_RDMA_OBJ_SIZE(ib_cq, mana_ib_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, mana_ib_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, mana_ib_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_ucontext, mana_ib_ucontext, ibucontext),
INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mana_ib_rwq_ind_table,
ib_ind_table),
};
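/*
 * Probe the "mana.rdma" auxiliary device exposed by the MANA Ethernet
 * driver: allocate an ib_device, install the verbs ops above, and
 * register the device with the RDMA core as "mana_%d".
 */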
static int mana_ib_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
struct mana_adev *madev = container_of(adev, struct mana_adev, adev);
struct gdma_dev *mdev = madev->mdev;
struct mana_context *mc;
struct mana_ib_dev *dev;
int ret;
mc = mdev->driver_data;
dev = ib_alloc_device(mana_ib_dev, ib_dev);
if (!dev)
return -ENOMEM;
ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_ops);
dev->ib_dev.phys_port_cnt = mc->num_ports;
ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
dev->gdma_dev = mdev;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
/*
* num_comp_vectors needs to be set to the max MSI-X index
* when interrupts and event queues are implemented
*/
dev->ib_dev.num_comp_vectors = 1;
dev->ib_dev.dev.parent = mdev->gdma_context->dev;
ret = ib_register_device(&dev->ib_dev, "mana_%d",
mdev->gdma_context->dev);
if (ret) {
ib_dealloc_device(&dev->ib_dev);
return ret;
}
dev_set_drvdata(&adev->dev, dev);
return 0;
}
static void mana_ib_remove(struct auxiliary_device *adev)
{
struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);
ib_unregister_device(&dev->ib_dev);
ib_dealloc_device(&dev->ib_dev);
}
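/* Match the "mana.rdma" auxiliary device created by the MANA Ethernet driver */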
static const struct auxiliary_device_id mana_id_table[] = {
{
.name = "mana.rdma",
},
{},
};
MODULE_DEVICE_TABLE(auxiliary, mana_id_table);
static struct auxiliary_driver mana_driver = {
.name = "rdma",
.probe = mana_ib_probe,
.remove = mana_ib_remove,
.id_table = mana_id_table,
};
module_auxiliary_driver(mana_driver);
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Microsoft Corporation. All rights reserved.
*/
#ifndef _MANA_IB_H_
#define _MANA_IB_H_
#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_umem.h>
#include <rdma/mana-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <net/mana/mana.h>
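/* Bitmap of page sizes supported by the device, SZ_4K through SZ_2M */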
#define PAGE_SZ_BM \
(SZ_4K | SZ_8K | SZ_16K | SZ_32K | SZ_64K | SZ_128K | SZ_256K | \
SZ_512K | SZ_1M | SZ_2M)
/* MANA doesn't have any limit for MR size */
#define MANA_IB_MAX_MR_SIZE U64_MAX
/*
* The hardware limit on the number of MRs is greater than the maximum
* number of MRs that can be represented in 24 bits
*/
#define MANA_IB_MAX_MR 0xFFFFFFu
struct mana_ib_dev {
struct ib_device ib_dev;
struct gdma_dev *gdma_dev;
};
struct mana_ib_wq {
struct ib_wq ibwq;
struct ib_umem *umem;
int wqe;
u32 wq_buf_size;
u64 gdma_region;
u64 id;
mana_handle_t rx_object;
};
struct mana_ib_pd {
struct ib_pd ibpd;
u32 pdn;
mana_handle_t pd_handle;
/* Mutex for sharing access to vport_use_count */
struct mutex vport_mutex;
int vport_use_count;
bool tx_shortform_allowed;
u32 tx_vp_offset;
};
struct mana_ib_mr {
struct ib_mr ibmr;
struct ib_umem *umem;
mana_handle_t mr_handle;
};
struct mana_ib_cq {
struct ib_cq ibcq;
struct ib_umem *umem;
int cqe;
u64 gdma_region;
u64 id;
};
struct mana_ib_qp {
struct ib_qp ibqp;
/* Work queue info */
struct ib_umem *sq_umem;
int sqe;
u64 sq_gdma_region;
u64 sq_id;
mana_handle_t tx_object;
/* The port on the IB device, starting with 1 */
u32 port;
};
struct mana_ib_ucontext {
struct ib_ucontext ibucontext;
u32 doorbell;
};
struct mana_ib_rwq_ind_table {
struct ib_rwq_ind_table ib_ind_table;
};
int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
mana_handle_t *gdma_region);
int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
mana_handle_t gdma_region);
struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata);
int mana_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
u32 wq_attr_mask, struct ib_udata *udata);
int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata);
int mana_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int mana_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl);
struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags);
struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 iova, int access_flags,
struct ib_udata *udata);
int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
int mana_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata);
int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port_id,
struct mana_ib_pd *pd, u32 doorbell_id);
void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
u32 port);
int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
struct ib_udata *udata);
void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma);
int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable);
int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
struct ib_udata *uhw);
int mana_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props);
int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid);
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022, Microsoft Corporation. All rights reserved.
*/
#include "mana_ib.h"
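/* Access flags supported for user memory regions */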
#define VALID_MR_FLAGS \
(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ)
static enum gdma_mr_access_flags
mana_ib_verbs_to_gdma_access_flags(int access_flags)
{
enum gdma_mr_access_flags flags = GDMA_ACCESS_FLAG_LOCAL_READ;
if (access_flags & IB_ACCESS_LOCAL_WRITE)
flags |= GDMA_ACCESS_FLAG_LOCAL_WRITE;
if (access_flags & IB_ACCESS_REMOTE_WRITE)
flags |= GDMA_ACCESS_FLAG_REMOTE_WRITE;
if (access_flags & IB_ACCESS_REMOTE_READ)
flags |= GDMA_ACCESS_FLAG_REMOTE_READ;
return flags;
}
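/*
 * Issue a GDMA_CREATE_MR command to the hardware; on success, save the
 * returned lkey, rkey and MR handle in the mana_ib_mr.
 */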
static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
struct gdma_create_mr_params *mr_params)
{
struct gdma_create_mr_response resp = {};
struct gdma_create_mr_request req = {};
struct gdma_dev *mdev = dev->gdma_dev;
struct gdma_context *gc;
int err;
gc = mdev->gdma_context;
mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
sizeof(resp));
req.pd_handle = mr_params->pd_handle;
req.mr_type = mr_params->mr_type;
switch (mr_params->mr_type) {
case GDMA_MR_TYPE_GVA:
req.gva.dma_region_handle = mr_params->gva.dma_region_handle;
req.gva.virtual_address = mr_params->gva.virtual_address;
req.gva.access_flags = mr_params->gva.access_flags;
break;
default:
ibdev_dbg(&dev->ib_dev,
"invalid param (GDMA_MR_TYPE) passed, type %d\n",
req.mr_type);
return -EINVAL;
}
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u", err,
resp.hdr.status);
if (!err)
err = -EPROTO;
return err;
}
mr->ibmr.lkey = resp.lkey;
mr->ibmr.rkey = resp.rkey;
mr->mr_handle = resp.mr_handle;
return 0;
}
static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev,
gdma_obj_handle_t mr_handle)
{
struct gdma_destroy_mr_response resp = {};
struct gdma_destroy_mr_request req = {};
struct gdma_dev *mdev = dev->gdma_dev;
struct gdma_context *gc;
int err;
gc = mdev->gdma_context;
mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
sizeof(resp));
req.mr_handle = mr_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
dev_err(gc->dev, "Failed to destroy MR: %d, 0x%x\n", err,
resp.hdr.status);
if (!err)
err = -EPROTO;
return err;
}
return 0;
}
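/*
 * Register a user memory region: pin the user pages, create a GDMA DMA
 * region over them, then create an MR that refers to that DMA region.
 */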
struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 iova, int access_flags,
struct ib_udata *udata)
{
struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
struct gdma_create_mr_params mr_params = {};
struct ib_device *ibdev = ibpd->device;
gdma_obj_handle_t dma_region_handle;
struct mana_ib_dev *dev;
struct mana_ib_mr *mr;
int err;
dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
ibdev_dbg(ibdev,
"start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
start, iova, length, access_flags);
if (access_flags & ~VALID_MR_FLAGS)
return ERR_PTR(-EINVAL);
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
mr->umem = ib_umem_get(ibdev, start, length, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
ibdev_dbg(ibdev,
"Failed to get umem for register user-mr, %d\n", err);
goto err_free;
}
err = mana_ib_gd_create_dma_region(dev, mr->umem, &dma_region_handle);
if (err) {
ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
err);
goto err_umem;
}
ibdev_dbg(ibdev,
"mana_ib_gd_create_dma_region ret %d gdma_region %llx\n", err,
dma_region_handle);
mr_params.pd_handle = pd->pd_handle;
mr_params.mr_type = GDMA_MR_TYPE_GVA;
mr_params.gva.dma_region_handle = dma_region_handle;
mr_params.gva.virtual_address = iova;
mr_params.gva.access_flags =
mana_ib_verbs_to_gdma_access_flags(access_flags);
err = mana_ib_gd_create_mr(dev, mr, &mr_params);
if (err)
goto err_dma_region;
/*
* There is no need to keep track of dma_region_handle after MR is
* successfully created. The dma_region_handle is tracked in the PF
* as part of the lifecycle of this MR.
*/
return &mr->ibmr;
err_dma_region:
mana_gd_destroy_dma_region(dev->gdma_dev->gdma_context,
dma_region_handle);
err_umem:
ib_umem_release(mr->umem);
err_free:
kfree(mr);
return ERR_PTR(err);
}
int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
struct ib_device *ibdev = ibmr->device;
struct mana_ib_dev *dev;
int err;
dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
if (err)
return err;
if (mr->umem)
ib_umem_release(mr->umem);
kfree(mr);
return 0;
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022, Microsoft Corporation. All rights reserved.
*/
#include "mana_ib.h"
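/*
 * Create a receive work queue backed by a user-space buffer described by
 * the mana_ib_create_wq ABI structure; the buffer is pinned and mapped as
 * a GDMA DMA region. The WQ ID is assigned by the hardware later.
 */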
struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata)
{
struct mana_ib_dev *mdev =
container_of(pd->device, struct mana_ib_dev, ib_dev);
struct mana_ib_create_wq ucmd = {};
struct mana_ib_wq *wq;
struct ib_umem *umem;
int err;
if (udata->inlen < sizeof(ucmd))
return ERR_PTR(-EINVAL);
err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
if (err) {
ibdev_dbg(&mdev->ib_dev,
"Failed to copy from udata for create wq, %d\n", err);
return ERR_PTR(err);
}
wq = kzalloc(sizeof(*wq), GFP_KERNEL);
if (!wq)
return ERR_PTR(-ENOMEM);
ibdev_dbg(&mdev->ib_dev, "ucmd wq_buf_addr 0x%llx\n", ucmd.wq_buf_addr);
umem = ib_umem_get(pd->device, ucmd.wq_buf_addr, ucmd.wq_buf_size,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem)) {
err = PTR_ERR(umem);
ibdev_dbg(&mdev->ib_dev,
"Failed to get umem for create wq, err %d\n", err);
goto err_free_wq;
}
wq->umem = umem;
wq->wqe = init_attr->max_wr;
wq->wq_buf_size = ucmd.wq_buf_size;
wq->rx_object = INVALID_MANA_HANDLE;
err = mana_ib_gd_create_dma_region(mdev, wq->umem, &wq->gdma_region);
if (err) {
ibdev_dbg(&mdev->ib_dev,
"Failed to create dma region for create wq, %d\n",
err);
goto err_release_umem;
}
ibdev_dbg(&mdev->ib_dev,
"mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
err, wq->gdma_region);
/* The WQ ID is not known at this time; the hardware returns it at WQ create time */
return &wq->ibwq;
err_release_umem:
ib_umem_release(umem);
err_free_wq:
kfree(wq);
return ERR_PTR(err);
}
int mana_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
u32 wq_attr_mask, struct ib_udata *udata)
{
/* modify_wq is not supported by this version of the driver */
return -EOPNOTSUPP;
}
int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
{
struct mana_ib_wq *wq = container_of(ibwq, struct mana_ib_wq, ibwq);
struct ib_device *ib_dev = ibwq->device;
struct mana_ib_dev *mdev;
mdev = container_of(ib_dev, struct mana_ib_dev, ib_dev);
mana_ib_gd_destroy_dma_region(mdev, wq->gdma_region);
ib_umem_release(wq->umem);
kfree(wq);
return 0;
}
int mana_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata)
{
/*
* There is no additional data in ind_table to be maintained by this
* driver, do nothing
*/
return 0;
}
int mana_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
/*
* There is no additional data in ind_table to be maintained by this
* driver, do nothing
*/
return 0;
}
@@ -412,6 +412,9 @@ int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
extern const struct ethtool_ops mana_ethtool_ops;
/* A CQ can be created without being associated with any EQ */
#define GDMA_CQ_NO_EQ 0xffff
struct mana_obj_spec {
u32 queue_index;
u64 gdma_region;
@@ -251,6 +251,7 @@ enum rdma_driver_id {
RDMA_DRIVER_EFA,
RDMA_DRIVER_SIW,
RDMA_DRIVER_ERDMA,
RDMA_DRIVER_MANA,
};
enum ib_uverbs_gid_type {
/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) */
/*
* Copyright (c) 2022, Microsoft Corporation. All rights reserved.
*/
#ifndef MANA_ABI_USER_H
#define MANA_ABI_USER_H
#include <linux/types.h>
#include <rdma/ib_user_ioctl_verbs.h>
/*
* Increment this value if any changes that break userspace ABI
* compatibility are made.
*/
#define MANA_IB_UVERBS_ABI_VERSION 1
struct mana_ib_create_cq {
__aligned_u64 buf_addr;
};
struct mana_ib_create_qp {
__aligned_u64 sq_buf_addr;
__u32 sq_buf_size;
__u32 port;
};
struct mana_ib_create_qp_resp {
__u32 sqid;
__u32 cqid;
__u32 tx_vp_offset;
__u32 reserved;
};
struct mana_ib_create_wq {
__aligned_u64 wq_buf_addr;
__u32 wq_buf_size;
__u32 reserved;
};
/* RX Hash function flags */
enum mana_ib_rx_hash_function_flags {
MANA_IB_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
};
struct mana_ib_create_qp_rss {
__aligned_u64 rx_hash_fields_mask;
__u8 rx_hash_function;
__u8 reserved[7];
__u32 rx_hash_key_len;
__u8 rx_hash_key[40];
__u32 port;
};
struct rss_resp_entry {
__u32 cqid;
__u32 wqid;
};
struct mana_ib_create_qp_rss_resp {
__aligned_u64 num_entries;
struct rss_resp_entry entries[64];
};
#endif
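For orientation, here is a minimal sketch (not part of this commit) of how a user-mode workload of the kind the Kconfig help mentions might reach this driver through the standard libibverbs API from rdma-core. The MANA provider library, not the application, fills in ABI structures such as mana_ib_create_cq above; the device choice, buffer size, and omitted error handling are illustrative.

/* Illustrative only; build with: gcc verbs_demo.c -libverbs */
#include <stdlib.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **dev_list = ibv_get_device_list(NULL);
	struct ibv_context *ctx;
	struct ibv_pd *pd;
	struct ibv_cq *cq;
	struct ibv_mr *mr;
	char *buf;

	if (!dev_list || !dev_list[0])
		return 1;

	ctx = ibv_open_device(dev_list[0]);		/* e.g. the "mana_0" device */
	pd = ibv_alloc_pd(ctx);				/* mana_ib_alloc_pd() in the kernel */
	cq = ibv_create_cq(ctx, 16, NULL, NULL, 0);	/* mana_ib_create_cq() */

	buf = calloc(1, 4096);
	/* Pins the pages and reaches mana_ib_reg_user_mr() */
	mr = ibv_reg_mr(pd, buf, 4096, IBV_ACCESS_LOCAL_WRITE);

	/* ... create WQs/QPs and post work requests here ... */

	ibv_dereg_mr(mr);
	ibv_destroy_cq(cq);
	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(dev_list);
	free(buf);
	return 0;
}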