Commit 8ec8015a authored by Srujana Challa, committed by Herbert Xu

crypto: octeontx2 - add support to process the crypto request

Attach LFs to the CPT VF to process crypto requests, and register
the LF interrupts.
Signed-off-by: Suheil Chandran <schandran@marvell.com>
Signed-off-by: Lukasz Bartosik <lbartosik@marvell.com>
Signed-off-by: Srujana Challa <schalla@marvell.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 19d8e8c7
@@ -4,6 +4,6 @@ obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += octeontx2-cpt.o octeontx2-cptvf.o
octeontx2-cpt-objs := otx2_cptpf_main.o otx2_cptpf_mbox.o \
otx2_cpt_mbox_common.o otx2_cptpf_ucode.o otx2_cptlf.o
octeontx2-cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o otx2_cptlf.o \
otx2_cpt_mbox_common.o
otx2_cpt_mbox_common.o otx2_cptvf_reqmgr.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
@@ -17,6 +17,9 @@
#define OTX2_CPT_MAX_VFS_NUM 128
#define OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs) \
(((blk) << 20) | ((slot) << 12) | (offs))
#define OTX2_CPT_RVU_PFFUNC(pf, func) \
((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
(((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
#define OTX2_CPT_INVALID_CRYPTO_ENG_GRP 0xFF
#define OTX2_CPT_NAME_LENGTH 64
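For orientation, the first helper above packs a block address, LF slot, and register offset into a single per-LF register address, and the second builds the 16-bit RVU PF/VF function ID carried in mailbox headers. A minimal sketch with made-up values (the RVU_PFVF_* masks and shifts come from the octeontx2 AF driver headers):

	/* Illustration only; all values are hypothetical */
	u64 reg_addr = OTX2_CPT_RVU_FUNC_ADDR_S(0xA, 2, 0x100);
	/* = (0xA << 20) | (2 << 12) | 0x100 = 0xA02100 */

	u16 pcifunc = OTX2_CPT_RVU_PFFUNC(1, 0);
	/* PF 1, function 0; with the AF driver's PF shift of 10, this is 0x400 */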
@@ -34,6 +37,7 @@ enum otx2_cpt_eng_type {
/* Take mbox id from end of CPT mbox range in AF (range 0xA00 - 0xBFF) */
#define MBOX_MSG_GET_ENG_GRP_NUM 0xBFF
#define MBOX_MSG_GET_CAPS 0xBFD
#define MBOX_MSG_GET_KVF_LIMITS 0xBFC
/*
* Message request and response to get engine group number
@@ -51,6 +55,19 @@ struct otx2_cpt_egrp_num_rsp {
u8 eng_grp_num;
};
/*
* Message request and response to get kernel crypto limits
* These messages are used only between the CPT PF and CPT VF
*/
struct otx2_cpt_kvf_limits_msg {
struct mbox_msghdr hdr;
};
struct otx2_cpt_kvf_limits_rsp {
struct mbox_msghdr hdr;
u8 kvf_limits;
};
/* CPT HW capabilities */
union otx2_cpt_eng_caps {
u64 u;
@@ -10,6 +10,22 @@
/* Completion code size and initial value */
#define OTX2_CPT_COMPLETION_CODE_SIZE 8
#define OTX2_CPT_COMPLETION_CODE_INIT OTX2_CPT_COMP_E_NOTDONE
/*
* The maximum total number of SG buffers is 100, divided equally
* between input and output.
*/
#define OTX2_CPT_MAX_SG_IN_CNT 50
#define OTX2_CPT_MAX_SG_OUT_CNT 50
/* DMA mode direct or SG */
#define OTX2_CPT_DMA_MODE_DIRECT 0
#define OTX2_CPT_DMA_MODE_SG 1
/* Context source CPTR or DPTR */
#define OTX2_CPT_FROM_CPTR 0
#define OTX2_CPT_FROM_DPTR 1
#define OTX2_CPT_MAX_REQ_SIZE 65535
union otx2_cpt_opcode {
u16 flags;
@@ -19,6 +35,13 @@ union otx2_cpt_opcode {
} s;
};
struct otx2_cptvf_request {
u32 param1;
u32 param2;
u16 dlen;
union otx2_cpt_opcode opcode;
};
/*
* CPT_INST_S software command definitions
* Words EI (0-3)
@@ -48,4 +71,126 @@ struct otx2_cpt_iq_command {
union otx2_cpt_iq_cmd_word3 cptr;
};
struct otx2_cpt_pending_entry {
void *completion_addr; /* Completion address */
void *info;
/* Kernel async request callback */
void (*callback)(int status, void *arg1, void *arg2);
struct crypto_async_request *areq; /* Async request callback arg */
u8 resume_sender; /* Notify sender to resume sending requests */
u8 busy; /* Entry status (free/busy) */
};
struct otx2_cpt_pending_queue {
struct otx2_cpt_pending_entry *head; /* Head of the queue */
u32 front; /* Process work from here */
u32 rear; /* Append new work here */
u32 pending_count; /* Pending requests count */
u32 qlen; /* Queue length */
spinlock_t lock; /* Queue lock */
};
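The pending queue is a fixed-size ring: entries are appended at rear, completions are drained from front, and pending_count tracks occupancy under the spinlock. A minimal sketch of the enqueue discipline, assuming the definitions above (the helper is hypothetical, not part of this patch):

	/* Hypothetical helper illustrating the ring discipline */
	static struct otx2_cpt_pending_entry *
	pending_entry_get(struct otx2_cpt_pending_queue *pq)
	{
		struct otx2_cpt_pending_entry *ent = NULL;

		spin_lock_bh(&pq->lock);
		if (pq->pending_count < pq->qlen) {
			ent = &pq->head[pq->rear];
			ent->busy = 1;
			pq->pending_count++;
			pq->rear = (pq->rear + 1) % pq->qlen; /* wrap around */
		}
		spin_unlock_bh(&pq->lock);

		return ent;
	}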
struct otx2_cpt_buf_ptr {
u8 *vptr;
dma_addr_t dma_addr;
u16 size;
};
union otx2_cpt_ctrl_info {
u32 flags;
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
u32 reserved_6_31:26;
u32 grp:3; /* Group bits */
u32 dma_mode:2; /* DMA mode */
u32 se_req:1; /* To SE core */
#else
u32 se_req:1; /* To SE core */
u32 dma_mode:2; /* DMA mode */
u32 grp:3; /* Group bits */
u32 reserved_6_31:26;
#endif
} s;
};
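The union lets a caller set the control word either as raw flags or field by field, and the bitfield order is flipped for big-endian builds so the in-memory layout stays the same. For example (illustrative values only):

	union otx2_cpt_ctrl_info ctrl = { .flags = 0 };

	ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG; /* gather/scatter I/O */
	ctrl.s.se_req = 1;                      /* route to an SE engine */
	ctrl.s.grp = 0;                         /* engine group; set by the driver */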
struct otx2_cpt_req_info {
/* Kernel async request callback */
void (*callback)(int status, void *arg1, void *arg2);
struct crypto_async_request *areq; /* Async request callback arg */
struct otx2_cptvf_request req;/* Request information (core specific) */
union otx2_cpt_ctrl_info ctrl;/* User control information */
struct otx2_cpt_buf_ptr in[OTX2_CPT_MAX_SG_IN_CNT];
struct otx2_cpt_buf_ptr out[OTX2_CPT_MAX_SG_OUT_CNT];
u8 *iv_out; /* IV to send back */
u16 rlen; /* Output length */
u8 in_cnt; /* Number of input buffers */
u8 out_cnt; /* Number of output buffers */
u8 req_type; /* Type of request */
u8 is_enc; /* Is a request an encryption request */
u8 is_trunc_hmac;/* Is truncated hmac used */
};
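To tie the pieces together: a submitter fills the opcode and parameters in req, points in[]/out[] at its buffers, and supplies a completion callback before handing the request to otx2_cpt_do_request() (declared below). A hedged sketch; the opcode value and callback body are placeholders, not this driver's real ones:

	static void my_callback(int status, void *arg1, void *arg2)
	{
		/* complete the crypto_async_request passed via the args */
	}

	static void fill_req(struct otx2_cpt_req_info *req, u8 *in_buf, u16 in_len)
	{
		req->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
		req->ctrl.s.se_req = 1;
		req->req.opcode.flags = 0x3303;	/* placeholder opcode */
		req->req.dlen = in_len;
		req->in[0].vptr = in_buf;
		req->in[0].size = in_len;
		req->in_cnt = 1;
		req->callback = my_callback;
	}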
struct otx2_cpt_inst_info {
struct otx2_cpt_pending_entry *pentry;
struct otx2_cpt_req_info *req;
struct pci_dev *pdev;
void *completion_addr;
u8 *out_buffer;
u8 *in_buffer;
dma_addr_t dptr_baddr;
dma_addr_t rptr_baddr;
dma_addr_t comp_baddr;
unsigned long time_in;
u32 dlen;
u32 dma_len;
u8 extra_time;
};
struct otx2_cpt_sglist_component {
__be16 len0;
__be16 len1;
__be16 len2;
__be16 len3;
__be64 ptr0;
__be64 ptr1;
__be64 ptr2;
__be64 ptr3;
};
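Each 32-byte component describes up to four buffers; lengths and pointers are stored big-endian because the CPT hardware parses the gather list directly from memory. A hypothetical helper packing DMA-mapped buffers into one component:

	/* Hypothetical: pack up to four mapped buffers into one component */
	static void sg_comp_fill(struct otx2_cpt_sglist_component *sg,
				 const struct otx2_cpt_buf_ptr *buf, int cnt)
	{
		if (cnt > 0) {
			sg->len0 = cpu_to_be16(buf[0].size);
			sg->ptr0 = cpu_to_be64(buf[0].dma_addr);
		}
		if (cnt > 1) {
			sg->len1 = cpu_to_be16(buf[1].size);
			sg->ptr1 = cpu_to_be64(buf[1].dma_addr);
		}
		/* len2/ptr2 and len3/ptr3 follow the same pattern */
	}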
static inline void otx2_cpt_info_destroy(struct pci_dev *pdev,
struct otx2_cpt_inst_info *info)
{
struct otx2_cpt_req_info *req;
int i;
if (info->dptr_baddr)
dma_unmap_single(&pdev->dev, info->dptr_baddr,
info->dma_len, DMA_BIDIRECTIONAL);
if (info->req) {
req = info->req;
for (i = 0; i < req->out_cnt; i++) {
if (req->out[i].dma_addr)
dma_unmap_single(&pdev->dev,
req->out[i].dma_addr,
req->out[i].size,
DMA_BIDIRECTIONAL);
}
for (i = 0; i < req->in_cnt; i++) {
if (req->in[i].dma_addr)
dma_unmap_single(&pdev->dev,
req->in[i].dma_addr,
req->in[i].size,
DMA_BIDIRECTIONAL);
}
}
kfree(info);
}
struct otx2_cptlf_wqe;
int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
int cpu_num);
void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe);
#endif /* __OTX2_CPT_REQMGR_H */
@@ -80,6 +80,7 @@ struct otx2_cptlf_info {
u8 slot; /* Slot number of this LF */
struct otx2_cpt_inst_queue iqueue;/* Instruction queue */
struct otx2_cpt_pending_queue pqueue; /* Pending queue */
struct otx2_cptlf_wqe *wqe; /* Tasklet work info */
};
@@ -91,6 +92,8 @@ struct otx2_cptlfs_info {
struct otx2_mbox *mbox;
u8 are_lfs_attached; /* Whether CPT LFs are attached */
u8 lfs_num; /* Number of CPT LFs */
u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */
u8 kvf_limits; /* Kernel crypto limits */
atomic_t state; /* LF's state. started/reset */
};
@@ -334,6 +337,11 @@ static inline void otx2_cpt_send_cmd(union otx2_cpt_inst_s *cptinst,
} while (!ret);
}
static inline bool otx2_cptlf_started(struct otx2_cptlfs_info *lfs)
{
return atomic_read(&lfs->state) == OTX2_CPTLF_STARTED;
}
int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_msk, int pri,
int lfs_num);
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs);
@@ -50,6 +50,7 @@ struct otx2_cptpf_dev {
u8 pf_id; /* RVU PF number */
u8 max_vfs; /* Maximum number of VFs supported by CPT */
u8 enabled_vfs; /* Number of enabled VFs */
u8 kvf_limits; /* Kernel crypto limits */
};
irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg);
@@ -392,6 +392,46 @@ static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
otx2_mbox_destroy(&cptpf->afpf_mbox);
}
static ssize_t kvf_limits_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", cptpf->kvf_limits);
}
static ssize_t kvf_limits_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
int lfs_num;
	if (kstrtoint(buf, 0, &lfs_num)) {
		dev_err(dev, "lfs count must be an integer in range [1 - %d]\n",
			num_online_cpus());
		return -EINVAL;
	}
if (lfs_num < 1 || lfs_num > num_online_cpus()) {
dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
lfs_num, num_online_cpus());
return -EINVAL;
}
cptpf->kvf_limits = lfs_num;
return count;
}
static DEVICE_ATTR_RW(kvf_limits);
static struct attribute *cptpf_attrs[] = {
&dev_attr_kvf_limits.attr,
NULL
};
static const struct attribute_group cptpf_sysfs_group = {
.attrs = cptpf_attrs,
};
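With the attribute group registered against the PF's PCI device (see the probe change below), the limit can be read and tuned from user space via the kvf_limits file in that device's sysfs directory. The store handler above rejects anything outside [1, num_online_cpus()], and a value of 0 (the default) makes each VF fall back to one LF per online CPU, as cptvf_lf_init() shows later in this patch.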
static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
{
u64 rev;
@@ -616,8 +656,13 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
if (err)
goto unregister_intr;
err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
if (err)
goto cleanup_eng_grps;
return 0;
cleanup_eng_grps:
otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
unregister_intr:
cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
@@ -635,6 +680,8 @@ static void otx2_cptpf_remove(struct pci_dev *pdev)
return;
cptpf_sriov_disable(pdev);
/* Delete sysfs entry created for kernel VF limits */
sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
/* Cleanup engine groups */
otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
/* Disable AF-PF mailbox interrupt */
@@ -86,6 +86,25 @@ static int handle_msg_get_eng_grp_num(struct otx2_cptpf_dev *cptpf,
return 0;
}
static int handle_msg_kvf_limits(struct otx2_cptpf_dev *cptpf,
struct otx2_cptvf_info *vf,
struct mbox_msghdr *req)
{
struct otx2_cpt_kvf_limits_rsp *rsp;
rsp = (struct otx2_cpt_kvf_limits_rsp *)
otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
if (!rsp)
return -ENOMEM;
rsp->hdr.id = MBOX_MSG_GET_KVF_LIMITS;
rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
rsp->hdr.pcifunc = req->pcifunc;
rsp->kvf_limits = cptpf->kvf_limits;
return 0;
}
static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
struct otx2_cptvf_info *vf,
struct mbox_msghdr *req, int size)
@@ -103,6 +122,9 @@ static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
case MBOX_MSG_GET_CAPS:
err = handle_msg_get_caps(cptpf, vf, req);
break;
case MBOX_MSG_GET_KVF_LIMITS:
err = handle_msg_kvf_limits(cptpf, vf, req);
break;
default:
err = forward_to_af(cptpf, vf, req, size);
break;
@@ -24,5 +24,6 @@ struct otx2_cptvf_dev {
irqreturn_t otx2_cptvf_pfvf_mbox_intr(int irq, void *arg);
void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work);
int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type);
int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf);
#endif /* __OTX2_CPTVF_H */
@@ -3,6 +3,7 @@
#include "otx2_cpt_common.h"
#include "otx2_cptvf.h"
#include "otx2_cptlf.h"
#include <rvu_reg.h>
#define OTX2_CPTVF_DRV_NAME "octeontx2-cptvf"
@@ -95,6 +96,201 @@ static void cptvf_pfvf_mbox_destroy(struct otx2_cptvf_dev *cptvf)
otx2_mbox_destroy(&cptvf->pfvf_mbox);
}
static void cptlf_work_handler(unsigned long data)
{
otx2_cpt_post_process((struct otx2_cptlf_wqe *) data);
}
static void cleanup_tasklet_work(struct otx2_cptlfs_info *lfs)
{
int i;
for (i = 0; i < lfs->lfs_num; i++) {
if (!lfs->lf[i].wqe)
continue;
tasklet_kill(&lfs->lf[i].wqe->work);
kfree(lfs->lf[i].wqe);
lfs->lf[i].wqe = NULL;
}
}
static int init_tasklet_work(struct otx2_cptlfs_info *lfs)
{
struct otx2_cptlf_wqe *wqe;
int i, ret = 0;
for (i = 0; i < lfs->lfs_num; i++) {
wqe = kzalloc(sizeof(struct otx2_cptlf_wqe), GFP_KERNEL);
if (!wqe) {
ret = -ENOMEM;
goto cleanup_tasklet;
}
tasklet_init(&wqe->work, cptlf_work_handler, (u64) wqe);
wqe->lfs = lfs;
wqe->lf_num = i;
lfs->lf[i].wqe = wqe;
}
return 0;
cleanup_tasklet:
cleanup_tasklet_work(lfs);
return ret;
}
static void free_pending_queues(struct otx2_cptlfs_info *lfs)
{
int i;
for (i = 0; i < lfs->lfs_num; i++) {
kfree(lfs->lf[i].pqueue.head);
lfs->lf[i].pqueue.head = NULL;
}
}
static int alloc_pending_queues(struct otx2_cptlfs_info *lfs)
{
int size, ret, i;
if (!lfs->lfs_num)
return -EINVAL;
for (i = 0; i < lfs->lfs_num; i++) {
lfs->lf[i].pqueue.qlen = OTX2_CPT_INST_QLEN_MSGS;
size = lfs->lf[i].pqueue.qlen *
sizeof(struct otx2_cpt_pending_entry);
lfs->lf[i].pqueue.head = kzalloc(size, GFP_KERNEL);
if (!lfs->lf[i].pqueue.head) {
ret = -ENOMEM;
goto error;
}
/* Initialize spin lock */
spin_lock_init(&lfs->lf[i].pqueue.lock);
}
return 0;
error:
free_pending_queues(lfs);
return ret;
}
static void lf_sw_cleanup(struct otx2_cptlfs_info *lfs)
{
cleanup_tasklet_work(lfs);
free_pending_queues(lfs);
}
static int lf_sw_init(struct otx2_cptlfs_info *lfs)
{
int ret;
ret = alloc_pending_queues(lfs);
if (ret) {
dev_err(&lfs->pdev->dev,
"Allocating pending queues failed\n");
return ret;
}
ret = init_tasklet_work(lfs);
if (ret) {
dev_err(&lfs->pdev->dev,
"Tasklet work init failed\n");
goto pending_queues_free;
}
return 0;
pending_queues_free:
free_pending_queues(lfs);
return ret;
}
static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
{
atomic_set(&lfs->state, OTX2_CPTLF_IN_RESET);
/* Remove interrupts affinity */
otx2_cptlf_free_irqs_affinity(lfs);
	/* Disable instruction queues */
otx2_cptlf_disable_iqueues(lfs);
/* Unregister LFs interrupts */
otx2_cptlf_unregister_interrupts(lfs);
/* Cleanup LFs software side */
lf_sw_cleanup(lfs);
/* Send request to detach LFs */
otx2_cpt_detach_rsrcs_msg(lfs);
}
static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
{
struct otx2_cptlfs_info *lfs = &cptvf->lfs;
struct device *dev = &cptvf->pdev->dev;
int ret, lfs_num;
u8 eng_grp_msk;
/* Get engine group number for symmetric crypto */
cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_SE_TYPES);
if (ret)
return ret;
if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
dev_err(dev, "Engine group for kernel crypto not available\n");
ret = -ENOENT;
return ret;
}
eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num;
ret = otx2_cptvf_send_kvf_limits_msg(cptvf);
if (ret)
return ret;
lfs->reg_base = cptvf->reg_base;
lfs->pdev = cptvf->pdev;
lfs->mbox = &cptvf->pfvf_mbox;
lfs_num = cptvf->lfs.kvf_limits ? cptvf->lfs.kvf_limits :
num_online_cpus();
ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
lfs_num);
if (ret)
return ret;
/* Get msix offsets for attached LFs */
ret = otx2_cpt_msix_offset_msg(lfs);
if (ret)
goto cleanup_lf;
/* Initialize LFs software side */
ret = lf_sw_init(lfs);
if (ret)
goto cleanup_lf;
/* Register LFs interrupts */
ret = otx2_cptlf_register_interrupts(lfs);
if (ret)
goto cleanup_lf_sw;
/* Set interrupts affinity */
ret = otx2_cptlf_set_irqs_affinity(lfs);
if (ret)
goto unregister_intr;
atomic_set(&lfs->state, OTX2_CPTLF_STARTED);
return 0;
unregister_intr:
otx2_cptlf_unregister_interrupts(lfs);
cleanup_lf_sw:
lf_sw_cleanup(lfs);
cleanup_lf:
otx2_cptlf_shutdown(lfs);
return ret;
}
static int otx2_cptvf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -150,8 +346,15 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
if (ret)
goto destroy_pfvf_mbox;
/* Initialize CPT LFs */
ret = cptvf_lf_init(cptvf);
if (ret)
goto unregister_interrupts;
return 0;
unregister_interrupts:
cptvf_disable_pfvf_mbox_intrs(cptvf);
destroy_pfvf_mbox:
cptvf_pfvf_mbox_destroy(cptvf);
clear_drvdata:
@@ -168,6 +371,7 @@ static void otx2_cptvf_remove(struct pci_dev *pdev)
dev_err(&pdev->dev, "Invalid CPT VF device.\n");
return;
}
cptvf_lf_shutdown(&cptvf->lfs);
/* Disable PF-VF mailbox interrupt */
cptvf_disable_pfvf_mbox_intrs(cptvf);
/* Destroy PF-VF mbox */
@@ -28,6 +28,8 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
struct mbox_msghdr *msg)
{
struct otx2_cptlfs_info *lfs = &cptvf->lfs;
struct otx2_cpt_kvf_limits_rsp *rsp_limits;
struct otx2_cpt_egrp_num_rsp *rsp_grp;
struct cpt_rd_wr_reg_msg *rsp_reg;
struct msix_offset_rsp *rsp_msix;
int i;
@@ -75,6 +77,14 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
if (!rsp_reg->is_write)
*rsp_reg->ret_val = rsp_reg->val;
break;
case MBOX_MSG_GET_ENG_GRP_NUM:
rsp_grp = (struct otx2_cpt_egrp_num_rsp *) msg;
cptvf->lfs.kcrypto_eng_grp_num = rsp_grp->eng_grp_num;
break;
case MBOX_MSG_GET_KVF_LIMITS:
rsp_limits = (struct otx2_cpt_kvf_limits_rsp *) msg;
cptvf->lfs.kvf_limits = rsp_limits->kvf_limits;
break;
default:
dev_err(&cptvf->pdev->dev, "Unsupported msg %d received.\n",
msg->id);
@@ -111,3 +121,47 @@ void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work)
}
otx2_mbox_reset(pfvf_mbox, 0);
}
int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type)
{
struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
struct pci_dev *pdev = cptvf->pdev;
struct otx2_cpt_egrp_num_msg *req;
req = (struct otx2_cpt_egrp_num_msg *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
sizeof(struct otx2_cpt_egrp_num_rsp));
if (req == NULL) {
dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
return -EFAULT;
}
req->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
req->eng_type = eng_type;
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
{
struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
struct pci_dev *pdev = cptvf->pdev;
struct mbox_msghdr *req;
int ret;
req = (struct mbox_msghdr *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
sizeof(struct otx2_cpt_kvf_limits_rsp));
if (req == NULL) {
dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
return -EFAULT;
}
req->id = MBOX_MSG_GET_KVF_LIMITS;
req->sig = OTX2_MBOX_REQ_SIG;
req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
ret = otx2_cpt_send_mbox_msg(mbox, pdev);
return ret;
}
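Note the round trip here: the VF allocates the request with room for the response, stamps the header with its own PF/VF function ID via OTX2_CPT_RVU_PFFUNC(), and sends it. The PF answers in handle_msg_kvf_limits() above, and the response is parsed back in process_pfvf_mbox_mbox_msg(), which caches kvf_limits in cptvf->lfs for cptvf_lf_init() to use when sizing the LF count.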