Commit 30343221 authored by Ajit Khaparde

bnxt_en: Remove runtime interrupt vector allocation

Modified the bnxt_en code to create and pre-configure RDMA devices
with the right MSI-X vector count for the RoCE driver to use.
This aligns the RoCE driver with the auxiliary device model, which
simply binds the driver without getting into PCI-related handling.
All PCI-related logic will now live in the bnxt_en driver.
Suggested-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
parent a43c26fa
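As context for the diff below, here is a minimal sketch (not part of this commit; the helper name is hypothetical) of the new contract: bnxt_en now owns MSI-X allocation and pre-fills the vector table, and the RoCE/ULP driver only reads it instead of requesting vectors at probe time.

/* Hypothetical ULP-side helper, a sketch only: it assumes rdev->en_dev
 * points at the bnxt_en_dev created by the L2 driver, whose msix_entries[]
 * and ulp_tbl->msix_requested are filled by bnxt_en (see
 * bnxt_fill_msix_vecs() / bnxt_register_dev() in the diff below).
 */
static int bnxt_re_pick_up_msix(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;

	/* bnxt_en sized the table at aux device init time. */
	if (!en_dev->ulp_tbl->msix_requested)
		return -EINVAL;

	rdev->num_msix = en_dev->ulp_tbl->msix_requested;

	/* Each en_dev->msix_entries[] slot already carries vector, ring_idx
	 * and db_offset, so no runtime bnxt_req_msix_vecs() call is needed.
	 */
	return 0;
}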
@@ -129,7 +129,6 @@ struct bnxt_re_dev {
 	unsigned int version, major, minor;
 	struct bnxt_qplib_chip_ctx *chip_ctx;
 	struct bnxt_en_dev *en_dev;
-	struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX];
 	int num_msix;
 	int id;
...
@@ -262,7 +262,7 @@ static void bnxt_re_stop_irq(void *handle)
 static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
 {
 	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
-	struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
+	struct bnxt_msix_entry *msix_ent = rdev->en_dev->msix_entries;
 	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
 	struct bnxt_qplib_nq *nq;
 	int indx, rc;
@@ -281,7 +281,7 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
 	 * in device sctructure.
 	 */
 	for (indx = 0; indx < rdev->num_msix; indx++)
-		rdev->msix_entries[indx].vector = ent[indx].vector;
+		rdev->en_dev->msix_entries[indx].vector = ent[indx].vector;

 	bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
 				  false);
@@ -315,32 +315,6 @@ static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
 	return rc;
 }

-static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
-{
-	int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
-	struct bnxt_en_dev *en_dev;
-
-	en_dev = rdev->en_dev;
-
-	num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());
-
-	num_msix_got = bnxt_req_msix_vecs(en_dev,
-					  rdev->msix_entries,
-					  num_msix_want);
-	if (num_msix_got < BNXT_RE_MIN_MSIX) {
-		rc = -EINVAL;
-		goto done;
-	}
-	if (num_msix_got != num_msix_want) {
-		ibdev_warn(&rdev->ibdev,
-			   "Requested %d MSI-X vectors, got %d\n",
-			   num_msix_want, num_msix_got);
-	}
-	rdev->num_msix = num_msix_got;
-done:
-	return rc;
-}
-
 static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
 				  u16 opcd, u16 crid, u16 trid)
 {
@@ -785,7 +759,7 @@ static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
 	return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
 	       (rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
 				  BNXT_RE_GEN_P5_PF_NQ_DB) :
-	       rdev->msix_entries[indx].db_offset;
+	       rdev->en_dev->msix_entries[indx].db_offset;
 }

 static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
@@ -810,7 +784,7 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
 	for (i = 1; i < rdev->num_msix ; i++) {
 		db_offt = bnxt_re_get_nqdb_offset(rdev, i);
 		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
-					  i - 1, rdev->msix_entries[i].vector,
+					  i - 1, rdev->en_dev->msix_entries[i].vector,
 					  db_offt, &bnxt_re_cqn_handler,
 					  &bnxt_re_srqn_handler);
 		if (rc) {
@@ -897,7 +871,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
 		rattr.type = type;
 		rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
 		rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
-		rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
+		rattr.lrid = rdev->en_dev->msix_entries[i + 1].ring_idx;
 		rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
 		if (rc) {
 			ibdev_err(&rdev->ibdev,
@@ -1217,7 +1191,7 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
 		bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
 	}
 	if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags))
-		bnxt_free_msix_vecs(rdev->en_dev);
+		rdev->num_msix = 0;

 	bnxt_re_destroy_chip_ctx(rdev);
 	if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
@@ -1262,13 +1236,15 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
 	/* Check whether VF or PF */
 	bnxt_re_get_sriov_func_type(rdev);

-	rc = bnxt_re_request_msix(rdev);
-	if (rc) {
+	if (!rdev->en_dev->ulp_tbl->msix_requested) {
 		ibdev_err(&rdev->ibdev,
 			  "Failed to get MSI-X vectors: %#x\n", rc);
 		rc = -EINVAL;
 		goto fail;
 	}
+	ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
+		  rdev->en_dev->ulp_tbl->msix_requested);
+	rdev->num_msix = rdev->en_dev->ulp_tbl->msix_requested;
 	set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);

 	bnxt_re_query_hwrm_intf_version(rdev);
@@ -1292,14 +1268,14 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
 	rattr.type = type;
 	rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
 	rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
-	rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
+	rattr.lrid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
 	rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
 	if (rc) {
 		ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
 		goto free_rcfw;
 	}
 	db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
-	vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
+	vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector;
 	rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
 					    vid, db_offt, rdev->is_virtfn,
 					    &bnxt_re_aeq_handler);
...
@@ -28,6 +28,30 @@
 static DEFINE_IDA(bnxt_aux_dev_ids);

+static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
+{
+	struct bnxt_en_dev *edev = bp->edev;
+	int num_msix, idx, i;
+
+	if (!edev->ulp_tbl->msix_requested) {
+		netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n");
+		return;
+	}
+	num_msix = edev->ulp_tbl->msix_requested;
+	idx = edev->ulp_tbl->msix_base;
+	for (i = 0; i < num_msix; i++) {
+		ent[i].vector = bp->irq_tbl[idx + i].vector;
+		ent[i].ring_idx = idx + i;
+		if (bp->flags & BNXT_FLAG_CHIP_P5) {
+			ent[i].db_offset = DB_PF_OFFSET_P5;
+			if (BNXT_VF(bp))
+				ent[i].db_offset = DB_VF_OFFSET_P5;
+		} else {
+			ent[i].db_offset = (idx + i) * 0x80;
+		}
+	}
+}
+
 int bnxt_register_dev(struct bnxt_en_dev *edev,
 		      struct bnxt_ulp_ops *ulp_ops,
 		      void *handle)
@@ -42,17 +66,18 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
 	    bp->cp_nr_rings == max_stat_ctxs)
 		return -ENOMEM;

-	ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
+	ulp = edev->ulp_tbl;
 	if (!ulp)
 		return -ENOMEM;

-	edev->ulp_tbl = ulp;
 	ulp->handle = handle;
 	rcu_assign_pointer(ulp->ulp_ops, ulp_ops);

 	if (test_bit(BNXT_STATE_OPEN, &bp->state))
 		bnxt_hwrm_vnic_cfg(bp, 0);

+	bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
+	edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
 	return 0;
 }
 EXPORT_SYMBOL(bnxt_register_dev);
@@ -66,7 +91,7 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
 	ulp = edev->ulp_tbl;
 	if (ulp->msix_requested)
-		bnxt_free_msix_vecs(edev);
+		edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;

 	if (ulp->max_async_event_id)
 		bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
@@ -79,125 +104,17 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
 		msleep(100);
 		i++;
 	}
-	kfree(ulp);
-	edev->ulp_tbl = NULL;
 	return;
 }
 EXPORT_SYMBOL(bnxt_unregister_dev);

-static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
-{
-	struct bnxt_en_dev *edev = bp->edev;
-	int num_msix, idx, i;
-
-	num_msix = edev->ulp_tbl->msix_requested;
-	idx = edev->ulp_tbl->msix_base;
-	for (i = 0; i < num_msix; i++) {
-		ent[i].vector = bp->irq_tbl[idx + i].vector;
-		ent[i].ring_idx = idx + i;
-		if (bp->flags & BNXT_FLAG_CHIP_P5) {
-			ent[i].db_offset = DB_PF_OFFSET_P5;
-			if (BNXT_VF(bp))
-				ent[i].db_offset = DB_VF_OFFSET_P5;
-		} else {
-			ent[i].db_offset = (idx + i) * 0x80;
-		}
-	}
-}
-
-int bnxt_req_msix_vecs(struct bnxt_en_dev *edev,
-		       struct bnxt_msix_entry *ent,
-		       int num_msix)
-{
-	struct net_device *dev = edev->net;
-	struct bnxt *bp = netdev_priv(dev);
-	struct bnxt_hw_resc *hw_resc;
-	int max_idx, max_cp_rings;
-	int avail_msix, idx;
-	int total_vecs;
-	int rc = 0;
-
-	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
-		return -ENODEV;
-
-	if (edev->ulp_tbl->msix_requested)
-		return -EAGAIN;
-
-	max_cp_rings = bnxt_get_max_func_cp_rings(bp);
-	avail_msix = bnxt_get_avail_msix(bp, num_msix);
-	if (!avail_msix)
-		return -ENOMEM;
-	if (avail_msix > num_msix)
-		avail_msix = num_msix;
-
-	if (BNXT_NEW_RM(bp)) {
-		idx = bp->cp_nr_rings;
-	} else {
-		max_idx = min_t(int, bp->total_irqs, max_cp_rings);
-		idx = max_idx - avail_msix;
-	}
-	edev->ulp_tbl->msix_base = idx;
-	edev->ulp_tbl->msix_requested = avail_msix;
-	hw_resc = &bp->hw_resc;
-	total_vecs = idx + avail_msix;
-	rtnl_lock();
-	if (bp->total_irqs < total_vecs ||
-	    (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) {
-		if (netif_running(dev)) {
-			bnxt_close_nic(bp, true, false);
-			rc = bnxt_open_nic(bp, true, false);
-		} else {
-			rc = bnxt_reserve_rings(bp, true);
-		}
-	}
-	rtnl_unlock();
-	if (rc) {
-		edev->ulp_tbl->msix_requested = 0;
-		return -EAGAIN;
-	}
-
-	if (BNXT_NEW_RM(bp)) {
-		int resv_msix;
-
-		resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
-		avail_msix = min_t(int, resv_msix, avail_msix);
-		edev->ulp_tbl->msix_requested = avail_msix;
-	}
-	bnxt_fill_msix_vecs(bp, ent);
-	edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
-	return avail_msix;
-}
-EXPORT_SYMBOL(bnxt_req_msix_vecs);
-
-void bnxt_free_msix_vecs(struct bnxt_en_dev *edev)
-{
-	struct net_device *dev = edev->net;
-	struct bnxt *bp = netdev_priv(dev);
-
-	if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
-		return;
-
-	edev->ulp_tbl->msix_requested = 0;
-	edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
-	rtnl_lock();
-	if (netif_running(dev) && !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) {
-		bnxt_close_nic(bp, true, false);
-		bnxt_open_nic(bp, true, false);
-	}
-	rtnl_unlock();
-	return;
-}
-EXPORT_SYMBOL(bnxt_free_msix_vecs);
-
 int bnxt_get_ulp_msix_num(struct bnxt *bp)
 {
-	if (bnxt_ulp_registered(bp->edev)) {
-		struct bnxt_en_dev *edev = bp->edev;
-
-		return edev->ulp_tbl->msix_requested;
-	}
-	return 0;
+	u32 roce_msix = BNXT_VF(bp) ?
+			BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
+
+	return ((bp->flags & BNXT_FLAG_ROCE_CAP) ?
+		min_t(u32, roce_msix, num_online_cpus()) : 0);
 }

 int bnxt_get_ulp_msix_base(struct bnxt *bp)
@@ -402,6 +319,7 @@ static void bnxt_aux_dev_release(struct device *dev)
 		container_of(dev, struct bnxt_aux_priv, aux_dev.dev);

 	ida_free(&bnxt_aux_dev_ids, aux_priv->id);
+	kfree(aux_priv->edev->ulp_tbl);
 	kfree(aux_priv->edev);
 	kfree(aux_priv);
 }
@@ -424,6 +342,8 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
 	edev->hw_ring_stats_size = bp->hw_ring_stats_size;
 	edev->pf_port_id = bp->pf.port_id;
 	edev->en_state = bp->state;
+
+	edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
 }

 void bnxt_rdma_aux_device_init(struct bnxt *bp)
void bnxt_rdma_aux_device_init(struct bnxt *bp) void bnxt_rdma_aux_device_init(struct bnxt *bp)
...@@ -431,6 +351,7 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp) ...@@ -431,6 +351,7 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
struct auxiliary_device *aux_dev; struct auxiliary_device *aux_dev;
struct bnxt_aux_priv *aux_priv; struct bnxt_aux_priv *aux_priv;
struct bnxt_en_dev *edev; struct bnxt_en_dev *edev;
struct bnxt_ulp *ulp;
int rc; int rc;
if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
@@ -470,6 +391,11 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
 	if (!edev)
 		goto aux_dev_uninit;

+	ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
+	if (!ulp)
+		goto aux_dev_uninit;
+	edev->ulp_tbl = ulp;
+
 	aux_priv->edev = edev;
 	bp->edev = edev;
 	bnxt_set_edev_info(edev, bp);
...
@@ -15,6 +15,8 @@
 #define BNXT_MIN_ROCE_CP_RINGS	2
 #define BNXT_MIN_ROCE_STAT_CTXS	1
+#define BNXT_MAX_ROCE_MSIX	9
+#define BNXT_MAX_VF_ROCE_MSIX	2

 struct hwrm_async_event_cmpl;
 struct bnxt;
@@ -51,6 +53,7 @@ struct bnxt_ulp {
 struct bnxt_en_dev {
 	struct net_device *net;
 	struct pci_dev *pdev;
+	struct bnxt_msix_entry msix_entries[BNXT_MAX_ROCE_MSIX];
 	u32 flags;
 #define BNXT_EN_FLAG_ROCEV1_CAP		0x1
 #define BNXT_EN_FLAG_ROCEV2_CAP		0x2
@@ -101,9 +104,6 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp);
 int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
 		      void *handle);
 void bnxt_unregister_dev(struct bnxt_en_dev *edev);
-int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, struct bnxt_msix_entry *ent,
-		       int num_msix);
-void bnxt_free_msix_vecs(struct bnxt_en_dev *edev);
 int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg);
 int bnxt_register_async_events(struct bnxt_en_dev *edev,
 			       unsigned long *events_bmap, u16 max_id);
...