Commit b3d0083c authored by Pavan Chebbi, committed by Jakub Kicinski

bnxt_en: Support RSS contexts in ethtool .{get|set}_rxfh()

Support up to 32 RSS contexts per device if supported by the device.
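
For illustration only (not part of this change; eth0, the TCP port and the context id are placeholder values, and the exact flags depend on the ethtool version), the additional contexts are driven from userspace with ethtool's rxfh context operations, roughly like this:

    # create a new RSS context spread evenly over the first 4 rings;
    # ethtool reports the id assigned to the new context
    ethtool -X eth0 context new equal 4
    # steer a TCP flow to that context with an ntuple rule
    ethtool -N eth0 flow-type tcp4 dst-port 5201 context 1
    # delete the context when it is no longer needed
    ethtool -X eth0 context 1 delete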
Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Signed-off-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Link: https://lore.kernel.org/r/20240325222902.220712-11-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 77a614f7
drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -5939,7 +5939,7 @@ static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
 	req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
 }
 
-static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 			   u32 tpa_flags)
 {
 	u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
@@ -6129,6 +6129,8 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
 
 		if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
 			j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
+		else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
+			j = vnic->rss_ctx->rss_indir_tbl[i];
 		else
 			j = bp->rss_indir_tbl[i];
 		rxr = &bp->rx_ring[j];
@@ -6423,7 +6425,7 @@ static void bnxt_hwrm_vnic_free(struct bnxt *bp)
 		bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
 }
 
-static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 				unsigned int start_rx_ring_idx,
 				unsigned int nr_rings)
 {
@@ -9852,7 +9854,7 @@ int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	return rc;
 }
 
-static int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
 	int rc, i, nr_ctxs;
 
@@ -9939,15 +9941,46 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
 			  bool all)
 {
+	struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+	int i;
+
+	bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
+	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
+		if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
+			bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
+	}
 	if (!all)
 		return;
 
+	if (vnic->rss_table)
+		dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
+				  vnic->rss_table,
+				  vnic->rss_table_dma_addr);
 	kfree(rss_ctx->rss_indir_tbl);
 	list_del(&rss_ctx->list);
 	bp->num_rss_ctx--;
+	clear_bit(rss_ctx->index, bp->rss_ctx_bmap);
 	kfree(rss_ctx);
 }
 
+static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
+{
+	bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
+	struct bnxt_rss_ctx *rss_ctx, *tmp;
+
+	list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
+		struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+
+		if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
+		    bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
+		    __bnxt_setup_vnic_p5(bp, vnic)) {
+			netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
+				   rss_ctx->index);
+			bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+		}
+	}
+}
+
 struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp)
 {
 	struct bnxt_rss_ctx *rss_ctx = NULL;
@@ -11829,6 +11862,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 		bnxt_vf_reps_open(bp);
 	bnxt_ptp_init_rtc(bp, true);
 	bnxt_ptp_cfg_tstamp_filters(bp);
+	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
+		bnxt_hwrm_realloc_rss_ctx_vnic(bp);
 	bnxt_cfg_usr_fltrs(bp);
 	return 0;
 
@@ -11977,6 +12012,8 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
 	while (bnxt_drv_busy(bp))
 		msleep(20);
 
+	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
+		bnxt_clear_rss_ctxs(bp, false);
 	/* Flush rings and disable interrupts */
 	bnxt_shutdown_nic(bp, irq_re_init);
 
......
drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1270,6 +1270,7 @@ struct bnxt_rss_ctx {
 
 #define BNXT_MAX_ETH_RSS_CTX	32
 #define BNXT_RSS_CTX_BMAP_LEN	(BNXT_MAX_ETH_RSS_CTX + 1)
+#define BNXT_VNIC_ID_INVALID	0xffffffff
 
 struct bnxt_hw_rings {
 	int tx;
@@ -2714,11 +2715,16 @@ int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
 				     struct bnxt_ntuple_filter *fltr);
 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
 				      struct bnxt_ntuple_filter *fltr);
+int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+			   u32 tpa_flags);
 void bnxt_fill_ipv6_mask(__be32 mask[4]);
 int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx);
 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx);
 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+			 unsigned int start_rx_ring_idx,
+			 unsigned int nr_rings);
 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
 int bnxt_nq_rings_in_use(struct bnxt *bp);
 int bnxt_hwrm_set_coal(struct bnxt *);
@@ -2745,6 +2751,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
 int bnxt_hwrm_func_qcaps(struct bnxt *bp);
 int bnxt_hwrm_fw_set_time(struct bnxt *);
 int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
 			  bool all);
 struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp);
......
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1207,6 +1207,36 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
 	return rc;
 }
 
+static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
+							u32 index)
+{
+	struct bnxt_rss_ctx *rss_ctx, *tmp;
+
+	list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list)
+		if (rss_ctx->index == index)
+			return rss_ctx;
+	return NULL;
+}
+
+static int bnxt_alloc_rss_ctx_rss_table(struct bnxt *bp,
+					struct bnxt_rss_ctx *rss_ctx)
+{
+	int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
+	struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+
+	vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
+	vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev,
+					     vnic->rss_table_size,
+					     &vnic->rss_table_dma_addr,
+					     GFP_KERNEL);
+	if (!vnic->rss_table)
+		return -ENOMEM;
+
+	vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+	vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+	return 0;
+}
+
 static int bnxt_add_l2_cls_rule(struct bnxt *bp,
 				struct ethtool_rx_flow_spec *fs)
 {
@@ -1756,7 +1786,10 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
 static int bnxt_get_rxfh(struct net_device *dev,
 			 struct ethtool_rxfh_param *rxfh)
 {
+	u32 rss_context = rxfh->rss_context;
+	struct bnxt_rss_ctx *rss_ctx = NULL;
 	struct bnxt *bp = netdev_priv(dev);
+	u16 *indir_tbl = bp->rss_indir_tbl;
 	struct bnxt_vnic_info *vnic;
 	u32 i, tbl_size;
 
@@ -1766,10 +1799,18 @@ static int bnxt_get_rxfh(struct net_device *dev,
 		return 0;
 
 	vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
-	if (rxfh->indir && bp->rss_indir_tbl) {
+	if (rxfh->rss_context) {
+		rss_ctx = bnxt_get_rss_ctx_from_index(bp, rss_context);
+		if (!rss_ctx)
+			return -EINVAL;
+		indir_tbl = rss_ctx->rss_indir_tbl;
+		vnic = &rss_ctx->vnic;
+	}
+
+	if (rxfh->indir && indir_tbl) {
 		tbl_size = bnxt_get_rxfh_indir_size(dev);
 		for (i = 0; i < tbl_size; i++)
-			rxfh->indir[i] = bp->rss_indir_tbl[i];
+			rxfh->indir[i] = indir_tbl[i];
 	}
 
 	if (rxfh->key && vnic->rss_hash_key)
@@ -1804,6 +1845,105 @@ static void bnxt_modify_rss(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
 	}
 }
 
+static int bnxt_set_rxfh_context(struct bnxt *bp,
+				 struct ethtool_rxfh_param *rxfh,
+				 struct netlink_ext_ack *extack)
+{
+	u32 *rss_context = &rxfh->rss_context;
+	struct bnxt_rss_ctx *rss_ctx;
+	struct bnxt_vnic_info *vnic;
+	bool modify = false;
+	int bit_id;
+	int rc;
+
+	if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) {
+		NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported");
+		return -EOPNOTSUPP;
+	}
+
+	if (*rss_context != ETH_RXFH_CONTEXT_ALLOC) {
+		rss_ctx = bnxt_get_rss_ctx_from_index(bp, *rss_context);
+		if (!rss_ctx) {
+			NL_SET_ERR_MSG_FMT_MOD(extack, "RSS context %u not found",
+					       *rss_context);
+			return -EINVAL;
+		}
+		if (*rss_context && rxfh->rss_delete) {
+			bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+			return 0;
+		}
+		modify = true;
+		vnic = &rss_ctx->vnic;
+		goto modify_context;
+	}
+
+	if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) {
+		NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u",
+				       BNXT_MAX_ETH_RSS_CTX);
+		return -EINVAL;
+	}
+
+	if (!bnxt_rfs_capable(bp, true)) {
+		NL_SET_ERR_MSG_MOD(extack, "Out of hardware resources");
+		return -ENOMEM;
+	}
+
+	rss_ctx = bnxt_alloc_rss_ctx(bp);
+	if (!rss_ctx)
+		return -ENOMEM;
+
+	vnic = &rss_ctx->vnic;
+	vnic->flags |= BNXT_VNIC_RSSCTX_FLAG;
+	vnic->vnic_id = BNXT_VNIC_ID_INVALID;
+	rc = bnxt_alloc_rss_ctx_rss_table(bp, rss_ctx);
+	if (rc)
+		goto out;
+
+	rc = bnxt_alloc_rss_indir_tbl(bp, rss_ctx);
+	if (rc)
+		goto out;
+
+	bnxt_set_dflt_rss_indir_tbl(bp, rss_ctx);
+	memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE);
+
+	rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings);
+	if (rc) {
+		NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC");
+		goto out;
+	}
+
+	rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA);
+	if (rc) {
+		NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
+		goto out;
+	}
+modify_context:
+	bnxt_modify_rss(bp, rss_ctx, rxfh);
+
+	if (modify)
+		return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
+
+	rc = __bnxt_setup_vnic_p5(bp, vnic);
+	if (rc) {
+		NL_SET_ERR_MSG_MOD(extack, "Unable to setup VNIC");
+		goto out;
+	}
+
+	bit_id = bitmap_find_free_region(bp->rss_ctx_bmap,
+					 BNXT_RSS_CTX_BMAP_LEN, 0);
+	if (bit_id < 0) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	rss_ctx->index = (u16)bit_id;
+	*rss_context = rss_ctx->index;
+
+	return 0;
+out:
+	bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+	return rc;
+}
+
 static int bnxt_set_rxfh(struct net_device *dev,
 			 struct ethtool_rxfh_param *rxfh,
 			 struct netlink_ext_ack *extack)
@@ -1814,6 +1954,9 @@ static int bnxt_set_rxfh(struct net_device *dev,
 	if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
 		return -EOPNOTSUPP;
 
+	if (rxfh->rss_context)
+		return bnxt_set_rxfh_context(bp, rxfh, extack);
+
 	bnxt_modify_rss(bp, NULL, rxfh);
 	bnxt_clear_usr_fltrs(bp, false);
 
@@ -5087,6 +5230,7 @@ void bnxt_ethtool_free(struct bnxt *bp)
 
 const struct ethtool_ops bnxt_ethtool_ops = {
 	.cap_link_lanes_supported	= 1,
+	.cap_rss_ctx_supported		= 1,
 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
 				     ETHTOOL_COALESCE_MAX_FRAMES |
 				     ETHTOOL_COALESCE_USECS_IRQ |
......