Commit 73b24e7c authored by Jakub Kicinski

eth: bnxt: fix backward compatibility with older devices

A recent FW interface update bumped the size of struct hwrm_func_cfg_input
above 128B, which is the maximum request size some older devices support.

Probe on Stratus (BCM957452) with FW 20.8.3.11 fails with:

   bnxt_en ...: Unable to reserve tx rings
   bnxt_en ...: 2nd rings reservation failed.
   bnxt_en ...: Not enough rings available.

Once probe is fixed, other errors pop up:

   bnxt_en ...: Failed to set async event completion ring.

This is because __hwrm_send() rejects requests larger than
bp->hwrm_max_ext_req_len with -E2BIG. Since the driver doesn't
actually access any of the new fields yet, trim the length.
It should be safe.
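
Concretely, the fix adds a small helper (see the bnxt_hwrm.h hunk below)
and converts the HWRM_FUNC_CFG call sites to it. A converted call site
looks roughly like this sketch (declarations and error handling
abbreviated; hwrm_req_send() is the driver's usual send helper):

	struct hwrm_func_cfg_input *req;
	int rc;

	/* Clamp the request length to what the firmware accepts (at most
	 * 128B on older devices) instead of always sending sizeof(*req).
	 */
	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);	/* configure the PF itself */
	/* ... set only fields within the first 128 bytes ... */
	rc = hwrm_req_send(bp, req);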

A similar workaround exists for backing_store_cfg_input, although
that one mins() to a constant of 256, not the 128 we'll effectively
use here. Michael explains: "the backing store cfg command is
supported by relatively newer firmware that will accept 256 bytes
at least."

To make debugging easier in the future, add a warning
for oversized requests.

Fixes: 754fbf60 ("bnxt_en: Update firmware interface to 1.10.2.171")
Reviewed-by: Michael Chan <michael.chan@broadcom.com>
Link: https://lore.kernel.org/r/20231016171640.1481493-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 99e79b67
@@ -5861,7 +5861,7 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
 	if (BNXT_PF(bp)) {
 		struct hwrm_func_cfg_input *req;
 
-		rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+		rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 		if (rc)
 			return rc;
@@ -6272,7 +6272,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 	struct hwrm_func_cfg_input *req;
 	u32 enables = 0;
 
-	if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
+	if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
 		return NULL;
 
 	req->fid = cpu_to_le16(0xffff);
@@ -8617,7 +8617,7 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
 	else
 		return -EINVAL;
 
-	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 	if (rc)
 		return rc;
@@ -8635,7 +8635,7 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
 	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
 		return 0;
 
-	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 	if (rc)
 		return rc;
@@ -62,7 +62,7 @@ static int bnxt_hwrm_remote_dev_reset_set(struct bnxt *bp, bool remote_reset)
 	if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
 		return -EOPNOTSUPP;
 
-	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 	if (rc)
 		return rc;
@@ -485,6 +485,8 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
 	if (msg_len > BNXT_HWRM_MAX_REQ_LEN &&
 	    msg_len > bp->hwrm_max_ext_req_len) {
+		netdev_warn(bp->dev, "oversized hwrm request, req_type 0x%x",
+			    req_type);
 		rc = -E2BIG;
 		goto exit;
 	}
@@ -137,4 +137,18 @@ int hwrm_req_send_silent(struct bnxt *bp, void *req);
 int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len);
 void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t flags);
 void *hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma);
+
+/* Older devices can only support req length of 128.
+ * HWRM_FUNC_CFG requests which don't need fields starting at
+ * num_quic_tx_key_ctxs can use this helper to avoid getting -E2BIG.
+ */
+static inline int
+bnxt_hwrm_func_cfg_short_req_init(struct bnxt *bp,
+				  struct hwrm_func_cfg_input **req)
+{
+	u32 req_len;
+
+	req_len = min_t(u32, sizeof(**req), bp->hwrm_max_ext_req_len);
+	return __hwrm_req_init(bp, (void **)req, HWRM_FUNC_CFG, req_len);
+}
 #endif
@@ -95,7 +95,7 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
 	/*TODO: if the driver supports VLAN filter on guest VLAN,
 	 * the spoof check should also include vlan anti-spoofing
 	 */
-	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 	if (!rc) {
 		req->fid = cpu_to_le16(vf->fw_fid);
 		req->flags = cpu_to_le32(func_flags);
@@ -146,7 +146,7 @@ static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
 	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
 		return 0;
 
-	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 	if (rc)
 		return rc;
@@ -232,7 +232,7 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
 	}
 	vf = &bp->pf.vf[vf_id];
 
-	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 	if (rc)
 		return rc;
@@ -274,7 +274,7 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
 	if (vlan_tag == vf->vlan)
 		return 0;
 
-	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 	if (!rc) {
 		req->fid = cpu_to_le16(vf->fw_fid);
 		req->dflt_vlan = cpu_to_le16(vlan_tag);
@@ -314,7 +314,7 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
 	}
 	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
 		return 0;
 
-	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 	if (!rc) {
 		req->fid = cpu_to_le16(vf->fw_fid);
 		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
@@ -491,7 +491,7 @@ static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
 	struct bnxt_vf_info *vf;
 	int rc;
 
-	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 	if (rc)
 		return rc;
@@ -653,7 +653,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 	u32 mtu, i;
 	int rc;
 
-	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 	if (rc)
 		return rc;