Commit 29796143 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2022-09-06 (ice)

This series contains updates to ice driver only.

Tony reduces device MSI-X request/usage when entire request can't be fulfilled.

Michal adds check for reset when waiting for PTP offsets.

Paul refactors firmware version checks to use a common helper.

Christophe Jaillet changes a couple of local memory allocations to not
use the devm variant.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 53fc01a0 04cbaa6c
...@@ -5286,26 +5286,41 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, ...@@ -5286,26 +5286,41 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
} }
/** /**
* ice_fw_supports_link_override * ice_is_fw_api_min_ver
* @hw: pointer to the hardware structure * @hw: pointer to the hardware structure
* @maj: major version
* @min: minor version
* @patch: patch version
* *
* Checks if the firmware supports link override * Checks if the firmware API is minimum version
*/ */
bool ice_fw_supports_link_override(struct ice_hw *hw) static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{ {
if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) { if (hw->api_maj_ver == maj) {
if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN) if (hw->api_min_ver > min)
return true; return true;
if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN && if (hw->api_min_ver == min && hw->api_patch >= patch)
hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
return true; return true;
} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) { } else if (hw->api_maj_ver > maj) {
return true; return true;
} }
return false; return false;
} }
/**
 * ice_fw_supports_link_override - check if FW supports link override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override by verifying the FW API is
 * at least the link-override minimum version.
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
				     ICE_FW_API_LINK_OVERRIDE_MIN,
				     ICE_FW_API_LINK_OVERRIDE_PATCH);
}
/** /**
* ice_get_link_default_override * ice_get_link_default_override
* @ldo: pointer to the link default override struct * @ldo: pointer to the link default override struct
...@@ -5436,16 +5451,9 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) ...@@ -5436,16 +5451,9 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
if (hw->mac_type != ICE_MAC_E810) if (hw->mac_type != ICE_MAC_E810)
return false; return false;
if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) { return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN) ICE_FW_API_LLDP_FLTR_MIN,
return true; ICE_FW_API_LLDP_FLTR_PATCH);
if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
return true;
} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
return true;
}
return false;
} }
/** /**
...@@ -5482,14 +5490,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) ...@@ -5482,14 +5490,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
*/ */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw) bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{ {
if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) { return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN) ICE_FW_API_REPORT_DFLT_CFG_MIN,
return true; ICE_FW_API_REPORT_DFLT_CFG_PATCH);
if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
return true;
} else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
return true;
}
return false;
} }
...@@ -3921,88 +3921,135 @@ static int ice_init_pf(struct ice_pf *pf) ...@@ -3921,88 +3921,135 @@ static int ice_init_pf(struct ice_pf *pf)
return 0; return 0;
} }
/**
 * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
 * @pf: board private structure
 * @v_remain: number of remaining MSI-X vectors to be distributed
 *
 * Reduce the usage of MSI-X vectors when the entire request cannot be
 * fulfilled. pf->num_lan_msix and pf->num_rdma_msix are set based on the
 * number of remaining vectors; RDMA is disabled entirely if the remainder
 * cannot cover both the LAN and RDMA minimums.
 */
static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
{
	int v_rdma;

	if (!ice_is_rdma_ena(pf)) {
		/* no RDMA: everything that remains goes to LAN */
		pf->num_lan_msix = v_remain;
		return;
	}

	/* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
	v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;

	if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
		dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
		clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);

		pf->num_rdma_msix = 0;
		pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
	} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
		   (v_remain - v_rdma < v_rdma)) {
		/* Support minimum RDMA and give remaining vectors to LAN MSIX
		 */
		pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
		pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
	} else {
		/* Split remaining MSIX with RDMA after accounting for AEQ MSIX
		 */
		pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
				    ICE_RDMA_NUM_AEQ_MSIX;
		pf->num_lan_msix = v_remain - pf->num_rdma_msix;
	}
}
/** /**
* ice_ena_msix_range - Request a range of MSIX vectors from the OS * ice_ena_msix_range - Request a range of MSIX vectors from the OS
* @pf: board private structure * @pf: board private structure
* *
* compute the number of MSIX vectors required (v_budget) and request from * Compute the number of MSIX vectors wanted and request from the OS. Adjust
* the OS. Return the number of vectors reserved or negative on failure * device usage if there are not enough vectors. Return the number of vectors
* reserved or negative on failure.
*/ */
static int ice_ena_msix_range(struct ice_pf *pf) static int ice_ena_msix_range(struct ice_pf *pf)
{ {
int num_cpus, v_left, v_actual, v_other, v_budget = 0; int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
struct device *dev = ice_pf_to_dev(pf); struct device *dev = ice_pf_to_dev(pf);
int needed, err, i; int err, i;
v_left = pf->hw.func_caps.common_cap.num_msix_vectors; hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
num_cpus = num_online_cpus(); num_cpus = num_online_cpus();
/* reserve for LAN miscellaneous handler */ /* LAN miscellaneous handler */
needed = ICE_MIN_LAN_OICR_MSIX; v_other = ICE_MIN_LAN_OICR_MSIX;
if (v_left < needed)
goto no_hw_vecs_left_err;
v_budget += needed;
v_left -= needed;
/* reserve for flow director */ /* Flow Director */
if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
needed = ICE_FDIR_MSIX; v_other += ICE_FDIR_MSIX;
if (v_left < needed)
goto no_hw_vecs_left_err; /* switchdev */
v_budget += needed; v_other += ICE_ESWITCH_MSIX;
v_left -= needed;
} v_wanted = v_other;
/* reserve for switchdev */ /* LAN traffic */
needed = ICE_ESWITCH_MSIX; pf->num_lan_msix = num_cpus;
if (v_left < needed) v_wanted += pf->num_lan_msix;
goto no_hw_vecs_left_err;
v_budget += needed; /* RDMA auxiliary driver */
v_left -= needed;
/* total used for non-traffic vectors */
v_other = v_budget;
/* reserve vectors for LAN traffic */
needed = num_cpus;
if (v_left < needed)
goto no_hw_vecs_left_err;
pf->num_lan_msix = needed;
v_budget += needed;
v_left -= needed;
/* reserve vectors for RDMA auxiliary driver */
if (ice_is_rdma_ena(pf)) { if (ice_is_rdma_ena(pf)) {
needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
if (v_left < needed) v_wanted += pf->num_rdma_msix;
goto no_hw_vecs_left_err; }
pf->num_rdma_msix = needed;
v_budget += needed; if (v_wanted > hw_num_msix) {
v_left -= needed; int v_remain;
dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
v_wanted, hw_num_msix);
if (hw_num_msix < ICE_MIN_MSIX) {
err = -ERANGE;
goto exit_err;
}
v_remain = hw_num_msix - v_other;
if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
v_remain = ICE_MIN_LAN_TXRX_MSIX;
}
ice_reduce_msix_usage(pf, v_remain);
v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
pf->num_lan_msix);
if (ice_is_rdma_ena(pf))
dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
pf->num_rdma_msix);
} }
pf->msix_entries = devm_kcalloc(dev, v_budget, pf->msix_entries = devm_kcalloc(dev, v_wanted,
sizeof(*pf->msix_entries), GFP_KERNEL); sizeof(*pf->msix_entries), GFP_KERNEL);
if (!pf->msix_entries) { if (!pf->msix_entries) {
err = -ENOMEM; err = -ENOMEM;
goto exit_err; goto exit_err;
} }
for (i = 0; i < v_budget; i++) for (i = 0; i < v_wanted; i++)
pf->msix_entries[i].entry = i; pf->msix_entries[i].entry = i;
/* actually reserve the vectors */ /* actually reserve the vectors */
v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries, v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
ICE_MIN_MSIX, v_budget); ICE_MIN_MSIX, v_wanted);
if (v_actual < 0) { if (v_actual < 0) {
dev_err(dev, "unable to reserve MSI-X vectors\n"); dev_err(dev, "unable to reserve MSI-X vectors\n");
err = v_actual; err = v_actual;
goto msix_err; goto msix_err;
} }
if (v_actual < v_budget) { if (v_actual < v_wanted) {
dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual); v_wanted, v_actual);
if (v_actual < ICE_MIN_MSIX) { if (v_actual < ICE_MIN_MSIX) {
/* error if we can't get minimum vectors */ /* error if we can't get minimum vectors */
...@@ -4011,38 +4058,11 @@ static int ice_ena_msix_range(struct ice_pf *pf) ...@@ -4011,38 +4058,11 @@ static int ice_ena_msix_range(struct ice_pf *pf)
goto msix_err; goto msix_err;
} else { } else {
int v_remain = v_actual - v_other; int v_remain = v_actual - v_other;
int v_rdma = 0, v_min_rdma = 0;
if (ice_is_rdma_ena(pf)) {
/* Need at least 1 interrupt in addition to
* AEQ MSIX
*/
v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
v_min_rdma = ICE_MIN_RDMA_MSIX;
}
if (v_actual == ICE_MIN_MSIX || if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) { v_remain = ICE_MIN_LAN_TXRX_MSIX;
dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
pf->num_rdma_msix = 0; ice_reduce_msix_usage(pf, v_remain);
pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
(v_remain - v_rdma < v_rdma)) {
/* Support minimum RDMA and give remaining
* vectors to LAN MSIX
*/
pf->num_rdma_msix = v_min_rdma;
pf->num_lan_msix = v_remain - v_min_rdma;
} else {
/* Split remaining MSIX with RDMA after
* accounting for AEQ MSIX
*/
pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
ICE_RDMA_NUM_AEQ_MSIX;
pf->num_lan_msix = v_remain - pf->num_rdma_msix;
}
dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
pf->num_lan_msix); pf->num_lan_msix);
...@@ -4057,12 +4077,7 @@ static int ice_ena_msix_range(struct ice_pf *pf) ...@@ -4057,12 +4077,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
msix_err: msix_err:
devm_kfree(dev, pf->msix_entries); devm_kfree(dev, pf->msix_entries);
goto exit_err;
no_hw_vecs_left_err:
dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
needed, v_left);
err = -ERANGE;
exit_err: exit_err:
pf->num_rdma_msix = 0; pf->num_rdma_msix = 0;
pf->num_lan_msix = 0; pf->num_lan_msix = 0;
......
...@@ -1242,6 +1242,9 @@ static void ice_ptp_wait_for_offset_valid(struct kthread_work *work) ...@@ -1242,6 +1242,9 @@ static void ice_ptp_wait_for_offset_valid(struct kthread_work *work)
hw = &pf->hw; hw = &pf->hw;
dev = ice_pf_to_dev(pf); dev = ice_pf_to_dev(pf);
if (ice_is_reset_in_progress(pf->state))
return;
if (ice_ptp_check_offset_valid(port)) { if (ice_ptp_check_offset_valid(port)) {
/* Offsets not ready yet, try again later */ /* Offsets not ready yet, try again later */
kthread_queue_delayed_work(pf->ptp.kworker, kthread_queue_delayed_work(pf->ptp.kworker,
......
...@@ -1212,7 +1212,7 @@ int ice_sched_init_port(struct ice_port_info *pi) ...@@ -1212,7 +1212,7 @@ int ice_sched_init_port(struct ice_port_info *pi)
hw = pi->hw; hw = pi->hw;
/* Query the Default Topology from FW */ /* Query the Default Topology from FW */
buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!buf) if (!buf)
return -ENOMEM; return -ENOMEM;
...@@ -1290,7 +1290,7 @@ int ice_sched_init_port(struct ice_port_info *pi) ...@@ -1290,7 +1290,7 @@ int ice_sched_init_port(struct ice_port_info *pi)
pi->root = NULL; pi->root = NULL;
} }
devm_kfree(ice_hw_to_dev(hw), buf); kfree(buf);
return status; return status;
} }
......
...@@ -2274,9 +2274,7 @@ int ice_get_initial_sw_cfg(struct ice_hw *hw) ...@@ -2274,9 +2274,7 @@ int ice_get_initial_sw_cfg(struct ice_hw *hw)
int status; int status;
u16 i; u16 i;
rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN, rbuf = kzalloc(ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL);
GFP_KERNEL);
if (!rbuf) if (!rbuf)
return -ENOMEM; return -ENOMEM;
...@@ -2324,7 +2322,7 @@ int ice_get_initial_sw_cfg(struct ice_hw *hw) ...@@ -2324,7 +2322,7 @@ int ice_get_initial_sw_cfg(struct ice_hw *hw)
} }
} while (req_desc && !status); } while (req_desc && !status);
devm_kfree(ice_hw_to_dev(hw), rbuf); kfree(rbuf);
return status; return status;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment