Commit f43995bd authored by David S. Miller

Merge branch 'qed-qede-add-support-for-new-operating-modes'

Alexander Lobakin says:

====================
qed, qede: add support for new operating modes

This series adds support for the following:
 - new port modes;
 - loopback modes, previously missing;
 - new speed/link modes;
 - several FEC modes;
 - multi-rate transceivers;

and also cleans up and optimizes several related parts of code.

v3 (from [2]):
 - dropped the custom link mode declarations; qed, qede and qedf now use the
   Ethtool link modes and definitions (#0001, #0002, per Andrew Lunn's
   suggestion; a short sketch of that bitmap approach follows the commit
   header below);
 - traded more .text size for .initconst and .ro_after_init data in qede
   (#0003).

v2 (from [1]):
 - added a patch (#0010) that drops the previously discussed dead struct member;
 - addressed checkpatch complaints on #0014 (former #0013);
 - rebased on top of latest net-next;
 - no other changes.

[1] https://lore.kernel.org/netdev/20200716115446.994-1-alobakin@marvell.com/
[2] https://lore.kernel.org/netdev/20200719201453.3648-1-alobakin@marvell.com/
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents cfd69201 99785a87
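
The common thread of the series is that link capabilities are now carried as
ethtool link mode bitmaps instead of the driver-private QED_LM_* u32 flags
removed in the qed_if.h hunk below. A minimal sketch of the pattern, mirroring
the qedf_update_link_speed() rework further down (only "link" and "lport" are
assumed to be in scope, exactly as in that function):

	__ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);

	phylink_zero(sup_caps);
	phylink_set(sup_caps, 25000baseKR_Full);
	phylink_set(sup_caps, 25000baseCR_Full);
	phylink_set(sup_caps, 25000baseSR_Full);

	/* "does the port support any of these modes?" is now a bitmap test */
	if (linkmode_intersects(link->supported_caps, sup_caps))
		lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;
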
......@@ -245,20 +245,6 @@ enum QED_FEATURE {
QED_MAX_FEATURES,
};
enum QED_PORT_MODE {
QED_PORT_MODE_DE_2X40G,
QED_PORT_MODE_DE_2X50G,
QED_PORT_MODE_DE_1X100G,
QED_PORT_MODE_DE_4X10G_F,
QED_PORT_MODE_DE_4X10G_E,
QED_PORT_MODE_DE_4X20G,
QED_PORT_MODE_DE_1X40G,
QED_PORT_MODE_DE_2X25G,
QED_PORT_MODE_DE_1X25G,
QED_PORT_MODE_DE_4X25G,
QED_PORT_MODE_DE_2X10G,
};
enum qed_dev_cap {
QED_DEV_CAP_ETH,
QED_DEV_CAP_FCOE,
......@@ -280,48 +266,49 @@ enum qed_db_rec_exec {
struct qed_hw_info {
/* PCI personality */
enum qed_pci_personality personality;
#define QED_IS_RDMA_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
(dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
enum qed_pci_personality personality;
#define QED_IS_RDMA_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
(dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
(dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_ROCE_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
#define QED_IS_ROCE_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
(dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_IWARP_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
#define QED_IS_IWARP_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
(dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_L2_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_ETH || \
#define QED_IS_L2_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_ETH || \
QED_IS_RDMA_PERSONALITY(dev))
#define QED_IS_FCOE_PERSONALITY(dev) \
#define QED_IS_FCOE_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_FCOE)
#define QED_IS_ISCSI_PERSONALITY(dev) \
#define QED_IS_ISCSI_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_ISCSI)
/* Resource Allocation scheme results */
u32 resc_start[QED_MAX_RESC];
u32 resc_num[QED_MAX_RESC];
u32 feat_num[QED_MAX_FEATURES];
#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
RESC_NUM(_p_hwfn, resc))
#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
RESC_NUM(_p_hwfn, resc))
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
u32 feat_num[QED_MAX_FEATURES];
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
/* Amount of traffic classes HW supports */
u8 num_hw_tc;
u8 num_hw_tc;
/* Amount of TCs which should be active according to DCBx or upper
* layer driver configuration.
*/
u8 num_active_tc;
u8 num_active_tc;
u8 offload_tc;
bool offload_tc_set;
bool multi_tc_roce_en;
#define IS_QED_MULTI_TC_ROCE(p_hwfn) (((p_hwfn)->hw_info.multi_tc_roce_en))
#define IS_QED_MULTI_TC_ROCE(p_hwfn) ((p_hwfn)->hw_info.multi_tc_roce_en)
u32 concrete_fid;
u16 opaque_fid;
......@@ -336,12 +323,11 @@ struct qed_hw_info {
struct qed_igu_info *p_igu_info;
u32 port_mode;
u32 hw_mode;
unsigned long device_capabilities;
unsigned long device_capabilities;
u16 mtu;
enum qed_wol_support b_wol_support;
enum qed_wol_support b_wol_support;
};
/* maximum size of read/write commands (HW limit) */
......@@ -715,41 +701,42 @@ struct qed_dbg_feature {
};
struct qed_dev {
u32 dp_module;
u8 dp_level;
char name[NAME_SIZE];
enum qed_dev_type type;
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \
CHIP_REV_IS_B0(dev))
#define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH)
#define QED_IS_K2(dev) QED_IS_AH(dev)
u16 vendor_id;
u16 device_id;
#define QED_DEV_ID_MASK 0xff00
#define QED_DEV_ID_MASK_BB 0x1600
#define QED_DEV_ID_MASK_AH 0x8000
#define QED_IS_E4(dev) (QED_IS_BB(dev) || QED_IS_AH(dev))
u16 chip_num;
#define CHIP_NUM_MASK 0xffff
#define CHIP_NUM_SHIFT 16
u16 chip_rev;
#define CHIP_REV_MASK 0xf
#define CHIP_REV_SHIFT 12
#define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1)
u32 dp_module;
u8 dp_level;
char name[NAME_SIZE];
enum qed_dev_type type;
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && CHIP_REV_IS_B0(dev))
#define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH)
#define QED_IS_K2(dev) QED_IS_AH(dev)
#define QED_IS_E4(dev) (QED_IS_BB(dev) || QED_IS_AH(dev))
#define QED_IS_E5(dev) ((dev)->type == QED_DEV_TYPE_E5)
u16 vendor_id;
u16 device_id;
#define QED_DEV_ID_MASK 0xff00
#define QED_DEV_ID_MASK_BB 0x1600
#define QED_DEV_ID_MASK_AH 0x8000
u16 chip_num;
#define CHIP_NUM_MASK 0xffff
#define CHIP_NUM_SHIFT 16
u16 chip_rev;
#define CHIP_REV_MASK 0xf
#define CHIP_REV_SHIFT 12
#define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1)
u16 chip_metal;
#define CHIP_METAL_MASK 0xff
#define CHIP_METAL_SHIFT 4
#define CHIP_METAL_MASK 0xff
#define CHIP_METAL_SHIFT 4
u16 chip_bond_id;
#define CHIP_BOND_ID_MASK 0xf
#define CHIP_BOND_ID_SHIFT 0
#define CHIP_BOND_ID_MASK 0xf
#define CHIP_BOND_ID_SHIFT 0
u8 num_engines;
u8 num_ports;
......
......@@ -3968,8 +3968,9 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities, fld;
u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
struct qed_mcp_link_speed_params *ext_speed;
struct qed_mcp_link_capabilities *p_caps;
struct qed_mcp_link_params *link;
......@@ -3994,37 +3995,21 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
break;
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
break;
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
break;
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
break;
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
break;
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
break;
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
break;
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
break;
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G;
break;
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
break;
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G;
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1:
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1:
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2:
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2:
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4:
break;
default:
DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
......@@ -4042,8 +4027,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
link->speed.advertised_speeds = link_temp;
link_temp = link->speed.advertised_speeds;
p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
p_caps->speed_capabilities = link->speed.advertised_speeds;
link_temp = qed_rd(p_hwfn, p_ptt,
port_cfg_addr +
......@@ -4078,19 +4062,40 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
}
p_hwfn->mcp_info->link_capabilities.default_speed_autoneg =
link->speed.autoneg;
p_caps->default_speed_autoneg = link->speed.autoneg;
link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
link->pause.autoneg = !!(link_temp &
NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
link->pause.forced_rx = !!(link_temp &
NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
link->pause.forced_tx = !!(link_temp &
NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_DRV_FLOW_CONTROL);
link->pause.autoneg = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
link->pause.forced_rx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
link->pause.forced_tx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
link->loopback_mode = 0;
if (p_hwfn->mcp_info->capabilities &
FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
switch (GET_MFW_FIELD(link_temp,
NVM_CFG1_PORT_FEC_FORCE_MODE)) {
case NVM_CFG1_PORT_FEC_FORCE_MODE_NONE:
p_caps->fec_default |= QED_FEC_MODE_NONE;
break;
case NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE:
p_caps->fec_default |= QED_FEC_MODE_FIRECODE;
break;
case NVM_CFG1_PORT_FEC_FORCE_MODE_RS:
p_caps->fec_default |= QED_FEC_MODE_RS;
break;
case NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO:
p_caps->fec_default |= QED_FEC_MODE_AUTO;
break;
default:
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"unknown FEC mode in 0x%08x\n", link_temp);
}
} else {
p_caps->fec_default = QED_FEC_MODE_UNSUPPORTED;
}
link->fec = p_caps->fec_default;
if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr +
offsetof(struct nvm_cfg1_port, ext_phy));
......@@ -4122,14 +4127,97 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED;
}
DP_VERBOSE(p_hwfn,
NETIF_MSG_LINK,
"Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n",
link->speed.forced_speed,
link->speed.advertised_speeds,
link->speed.autoneg,
link->pause.autoneg,
p_caps->default_eee, p_caps->eee_lpi_timer);
if (p_hwfn->mcp_info->capabilities &
FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
ext_speed = &link->ext_speed;
link_temp = qed_rd(p_hwfn, p_ptt,
port_cfg_addr +
offsetof(struct nvm_cfg1_port,
extended_speed));
fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_EXTENDED_SPEED);
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN)
ext_speed->autoneg = true;
ext_speed->forced_speed = 0;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G)
ext_speed->forced_speed |= QED_EXT_SPEED_1G;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G)
ext_speed->forced_speed |= QED_EXT_SPEED_10G;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G)
ext_speed->forced_speed |= QED_EXT_SPEED_20G;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G)
ext_speed->forced_speed |= QED_EXT_SPEED_25G;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G)
ext_speed->forced_speed |= QED_EXT_SPEED_40G;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R)
ext_speed->forced_speed |= QED_EXT_SPEED_50G_R;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2)
ext_speed->forced_speed |= QED_EXT_SPEED_50G_R2;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2)
ext_speed->forced_speed |= QED_EXT_SPEED_100G_R2;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4)
ext_speed->forced_speed |= QED_EXT_SPEED_100G_R4;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4)
ext_speed->forced_speed |= QED_EXT_SPEED_100G_P4;
fld = GET_MFW_FIELD(link_temp,
NVM_CFG1_PORT_EXTENDED_SPEED_CAP);
ext_speed->advertised_speeds = 0;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED)
ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_RES;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G)
ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_1G;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G)
ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_10G;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G)
ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_20G;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G)
ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_25G;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G)
ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_40G;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R)
ext_speed->advertised_speeds |=
QED_EXT_SPEED_MASK_50G_R;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2)
ext_speed->advertised_speeds |=
QED_EXT_SPEED_MASK_50G_R2;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2)
ext_speed->advertised_speeds |=
QED_EXT_SPEED_MASK_100G_R2;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4)
ext_speed->advertised_speeds |=
QED_EXT_SPEED_MASK_100G_R4;
if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4)
ext_speed->advertised_speeds |=
QED_EXT_SPEED_MASK_100G_P4;
link_temp = qed_rd(p_hwfn, p_ptt,
port_cfg_addr +
offsetof(struct nvm_cfg1_port,
extended_fec_mode));
link->ext_fec_mode = link_temp;
p_caps->default_ext_speed_caps = ext_speed->advertised_speeds;
p_caps->default_ext_speed = ext_speed->forced_speed;
p_caps->default_ext_autoneg = ext_speed->autoneg;
p_caps->default_ext_fec = link->ext_fec_mode;
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Read default extended link config: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, FEC: 0x%02x\n",
ext_speed->forced_speed,
ext_speed->advertised_speeds, ext_speed->autoneg,
p_caps->default_ext_fec);
}
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x, EEE: 0x%02x [0x%08x usec], FEC: 0x%02x\n",
link->speed.forced_speed, link->speed.advertised_speeds,
link->speed.autoneg, link->pause.autoneg,
p_caps->default_eee, p_caps->eee_lpi_timer,
p_caps->fec_default);
if (IS_LEAD_HWFN(p_hwfn)) {
struct qed_dev *cdev = p_hwfn->cdev;
......
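
The NVM parsing above leaves the firmware's default FEC policy in
p_caps->fec_default and copies it into link->fec as QED_FEC_MODE_* bits
(declared in qed_if.h below). A driver exposing this through ethtool would
translate those bits into the standard ETHTOOL_FEC_* flags; the helper below is
only an illustrative sketch, not the actual qede code:

/* Illustrative only: map QED_FEC_MODE_* bits to ethtool FEC flags. */
static u32 example_fec_to_ethtool(u32 fec)
{
	u32 eth_fec = 0;

	if (fec & QED_FEC_MODE_NONE)
		eth_fec |= ETHTOOL_FEC_OFF;
	if (fec & QED_FEC_MODE_FIRECODE)
		eth_fec |= ETHTOOL_FEC_BASER;
	if (fec & QED_FEC_MODE_RS)
		eth_fec |= ETHTOOL_FEC_RS;
	if (fec & QED_FEC_MODE_AUTO)
		eth_fec |= ETHTOOL_FEC_AUTO;

	/* QED_FEC_MODE_UNSUPPORTED leaves nothing set: report "none" */
	return eth_fec ? eth_fec : ETHTOOL_FEC_NONE;
}
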
......@@ -1446,6 +1446,25 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
if (p_hwfn->mcp_info->capabilities &
FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
switch (status & LINK_STATUS_FEC_MODE_MASK) {
case LINK_STATUS_FEC_MODE_NONE:
p_link->fec_active = QED_FEC_MODE_NONE;
break;
case LINK_STATUS_FEC_MODE_FIRECODE_CL74:
p_link->fec_active = QED_FEC_MODE_FIRECODE;
break;
case LINK_STATUS_FEC_MODE_RS_CL91:
p_link->fec_active = QED_FEC_MODE_RS;
break;
default:
p_link->fec_active = QED_FEC_MODE_AUTO;
}
} else {
p_link->fec_active = QED_FEC_MODE_UNSUPPORTED;
}
qed_link_update(p_hwfn, p_ptt);
out:
spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
......@@ -1456,8 +1475,9 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
struct qed_mcp_mb_params mb_params;
struct eth_phy_cfg phy_cfg;
u32 cmd, fec_bit = 0;
u32 val, ext_speed;
int rc = 0;
u32 cmd;
/* Set the shmem configuration according to params */
memset(&phy_cfg, 0, sizeof(phy_cfg));
......@@ -1489,19 +1509,91 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
EEE_TX_TIMER_USEC_MASK;
}
if (p_hwfn->mcp_info->capabilities &
FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
if (params->fec & QED_FEC_MODE_NONE)
fec_bit |= FEC_FORCE_MODE_NONE;
else if (params->fec & QED_FEC_MODE_FIRECODE)
fec_bit |= FEC_FORCE_MODE_FIRECODE;
else if (params->fec & QED_FEC_MODE_RS)
fec_bit |= FEC_FORCE_MODE_RS;
else if (params->fec & QED_FEC_MODE_AUTO)
fec_bit |= FEC_FORCE_MODE_AUTO;
SET_MFW_FIELD(phy_cfg.fec_mode, FEC_FORCE_MODE, fec_bit);
}
if (p_hwfn->mcp_info->capabilities &
FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
ext_speed = 0;
if (params->ext_speed.autoneg)
ext_speed |= ETH_EXT_SPEED_AN;
val = params->ext_speed.forced_speed;
if (val & QED_EXT_SPEED_1G)
ext_speed |= ETH_EXT_SPEED_1G;
if (val & QED_EXT_SPEED_10G)
ext_speed |= ETH_EXT_SPEED_10G;
if (val & QED_EXT_SPEED_20G)
ext_speed |= ETH_EXT_SPEED_20G;
if (val & QED_EXT_SPEED_25G)
ext_speed |= ETH_EXT_SPEED_25G;
if (val & QED_EXT_SPEED_40G)
ext_speed |= ETH_EXT_SPEED_40G;
if (val & QED_EXT_SPEED_50G_R)
ext_speed |= ETH_EXT_SPEED_50G_BASE_R;
if (val & QED_EXT_SPEED_50G_R2)
ext_speed |= ETH_EXT_SPEED_50G_BASE_R2;
if (val & QED_EXT_SPEED_100G_R2)
ext_speed |= ETH_EXT_SPEED_100G_BASE_R2;
if (val & QED_EXT_SPEED_100G_R4)
ext_speed |= ETH_EXT_SPEED_100G_BASE_R4;
if (val & QED_EXT_SPEED_100G_P4)
ext_speed |= ETH_EXT_SPEED_100G_BASE_P4;
SET_MFW_FIELD(phy_cfg.extended_speed, ETH_EXT_SPEED,
ext_speed);
ext_speed = 0;
val = params->ext_speed.advertised_speeds;
if (val & QED_EXT_SPEED_MASK_1G)
ext_speed |= ETH_EXT_ADV_SPEED_1G;
if (val & QED_EXT_SPEED_MASK_10G)
ext_speed |= ETH_EXT_ADV_SPEED_10G;
if (val & QED_EXT_SPEED_MASK_20G)
ext_speed |= ETH_EXT_ADV_SPEED_20G;
if (val & QED_EXT_SPEED_MASK_25G)
ext_speed |= ETH_EXT_ADV_SPEED_25G;
if (val & QED_EXT_SPEED_MASK_40G)
ext_speed |= ETH_EXT_ADV_SPEED_40G;
if (val & QED_EXT_SPEED_MASK_50G_R)
ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R;
if (val & QED_EXT_SPEED_MASK_50G_R2)
ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R2;
if (val & QED_EXT_SPEED_MASK_100G_R2)
ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R2;
if (val & QED_EXT_SPEED_MASK_100G_R4)
ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R4;
if (val & QED_EXT_SPEED_MASK_100G_P4)
ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_P4;
phy_cfg.extended_speed |= ext_speed;
SET_MFW_FIELD(phy_cfg.fec_mode, FEC_EXTENDED_MODE,
params->ext_fec_mode);
}
p_hwfn->b_drv_link_init = b_up;
if (b_up) {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
phy_cfg.speed,
phy_cfg.pause,
phy_cfg.adv_speed,
phy_cfg.loopback_mode,
phy_cfg.feature_config_flags);
"Configuring Link: Speed 0x%08x, Pause 0x%08x, Adv. Speed 0x%08x, Loopback 0x%08x, FEC 0x%08x, Ext. Speed 0x%08x\n",
phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
phy_cfg.loopback_mode, phy_cfg.fec_mode,
phy_cfg.extended_speed);
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Resetting link\n");
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n");
}
memset(&mb_params, 0, sizeof(mb_params));
......@@ -2193,6 +2285,11 @@ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
break;
case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
break;
case ETH_TRANSCEIVER_TYPE_40G_CR4:
case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
......@@ -2223,8 +2320,10 @@ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
break;
case ETH_TRANSCEIVER_TYPE_10G_BASET:
case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
break;
default:
DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
......@@ -3798,7 +3897,12 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
u32 mcp_resp, mcp_param, features;
features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK |
DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL;
if (QED_IS_E5(p_hwfn->cdev))
features |=
DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL;
return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
features, &mcp_resp, &mcp_param);
......
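
With the qed_mcp_set_link() changes above, a caller only has to fill the new
fec and ext_speed members of qed_mcp_link_params before requesting the link.
A hedged sketch, assuming p_hwfn and p_ptt are already acquired as in the
existing callers; the chosen speed masks are arbitrary examples:

	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	int rc;

	params->fec = QED_FEC_MODE_AUTO;		/* let the MFW pick FEC */

	if (qed_mcp_is_ext_speed_supported(p_hwfn)) {
		params->ext_speed.autoneg = true;
		params->ext_speed.forced_speed = 0;	/* no forced rate */
		params->ext_speed.advertised_speeds = QED_EXT_SPEED_MASK_50G_R |
						      QED_EXT_SPEED_MASK_100G_R2;
	}

	rc = qed_mcp_set_link(p_hwfn, p_ptt, true);	/* b_up = true */
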
......@@ -16,15 +16,38 @@
#include "qed_dev_api.h"
struct qed_mcp_link_speed_params {
bool autoneg;
u32 advertised_speeds; /* bitmask of DRV_SPEED_CAPABILITY */
u32 forced_speed; /* In Mb/s */
bool autoneg;
u32 advertised_speeds;
#define QED_EXT_SPEED_MASK_RES 0x1
#define QED_EXT_SPEED_MASK_1G 0x2
#define QED_EXT_SPEED_MASK_10G 0x4
#define QED_EXT_SPEED_MASK_20G 0x8
#define QED_EXT_SPEED_MASK_25G 0x10
#define QED_EXT_SPEED_MASK_40G 0x20
#define QED_EXT_SPEED_MASK_50G_R 0x40
#define QED_EXT_SPEED_MASK_50G_R2 0x80
#define QED_EXT_SPEED_MASK_100G_R2 0x100
#define QED_EXT_SPEED_MASK_100G_R4 0x200
#define QED_EXT_SPEED_MASK_100G_P4 0x400
u32 forced_speed; /* In Mb/s */
#define QED_EXT_SPEED_1G 0x1
#define QED_EXT_SPEED_10G 0x2
#define QED_EXT_SPEED_20G 0x4
#define QED_EXT_SPEED_25G 0x8
#define QED_EXT_SPEED_40G 0x10
#define QED_EXT_SPEED_50G_R 0x20
#define QED_EXT_SPEED_50G_R2 0x40
#define QED_EXT_SPEED_100G_R2 0x80
#define QED_EXT_SPEED_100G_R4 0x100
#define QED_EXT_SPEED_100G_P4 0x200
};
struct qed_mcp_link_pause_params {
bool autoneg;
bool forced_rx;
bool forced_tx;
bool autoneg;
bool forced_rx;
bool forced_tx;
};
enum qed_mcp_eee_mode {
......@@ -34,61 +57,72 @@ enum qed_mcp_eee_mode {
};
struct qed_mcp_link_params {
struct qed_mcp_link_speed_params speed;
struct qed_mcp_link_pause_params pause;
u32 loopback_mode;
struct qed_link_eee_params eee;
struct qed_mcp_link_speed_params speed;
struct qed_mcp_link_pause_params pause;
u32 loopback_mode;
struct qed_link_eee_params eee;
u32 fec;
struct qed_mcp_link_speed_params ext_speed;
u32 ext_fec_mode;
};
struct qed_mcp_link_capabilities {
u32 speed_capabilities;
bool default_speed_autoneg;
enum qed_mcp_eee_mode default_eee;
u32 eee_lpi_timer;
u8 eee_speed_caps;
u32 speed_capabilities;
bool default_speed_autoneg;
u32 fec_default;
enum qed_mcp_eee_mode default_eee;
u32 eee_lpi_timer;
u8 eee_speed_caps;
u32 default_ext_speed_caps;
u32 default_ext_autoneg;
u32 default_ext_speed;
u32 default_ext_fec;
};
struct qed_mcp_link_state {
bool link_up;
u32 min_pf_rate;
bool link_up;
u32 min_pf_rate;
/* Actual link speed in Mb/s */
u32 line_speed;
u32 line_speed;
/* PF max speed in Mb/s, deduced from line_speed
* according to PF max bandwidth configuration.
*/
u32 speed;
bool full_duplex;
bool an;
bool an_complete;
bool parallel_detection;
bool pfc_enabled;
#define QED_LINK_PARTNER_SPEED_1G_HD BIT(0)
#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1)
#define QED_LINK_PARTNER_SPEED_10G BIT(2)
#define QED_LINK_PARTNER_SPEED_20G BIT(3)
#define QED_LINK_PARTNER_SPEED_25G BIT(4)
#define QED_LINK_PARTNER_SPEED_40G BIT(5)
#define QED_LINK_PARTNER_SPEED_50G BIT(6)
#define QED_LINK_PARTNER_SPEED_100G BIT(7)
u32 partner_adv_speed;
bool partner_tx_flow_ctrl_en;
bool partner_rx_flow_ctrl_en;
#define QED_LINK_PARTNER_SYMMETRIC_PAUSE (1)
#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE (2)
#define QED_LINK_PARTNER_BOTH_PAUSE (3)
u8 partner_adv_pause;
bool sfp_tx_fault;
bool eee_active;
u8 eee_adv_caps;
u8 eee_lp_adv_caps;
u32 speed;
bool full_duplex;
bool an;
bool an_complete;
bool parallel_detection;
bool pfc_enabled;
u32 partner_adv_speed;
#define QED_LINK_PARTNER_SPEED_1G_HD BIT(0)
#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1)
#define QED_LINK_PARTNER_SPEED_10G BIT(2)
#define QED_LINK_PARTNER_SPEED_20G BIT(3)
#define QED_LINK_PARTNER_SPEED_25G BIT(4)
#define QED_LINK_PARTNER_SPEED_40G BIT(5)
#define QED_LINK_PARTNER_SPEED_50G BIT(6)
#define QED_LINK_PARTNER_SPEED_100G BIT(7)
bool partner_tx_flow_ctrl_en;
bool partner_rx_flow_ctrl_en;
u8 partner_adv_pause;
#define QED_LINK_PARTNER_SYMMETRIC_PAUSE 0x1
#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE 0x2
#define QED_LINK_PARTNER_BOTH_PAUSE 0x3
bool sfp_tx_fault;
bool eee_active;
u8 eee_adv_caps;
u8 eee_lp_adv_caps;
u32 fec_active;
};
struct qed_mcp_function_info {
......@@ -747,6 +781,20 @@ struct qed_drv_tlv_hdr {
u8 tlv_flags;
};
/**
* qed_mcp_is_ext_speed_supported() - Check if management firmware supports
* extended speeds.
* @p_hwfn: HW device data.
*
* Return: true if supported, false otherwise.
*/
static inline bool
qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn)
{
return !!(p_hwfn->mcp_info->capabilities &
FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL);
}
/**
* @brief Initialize the interface with the MCP
*
......
......@@ -557,6 +557,8 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
struct flow_cls_offload *f);
void qede_forced_speed_maps_init(void);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
......
......@@ -263,6 +263,8 @@ int __init qede_init(void)
pr_info("qede_init: %s\n", version);
qede_forced_speed_maps_init();
qed_ops = qed_get_eth_ops();
if (!qed_ops) {
pr_notice("Failed to get qed ethtool operations\n");
......
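
qede_forced_speed_maps_init() is invoked once from qede_init(); per the cover
letter it builds lookup data that can then live in .initconst/.ro_after_init
instead of .text. The snippet below only illustrates that general pattern; the
struct and table names are hypothetical, not the actual qede implementation:

struct example_speed_map {
	u32 speed;					/* forced speed in Mb/s */
	__ETHTOOL_DECLARE_LINK_MODE_MASK(caps);		/* matching link modes */
};

static struct example_speed_map example_maps[2] __ro_after_init;

static void __init example_forced_speed_maps_init(void)
{
	/* filled once at module init, read-only afterwards */
	example_maps[0].speed = 10000;
	phylink_set(example_maps[0].caps, 10000baseT_Full);
	phylink_set(example_maps[0].caps, 10000baseKR_Full);

	example_maps[1].speed = 25000;
	phylink_set(example_maps[1].caps, 25000baseKR_Full);
	phylink_set(example_maps[1].caps, 25000baseCR_Full);
}
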
......@@ -13,6 +13,7 @@
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/phylink.h>
#include <scsi/libfc.h>
#include <scsi/scsi_host.h>
#include <scsi/fc_frame.h>
......@@ -440,6 +441,7 @@ static void qedf_link_recovery(struct work_struct *work)
static void qedf_update_link_speed(struct qedf_ctx *qedf,
struct qed_link_output *link)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);
struct fc_lport *lport = qedf->lport;
lport->link_speed = FC_PORTSPEED_UNKNOWN;
......@@ -474,40 +476,60 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
* Set supported link speed by querying the supported
* capabilities of the link.
*/
if ((link->supported_caps & QED_LM_10000baseT_Full_BIT) ||
(link->supported_caps & QED_LM_10000baseKX4_Full_BIT) ||
(link->supported_caps & QED_LM_10000baseR_FEC_BIT) ||
(link->supported_caps & QED_LM_10000baseCR_Full_BIT) ||
(link->supported_caps & QED_LM_10000baseSR_Full_BIT) ||
(link->supported_caps & QED_LM_10000baseLR_Full_BIT) ||
(link->supported_caps & QED_LM_10000baseLRM_Full_BIT) ||
(link->supported_caps & QED_LM_10000baseKR_Full_BIT)) {
phylink_zero(sup_caps);
phylink_set(sup_caps, 10000baseT_Full);
phylink_set(sup_caps, 10000baseKX4_Full);
phylink_set(sup_caps, 10000baseR_FEC);
phylink_set(sup_caps, 10000baseCR_Full);
phylink_set(sup_caps, 10000baseSR_Full);
phylink_set(sup_caps, 10000baseLR_Full);
phylink_set(sup_caps, 10000baseLRM_Full);
phylink_set(sup_caps, 10000baseKR_Full);
if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
}
if ((link->supported_caps & QED_LM_25000baseKR_Full_BIT) ||
(link->supported_caps & QED_LM_25000baseCR_Full_BIT) ||
(link->supported_caps & QED_LM_25000baseSR_Full_BIT)) {
phylink_zero(sup_caps);
phylink_set(sup_caps, 25000baseKR_Full);
phylink_set(sup_caps, 25000baseCR_Full);
phylink_set(sup_caps, 25000baseSR_Full);
if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;
}
if ((link->supported_caps & QED_LM_40000baseLR4_Full_BIT) ||
(link->supported_caps & QED_LM_40000baseKR4_Full_BIT) ||
(link->supported_caps & QED_LM_40000baseCR4_Full_BIT) ||
(link->supported_caps & QED_LM_40000baseSR4_Full_BIT)) {
phylink_zero(sup_caps);
phylink_set(sup_caps, 40000baseLR4_Full);
phylink_set(sup_caps, 40000baseKR4_Full);
phylink_set(sup_caps, 40000baseCR4_Full);
phylink_set(sup_caps, 40000baseSR4_Full);
if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
}
if ((link->supported_caps & QED_LM_50000baseKR2_Full_BIT) ||
(link->supported_caps & QED_LM_50000baseCR2_Full_BIT) ||
(link->supported_caps & QED_LM_50000baseSR2_Full_BIT)) {
phylink_zero(sup_caps);
phylink_set(sup_caps, 50000baseKR2_Full);
phylink_set(sup_caps, 50000baseCR2_Full);
phylink_set(sup_caps, 50000baseSR2_Full);
if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;
}
if ((link->supported_caps & QED_LM_100000baseKR4_Full_BIT) ||
(link->supported_caps & QED_LM_100000baseSR4_Full_BIT) ||
(link->supported_caps & QED_LM_100000baseCR4_Full_BIT) ||
(link->supported_caps & QED_LM_100000baseLR4_ER4_Full_BIT)) {
phylink_zero(sup_caps);
phylink_set(sup_caps, 100000baseKR4_Full);
phylink_set(sup_caps, 100000baseSR4_Full);
phylink_set(sup_caps, 100000baseCR4_Full);
phylink_set(sup_caps, 100000baseLR4_ER4_Full);
if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;
}
if (link->supported_caps & QED_LM_20000baseKR2_Full_BIT)
phylink_zero(sup_caps);
phylink_set(sup_caps, 20000baseKR2_Full);
if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
}
......
......@@ -82,6 +82,12 @@ static inline int linkmode_equal(const unsigned long *src1,
return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
static inline int linkmode_intersects(const unsigned long *src1,
const unsigned long *src2)
{
return bitmap_intersects(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
static inline int linkmode_subset(const unsigned long *src1,
const unsigned long *src2)
{
......
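
The new linkmode_intersects() helper answers "do two link mode masks share any
bit?", complementing linkmode_subset() and linkmode_equal(). A small
illustration (port_caps is assumed to have been filled from the device
elsewhere):

	__ETHTOOL_DECLARE_LINK_MODE_MASK(port_caps);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(wanted);

	linkmode_zero(wanted);
	linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, wanted);
	linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, wanted);

	/* true if port_caps and wanted share at least one mode bit */
	if (linkmode_intersects(port_caps, wanted))
		pr_debug("a requested 25G mode is supported\n");

	/*
	 * linkmode_subset(wanted, port_caps), by contrast, is only true when
	 * every bit set in "wanted" is also set in "port_caps".
	 */
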
......@@ -594,6 +594,7 @@ enum qed_hw_err_type {
enum qed_dev_type {
QED_DEV_TYPE_BB,
QED_DEV_TYPE_AH,
QED_DEV_TYPE_E5,
};
struct qed_dev_info {
......@@ -661,87 +662,72 @@ enum qed_protocol {
QED_PROTOCOL_FCOE,
};
enum qed_link_mode_bits {
QED_LM_FIBRE_BIT = BIT(0),
QED_LM_Autoneg_BIT = BIT(1),
QED_LM_Asym_Pause_BIT = BIT(2),
QED_LM_Pause_BIT = BIT(3),
QED_LM_1000baseT_Full_BIT = BIT(4),
QED_LM_10000baseT_Full_BIT = BIT(5),
QED_LM_10000baseKR_Full_BIT = BIT(6),
QED_LM_20000baseKR2_Full_BIT = BIT(7),
QED_LM_25000baseKR_Full_BIT = BIT(8),
QED_LM_40000baseLR4_Full_BIT = BIT(9),
QED_LM_50000baseKR2_Full_BIT = BIT(10),
QED_LM_100000baseKR4_Full_BIT = BIT(11),
QED_LM_TP_BIT = BIT(12),
QED_LM_Backplane_BIT = BIT(13),
QED_LM_1000baseKX_Full_BIT = BIT(14),
QED_LM_10000baseKX4_Full_BIT = BIT(15),
QED_LM_10000baseR_FEC_BIT = BIT(16),
QED_LM_40000baseKR4_Full_BIT = BIT(17),
QED_LM_40000baseCR4_Full_BIT = BIT(18),
QED_LM_40000baseSR4_Full_BIT = BIT(19),
QED_LM_25000baseCR_Full_BIT = BIT(20),
QED_LM_25000baseSR_Full_BIT = BIT(21),
QED_LM_50000baseCR2_Full_BIT = BIT(22),
QED_LM_100000baseSR4_Full_BIT = BIT(23),
QED_LM_100000baseCR4_Full_BIT = BIT(24),
QED_LM_100000baseLR4_ER4_Full_BIT = BIT(25),
QED_LM_50000baseSR2_Full_BIT = BIT(26),
QED_LM_1000baseX_Full_BIT = BIT(27),
QED_LM_10000baseCR_Full_BIT = BIT(28),
QED_LM_10000baseSR_Full_BIT = BIT(29),
QED_LM_10000baseLR_Full_BIT = BIT(30),
QED_LM_10000baseLRM_Full_BIT = BIT(31),
QED_LM_COUNT = 32
enum qed_fec_mode {
QED_FEC_MODE_NONE = BIT(0),
QED_FEC_MODE_FIRECODE = BIT(1),
QED_FEC_MODE_RS = BIT(2),
QED_FEC_MODE_AUTO = BIT(3),
QED_FEC_MODE_UNSUPPORTED = BIT(4),
};
struct qed_link_params {
bool link_up;
#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0)
#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5)
u32 override_flags;
bool autoneg;
u32 adv_speeds;
u32 forced_speed;
#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0)
#define QED_LINK_PAUSE_RX_ENABLE BIT(1)
#define QED_LINK_PAUSE_TX_ENABLE BIT(2)
u32 pause_config;
#define QED_LINK_LOOPBACK_NONE BIT(0)
#define QED_LINK_LOOPBACK_INT_PHY BIT(1)
#define QED_LINK_LOOPBACK_EXT_PHY BIT(2)
#define QED_LINK_LOOPBACK_EXT BIT(3)
#define QED_LINK_LOOPBACK_MAC BIT(4)
u32 loopback_mode;
struct qed_link_eee_params eee;
bool link_up;
u32 override_flags;
#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0)
#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5)
#define QED_LINK_OVERRIDE_FEC_CONFIG BIT(6)
bool autoneg;
__ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds);
u32 forced_speed;
u32 pause_config;
#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0)
#define QED_LINK_PAUSE_RX_ENABLE BIT(1)
#define QED_LINK_PAUSE_TX_ENABLE BIT(2)
u32 loopback_mode;
#define QED_LINK_LOOPBACK_NONE BIT(0)
#define QED_LINK_LOOPBACK_INT_PHY BIT(1)
#define QED_LINK_LOOPBACK_EXT_PHY BIT(2)
#define QED_LINK_LOOPBACK_EXT BIT(3)
#define QED_LINK_LOOPBACK_MAC BIT(4)
#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123 BIT(5)
#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301 BIT(6)
#define QED_LINK_LOOPBACK_PCS_AH_ONLY BIT(7)
#define QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY BIT(8)
#define QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY BIT(9)
struct qed_link_eee_params eee;
u32 fec;
};
struct qed_link_output {
bool link_up;
bool link_up;
/* In QED_LM_* defs */
u32 supported_caps;
u32 advertised_caps;
u32 lp_caps;
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported_caps);
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised_caps);
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_caps);
u32 speed; /* In Mb/s */
u8 duplex; /* In DUPLEX defs */
u8 port; /* In PORT defs */
bool autoneg;
u32 pause_config;
u32 speed; /* In Mb/s */
u8 duplex; /* In DUPLEX defs */
u8 port; /* In PORT defs */
bool autoneg;
u32 pause_config;
/* EEE - capability & param */
bool eee_supported;
bool eee_active;
u8 sup_caps;
struct qed_link_eee_params eee;
bool eee_supported;
bool eee_active;
u8 sup_caps;
struct qed_link_eee_params eee;
u32 sup_fec;
u32 active_fec;
};
struct qed_probe_params {
......
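
Since supported_caps, advertised_caps and lp_caps are now full ethtool link
mode masks, an ethtool ->get_link_ksettings() implementation can hand them to
the core with linkmode_copy() and no translation table. The function below is
an illustrative sketch only; its name and the way the link state is obtained
are placeholders:

static int example_get_link_ksettings(struct net_device *ndev,
				      struct ethtool_link_ksettings *cmd)
{
	struct qed_link_output link;

	memset(&link, 0, sizeof(link));
	/* in the real driver the qed common ops fill "link" here */

	linkmode_copy(cmd->link_modes.supported, link.supported_caps);
	linkmode_copy(cmd->link_modes.advertising, link.advertised_caps);
	linkmode_copy(cmd->link_modes.lp_advertising, link.lp_caps);

	cmd->base.speed = link.speed;
	cmd->base.duplex = link.duplex;
	cmd->base.port = link.port;
	cmd->base.autoneg = link.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}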