Commit 7d982567 authored by David S. Miller

Merge tag 'mlx5-updates-2017-01-19' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 and mlx5e updates 2017-01-19

This series includes some updates for mlx5 core and mlx5e netdevice driver.

From Leon, a small fix that removes an unnecessary print.

From Eli Cohen, a fix to the FW version printout in case of internal error.

From Eugenia Emantayev, two patches: the first adds mlx5 1PPS (pulse per
second) infrastructure support, and the second adds the necessary bits
for the mlx5e ptp logic and structures.

From Mohamad, add support for receiving s-tagged packets when in promiscuous
mode.

From Gal Pressman, MCAM (Management capabilities mask register) and PCAM
(Ports capabilities mask register) registers infrastructure. These
registers are needed to query which statistics registers the firmware
supports, so the driver can enable or disable querying and reporting
them back to the user.  On top of this infrastructure we've exposed a
new set of statistics groups:
   - PPCNT: Physical layer statistical counters (for symbol errors)
   - MPCNT: PCIe performance counters

In addition to the statistics capabilities series we've moved the mlx5 HCA
capabilities fields to a dedicated struct under the driver private data.

At the end, a small patch to update and query statistics in the preferred
order.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents cc154783 3dd69e3d
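
For context on how the new 1PPS pins are consumed: they are exposed through the generic Linux PTP character-device interface, not an mlx5-specific API. Below is a minimal userspace sketch, assuming the mlx5 clock registers as /dev/ptp0 and that pin 0 can be routed to periodic output; the actual pin count and per-pin modes are advertised by the MTPPS register queried in the patches below, and all values here are illustrative.

/* Hedged sketch: drive an mlx5 1PPS output pin via the standard
 * PTP chardev API from <linux/ptp_clock.h>. /dev/ptp0 and pin 0
 * are assumptions, not guarantees made by this series.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_pin_desc pin;
	struct ptp_perout_request req;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Route pin 0 to the periodic-output function, channel 0. */
	memset(&pin, 0, sizeof(pin));
	pin.index = 0;
	pin.func = PTP_PF_PEROUT;
	pin.chan = 0;
	if (ioctl(fd, PTP_PIN_SETFUNC, &pin))
		perror("PTP_PIN_SETFUNC");

	/* Request a 1 Hz pulse train; mlx5e_perout_configure() in this
	 * series rejects any period other than exactly one second. */
	memset(&req, 0, sizeof(req));
	req.index = 0;
	req.period.sec = 1;
	req.period.nsec = 0;
	if (ioctl(fd, PTP_PEROUT_REQUEST, &req))
		perror("PTP_PEROUT_REQUEST");

	close(fd);
	return 0;
}
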
......@@ -1704,9 +1704,9 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
if (ib_spec->eth.mask.vlan_tag) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- vlan_tag, 1);
+ cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- vlan_tag, 1);
+ cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
......
......@@ -262,6 +262,7 @@ struct mlx5e_tstamp {
struct mlx5_core_dev *mdev;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
u8 *pps_pin_caps;
};
enum {
......@@ -571,7 +572,8 @@ struct mlx5e_vlan_table {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID];
struct mlx5_flow_handle *untagged_rule;
- struct mlx5_flow_handle *any_vlan_rule;
+ struct mlx5_flow_handle *any_cvlan_rule;
+ struct mlx5_flow_handle *any_svlan_rule;
bool filter_disabled;
};
......@@ -780,6 +782,8 @@ void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
struct skb_shared_hwtstamps *hwts);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
struct ptp_clock_event *event);
int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val);
......
......@@ -37,6 +37,22 @@ enum {
MLX5E_CYCLES_SHIFT = 23
};
enum {
MLX5E_PIN_MODE_IN = 0x0,
MLX5E_PIN_MODE_OUT = 0x1,
};
enum {
MLX5E_OUT_PATTERN_PULSE = 0x0,
MLX5E_OUT_PATTERN_PERIODIC = 0x1,
};
enum {
MLX5E_EVENT_MODE_DISABLE = 0x0,
MLX5E_EVENT_MODE_REPETETIVE = 0x1,
MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2,
};
void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
struct skb_shared_hwtstamps *hwts)
{
......@@ -189,6 +205,18 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
int neg_adj = 0;
struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
ptp_info);
struct mlx5e_priv *priv =
container_of(tstamp, struct mlx5e_priv, tstamp);
if (MLX5_CAP_GEN(priv->mdev, pps_modify)) {
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
/* For future use need to add a loop for finding all 1PPS out pins */
MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF);
mlx5_set_mtpps(priv->mdev, in, sizeof(in));
}
if (delta < 0) {
neg_adj = 1;
......@@ -208,6 +236,124 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
return 0;
}
static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
{
struct mlx5e_tstamp *tstamp =
container_of(ptp, struct mlx5e_tstamp, ptp_info);
struct mlx5e_priv *priv =
container_of(tstamp, struct mlx5e_priv, tstamp);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
u8 pattern = 0;
int pin = -1;
int err = 0;
if (!MLX5_CAP_GEN(priv->mdev, pps) ||
!MLX5_CAP_GEN(priv->mdev, pps_modify))
return -EOPNOTSUPP;
if (rq->extts.index >= tstamp->ptp_info.n_pins)
return -EINVAL;
if (on) {
pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
if (pin < 0)
return -EBUSY;
}
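/* pattern selects the trigger edge: 0 = rising (the default here),
 * 1 = falling, per the PTP_FALLING_EDGE handling below (inferred
 * from this code, not from the MTPPS register spec).
 */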
if (rq->extts.flags & PTP_FALLING_EDGE)
pattern = 1;
MLX5_SET(mtpps_reg, in, pin, pin);
MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN);
MLX5_SET(mtpps_reg, in, pattern, pattern);
MLX5_SET(mtpps_reg, in, enable, on);
err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
if (err)
return err;
return mlx5_set_mtppse(priv->mdev, pin, 0,
MLX5E_EVENT_MODE_REPETETIVE & on);
}
static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
{
struct mlx5e_tstamp *tstamp =
container_of(ptp, struct mlx5e_tstamp, ptp_info);
struct mlx5e_priv *priv =
container_of(tstamp, struct mlx5e_priv, tstamp);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
u64 nsec_now, nsec_delta, time_stamp;
u64 cycles_now, cycles_delta;
struct timespec64 ts;
unsigned long flags;
int pin = -1;
s64 ns;
if (!MLX5_CAP_GEN(priv->mdev, pps_modify))
return -EOPNOTSUPP;
if (rq->perout.index >= tstamp->ptp_info.n_pins)
return -EINVAL;
if (on) {
pin = ptp_find_pin(tstamp->ptp, PTP_PF_PEROUT,
rq->perout.index);
if (pin < 0)
return -EBUSY;
}
ts.tv_sec = rq->perout.period.sec;
ts.tv_nsec = rq->perout.period.nsec;
ns = timespec64_to_ns(&ts);
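/* Only a 1 Hz (1PPS) pulse train is supported: the check below
 * rejects any period that is not exactly 10^9 ns (ns >> 1 must
 * equal half a second).
 */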
if (on)
if ((ns >> 1) != 500000000LL)
return -EINVAL;
ts.tv_sec = rq->perout.start.sec;
ts.tv_nsec = rq->perout.start.nsec;
ns = timespec64_to_ns(&ts);
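/* Translate the requested start time into a raw internal-timer
 * value: sample the free-running counter, compute the nanosecond
 * delta until the start time, and scale it back to cycles using
 * the timecounter mult/shift ratio.
 */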
cycles_now = mlx5_read_internal_timer(tstamp->mdev);
write_lock_irqsave(&tstamp->lock, flags);
nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
nsec_delta = ns - nsec_now;
cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
tstamp->cycles.mult);
write_unlock_irqrestore(&tstamp->lock, flags);
time_stamp = cycles_now + cycles_delta;
MLX5_SET(mtpps_reg, in, pin, pin);
MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC);
MLX5_SET(mtpps_reg, in, enable, on);
MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
return mlx5_set_mtpps(priv->mdev, in, sizeof(in));
}
static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
{
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
return mlx5e_extts_configure(ptp, rq, on);
case PTP_CLK_REQ_PEROUT:
return mlx5e_perout_configure(ptp, rq, on);
default:
return -EOPNOTSUPP;
}
return 0;
}
static int mlx5e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
}
static const struct ptp_clock_info mlx5e_ptp_clock_info = {
.owner = THIS_MODULE,
.max_adj = 100000000,
......@@ -221,6 +367,7 @@ static const struct ptp_clock_info mlx5e_ptp_clock_info = {
.gettime64 = mlx5e_ptp_gettime,
.settime64 = mlx5e_ptp_settime,
.enable = NULL,
.verify = NULL,
};
static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp)
......@@ -229,6 +376,62 @@ static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp)
tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
}
static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
{
int i;
tstamp->ptp_info.pin_config =
kzalloc(sizeof(*tstamp->ptp_info.pin_config) *
tstamp->ptp_info.n_pins, GFP_KERNEL);
if (!tstamp->ptp_info.pin_config)
return -ENOMEM;
tstamp->ptp_info.enable = mlx5e_ptp_enable;
tstamp->ptp_info.verify = mlx5e_ptp_verify;
for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
snprintf(tstamp->ptp_info.pin_config[i].name,
sizeof(tstamp->ptp_info.pin_config[i].name),
"mlx5_pps%d", i);
tstamp->ptp_info.pin_config[i].index = i;
tstamp->ptp_info.pin_config[i].func = PTP_PF_NONE;
tstamp->ptp_info.pin_config[i].chan = i;
}
return 0;
}
static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
struct mlx5e_tstamp *tstamp)
{
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
mlx5_query_mtpps(priv->mdev, out, sizeof(out));
tstamp->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
cap_number_of_pps_pins);
tstamp->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
cap_max_num_of_pps_in_pins);
tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
cap_max_num_of_pps_out_pins);
tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
struct ptp_clock_event *event)
{
struct mlx5e_tstamp *tstamp = &priv->tstamp;
ptp_clock_event(tstamp->ptp, event);
}
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
struct mlx5e_tstamp *tstamp = &priv->tstamp;
......@@ -272,6 +475,18 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
tstamp->ptp_info = mlx5e_ptp_clock_info;
snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
/* Initialize 1PPS data structures */
#define MAX_PIN_NUM 8
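/* 8 matches the cap_pin_0_mode..cap_pin_7_mode fields of the MTPPS
 * register read in mlx5e_get_pps_caps() above.
 */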
tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL);
if (tstamp->pps_pin_caps) {
if (MLX5_CAP_GEN(priv->mdev, pps))
mlx5e_get_pps_caps(priv, tstamp);
if (tstamp->ptp_info.n_pins)
mlx5e_init_pin_config(tstamp);
} else {
mlx5_core_warn(priv->mdev, "1PPS initialization failed\n");
}
tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
&priv->mdev->pdev->dev);
if (IS_ERR(tstamp->ptp)) {
......@@ -293,5 +508,8 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
priv->tstamp.ptp = NULL;
}
kfree(tstamp->pps_pin_caps);
kfree(tstamp->ptp_info.pin_config);
cancel_delayed_work_sync(&tstamp->overflow_work);
}
......@@ -170,7 +170,8 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_STATS:
return NUM_SW_COUNTERS +
MLX5E_NUM_Q_CNTRS(priv) +
- NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
+ NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS(priv) +
+ NUM_PCIE_COUNTERS(priv) +
MLX5E_NUM_RQ_STATS(priv) +
MLX5E_NUM_SQ_STATS(priv) +
MLX5E_NUM_PFC_COUNTERS(priv) +
......@@ -218,6 +219,14 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pport_2819_stats_desc[i].format);
for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pport_phy_statistical_stats_desc[i].format);
for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pcie_perf_stats_desc[i].format);
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
......@@ -330,6 +339,14 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
pport_2819_stats_desc, i);
for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++)
data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
pport_phy_statistical_stats_desc, i);
for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
pcie_perf_stats_desc, i);
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
......
......@@ -150,7 +150,8 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
enum mlx5e_vlan_rule_type {
MLX5E_VLAN_RULE_TYPE_UNTAGGED,
- MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
+ MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};
......@@ -172,19 +173,31 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
dest.ft = priv->fs.l2.ft.t;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
rule_p = &priv->fs.vlan.untagged_rule;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.cvlan_tag);
break;
- case MLX5E_VLAN_RULE_TYPE_ANY_VID:
- rule_p = &priv->fs.vlan.any_vlan_rule;
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
+ case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
+ rule_p = &priv->fs.vlan.any_cvlan_rule;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
+ break;
+ case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
+ rule_p = &priv->fs.vlan.any_svlan_rule;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
break;
default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
rule_p = &priv->fs.vlan.active_vlans_rule[vid];
MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.first_vid);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
......@@ -235,10 +248,16 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
priv->fs.vlan.untagged_rule = NULL;
}
break;
- case MLX5E_VLAN_RULE_TYPE_ANY_VID:
- if (priv->fs.vlan.any_vlan_rule) {
- mlx5_del_flow_rules(priv->fs.vlan.any_vlan_rule);
- priv->fs.vlan.any_vlan_rule = NULL;
+ case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
+ if (priv->fs.vlan.any_cvlan_rule) {
+ mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
+ priv->fs.vlan.any_cvlan_rule = NULL;
+ }
+ break;
+ case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
+ if (priv->fs.vlan.any_svlan_rule) {
+ mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
+ priv->fs.vlan.any_svlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
......@@ -252,6 +271,23 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
}
}
static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
int err;
err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
if (err)
return err;
return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
if (!priv->fs.vlan.filter_disabled)
......@@ -260,7 +296,7 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
priv->fs.vlan.filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC)
return;
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+ mlx5e_del_any_vid_rules(priv);
}
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
......@@ -271,7 +307,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
priv->fs.vlan.filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC)
return;
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+ mlx5e_add_any_vid_rules(priv);
}
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
......@@ -308,7 +344,7 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
if (priv->fs.vlan.filter_disabled &&
!(priv->netdev->flags & IFF_PROMISC))
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+ mlx5e_add_any_vid_rules(priv);
}
static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
......@@ -323,7 +359,7 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
if (priv->fs.vlan.filter_disabled &&
!(priv->netdev->flags & IFF_PROMISC))
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+ mlx5e_del_any_vid_rules(priv);
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
......@@ -503,8 +539,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
if (enable_promisc) {
mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
if (!priv->fs.vlan.filter_disabled)
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
- 0);
+ mlx5e_add_any_vid_rules(priv);
}
if (enable_allmulti)
mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
......@@ -519,8 +554,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
if (disable_promisc) {
if (!priv->fs.vlan.filter_disabled)
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
- 0);
+ mlx5e_del_any_vid_rules(priv);
mlx5e_del_l2_flow_rule(priv, &ea->promisc);
}
......@@ -976,11 +1010,13 @@ static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
return err;
}
- #define MLX5E_NUM_VLAN_GROUPS 2
+ #define MLX5E_NUM_VLAN_GROUPS 3
#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE BIT(1)
+ #define MLX5E_VLAN_GROUP2_SIZE BIT(0)
#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
- MLX5E_VLAN_GROUP1_SIZE)
+ MLX5E_VLAN_GROUP1_SIZE +\
+ MLX5E_VLAN_GROUP2_SIZE)
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
int inlen)
......@@ -991,7 +1027,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
memset(in, 0, inlen);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_VLAN_GROUP0_SIZE;
......@@ -1003,7 +1039,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
memset(in, 0, inlen);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_VLAN_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
......@@ -1012,6 +1048,17 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
goto err_destroy_groups;
ft->num_groups++;
memset(in, 0, inlen);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_VLAN_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups;
ft->num_groups++;
return 0;
err_destroy_groups:
......
......@@ -237,9 +237,9 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
if ((fs->flow_type & FLOW_EXT) &&
(fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
- vlan_tag, 1);
+ cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
- vlan_tag, 1);
+ cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
first_vid, 0xfff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
......
......@@ -268,6 +268,12 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
out = pstats->phy_statistical_counters;
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
out = pstats->per_prio_counters[prio];
......@@ -291,11 +297,34 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
&qcnt->rx_out_of_buffer);
}
static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
{
struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
struct mlx5_core_dev *mdev = priv->mdev;
int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
void *out;
u32 *in;
if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
return;
in = mlx5_vzalloc(sz);
if (!in)
return;
out = pcie_stats->pcie_perf_counters;
MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
kvfree(in);
}
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
- mlx5e_update_q_counter(priv);
- mlx5e_update_vport_counters(priv);
+ mlx5e_update_pcie_counters(priv);
mlx5e_update_pport_counters(priv);
+ mlx5e_update_vport_counters(priv);
+ mlx5e_update_q_counter(priv);
mlx5e_update_sw_counters(priv);
}
......@@ -317,6 +346,8 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
enum mlx5_dev_event event, unsigned long param)
{
struct mlx5e_priv *priv = vpriv;
struct ptp_clock_event ptp_event;
struct mlx5_eqe *eqe = NULL;
if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
return;
......@@ -326,7 +357,15 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
case MLX5_DEV_EVENT_PORT_DOWN:
queue_work(priv->wq, &priv->update_carrier_work);
break;
case MLX5_DEV_EVENT_PPS:
eqe = (struct mlx5_eqe *)param;
ptp_event.type = PTP_CLOCK_EXTTS;
ptp_event.index = eqe->data.pps.pin;
ptp_event.timestamp =
timecounter_cyc2time(&priv->tstamp.clock,
be64_to_cpu(eqe->data.pps.time_stamp));
mlx5e_pps_event_handler(vpriv, &ptp_event);
break;
default:
break;
}
......
......@@ -39,7 +39,7 @@
#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
(*(u32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
- be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
+ be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
......@@ -201,6 +201,12 @@ static const struct counter_desc vport_stats_desc[] = {
#define PPORT_2819_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_PHY_STATISTICAL_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PER_PRIO_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_per_prio_grp_data_layout.c##_high)
......@@ -215,6 +221,7 @@ struct mlx5e_pport_stats {
__be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
};
static const struct counter_desc pport_802_3_stats_desc[] = {
......@@ -260,6 +267,11 @@ static const struct counter_desc pport_2819_stats_desc[] = {
{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
{ "rx_symbol_errors_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
......@@ -276,6 +288,21 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};
#define PCIE_PERF_OFF(c) \
MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
#define PCIE_PERF_GET(pcie_stats, c) \
MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
counter_set.pcie_perf_cntrs_grp_data_layout.c)
struct mlx5e_pcie_stats {
__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
};
static const struct counter_desc pcie_perf_stats_desc[] = {
{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};
struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
......@@ -360,15 +387,23 @@ static const struct counter_desc sq_stats_desc[] = {
#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \
(ARRAY_SIZE(pport_phy_statistical_stats_desc) * \
MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
#define NUM_PCIE_PERF_COUNTERS(priv) \
(ARRAY_SIZE(pcie_perf_stats_desc) * \
MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
- #define NUM_PPORT_COUNTERS (NUM_PPORT_802_3_COUNTERS + \
+ #define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_802_3_COUNTERS + \
NUM_PPORT_2863_COUNTERS + \
NUM_PPORT_2819_COUNTERS + \
+ NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \
NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
NUM_PPORT_PRIO)
#define NUM_PCIE_COUNTERS(priv) NUM_PCIE_PERF_COUNTERS(priv)
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
......@@ -378,6 +413,7 @@ struct mlx5e_stats {
struct mlx5e_vport_stats vport;
struct mlx5e_pport_stats pport;
struct rtnl_link_stats64 vf_vport;
struct mlx5e_pcie_stats pcie;
};
static const struct counter_desc mlx5e_pme_status_desc[] = {
......
......@@ -460,8 +460,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
FLOW_DISSECTOR_KEY_VLAN,
f->mask);
if (mask->vlan_id || mask->vlan_priority) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
......
......@@ -154,6 +154,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_PAGE_REQUEST";
case MLX5_EVENT_TYPE_PAGE_FAULT:
return "MLX5_EVENT_TYPE_PAGE_FAULT";
case MLX5_EVENT_TYPE_PPS_EVENT:
return "MLX5_EVENT_TYPE_PPS_EVENT";
default:
return "Unrecognized event";
}
......@@ -470,6 +472,10 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
mlx5_port_module_event(dev, eqe);
break;
case MLX5_EVENT_TYPE_PPS_EVENT:
if (dev->event)
dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe);
break;
default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
eqe->type, eq->eqn);
......@@ -684,6 +690,9 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
else
mlx5_core_dbg(dev, "port_module_event is not set\n");
if (MLX5_CAP_GEN(dev, pps))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
......
......@@ -979,7 +979,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
......@@ -1098,7 +1098,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
......@@ -1115,7 +1115,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
......@@ -1254,7 +1254,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
}
if (vport->info.vlan || vport->info.qos)
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
if (vport->info.spoofchk) {
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
......@@ -1335,8 +1335,8 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
}
/* Allowed vlan rule */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
......
......@@ -1665,7 +1665,7 @@ static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
- ((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) + \
+ ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \
offset / 32)) >> \
(32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
......
......@@ -91,6 +91,20 @@ int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
}
EXPORT_SYMBOL(mlx5_core_query_vendor_id);
static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev)
{
return mlx5_query_pcam_reg(dev, dev->caps.pcam,
MLX5_PCAM_FEATURE_ENHANCED_FEATURES,
MLX5_PCAM_REGS_5000_TO_507F);
}
static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
{
return mlx5_query_mcam_reg(dev, dev->caps.mcam,
MLX5_MCAM_FEATURE_ENHANCED_FEATURES,
MLX5_MCAM_REGS_FIRST_128);
}
int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
int err;
......@@ -154,6 +168,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
if (MLX5_CAP_GEN(dev, pcam_reg))
mlx5_get_pcam_reg(dev);
if (MLX5_CAP_GEN(dev, mcam_reg))
mlx5_get_mcam_reg(dev);
return 0;
}
......
......@@ -231,21 +231,6 @@ static const char *hsynd_str(u8 synd)
}
}
- static u16 get_maj(u32 fw)
- {
- return fw >> 28;
- }
- static u16 get_min(u32 fw)
- {
- return fw >> 16 & 0xfff;
- }
- static u16 get_sub(u32 fw)
- {
- return fw & 0xffff;
- }
static void print_health_info(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
......@@ -263,13 +248,14 @@ static void print_health_info(struct mlx5_core_dev *dev)
dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
- fw = ioread32be(&h->fw_ver);
- sprintf(fw_str, "%d.%d.%d", get_maj(fw), get_min(fw), get_sub(fw));
+ sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str);
dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index));
dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
+ fw = ioread32be(&h->fw_ver);
+ dev_err(&dev->pdev->dev, "raw fw_ver 0x%08x\n", fw);
}
static unsigned long get_next_poll_jiffies(void)
......
......@@ -418,11 +418,11 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
switch (cap_mode) {
case HCA_CAP_OPMOD_GET_MAX:
- memcpy(dev->hca_caps_max[cap_type], hca_caps,
+ memcpy(dev->caps.hca_max[cap_type], hca_caps,
MLX5_UN_SZ_BYTES(hca_cap_union));
break;
case HCA_CAP_OPMOD_GET_CUR:
- memcpy(dev->hca_caps_cur[cap_type], hca_caps,
+ memcpy(dev->caps.hca_cur[cap_type], hca_caps,
MLX5_UN_SZ_BYTES(hca_cap_union));
break;
default:
......@@ -513,7 +513,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
capability);
- memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
+ memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL],
MLX5_ST_SZ_BYTES(cmd_hca_cap));
mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
......@@ -1340,9 +1340,7 @@ static int init_one(struct pci_dev *pdev,
goto clean_health;
}
- err = request_module_nowait(MLX5_IB_MOD);
- if (err)
- pr_info("failed request module on %s\n", MLX5_IB_MOD);
+ request_module_nowait(MLX5_IB_MOD);
err = devlink_register(devlink, &pdev->dev);
if (err)
......
......@@ -113,6 +113,11 @@ u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
void mlx5_cq_tasklet_cb(unsigned long data);
int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
u8 access_reg_group);
int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
u8 access_reg_group);
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove(struct mlx5_core_dev *dev);
......@@ -138,6 +143,11 @@ void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
void mlx5e_init(void);
void mlx5e_cleanup(void);
......
......@@ -74,6 +74,30 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
}
EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
u8 access_reg_group)
{
u32 in[MLX5_ST_SZ_DW(pcam_reg)] = {0};
int sz = MLX5_ST_SZ_BYTES(pcam_reg);
MLX5_SET(pcam_reg, in, feature_group, feature_group);
MLX5_SET(pcam_reg, in, access_reg_group, access_reg_group);
return mlx5_core_access_reg(dev, in, sz, pcam, sz, MLX5_REG_PCAM, 0, 0);
}
int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcam, u8 feature_group,
u8 access_reg_group)
{
u32 in[MLX5_ST_SZ_DW(mcam_reg)] = {0};
int sz = MLX5_ST_SZ_BYTES(mcam_reg);
MLX5_SET(mcam_reg, in, feature_group, feature_group);
MLX5_SET(mcam_reg, in, access_reg_group, access_reg_group);
return mlx5_core_access_reg(dev, in, sz, mcam, sz, MLX5_REG_MCAM, 0, 0);
}
struct mlx5_reg_pcap {
u8 rsvd0;
u8 port_num;
......@@ -866,3 +890,51 @@ void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
module_num, mlx5_pme_status[module_status - 1],
mlx5_pme_error[error_type]);
}
int mlx5_query_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size)
{
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
mtpps_size, MLX5_REG_MTPPS, 0, 0);
}
int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size)
{
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
return mlx5_core_access_reg(mdev, mtpps, mtpps_size, out,
sizeof(out), MLX5_REG_MTPPS, 0, 1);
}
int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode)
{
u32 out[MLX5_ST_SZ_DW(mtppse_reg)] = {0};
u32 in[MLX5_ST_SZ_DW(mtppse_reg)] = {0};
int err = 0;
MLX5_SET(mtppse_reg, in, pin, pin);
err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
sizeof(out), MLX5_REG_MTPPSE, 0, 0);
if (err)
return err;
*arm = MLX5_GET(mtppse_reg, in, event_arm);
*mode = MLX5_GET(mtppse_reg, in, event_generation_mode);
return err;
}
int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode)
{
u32 out[MLX5_ST_SZ_DW(mtppse_reg)] = {0};
u32 in[MLX5_ST_SZ_DW(mtppse_reg)] = {0};
MLX5_SET(mtppse_reg, in, pin, pin);
MLX5_SET(mtppse_reg, in, event_arm, arm);
MLX5_SET(mtppse_reg, in, event_generation_mode, mode);
return mlx5_core_access_reg(mdev, in, sizeof(in), out,
sizeof(out), MLX5_REG_MTPPSE, 0, 1);
}
......@@ -289,6 +289,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
MLX5_EVENT_TYPE_PPS_EVENT = 0x25,
MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
MLX5_EVENT_TYPE_STALL_EVENT = 0x1b,
......@@ -569,6 +570,22 @@ struct mlx5_eqe_port_module {
u8 error_type;
} __packed;
struct mlx5_eqe_pps {
u8 rsvd0[3];
u8 pin;
u8 rsvd1[4];
union {
struct {
__be32 time_sec;
__be32 time_nsec;
};
struct {
__be64 time_stamp;
};
};
u8 rsvd2[12];
} __packed;
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
......@@ -583,6 +600,7 @@ union ev_data {
struct mlx5_eqe_page_fault page_fault;
struct mlx5_eqe_vport_change vport_change;
struct mlx5_eqe_port_module port_module;
struct mlx5_eqe_pps pps;
} __packed;
struct mlx5_eqe {
......@@ -952,38 +970,54 @@ enum mlx5_cap_type {
MLX5_CAP_NUM
};
enum mlx5_pcam_reg_groups {
MLX5_PCAM_REGS_5000_TO_507F = 0x0,
};
enum mlx5_pcam_feature_groups {
MLX5_PCAM_FEATURE_ENHANCED_FEATURES = 0x0,
};
enum mlx5_mcam_reg_groups {
MLX5_MCAM_REGS_FIRST_128 = 0x0,
};
enum mlx5_mcam_feature_groups {
MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0,
};
/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
#define MLX5_CAP_GEN_MAX(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
#define MLX5_CAP_ETH(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+ mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
#define MLX5_CAP_ETH_MAX(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+ mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
#define MLX5_CAP_ROCE(mdev, cap) \
- MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)
#define MLX5_CAP_ROCE_MAX(mdev, cap) \
- MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)
#define MLX5_CAP_ATOMIC(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)
#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)
#define MLX5_CAP_FLOWTABLE(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)
#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
......@@ -1005,11 +1039,11 @@ enum mlx5_cap_type {
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
- mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
- mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
......@@ -1031,21 +1065,27 @@ enum mlx5_cap_type {
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
- mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
+ mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)
#define MLX5_CAP_ESW_MAX(mdev, cap) \
MLX5_GET(e_switch_cap, \
- mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)
+ mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)
#define MLX5_CAP_ODP(mdev, cap)\
- MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
+ MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)
#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
MLX5_GET(vector_calc_cap, \
- mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap)
+ mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)
#define MLX5_CAP_QOS(mdev, cap)\
- MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap)
+ MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
enum {
MLX5_CMD_STAT_OK = 0x0,
......@@ -1075,9 +1115,14 @@ enum {
MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
};
enum {
MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
};
static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
......
......@@ -121,10 +121,15 @@ enum {
MLX5_REG_PVLC = 0x500f,
MLX5_REG_PCMR = 0x5041,
MLX5_REG_PMLP = 0x5002,
MLX5_REG_PCAM = 0x507f,
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MLCR = 0x902b,
MLX5_REG_MPCNT = 0x9051,
MLX5_REG_MTPPS = 0x9053,
MLX5_REG_MTPPSE = 0x9054,
MLX5_REG_MCAM = 0x907f,
};
enum mlx5_dcbx_oper_mode {
......@@ -172,6 +177,7 @@ enum mlx5_dev_event {
MLX5_DEV_EVENT_PKEY_CHANGE,
MLX5_DEV_EVENT_GUID_CHANGE,
MLX5_DEV_EVENT_CLIENT_REREG,
MLX5_DEV_EVENT_PPS,
};
enum mlx5_port_status {
......@@ -732,8 +738,12 @@ struct mlx5_core_dev {
char board_id[MLX5_BOARD_ID_LEN];
struct mlx5_cmd cmd;
struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
- u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
- u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ struct {
+ u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
+ u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+ } caps;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
enum mlx5_device_state state;
......
......@@ -365,8 +365,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 ip_protocol[0x8];
u8 ip_dscp[0x6];
u8 ip_ecn[0x2];
- u8 vlan_tag[0x1];
- u8 reserved_at_91[0x1];
+ u8 cvlan_tag[0x1];
+ u8 svlan_tag[0x1];
u8 frag[0x1];
u8 reserved_at_93[0x4];
u8 tcp_flags[0x9];
......@@ -398,9 +398,11 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 inner_second_cfi[0x1];
u8 inner_second_vid[0xc];
- u8 outer_second_vlan_tag[0x1];
- u8 inner_second_vlan_tag[0x1];
- u8 reserved_at_62[0xe];
+ u8 outer_second_cvlan_tag[0x1];
+ u8 inner_second_cvlan_tag[0x1];
+ u8 outer_second_svlan_tag[0x1];
+ u8 inner_second_svlan_tag[0x1];
+ u8 reserved_at_64[0xc];
u8 gre_protocol[0x10];
u8 gre_key_h[0x18];
......@@ -824,7 +826,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 nic_flow_table[0x1];
u8 eswitch_flow_table[0x1];
u8 early_vf_enable[0x1];
- u8 reserved_at_1a9[0x2];
+ u8 mcam_reg[0x1];
+ u8 pcam_reg[0x1];
u8 local_ca_ack_delay[0x5];
u8 port_module_event[0x1];
u8 reserved_at_1b1[0x1];
......@@ -835,7 +838,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 port_type[0x2];
u8 num_ports[0x8];
- u8 reserved_at_1c0[0x3];
+ u8 reserved_at_1c0[0x1];
+ u8 pps[0x1];
+ u8 pps_modify[0x1];
u8 log_max_msg[0x5];
u8 reserved_at_1c8[0x4];
u8 max_tc[0x4];
......@@ -1379,6 +1384,42 @@ struct mlx5_ifc_phys_layer_cntrs_bits {
u8 reserved_at_640[0x180];
};
struct mlx5_ifc_phys_layer_statistical_cntrs_bits {
u8 time_since_last_clear_high[0x20];
u8 time_since_last_clear_low[0x20];
u8 phy_received_bits_high[0x20];
u8 phy_received_bits_low[0x20];
u8 phy_symbol_errors_high[0x20];
u8 phy_symbol_errors_low[0x20];
u8 phy_corrected_bits_high[0x20];
u8 phy_corrected_bits_low[0x20];
u8 phy_corrected_bits_lane0_high[0x20];
u8 phy_corrected_bits_lane0_low[0x20];
u8 phy_corrected_bits_lane1_high[0x20];
u8 phy_corrected_bits_lane1_low[0x20];
u8 phy_corrected_bits_lane2_high[0x20];
u8 phy_corrected_bits_lane2_low[0x20];
u8 phy_corrected_bits_lane3_high[0x20];
u8 phy_corrected_bits_lane3_low[0x20];
u8 reserved_at_200[0x5c0];
};
struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
u8 symbol_error_counter[0x10];
......@@ -1761,6 +1802,30 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
u8 reserved_at_4c0[0x300];
};
struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
u8 life_time_counter_high[0x20];
u8 life_time_counter_low[0x20];
u8 rx_errors[0x20];
u8 tx_errors[0x20];
u8 l0_to_recovery_eieos[0x20];
u8 l0_to_recovery_ts[0x20];
u8 l0_to_recovery_framing[0x20];
u8 l0_to_recovery_retrain[0x20];
u8 crc_error_dllp[0x20];
u8 crc_error_tlp[0x20];
u8 reserved_at_140[0x680];
};
struct mlx5_ifc_cmd_inter_comp_event_bits {
u8 command_completion_vector[0x20];
......@@ -2923,6 +2988,12 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs;
u8 reserved_at_0[0x7c0];
};
union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout;
u8 reserved_at_0[0x7c0];
};
......@@ -7248,6 +7319,18 @@ struct mlx5_ifc_ppcnt_reg_bits {
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
};
struct mlx5_ifc_mpcnt_reg_bits {
u8 reserved_at_0[0x8];
u8 pcie_index[0x8];
u8 reserved_at_10[0xa];
u8 grp[0x6];
u8 clr[0x1];
u8 reserved_at_21[0x1f];
union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set;
};
struct mlx5_ifc_ppad_reg_bits {
u8 reserved_at_0[0x3];
u8 single_mac[0x1];
......@@ -7477,6 +7560,63 @@ struct mlx5_ifc_peir_reg_bits {
u8 error_type[0x8];
};
struct mlx5_ifc_pcam_enhanced_features_bits {
u8 reserved_at_0[0x7e];
u8 ppcnt_discard_group[0x1];
u8 ppcnt_statistical_group[0x1];
};
struct mlx5_ifc_pcam_reg_bits {
u8 reserved_at_0[0x8];
u8 feature_group[0x8];
u8 reserved_at_10[0x8];
u8 access_reg_group[0x8];
u8 reserved_at_20[0x20];
union {
u8 reserved_at_0[0x80];
} port_access_reg_cap_mask;
u8 reserved_at_c0[0x80];
union {
struct mlx5_ifc_pcam_enhanced_features_bits enhanced_features;
u8 reserved_at_0[0x80];
} feature_cap_mask;
u8 reserved_at_1c0[0xc0];
};
struct mlx5_ifc_mcam_enhanced_features_bits {
u8 reserved_at_0[0x7f];
u8 pcie_performance_group[0x1];
};
struct mlx5_ifc_mcam_reg_bits {
u8 reserved_at_0[0x8];
u8 feature_group[0x8];
u8 reserved_at_10[0x8];
u8 access_reg_group[0x8];
u8 reserved_at_20[0x20];
union {
u8 reserved_at_0[0x80];
} mng_access_reg_cap_mask;
u8 reserved_at_c0[0x80];
union {
struct mlx5_ifc_mcam_enhanced_features_bits enhanced_features;
u8 reserved_at_0[0x80];
} mng_feature_cap_mask;
u8 reserved_at_1c0[0x80];
};
struct mlx5_ifc_pcap_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
......@@ -7821,6 +7961,60 @@ struct mlx5_ifc_initial_seg_bits {
u8 reserved_at_80a0[0x17fc0];
};
struct mlx5_ifc_mtpps_reg_bits {
u8 reserved_at_0[0xc];
u8 cap_number_of_pps_pins[0x4];
u8 reserved_at_10[0x4];
u8 cap_max_num_of_pps_in_pins[0x4];
u8 reserved_at_18[0x4];
u8 cap_max_num_of_pps_out_pins[0x4];
u8 reserved_at_20[0x24];
u8 cap_pin_3_mode[0x4];
u8 reserved_at_48[0x4];
u8 cap_pin_2_mode[0x4];
u8 reserved_at_50[0x4];
u8 cap_pin_1_mode[0x4];
u8 reserved_at_58[0x4];
u8 cap_pin_0_mode[0x4];
u8 reserved_at_60[0x4];
u8 cap_pin_7_mode[0x4];
u8 reserved_at_68[0x4];
u8 cap_pin_6_mode[0x4];
u8 reserved_at_70[0x4];
u8 cap_pin_5_mode[0x4];
u8 reserved_at_78[0x4];
u8 cap_pin_4_mode[0x4];
u8 reserved_at_80[0x80];
u8 enable[0x1];
u8 reserved_at_101[0xb];
u8 pattern[0x4];
u8 reserved_at_110[0x4];
u8 pin_mode[0x4];
u8 pin[0x8];
u8 reserved_at_120[0x20];
u8 time_stamp[0x40];
u8 out_pulse_duration[0x10];
u8 out_periodic_adjustment[0x10];
u8 reserved_at_1a0[0x60];
};
struct mlx5_ifc_mtppse_reg_bits {
u8 reserved_at_0[0x18];
u8 pin[0x8];
u8 event_arm[0x1];
u8 reserved_at_21[0x1b];
u8 event_generation_mode[0x4];
u8 reserved_at_40[0x40];
};
union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
......@@ -7853,6 +8047,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
struct mlx5_ifc_ppad_reg_bits ppad_reg;
struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
struct mlx5_ifc_pplm_reg_bits pplm_reg;
struct mlx5_ifc_pplr_reg_bits pplr_reg;
struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
......@@ -7865,6 +8060,8 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
struct mlx5_ifc_slrg_reg_bits slrg_reg;
struct mlx5_ifc_sltp_reg_bits sltp_reg;
struct mlx5_ifc_mtpps_reg_bits mtpps_reg;
struct mlx5_ifc_mtppse_reg_bits mtppse_reg;
u8 reserved_at_0[0x60e0];
};
......