Commit a8d5dd19 authored by David S. Miller's avatar David S. Miller

Merge tag 'mlx5-updates-2020-12-01' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

mlx5-updates-2020-12-01

mlx5e port TX timestamping support and MISC updates

1) Add support for port TX timestamping, for better PTP accuracy.

Currently in mlx5 HW, TX timestamping is done at CQE (TX completion)
generation, which is much earlier than when the packet actually goes out to
the wire. In this series, Eran implements the option to do timestamping on
the port using a special SQ (Send Queue). Such a Send Queue will generate 2
CQEs (TX completions): the original one, and a new one when the packet
leaves the port. Due to the nature of this special handling, the mechanism
is opt-in only and is off by default, to avoid any performance degradation
on normal traffic flows.

This patchset improves TX Hardware timestamping offset to be less than
40ns at a 100Gbps line rate, compared to 600ns before.

With that, our HW becomes compliant with G.8273.2 class C, allowing Linux
systems to be deployed in the 5G telco edge, where this standard is a must.

2) Misc updates and trivial improvements.
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents c22c0d55 2f6b379c
...@@ -25,7 +25,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ ...@@ -25,7 +25,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \ en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \ en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o
# #
# Netdev extra # Netdev extra
......
...@@ -227,6 +227,7 @@ enum mlx5e_priv_flag { ...@@ -227,6 +227,7 @@ enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
MLX5E_PFLAG_XDP_TX_MPWQE, MLX5E_PFLAG_XDP_TX_MPWQE,
MLX5E_PFLAG_SKB_TX_MPWQE, MLX5E_PFLAG_SKB_TX_MPWQE,
MLX5E_PFLAG_TX_PORT_TS,
MLX5E_NUM_PFLAGS, /* Keep last */ MLX5E_NUM_PFLAGS, /* Keep last */
}; };
...@@ -282,10 +283,12 @@ struct mlx5e_cq { ...@@ -282,10 +283,12 @@ struct mlx5e_cq {
u16 event_ctr; u16 event_ctr;
struct napi_struct *napi; struct napi_struct *napi;
struct mlx5_core_cq mcq; struct mlx5_core_cq mcq;
struct mlx5e_channel *channel; struct mlx5e_ch_stats *ch_stats;
/* control */ /* control */
struct net_device *netdev;
struct mlx5_core_dev *mdev; struct mlx5_core_dev *mdev;
struct mlx5e_priv *priv;
struct mlx5_wq_ctrl wq_ctrl; struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
...@@ -329,6 +332,15 @@ struct mlx5e_tx_mpwqe { ...@@ -329,6 +332,15 @@ struct mlx5e_tx_mpwqe {
u8 inline_on; u8 inline_on;
}; };
struct mlx5e_skb_fifo {
struct sk_buff **fifo;
u16 *pc;
u16 *cc;
u16 mask;
};
struct mlx5e_ptpsq;
struct mlx5e_txqsq { struct mlx5e_txqsq {
/* data path */ /* data path */
...@@ -349,11 +361,10 @@ struct mlx5e_txqsq { ...@@ -349,11 +361,10 @@ struct mlx5e_txqsq {
/* read only */ /* read only */
struct mlx5_wq_cyc wq; struct mlx5_wq_cyc wq;
u32 dma_fifo_mask; u32 dma_fifo_mask;
u16 skb_fifo_mask;
struct mlx5e_sq_stats *stats; struct mlx5e_sq_stats *stats;
struct { struct {
struct mlx5e_sq_dma *dma_fifo; struct mlx5e_sq_dma *dma_fifo;
struct sk_buff **skb_fifo; struct mlx5e_skb_fifo skb_fifo;
struct mlx5e_tx_wqe_info *wqe_info; struct mlx5e_tx_wqe_info *wqe_info;
} db; } db;
void __iomem *uar_map; void __iomem *uar_map;
...@@ -367,14 +378,17 @@ struct mlx5e_txqsq { ...@@ -367,14 +378,17 @@ struct mlx5e_txqsq {
unsigned int hw_mtu; unsigned int hw_mtu;
struct hwtstamp_config *tstamp; struct hwtstamp_config *tstamp;
struct mlx5_clock *clock; struct mlx5_clock *clock;
struct net_device *netdev;
struct mlx5_core_dev *mdev;
struct mlx5e_priv *priv;
/* control path */ /* control path */
struct mlx5_wq_ctrl wq_ctrl; struct mlx5_wq_ctrl wq_ctrl;
struct mlx5e_channel *channel;
int ch_ix; int ch_ix;
int txq_ix; int txq_ix;
u32 rate_limit; u32 rate_limit;
struct work_struct recover_work; struct work_struct recover_work;
struct mlx5e_ptpsq *ptpsq;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
struct mlx5e_dma_info { struct mlx5e_dma_info {
...@@ -593,7 +607,6 @@ struct mlx5e_rq { ...@@ -593,7 +607,6 @@ struct mlx5e_rq {
u8 map_dir; /* dma map direction */ u8 map_dir; /* dma map direction */
} buff; } buff;
struct mlx5e_channel *channel;
struct device *pdev; struct device *pdev;
struct net_device *netdev; struct net_device *netdev;
struct mlx5e_rq_stats *stats; struct mlx5e_rq_stats *stats;
...@@ -602,6 +615,8 @@ struct mlx5e_rq { ...@@ -602,6 +615,8 @@ struct mlx5e_rq {
struct mlx5e_page_cache page_cache; struct mlx5e_page_cache page_cache;
struct hwtstamp_config *tstamp; struct hwtstamp_config *tstamp;
struct mlx5_clock *clock; struct mlx5_clock *clock;
struct mlx5e_icosq *icosq;
struct mlx5e_priv *priv;
mlx5e_fp_handle_rx_cqe handle_rx_cqe; mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_post_rx_wqes post_wqes; mlx5e_fp_post_rx_wqes post_wqes;
...@@ -681,8 +696,11 @@ struct mlx5e_channel { ...@@ -681,8 +696,11 @@ struct mlx5e_channel {
int cpu; int cpu;
}; };
struct mlx5e_port_ptp;
struct mlx5e_channels { struct mlx5e_channels {
struct mlx5e_channel **c; struct mlx5e_channel **c;
struct mlx5e_port_ptp *port_ptp;
unsigned int num; unsigned int num;
struct mlx5e_params params; struct mlx5e_params params;
}; };
...@@ -697,6 +715,12 @@ struct mlx5e_channel_stats { ...@@ -697,6 +715,12 @@ struct mlx5e_channel_stats {
struct mlx5e_xdpsq_stats xsksq; struct mlx5e_xdpsq_stats xsksq;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
struct mlx5e_port_ptp_stats {
struct mlx5e_ch_stats ch;
struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC];
} ____cacheline_aligned_in_smp;
enum { enum {
MLX5E_STATE_OPENED, MLX5E_STATE_OPENED,
MLX5E_STATE_DESTROYING, MLX5E_STATE_DESTROYING,
...@@ -766,8 +790,10 @@ struct mlx5e_scratchpad { ...@@ -766,8 +790,10 @@ struct mlx5e_scratchpad {
struct mlx5e_priv { struct mlx5e_priv {
/* priv data path fields - start */ /* priv data path fields - start */
struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC]; /* +1 for port ptp ts */
struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC];
int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC]; int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB #ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx_dp dcbx_dp; struct mlx5e_dcbx_dp dcbx_dp;
#endif #endif
...@@ -802,12 +828,15 @@ struct mlx5e_priv { ...@@ -802,12 +828,15 @@ struct mlx5e_priv {
struct net_device *netdev; struct net_device *netdev;
struct mlx5e_stats stats; struct mlx5e_stats stats;
struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS]; struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
struct mlx5e_port_ptp_stats port_ptp_stats;
u16 max_nch; u16 max_nch;
u8 max_opened_tc; u8 max_opened_tc;
bool port_ptp_opened;
struct hwtstamp_config tstamp; struct hwtstamp_config tstamp;
u16 q_counter; u16 q_counter;
u16 drop_rq_q_counter; u16 drop_rq_q_counter;
struct notifier_block events_nb; struct notifier_block events_nb;
int num_tc_x_num_ch;
struct udp_tunnel_nic_info nic_info; struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB #ifdef CONFIG_MLX5_CORE_EN_DCB
...@@ -923,9 +952,17 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, ...@@ -923,9 +952,17 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_xdpsq *sq, bool is_redirect); struct mlx5e_xdpsq *sq, bool is_redirect);
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq); void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
struct mlx5e_create_cq_param {
struct napi_struct *napi;
struct mlx5e_ch_stats *ch_stats;
int node;
int ix;
};
struct mlx5e_cq_param; struct mlx5e_cq_param;
int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder, int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
struct mlx5e_cq_param *param, struct mlx5e_cq *cq); struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq); void mlx5e_close_cq(struct mlx5e_cq *cq);
int mlx5e_open_locked(struct net_device *netdev); int mlx5e_open_locked(struct net_device *netdev);
...@@ -974,7 +1011,17 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq); ...@@ -974,7 +1011,17 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
struct mlx5e_modify_sq_param *p); struct mlx5e_modify_sq_param *p);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq); void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_tx_disable_queue(struct netdev_queue *txq); void mlx5e_tx_disable_queue(struct netdev_queue *txq);
int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa);
void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq);
struct mlx5e_create_sq_param;
int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param,
struct mlx5e_create_sq_param *csp,
u32 *sqn);
void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev) static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{ {
......
...@@ -287,8 +287,7 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); ...@@ -287,8 +287,7 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv); int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type); u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt);
bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev);
#endif /* __MLX5E_FLOW_STEER_H__ */ #endif /* __MLX5E_FLOW_STEER_H__ */
...@@ -37,13 +37,12 @@ int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg) ...@@ -37,13 +37,12 @@ int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg)
int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
{ {
struct mlx5e_priv *priv = cq->channel->priv;
u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {}; u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {};
u8 hw_status; u8 hw_status;
void *cqc; void *cqc;
int err; int err;
err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out); err = mlx5_core_query_cq(cq->mdev, &cq->mcq, out);
if (err) if (err)
return err; return err;
...@@ -158,10 +157,8 @@ void mlx5e_health_channels_update(struct mlx5e_priv *priv) ...@@ -158,10 +157,8 @@ void mlx5e_health_channels_update(struct mlx5e_priv *priv)
DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
} }
int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn) int mlx5e_health_sq_to_ready(struct mlx5_core_dev *mdev, struct net_device *dev, u32 sqn)
{ {
struct mlx5_core_dev *mdev = channel->mdev;
struct net_device *dev = channel->netdev;
struct mlx5e_modify_sq_param msp = {}; struct mlx5e_modify_sq_param msp = {};
int err; int err;
...@@ -206,21 +203,22 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv) ...@@ -206,21 +203,22 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv)
return err; return err;
} }
int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel) int mlx5e_health_channel_eq_recover(struct net_device *dev, struct mlx5_eq_comp *eq,
struct mlx5e_ch_stats *stats)
{ {
u32 eqe_count; u32 eqe_count;
netdev_err(channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
eq->core.eqn, eq->core.cons_index, eq->core.irqn); eq->core.eqn, eq->core.cons_index, eq->core.irqn);
eqe_count = mlx5_eq_poll_irq_disabled(eq); eqe_count = mlx5_eq_poll_irq_disabled(eq);
if (!eqe_count) if (!eqe_count)
return -EIO; return -EIO;
netdev_err(channel->netdev, "Recovered %d eqes on EQ 0x%x\n", netdev_err(dev, "Recovered %d eqes on EQ 0x%x\n",
eqe_count, eq->core.eqn); eqe_count, eq->core.eqn);
channel->stats->eq_rearm++; stats->eq_rearm++;
return 0; return 0;
} }
......
...@@ -7,8 +7,6 @@ ...@@ -7,8 +7,6 @@
#include "en.h" #include "en.h"
#include "diag/rsc_dump.h" #include "diag/rsc_dump.h"
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
static inline bool cqe_syndrome_needs_recover(u8 syndrome) static inline bool cqe_syndrome_needs_recover(u8 syndrome)
{ {
return syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR || return syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
...@@ -42,8 +40,9 @@ struct mlx5e_err_ctx { ...@@ -42,8 +40,9 @@ struct mlx5e_err_ctx {
void *ctx; void *ctx;
}; };
int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn); int mlx5e_health_sq_to_ready(struct mlx5_core_dev *mdev, struct net_device *dev, u32 sqn);
int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel); int mlx5e_health_channel_eq_recover(struct net_device *dev, struct mlx5_eq_comp *eq,
struct mlx5e_ch_stats *stats);
int mlx5e_health_recover_channels(struct mlx5e_priv *priv); int mlx5e_health_recover_channels(struct mlx5e_priv *priv);
int mlx5e_health_report(struct mlx5e_priv *priv, int mlx5e_health_report(struct mlx5e_priv *priv,
struct devlink_health_reporter *reporter, char *err_str, struct devlink_health_reporter *reporter, char *err_str,
......
...@@ -41,6 +41,15 @@ struct mlx5e_channel_param { ...@@ -41,6 +41,15 @@ struct mlx5e_channel_param {
struct mlx5e_sq_param async_icosq; struct mlx5e_sq_param async_icosq;
}; };
/* Parameters for creating a HW Send Queue object via mlx5e_create_sq_rdy(). */
struct mlx5e_create_sq_param {
	struct mlx5_wq_ctrl *wq_ctrl;	/* WQ buffer/doorbell backing the SQ */
	u32 cqn;			/* completion CQ number for this SQ */
	u32 ts_cqe_to_dest_cqn;		/* CQ receiving port-timestamp CQEs (PTP SQs only; 0 otherwise) */
	u32 tisn;			/* transport interface send number */
	u8 tis_lst_sz;			/* number of TIS entries (0 or 1) */
	u8 min_inline_mode;		/* minimal headers to inline into the WQE */
};
static inline bool mlx5e_qid_get_ch_if_in_group(struct mlx5e_params *params, static inline bool mlx5e_qid_get_ch_if_in_group(struct mlx5e_params *params,
u16 qid, u16 qid,
enum mlx5e_rq_group group, enum mlx5e_rq_group group,
...@@ -102,6 +111,7 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, ...@@ -102,6 +111,7 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
/* Build queue parameters */ /* Build queue parameters */
void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c);
void mlx5e_build_rq_param(struct mlx5e_priv *priv, void mlx5e_build_rq_param(struct mlx5e_priv *priv,
struct mlx5e_params *params, struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk, struct mlx5e_xsk_param *xsk,
......
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies
#include "en/ptp.h"
#include "en/txrx.h"
#include "lib/clock.h"
/* Per-skb scratch state, stored in skb->cb, used to correlate the two TX
 * completions a PTP SQ generates: the regular CQE completion and the later
 * port (wire) completion. A zero value means that timestamp has not arrived.
 */
struct mlx5e_skb_cb_hwtstamp {
	ktime_t cqe_hwtstamp;	/* timestamp taken at CQE generation */
	ktime_t port_hwtstamp;	/* timestamp taken when the packet left the port */
};
/* Reset the per-skb timestamp state in skb->cb so that neither the CQE nor
 * the port timestamp appears to have arrived yet.
 */
void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb)
{
	struct mlx5e_skb_cb_hwtstamp *hwts = (struct mlx5e_skb_cb_hwtstamp *)skb->cb;

	memset(hwts, 0, sizeof(*hwts));
}
/* View skb->cb as the HW timestamp scratch area for this skb.
 * The BUILD_BUG_ON guarantees the scratch struct fits inside cb.
 */
static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb)
{
	void *cb = skb->cb;

	BUILD_BUG_ON(sizeof(struct mlx5e_skb_cb_hwtstamp) > sizeof(skb->cb));

	return cb;
}
static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
struct mlx5e_ptp_cq_stats *cq_stats)
{
struct skb_shared_hwtstamps hwts = {};
ktime_t diff;
diff = abs(mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp -
mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp);
/* Maximal allowed diff is 1 / 128 second */
if (diff > (NSEC_PER_SEC >> 7)) {
cq_stats->abort++;
cq_stats->abort_abs_diff_ns += diff;
return;
}
hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp;
skb_tstamp_tx(skb, &hwts);
}
/* Record one of the two timestamps (CQE or port) for this skb. Once both
 * have been seen, report the port timestamp to the stack and clear the
 * per-skb state, as the skb is about to be released.
 */
void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
				   ktime_t hwtstamp,
				   struct mlx5e_ptp_cq_stats *cq_stats)
{
	struct mlx5e_skb_cb_hwtstamp *hwts = mlx5e_skb_cb_get_hwts(skb);

	if (hwtstamp_type == MLX5E_SKB_CB_CQE_HWTSTAMP)
		hwts->cqe_hwtstamp = hwtstamp;
	else if (hwtstamp_type == MLX5E_SKB_CB_PORT_HWTSTAMP)
		hwts->port_hwtstamp = hwtstamp;

	/* Report only when both completions have arrived. */
	if (hwts->cqe_hwtstamp && hwts->port_hwtstamp) {
		mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats);
		memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
	}
}
/* Handle one port-timestamp CQE: pop the matching skb from the FIFO, feed
 * the port timestamp into the per-skb handler (unless the CQE reports an
 * error), then release the skb.
 */
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
				    struct mlx5_cqe64 *cqe,
				    int budget)
{
	struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		ptpsq->cq_stats->err_cqe++;
	} else {
		ktime_t hwtstamp = mlx5_timecounter_cyc2time(ptpsq->txqsq.clock,
							     get_cqe_ts(cqe));

		mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
					      hwtstamp, ptpsq->cq_stats);
		ptpsq->cq_stats->cqe++;
	}

	napi_consume_skb(skb, budget);
}
/* Poll the port-timestamp CQ of a PTP SQ from NAPI context.
 *
 * Returns true when the full budget was consumed (i.e. more work may be
 * pending), false when the CQ was drained or the SQ is disabled.
 */
static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
	struct mlx5_cqwq *cqwq = &cq->wq;
	struct mlx5_cqe64 *cqe;
	int work_done = 0;

	/* The SQ may be mid-teardown; skip polling while it is disabled. */
	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(cqwq);
	if (!cqe)
		return false;

	/* Pop before handling, so the CQE slot is released in order. */
	do {
		mlx5_cqwq_pop(cqwq);
		mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));

	mlx5_cqwq_update_db_record(cqwq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	return work_done == budget;
}
/* NAPI poll handler for the PTP channel: services the TX completion CQ and
 * the port-timestamp CQ of every TC, then re-arms both once idle.
 */
static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_port_ptp *c = container_of(napi, struct mlx5e_port_ptp,
						napi);
	struct mlx5e_ch_stats *ch_stats = c->stats;
	bool busy = false;
	int work_done = 0;
	int i;

	rcu_read_lock();

	ch_stats->poll++;

	for (i = 0; i < c->num_tc; i++) {
		busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget);
		busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget);
	}

	if (busy) {
		/* Still busy: report full budget so NAPI schedules us again. */
		work_done = budget;
		goto out;
	}

	if (unlikely(!napi_complete_done(napi, work_done)))
		goto out;

	ch_stats->arm++;

	/* Re-arm CQs only after napi_complete_done() succeeded, so a fresh
	 * interrupt cannot race with this poll iteration.
	 */
	for (i = 0; i < c->num_tc; i++) {
		mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq);
		mlx5e_cq_arm(&c->ptpsq[i].ts_cq);
	}

out:
	rcu_read_unlock();

	return work_done;
}
/* Allocate the software side of a PTP TX SQ: wire up its datapath fields
 * from the PTP channel, create the cyclic WQ, and allocate the SQ DB arrays.
 *
 * Returns 0 on success or a negative errno; on failure nothing remains
 * allocated.
 */
static int mlx5e_ptp_alloc_txqsq(struct mlx5e_port_ptp *c, int txq_ix,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param,
				 struct mlx5e_txqsq *sq, int tc,
				 struct mlx5e_ptpsq *ptpsq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;
	int node;

	/* Datapath fields inherited from the PTP channel / device. */
	sq->pdev      = c->pdev;
	sq->tstamp    = c->tstamp;
	sq->clock     = &mdev->clock;
	sq->mkey_be   = c->mkey_be;
	sq->netdev    = c->netdev;
	sq->priv      = c->priv;
	sq->mdev      = mdev;
	sq->ch_ix     = c->ix;
	sq->txq_ix    = txq_ix;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	/* PTP SQs use dedicated per-TC stats, not the per-channel ones. */
	sq->stats     = &c->priv->port_ptp_stats.sq[tc];
	sq->ptpsq     = ptpsq;
	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
	sq->stop_room = param->stop_room;

	/* Allocate WQ memory on the device's NUMA node. */
	node = dev_to_node(mlx5_core_dma_dev(mdev));

	param->wq.db_numa_node = node;
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db    = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, node);
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}
/* Destroy the HW SQ object identified by @sqn (thin wrapper for symmetry
 * with the PTP open/close helpers).
 */
static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}
static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
GFP_KERNEL, numa);
if (!ptpsq->skb_fifo.fifo)
return -ENOMEM;
ptpsq->skb_fifo.pc = &ptpsq->skb_fifo_pc;
ptpsq->skb_fifo.cc = &ptpsq->skb_fifo_cc;
ptpsq->skb_fifo.mask = wq_sz - 1;
return 0;
}
/* Release every skb still queued in the FIFO (consumer catches up to the
 * producer).
 */
static void mlx5e_ptp_drain_skb_fifo(struct mlx5e_skb_fifo *skb_fifo)
{
	while (*skb_fifo->pc != *skb_fifo->cc)
		dev_kfree_skb_any(mlx5e_skb_fifo_pop(skb_fifo));
}
/* Drain any skbs still awaiting a port timestamp, then free the FIFO array. */
static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo)
{
	mlx5e_ptp_drain_skb_fifo(skb_fifo);
	kvfree(skb_fifo->fifo);
}
/* Open one PTP TX SQ for traffic class @tc: allocate the SW SQ, create the
 * HW SQ (steering its timestamp CQEs to the dedicated ts_cq), and allocate
 * the skb FIFO used to match port-timestamp CQEs back to skbs.
 *
 * Returns 0 on success or a negative errno; on failure everything allocated
 * here is released.
 */
static int mlx5e_ptp_open_txqsq(struct mlx5e_port_ptp *c, u32 tisn,
				int txq_ix, struct mlx5e_ptp_params *cparams,
				int tc, struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_sq_param *sqp = &cparams->txq_sq_param;
	struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_ptp_alloc_txqsq(c, txq_ix, &cparams->params, sqp,
				    txqsq, tc, ptpsq);
	if (err)
		return err;

	csp.tisn = tisn;
	csp.tis_lst_sz = 1;
	csp.cqn = txqsq->cq.mcq.cqn;
	csp.wq_ctrl = &txqsq->wq_ctrl;
	csp.min_inline_mode = txqsq->min_inline_mode;
	/* Route the extra (port-timestamp) CQEs to the dedicated ts CQ. */
	csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;

	err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, &txqsq->sqn);
	if (err)
		goto err_free_txqsq;

	err = mlx5e_ptp_alloc_traffic_db(ptpsq,
					 dev_to_node(mlx5_core_dma_dev(c->mdev)));
	if (err)
		/* Fix: the HW SQ was already created above; it must be
		 * destroyed here, otherwise the SQ object leaks.
		 */
		goto err_destroy_sq;

	return 0;

err_destroy_sq:
	mlx5e_ptp_destroy_sq(c->mdev, txqsq->sqn);
err_free_txqsq:
	mlx5e_free_txqsq(txqsq);

	return err;
}
/* Tear down one PTP TX SQ. Order matters: free the skb FIFO first (drains
 * skbs waiting on port timestamps), cancel any pending recovery work, then
 * destroy the HW SQ before releasing in-flight descriptors and the SW SQ.
 */
static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct mlx5_core_dev *mdev = sq->mdev;

	mlx5e_ptp_free_traffic_db(&ptpsq->skb_fifo);
	cancel_work_sync(&sq->recover_work);
	mlx5e_ptp_destroy_sq(mdev, sq->sqn);
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}
/* Open one PTP TX SQ per traffic class. The PTP txq indices start right
 * after all regular channel txqs (num_tc * num_channels). On failure the
 * SQs opened so far are closed in reverse order.
 */
static int mlx5e_ptp_open_txqsqs(struct mlx5e_port_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	int first_txq_ix = params->num_tc * params->num_channels;
	int err;
	int tc;

	for (tc = 0; tc < params->num_tc; tc++) {
		err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc],
					   first_txq_ix + tc, cparams, tc,
					   &c->ptpsq[tc]);
		if (err)
			goto close_txqsq;
	}

	return 0;

close_txqsq:
	while (--tc >= 0)
		mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);

	return err;
}
/* Close all per-TC PTP TX SQs of the channel. */
static void mlx5e_ptp_close_txqsqs(struct mlx5e_port_ptp *c)
{
	int i;

	for (i = 0; i < c->num_tc; i++)
		mlx5e_ptp_close_txqsq(&c->ptpsq[i]);
}
/* Open, for every TC, both CQs a PTP SQ needs: the regular TX completion CQ
 * and the port-timestamp CQ. Moderation is disabled (zeroed ptp_moder) for
 * minimal latency. On failure, CQs opened so far are closed in reverse.
 */
static int mlx5e_ptp_open_cqs(struct mlx5e_port_ptp *c,
			      struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder ptp_moder = {};
	struct mlx5e_cq_param *cq_param;
	int err;
	int tc;

	ccp.node     = dev_to_node(mlx5_core_dma_dev(c->mdev));
	ccp.ch_stats = c->stats;
	ccp.napi     = &c->napi;
	ccp.ix       = c->ix;

	cq_param = &cparams->txq_sq_param.cqp;

	/* Phase 1: per-TC TX completion CQs. */
	for (tc = 0; tc < params->num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;

		err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_txqsq_cq;
	}

	/* Phase 2: per-TC port-timestamp CQs. */
	for (tc = 0; tc < params->num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
		struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];

		err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_ts_cq;

		ptpsq->cq_stats = &c->priv->port_ptp_stats.cq[tc];
	}

	return 0;

out_err_ts_cq:
	/* Unwind the ts CQs opened so far, ... */
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
	/* ...then reset tc so the loop below closes ALL phase-1 txqsq CQs. */
	tc = params->num_tc;
out_err_txqsq_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);

	return err;
}
/* Close all PTP CQs: first every per-TC timestamp CQ, then every per-TC TX
 * completion CQ (reverse of the open order's phases).
 */
static void mlx5e_ptp_close_cqs(struct mlx5e_port_ptp *c)
{
	int i;

	for (i = 0; i < c->num_tc; i++)
		mlx5e_close_cq(&c->ptpsq[i].ts_cq);

	for (i = 0; i < c->num_tc; i++)
		mlx5e_close_cq(&c->ptpsq[i].txqsq.cq);
}
/* Build SQ creation parameters for the PTP SQs: common SQ context plus the
 * SQ size, worst-case stop room, and the embedded TX CQ parameters.
 */
static void mlx5e_ptp_build_sq_param(struct mlx5e_priv *priv,
				     struct mlx5e_params *params,
				     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq;

	mlx5e_build_sq_param_common(priv, param);

	wq = MLX5_ADDR_OF(sqc, sqc, wq);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	/* Reserve room for the largest possible WQE so the queue never
	 * overflows mid-post.
	 */
	param->stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
	mlx5e_build_tx_cq_param(priv, params, &param->cqp);
}
/* Derive the PTP channel's parameter set from the regular channels' params
 * (@orig), then build the PTP SQ creation parameters from it.
 */
static void mlx5e_ptp_build_params(struct mlx5e_port_ptp *c,
				   struct mlx5e_ptp_params *cparams,
				   struct mlx5e_params *orig)
{
	struct mlx5e_params *params = &cparams->params;

	/* Inherit the generic settings the PTP channel must match. */
	params->num_tc             = orig->num_tc;
	params->num_channels       = orig->num_channels;
	params->sw_mtu             = orig->sw_mtu;
	params->hard_mtu           = orig->hard_mtu;
	params->tx_min_inline_mode = orig->tx_min_inline_mode;

	/* SQ */
	params->log_sq_size = orig->log_sq_size;

	mlx5e_ptp_build_sq_param(c->priv, params, &cparams->txq_sq_param);
}
/* Open all PTP channel queues. NAPI is enabled after the CQs exist but
 * before the SQs, so SQ completions can be serviced as soon as SQs open.
 */
static int mlx5e_ptp_open_queues(struct mlx5e_port_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	int err;

	err = mlx5e_ptp_open_cqs(c, cparams);
	if (err)
		return err;

	napi_enable(&c->napi);

	err = mlx5e_ptp_open_txqsqs(c, cparams);
	if (err)
		goto disable_napi;

	return 0;

disable_napi:
	napi_disable(&c->napi);
	mlx5e_ptp_close_cqs(c);

	return err;
}
/* Close all PTP channel queues, strictly reversing the open order:
 * SQs first, then NAPI, then the CQs.
 */
static void mlx5e_ptp_close_queues(struct mlx5e_port_ptp *c)
{
	mlx5e_ptp_close_txqsqs(c);
	napi_disable(&c->napi);
	mlx5e_ptp_close_cqs(c);
}
/* Create and open the port PTP channel: allocate the channel and its params,
 * wire up datapath fields, register NAPI, and open all queues.
 *
 * On success, *cp holds the new channel and 0 is returned; on failure a
 * negative errno is returned and nothing remains allocated.
 */
int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
			u8 lag_port, struct mlx5e_port_ptp **cp)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ptp_params *cparams;
	struct mlx5e_port_ptp *c;
	unsigned int irq;
	int err;
	int eqn;

	err = mlx5_vector2eqn(priv->mdev, 0, &eqn, &irq);
	if (err)
		return err;

	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
	cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
	if (!c || !cparams) {
		/* Fix: free whichever allocation did succeed instead of
		 * returning directly (kvfree(NULL) is a no-op).
		 */
		err = -ENOMEM;
		goto err_free;
	}

	c->priv     = priv;
	c->mdev     = priv->mdev;
	c->tstamp   = &priv->tstamp;
	c->ix       = 0;
	c->pdev     = mlx5_core_dma_dev(priv->mdev);
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
	c->num_tc   = params->num_tc;
	c->stats    = &priv->port_ptp_stats.ch;
	c->irq_desc = irq_to_desc(irq);
	c->lag_port = lag_port;

	netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64);

	mlx5e_ptp_build_params(c, cparams, params);

	err = mlx5e_ptp_open_queues(c, cparams);
	if (unlikely(err))
		goto err_napi_del;

	*cp = c;

	kvfree(cparams);

	return 0;

err_napi_del:
	netif_napi_del(&c->napi);
err_free:
	kvfree(cparams);
	kvfree(c);
	return err;
}
/* Tear down and free the port PTP channel created by mlx5e_port_ptp_open(). */
void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c)
{
	mlx5e_ptp_close_queues(c);
	netif_napi_del(&c->napi);

	kvfree(c);
}
/* Start transmission on every per-TC PTP SQ of the channel. */
void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c)
{
	int i;

	for (i = 0; i < c->num_tc; i++)
		mlx5e_activate_txqsq(&c->ptpsq[i].txqsq);
}
/* Stop transmission on every per-TC PTP SQ of the channel. */
void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c)
{
	int i;

	for (i = 0; i < c->num_tc; i++)
		mlx5e_deactivate_txqsq(&c->ptpsq[i].txqsq);
}
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 Mellanox Technologies. */
#ifndef __MLX5_EN_PTP_H__
#define __MLX5_EN_PTP_H__
#include "en.h"
#include "en/params.h"
#include "en_stats.h"
/* A PTP-aware TX SQ: a regular txqsq extended with a second CQ that receives
 * the port (wire) timestamp CQEs, plus an skb FIFO matching those CQEs back
 * to the skbs they belong to.
 */
struct mlx5e_ptpsq {
	struct mlx5e_txqsq       txqsq;		/* underlying TX SQ */
	struct mlx5e_cq          ts_cq;		/* port-timestamp CQ */
	u16                      skb_fifo_cc;	/* FIFO consumer counter */
	u16                      skb_fifo_pc;	/* FIFO producer counter */
	struct mlx5e_skb_fifo    skb_fifo;	/* skbs awaiting port timestamps */
	struct mlx5e_ptp_cq_stats *cq_stats;	/* per-TC ts_cq statistics */
};
/* The dedicated PTP channel: one PTP SQ per TC, sharing a single NAPI
 * context. Opened on demand when port TX timestamping is enabled.
 */
struct mlx5e_port_ptp {
	/* data path */
	struct mlx5e_ptpsq       ptpsq[MLX5E_MAX_NUM_TC];
	struct napi_struct       napi;
	struct device           *pdev;
	struct net_device       *netdev;
	__be32                   mkey_be;
	u8                       num_tc;
	u8                       lag_port;	/* LAG port selecting the TIS set */

	/* data path - accessed per napi poll */
	struct irq_desc *irq_desc;
	struct mlx5e_ch_stats *stats;

	/* control */
	struct mlx5e_priv         *priv;
	struct mlx5_core_dev      *mdev;
	struct hwtstamp_config    *tstamp;
	DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
	int                        ix;
};
/* Parameters used while building/opening the PTP channel's queues. */
struct mlx5e_ptp_params {
	struct mlx5e_params params;		/* derived from the regular channels */
	struct mlx5e_sq_param txq_sq_param;	/* creation params for the PTP SQs */
};
int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
u8 lag_port, struct mlx5e_port_ptp **cp);
void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c);
void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c);
void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c);
/* Which of the two expected completions a timestamp belongs to; passed to
 * mlx5e_skb_cb_hwtstamp_handler().
 */
enum {
	MLX5E_SKB_CB_CQE_HWTSTAMP  = BIT(0),	/* regular CQE completion */
	MLX5E_SKB_CB_PORT_HWTSTAMP = BIT(1),	/* port (wire) completion */
};
void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
ktime_t hwtstamp,
struct mlx5e_ptp_cq_stats *cq_stats);
void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb);
#endif /* __MLX5_EN_PTP_H__ */
...@@ -87,7 +87,7 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx) ...@@ -87,7 +87,7 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
/* At this point, both the rq and the icosq are disabled */ /* At this point, both the rq and the icosq are disabled */
err = mlx5e_health_sq_to_ready(icosq->channel, icosq->sqn); err = mlx5e_health_sq_to_ready(mdev, dev, icosq->sqn);
if (err) if (err)
goto out; goto out;
...@@ -146,17 +146,16 @@ static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx) ...@@ -146,17 +146,16 @@ static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
static int mlx5e_rx_reporter_timeout_recover(void *ctx) static int mlx5e_rx_reporter_timeout_recover(void *ctx)
{ {
struct mlx5e_icosq *icosq;
struct mlx5_eq_comp *eq; struct mlx5_eq_comp *eq;
struct mlx5e_rq *rq; struct mlx5e_rq *rq;
int err; int err;
rq = ctx; rq = ctx;
icosq = &rq->channel->icosq;
eq = rq->cq.mcq.eq; eq = rq->cq.mcq.eq;
err = mlx5e_health_channel_eq_recover(eq, rq->channel);
if (err) err = mlx5e_health_channel_eq_recover(rq->netdev, eq, rq->cq.ch_stats);
clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state); if (err && rq->icosq)
clear_bit(MLX5E_SQ_STATE_ENABLED, &rq->icosq->state);
return err; return err;
} }
...@@ -233,21 +232,13 @@ static int mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state, ...@@ -233,21 +232,13 @@ static int mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state,
static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
struct devlink_fmsg *fmsg) struct devlink_fmsg *fmsg)
{ {
struct mlx5e_priv *priv = rq->channel->priv;
struct mlx5e_icosq *icosq;
u8 icosq_hw_state;
u16 wqe_counter; u16 wqe_counter;
int wqes_sz; int wqes_sz;
u8 hw_state; u8 hw_state;
u16 wq_head; u16 wq_head;
int err; int err;
icosq = &rq->channel->icosq; err = mlx5e_query_rq_state(rq->mdev, rq->rqn, &hw_state);
err = mlx5e_query_rq_state(priv->mdev, rq->rqn, &hw_state);
if (err)
return err;
err = mlx5_core_query_sq_state(priv->mdev, icosq->sqn, &icosq_hw_state);
if (err) if (err)
return err; return err;
...@@ -259,7 +250,7 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, ...@@ -259,7 +250,7 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
if (err) if (err)
return err; return err;
err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->channel->ix); err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix);
if (err) if (err)
return err; return err;
...@@ -295,9 +286,18 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, ...@@ -295,9 +286,18 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
if (err) if (err)
return err; return err;
if (rq->icosq) {
struct mlx5e_icosq *icosq = rq->icosq;
u8 icosq_hw_state;
err = mlx5_core_query_sq_state(rq->mdev, icosq->sqn, &icosq_hw_state);
if (err)
return err;
err = mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg); err = mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg);
if (err) if (err)
return err; return err;
}
err = devlink_fmsg_obj_nest_end(fmsg); err = devlink_fmsg_obj_nest_end(fmsg);
if (err) if (err)
...@@ -557,25 +557,29 @@ static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter, ...@@ -557,25 +557,29 @@ static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter,
void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq) void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
{ {
struct mlx5e_icosq *icosq = &rq->channel->icosq; char icosq_str[MLX5E_REPORTER_PER_Q_MAX_LEN] = {};
struct mlx5e_priv *priv = rq->channel->priv;
char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
struct mlx5e_icosq *icosq = rq->icosq;
struct mlx5e_priv *priv = rq->priv;
struct mlx5e_err_ctx err_ctx = {}; struct mlx5e_err_ctx err_ctx = {};
err_ctx.ctx = rq; err_ctx.ctx = rq;
err_ctx.recover = mlx5e_rx_reporter_timeout_recover; err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
err_ctx.dump = mlx5e_rx_reporter_dump_rq; err_ctx.dump = mlx5e_rx_reporter_dump_rq;
if (icosq)
snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn);
snprintf(err_str, sizeof(err_str), snprintf(err_str, sizeof(err_str),
"RX timeout on channel: %d, ICOSQ: 0x%x RQ: 0x%x, CQ: 0x%x", "RX timeout on channel: %d, %sRQ: 0x%x, CQ: 0x%x",
icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn); rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn);
mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx); mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
} }
void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq) void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq)
{ {
struct mlx5e_priv *priv = rq->channel->priv;
char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
struct mlx5e_priv *priv = rq->priv;
struct mlx5e_err_ctx err_ctx = {}; struct mlx5e_err_ctx err_ctx = {};
err_ctx.ctx = rq; err_ctx.ctx = rq;
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */ /* Copyright (c) 2019 Mellanox Technologies. */
#include "health.h" #include "health.h"
#include "en/ptp.h"
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
{ {
...@@ -15,7 +16,7 @@ static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) ...@@ -15,7 +16,7 @@ static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
msleep(20); msleep(20);
} }
netdev_err(sq->channel->netdev, netdev_err(sq->netdev,
"Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n", "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
sq->sqn, sq->cc, sq->pc); sq->sqn, sq->cc, sq->pc);
...@@ -41,8 +42,8 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx) ...@@ -41,8 +42,8 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
int err; int err;
sq = ctx; sq = ctx;
mdev = sq->channel->mdev; mdev = sq->mdev;
dev = sq->channel->netdev; dev = sq->netdev;
if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
return 0; return 0;
...@@ -68,7 +69,7 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx) ...@@ -68,7 +69,7 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
* pending WQEs. SQ can safely reset the SQ. * pending WQEs. SQ can safely reset the SQ.
*/ */
err = mlx5e_health_sq_to_ready(sq->channel, sq->sqn); err = mlx5e_health_sq_to_ready(mdev, dev, sq->sqn);
if (err) if (err)
goto out; goto out;
...@@ -99,8 +100,8 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx) ...@@ -99,8 +100,8 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
to_ctx = ctx; to_ctx = ctx;
sq = to_ctx->sq; sq = to_ctx->sq;
eq = sq->cq.mcq.eq; eq = sq->cq.mcq.eq;
priv = sq->channel->priv; priv = sq->priv;
err = mlx5e_health_channel_eq_recover(eq, sq->channel); err = mlx5e_health_channel_eq_recover(sq->netdev, eq, sq->cq.ch_stats);
if (!err) { if (!err) {
to_ctx->status = 0; /* this sq recovered */ to_ctx->status = 0; /* this sq recovered */
return err; return err;
...@@ -141,11 +142,11 @@ static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter, ...@@ -141,11 +142,11 @@ static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
} }
static int static int
mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
struct mlx5e_txqsq *sq, int tc) struct mlx5e_txqsq *sq, int tc)
{ {
struct mlx5e_priv *priv = sq->channel->priv;
bool stopped = netif_xmit_stopped(sq->txq); bool stopped = netif_xmit_stopped(sq->txq);
struct mlx5e_priv *priv = sq->priv;
u8 state; u8 state;
int err; int err;
...@@ -153,14 +154,6 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, ...@@ -153,14 +154,6 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
if (err) if (err)
return err; return err;
err = devlink_fmsg_obj_nest_start(fmsg);
if (err)
return err;
err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix);
if (err)
return err;
err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc); err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
if (err) if (err)
return err; return err;
...@@ -193,7 +186,24 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, ...@@ -193,7 +186,24 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
if (err) if (err)
return err; return err;
err = mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg); return mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg);
}
static int
mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
struct mlx5e_txqsq *sq, int tc)
{
int err;
err = devlink_fmsg_obj_nest_start(fmsg);
if (err)
return err;
err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix);
if (err)
return err;
err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, sq, tc);
if (err) if (err)
return err; return err;
...@@ -204,49 +214,147 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, ...@@ -204,49 +214,147 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
return 0; return 0;
} }
static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, static int
struct devlink_fmsg *fmsg, mlx5e_tx_reporter_build_diagnose_output_ptpsq(struct devlink_fmsg *fmsg,
struct netlink_ext_ack *extack) struct mlx5e_ptpsq *ptpsq, int tc)
{ {
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); int err;
struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
u32 sq_stride, sq_sz;
int i, tc, err = 0; err = devlink_fmsg_obj_nest_start(fmsg);
if (err)
return err;
mutex_lock(&priv->state_lock); err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
if (err)
return err;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, &ptpsq->txqsq, tc);
goto unlock; if (err)
return err;
sq_sz = mlx5_wq_cyc_get_size(&generic_sq->wq); err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
sq_stride = MLX5_SEND_WQE_BB; if (err)
return err;
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config"); err = mlx5e_health_cq_diag_fmsg(&ptpsq->ts_cq, fmsg);
if (err) if (err)
goto unlock; return err;
err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
if (err)
return err;
err = devlink_fmsg_obj_nest_end(fmsg);
if (err)
return err;
return 0;
}
static int
mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg,
struct mlx5e_txqsq *txqsq)
{
u32 sq_stride, sq_sz;
int err;
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ"); err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
if (err) if (err)
goto unlock; return err;
sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq);
sq_stride = MLX5_SEND_WQE_BB;
err = devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride); err = devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride);
if (err) if (err)
goto unlock; return err;
err = devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz); err = devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz);
if (err) if (err)
goto unlock; return err;
err = mlx5e_health_cq_common_diag_fmsg(&generic_sq->cq, fmsg); err = mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg);
if (err) if (err)
goto unlock; return err;
return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static int
mlx5e_tx_reporter_diagnose_generic_tx_port_ts(struct devlink_fmsg *fmsg,
struct mlx5e_ptpsq *ptpsq)
{
int err;
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
if (err)
return err;
err = mlx5e_health_cq_common_diag_fmsg(&ptpsq->ts_cq, fmsg);
if (err)
return err;
return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static int
mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
struct mlx5e_ptpsq *generic_ptpsq;
int err;
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config");
if (err)
return err;
err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, generic_sq);
if (err)
return err;
generic_ptpsq = priv->channels.port_ptp ?
&priv->channels.port_ptp->ptpsq[0] :
NULL;
if (!generic_ptpsq)
goto out;
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
if (err)
return err;
err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, &generic_ptpsq->txqsq);
if (err)
return err;
err = mlx5e_tx_reporter_diagnose_generic_tx_port_ts(fmsg, generic_ptpsq);
if (err)
return err;
err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
if (err) if (err)
return err;
out:
return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg,
struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_port_ptp *ptp_ch = priv->channels.port_ptp;
int i, tc, err = 0;
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock; goto unlock;
err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); err = mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg);
if (err) if (err)
goto unlock; goto unlock;
...@@ -265,6 +373,19 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, ...@@ -265,6 +373,19 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
goto unlock; goto unlock;
} }
} }
if (!ptp_ch)
goto close_sqs_nest;
for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
err = mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg,
&ptp_ch->ptpsq[tc],
tc);
if (err)
goto unlock;
}
close_sqs_nest:
err = devlink_fmsg_arr_pair_nest_end(fmsg); err = devlink_fmsg_arr_pair_nest_end(fmsg);
if (err) if (err)
goto unlock; goto unlock;
...@@ -338,6 +459,7 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms ...@@ -338,6 +459,7 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
struct devlink_fmsg *fmsg) struct devlink_fmsg *fmsg)
{ {
struct mlx5e_port_ptp *ptp_ch = priv->channels.port_ptp;
struct mlx5_rsc_key key = {}; struct mlx5_rsc_key key = {};
int i, tc, err; int i, tc, err;
...@@ -373,6 +495,17 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, ...@@ -373,6 +495,17 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
return err; return err;
} }
} }
if (ptp_ch) {
for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq;
err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ");
if (err)
return err;
}
}
return devlink_fmsg_arr_pair_nest_end(fmsg); return devlink_fmsg_arr_pair_nest_end(fmsg);
} }
...@@ -396,8 +529,8 @@ static int mlx5e_tx_reporter_dump(struct devlink_health_reporter *reporter, ...@@ -396,8 +529,8 @@ static int mlx5e_tx_reporter_dump(struct devlink_health_reporter *reporter,
void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq) void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
{ {
struct mlx5e_priv *priv = sq->channel->priv;
char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
struct mlx5e_priv *priv = sq->priv;
struct mlx5e_err_ctx err_ctx = {}; struct mlx5e_err_ctx err_ctx = {};
err_ctx.ctx = sq; err_ctx.ctx = sq;
...@@ -410,9 +543,9 @@ void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq) ...@@ -410,9 +543,9 @@ void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq) int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
{ {
struct mlx5e_priv *priv = sq->channel->priv;
char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
struct mlx5e_tx_timeout_ctx to_ctx = {}; struct mlx5e_tx_timeout_ctx to_ctx = {};
struct mlx5e_priv *priv = sq->priv;
struct mlx5e_err_ctx err_ctx = {}; struct mlx5e_err_ctx err_ctx = {};
to_ctx.sq = sq; to_ctx.sq = sq;
...@@ -421,7 +554,7 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq) ...@@ -421,7 +554,7 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
err_ctx.dump = mlx5e_tx_reporter_dump_sq; err_ctx.dump = mlx5e_tx_reporter_dump_sq;
snprintf(err_str, sizeof(err_str), snprintf(err_str, sizeof(err_str),
"TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u", "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
jiffies_to_usecs(jiffies - sq->txq->trans_start)); jiffies_to_usecs(jiffies - sq->txq->trans_start));
mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx); mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
......
...@@ -24,6 +24,8 @@ ...@@ -24,6 +24,8 @@
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
enum mlx5e_icosq_wqe_type { enum mlx5e_icosq_wqe_type {
MLX5E_ICOSQ_WQE_NOP, MLX5E_ICOSQ_WQE_NOP,
MLX5E_ICOSQ_WQE_UMR_RX, MLX5E_ICOSQ_WQE_UMR_RX,
...@@ -250,21 +252,24 @@ mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size, ...@@ -250,21 +252,24 @@ mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
dma->type = map_type; dma->type = map_type;
} }
static inline struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_txqsq *sq, u16 i) static inline
struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_skb_fifo *fifo, u16 i)
{ {
return &sq->db.skb_fifo[i & sq->skb_fifo_mask]; return &fifo->fifo[i & fifo->mask];
} }
static inline void mlx5e_skb_fifo_push(struct mlx5e_txqsq *sq, struct sk_buff *skb) static inline
void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb)
{ {
struct sk_buff **skb_item = mlx5e_skb_fifo_get(sq, sq->skb_fifo_pc++); struct sk_buff **skb_item = mlx5e_skb_fifo_get(fifo, (*fifo->pc)++);
*skb_item = skb; *skb_item = skb;
} }
static inline struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_txqsq *sq) static inline
struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo)
{ {
return *mlx5e_skb_fifo_get(sq, sq->skb_fifo_cc++); return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++);
} }
static inline void static inline void
...@@ -308,7 +313,7 @@ static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn, ...@@ -308,7 +313,7 @@ static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn,
ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1); ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
netdev_err(cq->channel->netdev, netdev_err(cq->netdev,
"Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n", "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
cq->mcq.cqn, ci, qn, cq->mcq.cqn, ci, qn,
get_cqe_opcode((struct mlx5_cqe64 *)err_cqe), get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
......
...@@ -49,8 +49,11 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, ...@@ -49,8 +49,11 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
struct mlx5e_channel *c) struct mlx5e_channel *c)
{ {
struct mlx5e_channel_param *cparam; struct mlx5e_channel_param *cparam;
struct mlx5e_create_cq_param ccp;
int err; int err;
mlx5e_build_create_cq_param(&ccp, c);
if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev)) if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
return -EINVAL; return -EINVAL;
...@@ -60,7 +63,8 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, ...@@ -60,7 +63,8 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
mlx5e_build_xsk_cparam(priv, params, xsk, cparam); mlx5e_build_xsk_cparam(priv, params, xsk, cparam);
err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->xskrq.cq); err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->xskrq.cq);
if (unlikely(err)) if (unlikely(err))
goto err_free_cparam; goto err_free_cparam;
...@@ -68,7 +72,8 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, ...@@ -68,7 +72,8 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (unlikely(err)) if (unlikely(err))
goto err_close_rx_cq; goto err_close_rx_cq;
err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xsksq.cq); err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
&c->xsksq.cq);
if (unlikely(err)) if (unlikely(err))
goto err_close_rq; goto err_close_rq;
......
...@@ -276,7 +276,7 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, ...@@ -276,7 +276,7 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
if (WARN_ON_ONCE(tls_ctx->netdev != netdev)) if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
goto err_out; goto err_out;
if (mlx5_accel_is_ktls_tx(sq->channel->mdev)) if (mlx5_accel_is_ktls_tx(sq->mdev))
return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state); return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
/* FPGA */ /* FPGA */
......
...@@ -1944,6 +1944,38 @@ static int set_pflag_skb_tx_mpwqe(struct net_device *netdev, bool enable) ...@@ -1944,6 +1944,38 @@ static int set_pflag_skb_tx_mpwqe(struct net_device *netdev, bool enable)
return set_pflag_tx_mpwqe_common(netdev, MLX5E_PFLAG_SKB_TX_MPWQE, enable); return set_pflag_tx_mpwqe_common(netdev, MLX5E_PFLAG_SKB_TX_MPWQE, enable);
} }
static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
int err;
if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
return -EOPNOTSUPP;
new_channels.params = priv->channels.params;
MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_TX_PORT_TS, enable);
/* No need to verify SQ stop room as
* ptpsq.txqsq.stop_room <= generic_sq->stop_room, and both
* has the same log_sq_size.
*/
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
err = mlx5e_num_channels_changed(priv);
goto out;
}
err = mlx5e_safe_switch_channels(priv, &new_channels,
mlx5e_num_channels_changed_ctx, NULL);
out:
if (!err)
priv->port_ptp_opened = true;
return err;
}
static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = { static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = {
{ "rx_cqe_moder", set_pflag_rx_cqe_based_moder }, { "rx_cqe_moder", set_pflag_rx_cqe_based_moder },
{ "tx_cqe_moder", set_pflag_tx_cqe_based_moder }, { "tx_cqe_moder", set_pflag_tx_cqe_based_moder },
...@@ -1952,6 +1984,7 @@ static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = { ...@@ -1952,6 +1984,7 @@ static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = {
{ "rx_no_csum_complete", set_pflag_rx_no_csum_complete }, { "rx_no_csum_complete", set_pflag_rx_no_csum_complete },
{ "xdp_tx_mpwqe", set_pflag_xdp_tx_mpwqe }, { "xdp_tx_mpwqe", set_pflag_xdp_tx_mpwqe },
{ "skb_tx_mpwqe", set_pflag_skb_tx_mpwqe }, { "skb_tx_mpwqe", set_pflag_skb_tx_mpwqe },
{ "tx_port_ts", set_pflag_tx_port_ts },
}; };
static int mlx5e_handle_pflag(struct net_device *netdev, static int mlx5e_handle_pflag(struct net_device *netdev,
......
...@@ -772,25 +772,31 @@ static struct mlx5e_etype_proto ttc_tunnel_rules[] = { ...@@ -772,25 +772,31 @@ static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
}; };
bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type) u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt)
{
return ttc_tunnel_rules[tt].proto;
}
static bool mlx5e_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev, u8 proto_type)
{ {
switch (proto_type) { switch (proto_type) {
case IPPROTO_GRE: case IPPROTO_GRE:
return MLX5_CAP_ETH(mdev, tunnel_stateless_gre); return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
case IPPROTO_IPIP: case IPPROTO_IPIP:
case IPPROTO_IPV6: case IPPROTO_IPV6:
return MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip); return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx));
default: default:
return false; return false;
} }
} }
bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev) static bool mlx5e_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev)
{ {
int tt; int tt;
for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
if (mlx5e_tunnel_proto_supported(mdev, ttc_tunnel_rules[tt].proto)) if (mlx5e_tunnel_proto_supported_rx(mdev, ttc_tunnel_rules[tt].proto))
return true; return true;
} }
return false; return false;
...@@ -798,7 +804,7 @@ bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev) ...@@ -798,7 +804,7 @@ bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev)
bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{ {
return (mlx5e_any_tunnel_proto_supported(mdev) && return (mlx5e_tunnel_any_rx_proto_supported(mdev) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
} }
...@@ -899,7 +905,7 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv, ...@@ -899,7 +905,7 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = params->inner_ttc->ft.t; dest.ft = params->inner_ttc->ft.t;
for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
if (!mlx5e_tunnel_proto_supported(priv->mdev, if (!mlx5e_tunnel_proto_supported_rx(priv->mdev,
ttc_tunnel_rules[tt].proto)) ttc_tunnel_rules[tt].proto))
continue; continue;
trules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, trules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
......
...@@ -64,6 +64,7 @@ ...@@ -64,6 +64,7 @@
#include "en/hv_vhca_stats.h" #include "en/hv_vhca_stats.h"
#include "en/devlink.h" #include "en/devlink.h"
#include "lib/mlx5.h" #include "lib/mlx5.h"
#include "en/ptp.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{ {
...@@ -412,9 +413,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, ...@@ -412,9 +413,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->wq_type = params->rq_wq_type; rq->wq_type = params->rq_wq_type;
rq->pdev = c->pdev; rq->pdev = c->pdev;
rq->netdev = c->netdev; rq->netdev = c->netdev;
rq->priv = c->priv;
rq->tstamp = c->tstamp; rq->tstamp = c->tstamp;
rq->clock = &mdev->clock; rq->clock = &mdev->clock;
rq->channel = c; rq->icosq = &c->icosq;
rq->ix = c->ix; rq->ix = c->ix;
rq->mdev = mdev; rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
...@@ -613,14 +615,11 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, ...@@ -613,14 +615,11 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
static void mlx5e_free_rq(struct mlx5e_rq *rq) static void mlx5e_free_rq(struct mlx5e_rq *rq)
{ {
struct mlx5e_channel *c = rq->channel; struct bpf_prog *old_prog;
struct bpf_prog *old_prog = NULL;
int i; int i;
/* drop_rq has neither channel nor xdp_prog. */
if (c)
old_prog = rcu_dereference_protected(rq->xdp_prog, old_prog = rcu_dereference_protected(rq->xdp_prog,
lockdep_is_held(&c->priv->state_lock)); lockdep_is_held(&rq->priv->state_lock));
if (old_prog) if (old_prog)
bpf_prog_put(old_prog); bpf_prog_put(old_prog);
...@@ -720,9 +719,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) ...@@ -720,9 +719,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable) static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{ {
struct mlx5e_channel *c = rq->channel; struct mlx5_core_dev *mdev = rq->mdev;
struct mlx5e_priv *priv = c->priv;
struct mlx5_core_dev *mdev = priv->mdev;
void *in; void *in;
void *rqc; void *rqc;
...@@ -751,8 +748,7 @@ static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable) ...@@ -751,8 +748,7 @@ static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{ {
struct mlx5e_channel *c = rq->channel; struct mlx5_core_dev *mdev = rq->mdev;
struct mlx5_core_dev *mdev = c->mdev;
void *in; void *in;
void *rqc; void *rqc;
int inlen; int inlen;
...@@ -786,7 +782,6 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq) ...@@ -786,7 +782,6 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
{ {
unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time); unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
struct mlx5e_channel *c = rq->channel;
u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq)); u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));
...@@ -797,8 +792,8 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) ...@@ -797,8 +792,8 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
msleep(20); msleep(20);
} while (time_before(jiffies, exp_time)); } while (time_before(jiffies, exp_time));
netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n", netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes); rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
mlx5e_reporter_rx_timeout(rq); mlx5e_reporter_rx_timeout(rq);
return -ETIMEDOUT; return -ETIMEDOUT;
...@@ -913,7 +908,7 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params, ...@@ -913,7 +908,7 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
void mlx5e_activate_rq(struct mlx5e_rq *rq) void mlx5e_activate_rq(struct mlx5e_rq *rq)
{ {
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
mlx5e_trigger_irq(&rq->channel->icosq); mlx5e_trigger_irq(rq->icosq);
} }
void mlx5e_deactivate_rq(struct mlx5e_rq *rq) void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
...@@ -925,7 +920,7 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq) ...@@ -925,7 +920,7 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
void mlx5e_close_rq(struct mlx5e_rq *rq) void mlx5e_close_rq(struct mlx5e_rq *rq)
{ {
cancel_work_sync(&rq->dim.work); cancel_work_sync(&rq->dim.work);
cancel_work_sync(&rq->channel->icosq.recover_work); cancel_work_sync(&rq->icosq->recover_work);
cancel_work_sync(&rq->recover_work); cancel_work_sync(&rq->recover_work);
mlx5e_destroy_rq(rq); mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq); mlx5e_free_rx_descs(rq);
...@@ -1089,14 +1084,14 @@ static void mlx5e_free_icosq(struct mlx5e_icosq *sq) ...@@ -1089,14 +1084,14 @@ static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
mlx5_wq_destroy(&sq->wq_ctrl); mlx5_wq_destroy(&sq->wq_ctrl);
} }
static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq) void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{ {
kvfree(sq->db.wqe_info); kvfree(sq->db.wqe_info);
kvfree(sq->db.skb_fifo); kvfree(sq->db.skb_fifo.fifo);
kvfree(sq->db.dma_fifo); kvfree(sq->db.dma_fifo);
} }
static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{ {
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS; int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
...@@ -1104,24 +1099,26 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) ...@@ -1104,24 +1099,26 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
sq->db.dma_fifo = kvzalloc_node(array_size(df_sz, sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
sizeof(*sq->db.dma_fifo)), sizeof(*sq->db.dma_fifo)),
GFP_KERNEL, numa); GFP_KERNEL, numa);
sq->db.skb_fifo = kvzalloc_node(array_size(df_sz, sq->db.skb_fifo.fifo = kvzalloc_node(array_size(df_sz,
sizeof(*sq->db.skb_fifo)), sizeof(*sq->db.skb_fifo.fifo)),
GFP_KERNEL, numa); GFP_KERNEL, numa);
sq->db.wqe_info = kvzalloc_node(array_size(wq_sz, sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
sizeof(*sq->db.wqe_info)), sizeof(*sq->db.wqe_info)),
GFP_KERNEL, numa); GFP_KERNEL, numa);
if (!sq->db.dma_fifo || !sq->db.skb_fifo || !sq->db.wqe_info) { if (!sq->db.dma_fifo || !sq->db.skb_fifo.fifo || !sq->db.wqe_info) {
mlx5e_free_txqsq_db(sq); mlx5e_free_txqsq_db(sq);
return -ENOMEM; return -ENOMEM;
} }
sq->dma_fifo_mask = df_sz - 1; sq->dma_fifo_mask = df_sz - 1;
sq->skb_fifo_mask = df_sz - 1;
sq->db.skb_fifo.pc = &sq->skb_fifo_pc;
sq->db.skb_fifo.cc = &sq->skb_fifo_cc;
sq->db.skb_fifo.mask = df_sz - 1;
return 0; return 0;
} }
static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int txq_ix, int txq_ix,
struct mlx5e_params *params, struct mlx5e_params *params,
...@@ -1138,7 +1135,9 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, ...@@ -1138,7 +1135,9 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->tstamp = c->tstamp; sq->tstamp = c->tstamp;
sq->clock = &mdev->clock; sq->clock = &mdev->clock;
sq->mkey_be = c->mkey_be; sq->mkey_be = c->mkey_be;
sq->channel = c; sq->netdev = c->netdev;
sq->mdev = c->mdev;
sq->priv = c->priv;
sq->ch_ix = c->ix; sq->ch_ix = c->ix;
sq->txq_ix = txq_ix; sq->txq_ix = txq_ix;
sq->uar_map = mdev->mlx5e_res.bfreg.map; sq->uar_map = mdev->mlx5e_res.bfreg.map;
...@@ -1177,20 +1176,12 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, ...@@ -1177,20 +1176,12 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
return err; return err;
} }
static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq) void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{ {
mlx5e_free_txqsq_db(sq); mlx5e_free_txqsq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl); mlx5_wq_destroy(&sq->wq_ctrl);
} }
struct mlx5e_create_sq_param {
struct mlx5_wq_ctrl *wq_ctrl;
u32 cqn;
u32 tisn;
u8 tis_lst_sz;
u8 min_inline_mode;
};
static int mlx5e_create_sq(struct mlx5_core_dev *mdev, static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param, struct mlx5e_sq_param *param,
struct mlx5e_create_sq_param *csp, struct mlx5e_create_sq_param *csp,
...@@ -1215,6 +1206,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev, ...@@ -1215,6 +1206,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz); MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz);
MLX5_SET(sqc, sqc, tis_num_0, csp->tisn); MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
MLX5_SET(sqc, sqc, cqn, csp->cqn); MLX5_SET(sqc, sqc, cqn, csp->cqn);
MLX5_SET(sqc, sqc, ts_cqe_to_dest_cqn, csp->ts_cqe_to_dest_cqn);
if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode); MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);
...@@ -1272,7 +1264,7 @@ static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn) ...@@ -1272,7 +1264,7 @@ static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
mlx5_core_destroy_sq(mdev, sqn); mlx5_core_destroy_sq(mdev, sqn);
} }
static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev, int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param, struct mlx5e_sq_param *param,
struct mlx5e_create_sq_param *csp, struct mlx5e_create_sq_param *csp,
u32 *sqn) u32 *sqn)
...@@ -1338,7 +1330,7 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c, ...@@ -1338,7 +1330,7 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c,
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{ {
sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix); sq->txq = netdev_get_tx_queue(sq->netdev, sq->txq_ix);
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
netdev_tx_reset_queue(sq->txq); netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq); netif_tx_start_queue(sq->txq);
...@@ -1351,7 +1343,7 @@ void mlx5e_tx_disable_queue(struct netdev_queue *txq) ...@@ -1351,7 +1343,7 @@ void mlx5e_tx_disable_queue(struct netdev_queue *txq)
__netif_tx_unlock_bh(txq); __netif_tx_unlock_bh(txq);
} }
static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{ {
struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5_wq_cyc *wq = &sq->wq;
...@@ -1376,8 +1368,7 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) ...@@ -1376,8 +1368,7 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{ {
struct mlx5e_channel *c = sq->channel; struct mlx5_core_dev *mdev = sq->mdev;
struct mlx5_core_dev *mdev = c->mdev;
struct mlx5_rate_limit rl = {0}; struct mlx5_rate_limit rl = {0};
cancel_work_sync(&sq->dim.work); cancel_work_sync(&sq->dim.work);
...@@ -1391,7 +1382,7 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) ...@@ -1391,7 +1382,7 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
mlx5e_free_txqsq(sq); mlx5e_free_txqsq(sq);
} }
static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work) void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
{ {
struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq, struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
recover_work); recover_work);
...@@ -1518,10 +1509,11 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq) ...@@ -1518,10 +1509,11 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
mlx5e_free_xdpsq(sq); mlx5e_free_xdpsq(sq);
} }
static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev, static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
struct mlx5e_cq_param *param, struct mlx5e_cq_param *param,
struct mlx5e_cq *cq) struct mlx5e_cq *cq)
{ {
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq; struct mlx5_core_cq *mcq = &cq->mcq;
int eqn_not_used; int eqn_not_used;
unsigned int irqn; unsigned int irqn;
...@@ -1554,25 +1546,27 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev, ...@@ -1554,25 +1546,27 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
} }
cq->mdev = mdev; cq->mdev = mdev;
cq->netdev = priv->netdev;
cq->priv = priv;
return 0; return 0;
} }
static int mlx5e_alloc_cq(struct mlx5e_channel *c, static int mlx5e_alloc_cq(struct mlx5e_priv *priv,
struct mlx5e_cq_param *param, struct mlx5e_cq_param *param,
struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq) struct mlx5e_cq *cq)
{ {
struct mlx5_core_dev *mdev = c->priv->mdev;
int err; int err;
param->wq.buf_numa_node = cpu_to_node(c->cpu); param->wq.buf_numa_node = ccp->node;
param->wq.db_numa_node = cpu_to_node(c->cpu); param->wq.db_numa_node = ccp->node;
param->eq_ix = c->ix; param->eq_ix = ccp->ix;
err = mlx5e_alloc_cq_common(mdev, param, cq); err = mlx5e_alloc_cq_common(priv, param, cq);
cq->napi = &c->napi; cq->napi = ccp->napi;
cq->channel = c; cq->ch_stats = ccp->ch_stats;
return err; return err;
} }
...@@ -1636,13 +1630,14 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq) ...@@ -1636,13 +1630,14 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
mlx5_core_destroy_cq(cq->mdev, &cq->mcq); mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
} }
int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder, int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
struct mlx5e_cq_param *param, struct mlx5e_cq *cq) struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq)
{ {
struct mlx5_core_dev *mdev = c->mdev; struct mlx5_core_dev *mdev = priv->mdev;
int err; int err;
err = mlx5e_alloc_cq(c, param, cq); err = mlx5e_alloc_cq(priv, param, ccp, cq);
if (err) if (err)
return err; return err;
...@@ -1668,14 +1663,15 @@ void mlx5e_close_cq(struct mlx5e_cq *cq) ...@@ -1668,14 +1663,15 @@ void mlx5e_close_cq(struct mlx5e_cq *cq)
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
struct mlx5e_params *params, struct mlx5e_params *params,
struct mlx5e_create_cq_param *ccp,
struct mlx5e_channel_param *cparam) struct mlx5e_channel_param *cparam)
{ {
int err; int err;
int tc; int tc;
for (tc = 0; tc < c->num_tc; tc++) { for (tc = 0; tc < c->num_tc; tc++) {
err = mlx5e_open_cq(c, params->tx_cq_moderation, err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->txq_sq.cqp,
&cparam->txq_sq.cqp, &c->sq[tc].cq); ccp, &c->sq[tc].cq);
if (err) if (err)
goto err_close_tx_cqs; goto err_close_tx_cqs;
} }
...@@ -1810,35 +1806,52 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) ...@@ -1810,35 +1806,52 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err; return err;
} }
void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
{
*ccp = (struct mlx5e_create_cq_param) {
.napi = &c->napi,
.ch_stats = c->stats,
.node = cpu_to_node(c->cpu),
.ix = c->ix,
};
}
static int mlx5e_open_queues(struct mlx5e_channel *c, static int mlx5e_open_queues(struct mlx5e_channel *c,
struct mlx5e_params *params, struct mlx5e_params *params,
struct mlx5e_channel_param *cparam) struct mlx5e_channel_param *cparam)
{ {
struct dim_cq_moder icocq_moder = {0, 0}; struct dim_cq_moder icocq_moder = {0, 0};
struct mlx5e_create_cq_param ccp;
int err; int err;
err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq.cqp, &c->async_icosq.cq); mlx5e_build_create_cq_param(&ccp, c);
err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
&c->async_icosq.cq);
if (err) if (err)
return err; return err;
err = mlx5e_open_cq(c, icocq_moder, &cparam->async_icosq.cqp, &c->icosq.cq); err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
&c->icosq.cq);
if (err) if (err)
goto err_close_async_icosq_cq; goto err_close_async_icosq_cq;
err = mlx5e_open_tx_cqs(c, params, cparam); err = mlx5e_open_tx_cqs(c, params, &ccp, cparam);
if (err) if (err)
goto err_close_icosq_cq; goto err_close_icosq_cq;
err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xdpsq.cq); err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
&c->xdpsq.cq);
if (err) if (err)
goto err_close_tx_cqs; goto err_close_tx_cqs;
err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->rq.cq); err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->rq.cq);
if (err) if (err)
goto err_close_xdp_tx_cqs; goto err_close_xdp_tx_cqs;
err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation, err = c->xdp ? mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
&cparam->xdp_sq.cqp, &c->rq_xdpsq.cq) : 0; &ccp, &c->rq_xdpsq.cq) : 0;
if (err) if (err)
goto err_close_rx_cq; goto err_close_rx_cq;
...@@ -2361,6 +2374,13 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, ...@@ -2361,6 +2374,13 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels; goto err_close_channels;
} }
if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS)) {
err = mlx5e_port_ptp_open(priv, &chs->params, chs->c[0]->lag_port,
&chs->port_ptp);
if (err)
goto err_close_channels;
}
mlx5e_health_channels_update(priv); mlx5e_health_channels_update(priv);
kvfree(cparam); kvfree(cparam);
return 0; return 0;
...@@ -2382,6 +2402,9 @@ static void mlx5e_activate_channels(struct mlx5e_channels *chs) ...@@ -2382,6 +2402,9 @@ static void mlx5e_activate_channels(struct mlx5e_channels *chs)
for (i = 0; i < chs->num; i++) for (i = 0; i < chs->num; i++)
mlx5e_activate_channel(chs->c[i]); mlx5e_activate_channel(chs->c[i]);
if (chs->port_ptp)
mlx5e_ptp_activate_channel(chs->port_ptp);
} }
#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */ #define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
...@@ -2408,6 +2431,9 @@ static void mlx5e_deactivate_channels(struct mlx5e_channels *chs) ...@@ -2408,6 +2431,9 @@ static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
{ {
int i; int i;
if (chs->port_ptp)
mlx5e_ptp_deactivate_channel(chs->port_ptp);
for (i = 0; i < chs->num; i++) for (i = 0; i < chs->num; i++)
mlx5e_deactivate_channel(chs->c[i]); mlx5e_deactivate_channel(chs->c[i]);
} }
...@@ -2416,6 +2442,9 @@ void mlx5e_close_channels(struct mlx5e_channels *chs) ...@@ -2416,6 +2442,9 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
{ {
int i; int i;
if (chs->port_ptp)
mlx5e_port_ptp_close(chs->port_ptp);
for (i = 0; i < chs->num; i++) for (i = 0; i < chs->num; i++)
mlx5e_close_channel(chs->c[i]); mlx5e_close_channel(chs->c[i]);
...@@ -2901,6 +2930,8 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) ...@@ -2901,6 +2930,8 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
nch = priv->channels.params.num_channels; nch = priv->channels.params.num_channels;
ntc = priv->channels.params.num_tc; ntc = priv->channels.params.num_tc;
num_txqs = nch * ntc; num_txqs = nch * ntc;
if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
num_txqs += ntc;
num_rxqs = nch * priv->profile->rq_groups; num_rxqs = nch * priv->profile->rq_groups;
mlx5e_netdev_set_tcs(netdev, nch, ntc); mlx5e_netdev_set_tcs(netdev, nch, ntc);
...@@ -2974,14 +3005,13 @@ MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed); ...@@ -2974,14 +3005,13 @@ MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed);
static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
{ {
int i, ch; int i, ch, tc, num_tc;
ch = priv->channels.num; ch = priv->channels.num;
num_tc = priv->channels.params.num_tc;
for (i = 0; i < ch; i++) { for (i = 0; i < ch; i++) {
int tc; for (tc = 0; tc < num_tc; tc++) {
for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
struct mlx5e_channel *c = priv->channels.c[i]; struct mlx5e_channel *c = priv->channels.c[i];
struct mlx5e_txqsq *sq = &c->sq[tc]; struct mlx5e_txqsq *sq = &c->sq[tc];
...@@ -2989,10 +3019,29 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) ...@@ -2989,10 +3019,29 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
priv->channel_tc2realtxq[i][tc] = i + tc * ch; priv->channel_tc2realtxq[i][tc] = i + tc * ch;
} }
} }
if (!priv->channels.port_ptp)
return;
for (tc = 0; tc < num_tc; tc++) {
struct mlx5e_port_ptp *c = priv->channels.port_ptp;
struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
priv->txq2sq[sq->txq_ix] = sq;
priv->port_ptp_tc2realtxq[tc] = priv->num_tc_x_num_ch + tc;
}
}
static void mlx5e_update_num_tc_x_num_ch(struct mlx5e_priv *priv)
{
/* Sync with mlx5e_select_queue. */
WRITE_ONCE(priv->num_tc_x_num_ch,
priv->channels.params.num_tc * priv->channels.num);
} }
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{ {
mlx5e_update_num_tc_x_num_ch(priv);
mlx5e_build_txq_maps(priv); mlx5e_build_txq_maps(priv);
mlx5e_activate_channels(&priv->channels); mlx5e_activate_channels(&priv->channels);
mlx5e_xdp_tx_enable(priv); mlx5e_xdp_tx_enable(priv);
...@@ -3196,6 +3245,11 @@ int mlx5e_close(struct net_device *netdev) ...@@ -3196,6 +3245,11 @@ int mlx5e_close(struct net_device *netdev)
return err; return err;
} }
static void mlx5e_free_drop_rq(struct mlx5e_rq *rq)
{
mlx5_wq_destroy(&rq->wq_ctrl);
}
static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev, static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
struct mlx5e_rq *rq, struct mlx5e_rq *rq,
struct mlx5e_rq_param *param) struct mlx5e_rq_param *param)
...@@ -3219,14 +3273,16 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev, ...@@ -3219,14 +3273,16 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
return 0; return 0;
} }
static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev, static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
struct mlx5e_cq *cq, struct mlx5e_cq *cq,
struct mlx5e_cq_param *param) struct mlx5e_cq_param *param)
{ {
struct mlx5_core_dev *mdev = priv->mdev;
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
return mlx5e_alloc_cq_common(mdev, param, cq); return mlx5e_alloc_cq_common(priv, param, cq);
} }
int mlx5e_open_drop_rq(struct mlx5e_priv *priv, int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
...@@ -3240,7 +3296,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv, ...@@ -3240,7 +3296,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
mlx5e_build_drop_rq_param(priv, &rq_param); mlx5e_build_drop_rq_param(priv, &rq_param);
err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param); err = mlx5e_alloc_drop_cq(priv, cq, &cq_param);
if (err) if (err)
return err; return err;
...@@ -3263,7 +3319,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv, ...@@ -3263,7 +3319,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
return 0; return 0;
err_free_rq: err_free_rq:
mlx5e_free_rq(drop_rq); mlx5e_free_drop_rq(drop_rq);
err_destroy_cq: err_destroy_cq:
mlx5e_destroy_cq(cq); mlx5e_destroy_cq(cq);
...@@ -3277,7 +3333,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv, ...@@ -3277,7 +3333,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq) void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{ {
mlx5e_destroy_rq(drop_rq); mlx5e_destroy_rq(drop_rq);
mlx5e_free_rq(drop_rq); mlx5e_free_drop_rq(drop_rq);
mlx5e_destroy_cq(&drop_rq->cq); mlx5e_destroy_cq(&drop_rq->cq);
mlx5e_free_cq(&drop_rq->cq); mlx5e_free_cq(&drop_rq->cq);
} }
...@@ -4231,6 +4287,20 @@ int mlx5e_get_vf_stats(struct net_device *dev, ...@@ -4231,6 +4287,20 @@ int mlx5e_get_vf_stats(struct net_device *dev,
} }
#endif #endif
static bool mlx5e_tunnel_proto_supported_tx(struct mlx5_core_dev *mdev, u8 proto_type)
{
switch (proto_type) {
case IPPROTO_GRE:
return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
case IPPROTO_IPIP:
case IPPROTO_IPV6:
return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_tx));
default:
return false;
}
}
static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev, static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev,
struct sk_buff *skb) struct sk_buff *skb)
{ {
...@@ -4273,7 +4343,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, ...@@ -4273,7 +4343,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
break; break;
case IPPROTO_IPIP: case IPPROTO_IPIP:
case IPPROTO_IPV6: case IPPROTO_IPV6:
if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP)) if (mlx5e_tunnel_proto_supported_tx(priv->mdev, IPPROTO_IPIP))
return features; return features;
break; break;
case IPPROTO_UDP: case IPPROTO_UDP:
...@@ -4322,6 +4392,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) ...@@ -4322,6 +4392,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
{ {
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
tx_timeout_work); tx_timeout_work);
struct net_device *netdev = priv->netdev;
int i; int i;
rtnl_lock(); rtnl_lock();
...@@ -4330,9 +4401,9 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) ...@@ -4330,9 +4401,9 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock; goto unlock;
for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) { for (i = 0; i < netdev->real_num_tx_queues; i++) {
struct netdev_queue *dev_queue = struct netdev_queue *dev_queue =
netdev_get_tx_queue(priv->netdev, i); netdev_get_tx_queue(netdev, i);
struct mlx5e_txqsq *sq = priv->txq2sq[i]; struct mlx5e_txqsq *sq = priv->txq2sq[i];
if (!netif_xmit_stopped(dev_queue)) if (!netif_xmit_stopped(dev_queue))
...@@ -4392,7 +4463,7 @@ static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog ...@@ -4392,7 +4463,7 @@ static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog
struct bpf_prog *old_prog; struct bpf_prog *old_prog;
old_prog = rcu_replace_pointer(rq->xdp_prog, prog, old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
lockdep_is_held(&rq->channel->priv->state_lock)); lockdep_is_held(&rq->priv->state_lock));
if (old_prog) if (old_prog)
bpf_prog_put(old_prog); bpf_prog_put(old_prog);
} }
...@@ -4832,6 +4903,17 @@ void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv) ...@@ -4832,6 +4903,17 @@ void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv)
priv->netdev->udp_tunnel_nic_info = &priv->nic_info; priv->netdev->udp_tunnel_nic_info = &priv->nic_info;
} }
static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev)
{
int tt;
for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5e_get_proto_by_tunnel_type(tt)))
return true;
}
return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev));
}
static void mlx5e_build_nic_netdev(struct net_device *netdev) static void mlx5e_build_nic_netdev(struct net_device *netdev)
{ {
struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_priv *priv = netdev_priv(netdev);
...@@ -4877,8 +4959,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) ...@@ -4877,8 +4959,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
mlx5e_vxlan_set_netdev_info(priv); mlx5e_vxlan_set_netdev_info(priv);
if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) || if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
mlx5e_any_tunnel_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM; netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO; netdev->hw_enc_features |= NETIF_F_TSO;
netdev->hw_enc_features |= NETIF_F_TSO6; netdev->hw_enc_features |= NETIF_F_TSO6;
...@@ -4895,7 +4976,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) ...@@ -4895,7 +4976,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM; NETIF_F_GSO_UDP_TUNNEL_CSUM;
} }
if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) { if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
netdev->hw_features |= NETIF_F_GSO_GRE | netdev->hw_features |= NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM; NETIF_F_GSO_GRE_CSUM;
netdev->hw_enc_features |= NETIF_F_GSO_GRE | netdev->hw_enc_features |= NETIF_F_GSO_GRE |
...@@ -4904,7 +4985,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) ...@@ -4904,7 +4985,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
NETIF_F_GSO_GRE_CSUM; NETIF_F_GSO_GRE_CSUM;
} }
if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_IPIP)) { if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
netdev->hw_features |= NETIF_F_GSO_IPXIP4 | netdev->hw_features |= NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_IPXIP6; NETIF_F_GSO_IPXIP6;
netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 | netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 |
...@@ -5289,10 +5370,14 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, ...@@ -5289,10 +5370,14 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
void *ppriv) void *ppriv)
{ {
struct net_device *netdev; struct net_device *netdev;
unsigned int ptp_txqs = 0;
int err; int err;
if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
ptp_txqs = profile->max_tc;
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
nch * profile->max_tc, nch * profile->max_tc + ptp_txqs,
nch * profile->rq_groups); nch * profile->rq_groups);
if (!netdev) { if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
......
...@@ -52,7 +52,6 @@ ...@@ -52,7 +52,6 @@
#include "en/xsk/rx.h" #include "en/xsk/rx.h"
#include "en/health.h" #include "en/health.h"
#include "en/params.h" #include "en/params.h"
#include "en/txrx.h"
static struct sk_buff * static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
...@@ -503,7 +502,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) ...@@ -503,7 +502,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{ {
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0]; struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
struct mlx5e_icosq *sq = &rq->channel->icosq; struct mlx5e_icosq *sq = rq->icosq;
struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe; struct mlx5e_umr_wqe *umr_wqe;
u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
...@@ -670,13 +669,13 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) ...@@ -670,13 +669,13 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
sqcc += wi->num_wqebbs; sqcc += wi->num_wqebbs;
if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
netdev_WARN_ONCE(cq->channel->netdev, netdev_WARN_ONCE(cq->netdev,
"Bad OP in ICOSQ CQE: 0x%x\n", "Bad OP in ICOSQ CQE: 0x%x\n",
get_cqe_opcode(cqe)); get_cqe_opcode(cqe));
mlx5e_dump_error_cqe(&sq->cq, sq->sqn, mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe); (struct mlx5_err_cqe *)cqe);
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
queue_work(cq->channel->priv->wq, &sq->recover_work); queue_work(cq->priv->wq, &sq->recover_work);
break; break;
} }
...@@ -697,7 +696,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) ...@@ -697,7 +696,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
break; break;
#endif #endif
default: default:
netdev_WARN_ONCE(cq->channel->netdev, netdev_WARN_ONCE(cq->netdev,
"Bad WQE type in ICOSQ WQE info: 0x%x\n", "Bad WQE type in ICOSQ WQE info: 0x%x\n",
wi->wqe_type); wi->wqe_type);
} }
...@@ -713,9 +712,9 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) ...@@ -713,9 +712,9 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
{ {
struct mlx5e_icosq *sq = &rq->channel->icosq;
struct mlx5_wq_ll *wq = &rq->mpwqe.wq; struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
u8 umr_completed = rq->mpwqe.umr_completed; u8 umr_completed = rq->mpwqe.umr_completed;
struct mlx5e_icosq *sq = rq->icosq;
int alloc_err = 0; int alloc_err = 0;
u8 missing, i; u8 missing, i;
u16 head; u16 head;
...@@ -1218,11 +1217,12 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, ...@@ -1218,11 +1217,12 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{ {
struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe; struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
struct mlx5e_priv *priv = rq->priv;
if (cqe_syndrome_needs_recover(err_cqe->syndrome) && if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
!test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) { !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe); mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
queue_work(rq->channel->priv->wq, &rq->recover_work); queue_work(priv->wq, &rq->recover_work);
} }
} }
...@@ -1771,8 +1771,9 @@ static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq ...@@ -1771,8 +1771,9 @@ static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk) int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
{ {
struct net_device *netdev = rq->netdev;
struct mlx5_core_dev *mdev = rq->mdev; struct mlx5_core_dev *mdev = rq->mdev;
struct mlx5e_channel *c = rq->channel; struct mlx5e_priv *priv = rq->priv;
switch (rq->wq_type) { switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
...@@ -1784,15 +1785,15 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool ...@@ -1784,15 +1785,15 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
rq->post_wqes = mlx5e_post_rx_mpwqes; rq->post_wqes = mlx5e_post_rx_mpwqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe_mpwqe; rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
#ifdef CONFIG_MLX5_EN_IPSEC #ifdef CONFIG_MLX5_EN_IPSEC
if (MLX5_IPSEC_DEV(mdev)) { if (MLX5_IPSEC_DEV(mdev)) {
netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n"); netdev_err(netdev, "MPWQE RQ with IPSec offload not supported\n");
return -EINVAL; return -EINVAL;
} }
#endif #endif
if (!rq->handle_rx_cqe) { if (!rq->handle_rx_cqe) {
netdev_err(c->netdev, "RX handler of MPWQE RQ is not set\n"); netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
return -EINVAL; return -EINVAL;
} }
break; break;
...@@ -1807,13 +1808,13 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool ...@@ -1807,13 +1808,13 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
#ifdef CONFIG_MLX5_EN_IPSEC #ifdef CONFIG_MLX5_EN_IPSEC
if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) && if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
c->priv->ipsec) priv->ipsec)
rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe; rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
else else
#endif #endif
rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe; rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
if (!rq->handle_rx_cqe) { if (!rq->handle_rx_cqe) {
netdev_err(c->netdev, "RX handler of RQ is not set\n"); netdev_err(netdev, "RX handler of RQ is not set\n");
return -EINVAL; return -EINVAL;
} }
} }
......
...@@ -248,24 +248,68 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw) ...@@ -248,24 +248,68 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
return idx; return idx;
} }
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
struct mlx5e_xdpsq_stats *xdpsq_red_stats)
{ {
struct mlx5e_sw_stats *s = &priv->stats.sw; s->tx_xdp_xmit += xdpsq_red_stats->xmit;
int i; s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
s->tx_xdp_nops += xdpsq_red_stats->nops;
s->tx_xdp_full += xdpsq_red_stats->full;
s->tx_xdp_err += xdpsq_red_stats->err;
s->tx_xdp_cqes += xdpsq_red_stats->cqes;
}
memset(s, 0, sizeof(*s)); static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
struct mlx5e_xdpsq_stats *xdpsq_stats)
{
s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
s->rx_xdp_tx_nops += xdpsq_stats->nops;
s->rx_xdp_tx_full += xdpsq_stats->full;
s->rx_xdp_tx_err += xdpsq_stats->err;
s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
}
for (i = 0; i < priv->max_nch; i++) { static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
struct mlx5e_channel_stats *channel_stats = struct mlx5e_xdpsq_stats *xsksq_stats)
&priv->channel_stats[i]; {
struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq; s->tx_xsk_xmit += xsksq_stats->xmit;
struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq; s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
struct mlx5e_xdpsq_stats *xsksq_stats = &channel_stats->xsksq; s->tx_xsk_inlnw += xsksq_stats->inlnw;
struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq; s->tx_xsk_full += xsksq_stats->full;
struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; s->tx_xsk_err += xsksq_stats->err;
struct mlx5e_ch_stats *ch_stats = &channel_stats->ch; s->tx_xsk_cqes += xsksq_stats->cqes;
int j; }
static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
struct mlx5e_rq_stats *xskrq_stats)
{
s->rx_xsk_packets += xskrq_stats->packets;
s->rx_xsk_bytes += xskrq_stats->bytes;
s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
s->rx_xsk_csum_none += xskrq_stats->csum_none;
s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
}
static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
struct mlx5e_rq_stats *rq_stats)
{
s->rx_packets += rq_stats->packets; s->rx_packets += rq_stats->packets;
s->rx_bytes += rq_stats->bytes; s->rx_bytes += rq_stats->bytes;
s->rx_lro_packets += rq_stats->lro_packets; s->rx_lro_packets += rq_stats->lro_packets;
...@@ -280,13 +324,6 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) ...@@ -280,13 +324,6 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
s->rx_xdp_drop += rq_stats->xdp_drop; s->rx_xdp_drop += rq_stats->xdp_drop;
s->rx_xdp_redirect += rq_stats->xdp_redirect; s->rx_xdp_redirect += rq_stats->xdp_redirect;
s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
s->rx_xdp_tx_nops += xdpsq_stats->nops;
s->rx_xdp_tx_full += xdpsq_stats->full;
s->rx_xdp_tx_err += xdpsq_stats->err;
s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
s->rx_wqe_err += rq_stats->wqe_err; s->rx_wqe_err += rq_stats->wqe_err;
s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes; s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides; s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
...@@ -315,50 +352,22 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) ...@@ -315,50 +352,22 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip; s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
s->rx_tls_err += rq_stats->tls_err; s->rx_tls_err += rq_stats->tls_err;
#endif #endif
}
static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
struct mlx5e_ch_stats *ch_stats)
{
s->ch_events += ch_stats->events; s->ch_events += ch_stats->events;
s->ch_poll += ch_stats->poll; s->ch_poll += ch_stats->poll;
s->ch_arm += ch_stats->arm; s->ch_arm += ch_stats->arm;
s->ch_aff_change += ch_stats->aff_change; s->ch_aff_change += ch_stats->aff_change;
s->ch_force_irq += ch_stats->force_irq; s->ch_force_irq += ch_stats->force_irq;
s->ch_eq_rearm += ch_stats->eq_rearm; s->ch_eq_rearm += ch_stats->eq_rearm;
/* xdp redirect */ }
s->tx_xdp_xmit += xdpsq_red_stats->xmit;
s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
s->tx_xdp_nops += xdpsq_red_stats->nops;
s->tx_xdp_full += xdpsq_red_stats->full;
s->tx_xdp_err += xdpsq_red_stats->err;
s->tx_xdp_cqes += xdpsq_red_stats->cqes;
/* AF_XDP zero-copy */
s->rx_xsk_packets += xskrq_stats->packets;
s->rx_xsk_bytes += xskrq_stats->bytes;
s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
s->rx_xsk_csum_none += xskrq_stats->csum_none;
s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
s->tx_xsk_xmit += xsksq_stats->xmit;
s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
s->tx_xsk_inlnw += xsksq_stats->inlnw;
s->tx_xsk_full += xsksq_stats->full;
s->tx_xsk_err += xsksq_stats->err;
s->tx_xsk_cqes += xsksq_stats->cqes;
for (j = 0; j < priv->max_opened_tc; j++) {
struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
struct mlx5e_sq_stats *sq_stats)
{
s->tx_packets += sq_stats->packets; s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes; s->tx_bytes += sq_stats->bytes;
s->tx_tso_packets += sq_stats->tso_packets; s->tx_tso_packets += sq_stats->tso_packets;
...@@ -391,11 +400,55 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) ...@@ -391,11 +400,55 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req; s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
#endif #endif
s->tx_cqes += sq_stats->cqes; s->tx_cqes += sq_stats->cqes;
}
static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
struct mlx5e_sw_stats *s)
{
int i;
if (!priv->port_ptp_opened)
return;
mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->port_ptp_stats.ch);
for (i = 0; i < priv->max_opened_tc; i++) {
mlx5e_stats_grp_sw_update_stats_sq(s, &priv->port_ptp_stats.sq[i]);
/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
barrier();
}
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
int i;
memset(s, 0, sizeof(*s));
for (i = 0; i < priv->max_nch; i++) {
struct mlx5e_channel_stats *channel_stats =
&priv->channel_stats[i];
int j;
mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
/* xdp redirect */
mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
/* AF_XDP zero-copy */
mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);
for (j = 0; j < priv->max_opened_tc; j++) {
mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);
/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */ /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
barrier(); barrier();
} }
} }
mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
} }
static const struct counter_desc q_stats_desc[] = { static const struct counter_desc q_stats_desc[] = {
...@@ -1656,6 +1709,37 @@ static const struct counter_desc ch_stats_desc[] = { ...@@ -1656,6 +1709,37 @@ static const struct counter_desc ch_stats_desc[] = {
{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) }, { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
}; };
static const struct counter_desc ptp_sq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
static const struct counter_desc ptp_ch_stats_desc[] = {
{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};
static const struct counter_desc ptp_cq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
};
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc) #define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc)
...@@ -1663,6 +1747,69 @@ static const struct counter_desc ch_stats_desc[] = { ...@@ -1663,6 +1747,69 @@ static const struct counter_desc ch_stats_desc[] = {
#define NUM_XSKRQ_STATS ARRAY_SIZE(xskrq_stats_desc) #define NUM_XSKRQ_STATS ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc) #define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc) #define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
#define NUM_PTP_SQ_STATS ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS ARRAY_SIZE(ptp_cq_stats_desc)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
{
return priv->port_ptp_opened ?
NUM_PTP_CH_STATS +
((NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc) :
0;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
{
int i, tc;
if (!priv->port_ptp_opened)
return idx;
for (i = 0; i < NUM_PTP_CH_STATS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
ptp_ch_stats_desc[i].format);
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_SQ_STATS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
ptp_sq_stats_desc[i].format, tc);
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_CQ_STATS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
ptp_cq_stats_desc[i].format, tc);
return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
{
int i, tc;
if (!priv->port_ptp_opened)
return idx;
for (i = 0; i < NUM_PTP_CH_STATS; i++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.ch,
ptp_ch_stats_desc, i);
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_SQ_STATS; i++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.sq[tc],
ptp_sq_stats_desc, i);
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_CQ_STATS; i++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.cq[tc],
ptp_cq_stats_desc, i);
return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels) static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{ {
...@@ -1784,6 +1931,7 @@ MLX5E_DEFINE_STATS_GRP(channels, 0); ...@@ -1784,6 +1931,7 @@ MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0); MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0); MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0); static MLX5E_DEFINE_STATS_GRP(tls, 0);
static MLX5E_DEFINE_STATS_GRP(ptp, 0);
/* The stats groups order is opposite to the update_stats() order calls */ /* The stats groups order is opposite to the update_stats() order calls */
mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = { mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
...@@ -1806,6 +1954,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = { ...@@ -1806,6 +1954,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(tls), &MLX5E_STATS_GRP(tls),
&MLX5E_STATS_GRP(channels), &MLX5E_STATS_GRP(channels),
&MLX5E_STATS_GRP(per_port_buff_congest), &MLX5E_STATS_GRP(per_port_buff_congest),
&MLX5E_STATS_GRP(ptp),
}; };
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv) unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
......
...@@ -51,6 +51,10 @@ ...@@ -51,6 +51,10 @@
#define MLX5E_DECLARE_XSKSQ_STAT(type, fld) "tx%d_xsk_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_XSKSQ_STAT(type, fld) "tx%d_xsk_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_TX_STAT(type, fld) "ptp_tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld)
struct counter_desc { struct counter_desc {
char format[ETH_GSTRING_LEN]; char format[ETH_GSTRING_LEN];
size_t offset; /* Byte offset */ size_t offset; /* Byte offset */
...@@ -398,6 +402,13 @@ struct mlx5e_ch_stats { ...@@ -398,6 +402,13 @@ struct mlx5e_ch_stats {
u64 eq_rearm; u64 eq_rearm;
}; };
struct mlx5e_ptp_cq_stats {
u64 cqe;
u64 err_cqe;
u64 abort;
u64 abort_abs_diff_ns;
};
struct mlx5e_stats { struct mlx5e_stats {
struct mlx5e_sw_stats sw; struct mlx5e_sw_stats sw;
struct mlx5e_qcounter_stats qcnt; struct mlx5e_qcounter_stats qcnt;
......
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <linux/tcp.h> #include <linux/tcp.h>
#include <linux/if_vlan.h> #include <linux/if_vlan.h>
#include <linux/ptp_classify.h>
#include <net/geneve.h> #include <net/geneve.h>
#include <net/dsfield.h> #include <net/dsfield.h>
#include "en.h" #include "en.h"
...@@ -39,6 +40,7 @@ ...@@ -39,6 +40,7 @@
#include "ipoib/ipoib.h" #include "ipoib/ipoib.h"
#include "en_accel/en_accel.h" #include "en_accel/en_accel.h"
#include "lib/clock.h" #include "lib/clock.h"
#include "en/ptp.h"
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma) static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{ {
...@@ -66,14 +68,73 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb ...@@ -66,14 +68,73 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
} }
#endif #endif
static bool mlx5e_use_ptpsq(struct sk_buff *skb)
{
struct flow_keys fk;
if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
return false;
if (fk.basic.n_proto == htons(ETH_P_1588))
return true;
if (fk.basic.n_proto != htons(ETH_P_IP) &&
fk.basic.n_proto != htons(ETH_P_IPV6))
return false;
return (fk.basic.ip_proto == IPPROTO_UDP &&
fk.ports.dst == htons(PTP_EV_PORT));
}
static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int up = 0;
if (!netdev_get_num_tc(dev))
goto return_txq;
#ifdef CONFIG_MLX5_CORE_EN_DCB
if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
up = mlx5e_get_dscp_up(priv, skb);
else
#endif
if (skb_vlan_tag_present(skb))
up = skb_vlan_tag_get_prio(skb);
return_txq:
return priv->port_ptp_tc2realtxq[up];
}
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev) struct net_device *sb_dev)
{ {
int txq_ix = netdev_pick_tx(dev, skb, NULL);
struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_priv *priv = netdev_priv(dev);
int txq_ix;
int up = 0; int up = 0;
int ch_ix; int ch_ix;
if (unlikely(priv->channels.port_ptp)) {
int num_tc_x_num_ch;
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
mlx5e_use_ptpsq(skb))
return mlx5e_select_ptpsq(dev, skb);
/* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
txq_ix = netdev_pick_tx(dev, skb, NULL);
/* Fix netdev_pick_tx() not to choose ptp_channel txqs.
* If they are selected, switch to regular queues.
* Driver to select these queues only at mlx5e_select_ptpsq().
*/
if (unlikely(txq_ix >= num_tc_x_num_ch))
txq_ix %= num_tc_x_num_ch;
} else {
txq_ix = netdev_pick_tx(dev, skb, NULL);
}
if (!netdev_get_num_tc(dev)) if (!netdev_get_num_tc(dev))
return txq_ix; return txq_ix;
...@@ -402,6 +463,12 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, ...@@ -402,6 +463,12 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_tx_check_stop(sq); mlx5e_tx_check_stop(sq);
if (unlikely(sq->ptpsq)) {
mlx5e_skb_cb_hwtstamp_init(skb);
mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb);
skb_get(skb);
}
send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more); send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more);
if (send_doorbell) if (send_doorbell)
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg); mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
...@@ -579,7 +646,7 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, ...@@ -579,7 +646,7 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
goto err_unmap; goto err_unmap;
mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE); mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
mlx5e_skb_fifo_push(sq, skb); mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);
mlx5e_tx_mpwqe_add_dseg(sq, &txd); mlx5e_tx_mpwqe_add_dseg(sq, &txd);
...@@ -707,6 +774,10 @@ static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb, ...@@ -707,6 +774,10 @@ static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u64 ts = get_cqe_ts(cqe); u64 ts = get_cqe_ts(cqe);
hwts.hwtstamp = mlx5_timecounter_cyc2time(sq->clock, ts); hwts.hwtstamp = mlx5_timecounter_cyc2time(sq->clock, ts);
if (sq->ptpsq)
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
hwts.hwtstamp, sq->ptpsq->cq_stats);
else
skb_tstamp_tx(skb, &hwts); skb_tstamp_tx(skb, &hwts);
} }
...@@ -719,7 +790,7 @@ static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_t ...@@ -719,7 +790,7 @@ static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_t
int i; int i;
for (i = 0; i < wi->num_fifo_pkts; i++) { for (i = 0; i < wi->num_fifo_pkts; i++) {
struct sk_buff *skb = mlx5e_skb_fifo_pop(sq); struct sk_buff *skb = mlx5e_skb_fifo_pop(&sq->db.skb_fifo);
mlx5e_consume_skb(sq, skb, cqe, napi_budget); mlx5e_consume_skb(sq, skb, cqe, napi_budget);
} }
...@@ -805,8 +876,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) ...@@ -805,8 +876,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
mlx5e_dump_error_cqe(&sq->cq, sq->sqn, mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe); (struct mlx5_err_cqe *)cqe);
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
queue_work(cq->channel->priv->wq, queue_work(cq->priv->wq, &sq->recover_work);
&sq->recover_work);
} }
stats->cqe_err++; stats->cqe_err++;
} }
...@@ -840,7 +910,7 @@ static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_ ...@@ -840,7 +910,7 @@ static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_
int i; int i;
for (i = 0; i < wi->num_fifo_pkts; i++) for (i = 0; i < wi->num_fifo_pkts; i++)
dev_kfree_skb_any(mlx5e_skb_fifo_pop(sq)); dev_kfree_skb_any(mlx5e_skb_fifo_pop(&sq->db.skb_fifo));
} }
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
......
...@@ -221,14 +221,13 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe) ...@@ -221,14 +221,13 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
napi_schedule(cq->napi); napi_schedule(cq->napi);
cq->event_ctr++; cq->event_ctr++;
cq->channel->stats->events++; cq->ch_stats->events++;
} }
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event) void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
{ {
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
struct mlx5e_channel *c = cq->channel; struct net_device *netdev = cq->netdev;
struct net_device *netdev = c->netdev;
netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n", netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
__func__, mcq->cqn, event); __func__, mcq->cqn, event);
......
...@@ -136,7 +136,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb, ...@@ -136,7 +136,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
eqe = next_eqe_sw(eq); eqe = next_eqe_sw(eq);
if (!eqe) if (!eqe)
goto out; return 0;
do { do {
struct mlx5_core_cq *cq; struct mlx5_core_cq *cq;
...@@ -161,8 +161,6 @@ static int mlx5_eq_comp_int(struct notifier_block *nb, ...@@ -161,8 +161,6 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
++eq->cons_index; ++eq->cons_index;
} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq))); } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
out:
eq_update_ci(eq, 1); eq_update_ci(eq, 1);
if (cqn != -1) if (cqn != -1)
...@@ -250,9 +248,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb, ...@@ -250,9 +248,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
++eq->cons_index; ++eq->cons_index;
} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq))); } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
eq_update_ci(eq, 1);
out: out:
eq_update_ci(eq, 1);
mlx5_eq_async_int_unlock(eq_async, recovery, &flags); mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
return unlikely(recovery) ? num_eqes : 0; return unlikely(recovery) ? num_eqes : 0;
......
...@@ -101,7 +101,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, ...@@ -101,7 +101,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
vport->egress.acl = esw_acl_table_create(esw, vport->vport, vport->egress.acl = esw_acl_table_create(esw, vport->vport,
MLX5_FLOW_NAMESPACE_ESW_EGRESS, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
table_size); table_size);
if (IS_ERR_OR_NULL(vport->egress.acl)) { if (IS_ERR(vport->egress.acl)) {
err = PTR_ERR(vport->egress.acl); err = PTR_ERR(vport->egress.acl);
vport->egress.acl = NULL; vport->egress.acl = NULL;
goto out; goto out;
......
...@@ -173,7 +173,7 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport ...@@ -173,7 +173,7 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
table_size++; table_size++;
vport->egress.acl = esw_acl_table_create(esw, vport->vport, vport->egress.acl = esw_acl_table_create(esw, vport->vport,
MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size); MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size);
if (IS_ERR_OR_NULL(vport->egress.acl)) { if (IS_ERR(vport->egress.acl)) {
err = PTR_ERR(vport->egress.acl); err = PTR_ERR(vport->egress.acl);
vport->egress.acl = NULL; vport->egress.acl = NULL;
return err; return err;
......
...@@ -180,7 +180,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, ...@@ -180,7 +180,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
vport->ingress.acl = esw_acl_table_create(esw, vport->vport, vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
MLX5_FLOW_NAMESPACE_ESW_INGRESS, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
table_size); table_size);
if (IS_ERR_OR_NULL(vport->ingress.acl)) { if (IS_ERR(vport->ingress.acl)) {
err = PTR_ERR(vport->ingress.acl); err = PTR_ERR(vport->ingress.acl);
vport->ingress.acl = NULL; vport->ingress.acl = NULL;
return err; return err;
......
...@@ -258,7 +258,7 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, ...@@ -258,7 +258,7 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
vport->ingress.acl = esw_acl_table_create(esw, vport->vport, vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
MLX5_FLOW_NAMESPACE_ESW_INGRESS, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
num_ftes); num_ftes);
if (IS_ERR_OR_NULL(vport->ingress.acl)) { if (IS_ERR(vport->ingress.acl)) {
err = PTR_ERR(vport->ingress.acl); err = PTR_ERR(vport->ingress.acl);
vport->ingress.acl = NULL; vport->ingress.acl = NULL;
return err; return err;
......
...@@ -1680,7 +1680,6 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw) ...@@ -1680,7 +1680,6 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw)
goto out_free; goto out_free;
} }
memset(flow_group_in, 0, inlen);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
match_criteria); match_criteria);
misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment