Commit 58a51894 authored by Aya Levin, committed by Saeed Mahameed

net/mlx5e: Add resiliency for PTP TX port timestamp

PTP TX port timestamping relies on receiving 2 CQEs for each outgoing
packet (WQE). The regular CQE carries a less accurate timestamp than
the wire CQE. On a link change, the wire CQE may get lost. Let the
driver detect the loss, restore the relation between the CQEs, and
re-sync after a timeout.

Add resiliency as follows: stamp an id (the producer counter) into
the WQE's metadata. The device echoes this id back in the wire CQE's
wqe_counter field. When handling a wire CQE, if the id does not match
the expected one, complete the skipped SKBs toward the PTP
application with the timestamp from the regular CQE, restoring the
sync between the CQEs and their SKBs. This patch adds 2 PTP counters:
1) ptp_cq0_resync_event: number of times a mismatch was detected
   between the regular CQE and the wire CQE.
2) ptp_cq0_resync_cqe: total number of missing wire CQEs.
Signed-off-by: Aya Levin <ayal@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 2e5e4185
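For intuition, here is a minimal user-space sketch of the id-matching scheme the patch introduces (illustrative only, not driver code: the 6-bit mask width, the variable names, and the lost-CQE scenario are assumptions):

/*
 * Standalone sketch: each PTP WQE carries a masked producer counter,
 * and the wire CQE echoes it back in wqe_counter. A mismatch against
 * the skb FIFO's consumer counter means wire CQEs were lost, so the
 * affected skbs are replayed with the regular-CQE timestamp until the
 * counters line up again.
 */
#include <stdint.h>
#include <stdio.h>

#define CTR_MASK 0x3f	/* assumed 6-bit id window */

static uint16_t ctr2idx(uint16_t val)
{
	return val & CTR_MASK;
}

int main(void)
{
	uint16_t fifo_cc = 60;		/* skb FIFO consumer counter */
	uint16_t wqe_counter = 63;	/* id echoed in the wire CQE */
	uint16_t skb_cc = ctr2idx(fifo_cc);
	uint16_t skb_id = ctr2idx(wqe_counter);

	while (skb_cc != skb_id) {	/* wire CQEs for ids 60..62 were lost */
		printf("replay skb %u with the regular-CQE timestamp\n", skb_cc);
		fifo_cc++;		/* pop one skb from the FIFO */
		skb_cc = ctr2idx(fifo_cc);
	}
	printf("skb %u matches, deliver the wire timestamp\n", skb_cc);
	return 0;
}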
@@ -79,19 +79,49 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
 	memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
 }
 
+#define PTP_WQE_CTR2IDX(val)	((val) & ptpsq->ts_cqe_ctr_mask)
+
+static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
+{
+	return (ptpsq->ts_cqe_ctr_mask && (skb_cc != skb_id));
+}
+
+static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
+{
+	struct skb_shared_hwtstamps hwts = {};
+	struct sk_buff *skb;
+
+	ptpsq->cq_stats->resync_event++;
+
+	while (skb_cc != skb_id) {
+		skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+		hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
+		skb_tstamp_tx(skb, &hwts);
+		ptpsq->cq_stats->resync_cqe++;
+		skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+	}
+}
+
 static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
 				    struct mlx5_cqe64 *cqe,
 				    int budget)
 {
-	struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+	u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
+	u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
 	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+	struct sk_buff *skb;
 	ktime_t hwtstamp;
 
 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+		skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
 		ptpsq->cq_stats->err_cqe++;
 		goto out;
 	}
 
+	if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id))
+		mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id);
+
+	skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
 	hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
 	mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
 				      hwtstamp, ptpsq->cq_stats);
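Two details worth noting in the hunk above. First, mlx5e_ptp_ts_cqe_drop() short-circuits on a zero ts_cqe_ctr_mask, so devices without the capability keep the legacy pop-in-order behaviour. Second, the FIFO pop moved below the counter snapshot so skb_cc reflects the FIFO state before this CQE's skb is consumed. A stand-alone sketch of the first point (names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the drop check: a zero mask (capability absent) always
 * reports "no drop", so the resync path never triggers. */
static bool ts_cqe_drop(uint16_t ctr_mask, uint16_t skb_cc, uint16_t skb_id)
{
	return ctr_mask && (skb_cc != skb_id);
}

int main(void)
{
	printf("%d\n", ts_cqe_drop(0x00, 5, 9));	/* 0: feature disabled */
	printf("%d\n", ts_cqe_drop(0x3f, 5, 9));	/* 1: mismatch, resync */
	printf("%d\n", ts_cqe_drop(0x3f, 7, 7));	/* 0: in sync */
	return 0;
}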
@@ -241,6 +271,7 @@ static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
 static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
 {
 	int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
+	struct mlx5_core_dev *mdev = ptpsq->txqsq.mdev;
 
 	ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
 					     GFP_KERNEL, numa);
@@ -250,7 +281,9 @@ static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
 	ptpsq->skb_fifo.pc = &ptpsq->skb_fifo_pc;
 	ptpsq->skb_fifo.cc = &ptpsq->skb_fifo_cc;
 	ptpsq->skb_fifo.mask = wq_sz - 1;
+	if (MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
+		ptpsq->ts_cqe_ctr_mask =
+			(1 << MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter)) - 1;
 
 	return 0;
 }
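Judging by the mask computation above, the new ts_cqe_metadata_size2wqe_counter capability reports how many low-order metadata bits the device echoes back in wqe_counter. A worked example (the capability value 6 is an assumption):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int cap = 6;	/* hypothetical capability value */
	uint16_t mask = (1u << cap) - 1;

	/* mask = 0x3f: ids are matched modulo 64 in-flight WQEs */
	printf("ts_cqe_ctr_mask = 0x%x (%u ids)\n", mask, mask + 1u);
	return 0;
}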
@@ -17,6 +17,7 @@ struct mlx5e_ptpsq {
 	u16 skb_fifo_pc;
 	struct mlx5e_skb_fifo skb_fifo;
 	struct mlx5e_ptp_cq_stats *cq_stats;
+	u16 ts_cqe_ctr_mask;
 };
 
 enum {
@@ -2100,6 +2100,8 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
+	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
+	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
 };
 
 static const struct counter_desc ptp_rq_stats_desc[] = {
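Once this lands, the two counters show up in ethtool -S output under the names given in the commit message, ptp_cq0_resync_event and ptp_cq0_resync_cqe: the former counts mismatch episodes, the latter counts individual skbs replayed with the regular-CQE timestamp.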
@@ -453,6 +453,8 @@ struct mlx5e_ptp_cq_stats {
 	u64 err_cqe;
 	u64 abort;
 	u64 abort_abs_diff_ns;
+	u64 resync_cqe;
+	u64 resync_event;
 };
 
 struct mlx5e_stats {
@@ -631,12 +631,22 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
 	mlx5e_tx_mpwqe_session_complete(sq);
 }
 
+static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
+				 struct mlx5_wqe_eth_seg *eseg)
+{
+	if (ptpsq->ts_cqe_ctr_mask && unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+		eseg->flow_table_metadata = cpu_to_be32(ptpsq->skb_fifo_pc &
+							ptpsq->ts_cqe_ctr_mask);
+}
+
 static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
 				   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
 				   struct mlx5_wqe_eth_seg *eseg, u16 ihs)
 {
 	mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
 	mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
+	if (unlikely(sq->ptpsq))
+		mlx5e_cqe_ts_id_eseg(sq->ptpsq, skb, eseg);
 }
 
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
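On transmit, the masked producer counter is written into the eth segment's flow_table_metadata, which is what the device later echoes as wqe_counter. A user-space sketch of the stamped value (the counter value and mask width are illustrative; htonl() stands in for the kernel's cpu_to_be32()):

#include <arpa/inet.h>	/* htonl(), standing in for cpu_to_be32() */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t skb_fifo_pc = 130;	/* producer counter at xmit time */
	uint16_t mask = 0x3f;		/* assumed ts_cqe_ctr_mask */
	uint32_t id = skb_fifo_pc & mask;	/* 130 & 0x3f = 2 */
	uint32_t metadata = htonl(id);	/* big-endian, as stored in the WQE */

	(void)metadata;
	printf("stamped id %u into flow_table_metadata\n", id);
	return 0;
}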