Commit b80b8d7a authored by David S. Miller

Merge branch 'mlx5e-xdp'

Tariq Toukan says:

====================
mlx5e XDP support

This series adds XDP support to the mlx5e driver.
This includes the XDP_DROP, XDP_PASS, and XDP_TX use cases.

Single stream performance tests show 16.5 Mpps for XDP_DROP,
and 12.4 Mpps for XDP_TX, with nice scalability for multiple streams/rings.

This rate of XDP_DROP is lower than the 32 Mpps we got with the previous
implementation, which used Striding RQ.

We moved to a non-Striding RQ, as some XDP_TX requirements (such as headroom
and packet-per-page) cannot be satisfied by the current Striding RQ HW,
and we decided to fully support both DROP and TX.

A few directions are being considered to re-enable the faster XDP_DROP rate,
e.g. letting users opt in to Striding RQ so they get optimized XDP_DROP at
the price of partial XDP_TX functionality, or some HW changes.

Series generated against net-next commit:
cf714ac1 'ipvlan: Fix dependency issue'

Thanks,
Tariq

V2:
* patch 8:
 - when XDP_TX fails, call mlx5e_page_release and drop the packet.
 - update the xdp_tx counter within mlx5e_xmit_xdp_frame.
   (mlx5e_xmit_xdp_frame's return value becomes obsolete, so change it to void)
 - drop the packet for unknown XDP return code.
* patch 9:
 - use a boolean for xdp_doorbell in the SQ struct, instead of dragging it
   throughout the function calls.
 - handle doorbell and counters within mlx5e_xmit_xdp_frame.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f9616c35 35b510e2
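For context, the three verdicts this series handles map directly onto the
return codes of an XDP program. Below is a minimal illustrative program (not
part of this series); the SEC macro is defined inline to keep the sketch
self-contained, and the bounds check is mandatory for the in-kernel verifier.

#include <linux/bpf.h>
#include <linux/if_ether.h>

#define SEC(name) __attribute__((section(name), used))

SEC("xdp")
int xdp_reflect(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;
	unsigned char tmp[ETH_ALEN];

	/* the verifier rejects programs without explicit bounds checks */
	if (data + sizeof(*eth) > data_end)
		return XDP_DROP;

	/* swap src/dst MAC and bounce the frame back out the same port;
	 * returning XDP_PASS instead would hand it to the regular stack
	 */
	__builtin_memcpy(tmp, eth->h_dest, ETH_ALEN);
	__builtin_memcpy(eth->h_dest, eth->h_source, ETH_ALEN);
	__builtin_memcpy(eth->h_source, tmp, ETH_ALEN);
	return XDP_TX;
}

char _license[] SEC("license") = "GPL";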
@@ -65,6 +65,8 @@
 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
+#define MLX5_RX_HEADROOM NET_SKB_PAD
+
 #define MLX5_MPWRQ_LOG_STRIDE_SIZE 6 /* >= 6, HW restriction */
 #define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS 8 /* >= 6, HW restriction */
 #define MLX5_MPWRQ_LOG_WQE_SZ 18
@@ -99,6 +101,18 @@
 #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
 #define MLX5E_SQ_BF_BUDGET 16
+#define MLX5E_ICOSQ_MAX_WQEBBS \
+	(DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))
+
+#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
+#define MLX5E_XDP_IHS_DS_COUNT \
+	DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS)
+#define MLX5E_XDP_TX_DS_COUNT \
+	(MLX5E_XDP_IHS_DS_COUNT + \
+	 (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
+#define MLX5E_XDP_TX_WQEBBS \
+	DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS)
+
 #define MLX5E_NUM_MAIN_GROUPS 9

 static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
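As a sanity check on the DS/WQEBB arithmetic these macros encode, here is a
standalone sketch with the usual mlx5 constants plugged in. The 48-byte
sizeof(struct mlx5e_tx_wqe) (one ctrl plus one eth segment) is an assumption
for illustration, and the "- 2" reflects the two inline header bytes carried
in the eth segment itself.

/* Standalone sketch of the XDP TX descriptor arithmetic above. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

#define ETH_HLEN		14
#define VLAN_HLEN		4
#define MLX5_SEND_WQE_DS	16
#define MLX5_SEND_WQE_BB	64
#define MLX5_SEND_WQEBB_NUM_DS	(MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
#define TX_WQE_SZ		48	/* assumed sizeof(struct mlx5e_tx_wqe) */

int main(void)
{
	int min_inline = ETH_HLEN + VLAN_HLEN;			      /* 18 */
	int ihs_ds = DIV_ROUND_UP(min_inline - 2, MLX5_SEND_WQE_DS);  /* 1 */
	int tx_ds = ihs_ds + TX_WQE_SZ / MLX5_SEND_WQE_DS + 1;	 /* + SG DS */
	int wqebbs = DIV_ROUND_UP(tx_ds, MLX5_SEND_WQEBB_NUM_DS);

	printf("inline hdrs: %d DS, XDP TX WQE: %d DS = %d WQEBB(s)\n",
	       ihs_ds, tx_ds, wqebbs);
	return 0;
}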
@@ -302,10 +316,20 @@ struct mlx5e_page_cache {
 struct mlx5e_rq {
 	/* data path */
 	struct mlx5_wq_ll      wq;
-	u32                    wqe_sz;
-	struct sk_buff       **skb;
-	struct mlx5e_mpw_info *wqe_info;
-	void                  *mtt_no_align;
+
+	union {
+		struct mlx5e_dma_info *dma_info;
+		struct {
+			struct mlx5e_mpw_info *info;
+			void                  *mtt_no_align;
+			u32                    mtt_offset;
+		} mpwqe;
+	};
+	struct {
+		u8             page_order;
+		u32            wqe_sz;    /* wqe data buffer size */
+		u8             map_dir;   /* dma map direction */
+	} buff;
 	__be32                 mkey_be;

 	struct device         *pdev;
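The new buff.map_dir field exists because XDP_TX changes the DMA story: with
a program attached, an RX page may be transmitted back out, so it must be
mapped bidirectionally. A sketch of the intended setup logic at RQ creation
(field names follow this diff; the exact placement is an assumption):

	/* sketch: choose the DMA direction when the RQ is created */
	rq->buff.map_dir = DMA_FROM_DEVICE;
	if (priv->xdp_prog)
		rq->buff.map_dir = DMA_BIDIRECTIONAL; /* XDP_TX sends RX pages */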
@@ -321,9 +345,9 @@ struct mlx5e_rq {
 	unsigned long          state;
 	int                    ix;
-	u32                    mpwqe_mtt_offset;

 	struct mlx5e_rx_am     am; /* Adaptive Moderation */
+	struct bpf_prog       *xdp_prog;

 	/* control */
 	struct mlx5_wq_ctrl    wq_ctrl;
@@ -370,11 +394,17 @@ enum {
 	MLX5E_SQ_STATE_BF_ENABLE,
 };

-struct mlx5e_ico_wqe_info {
+struct mlx5e_sq_wqe_info {
 	u8  opcode;
 	u8  num_wqebbs;
 };

+enum mlx5e_sq_type {
+	MLX5E_SQ_TXQ,
+	MLX5E_SQ_ICO,
+	MLX5E_SQ_XDP
+};
+
 struct mlx5e_sq {
 	/* data path */
@@ -392,10 +422,20 @@ struct mlx5e_sq {
 	struct mlx5e_cq            cq;

-	/* pointers to per packet info: write@xmit, read@completion */
-	struct sk_buff           **skb;
-	struct mlx5e_sq_dma       *dma_fifo;
-	struct mlx5e_tx_wqe_info  *wqe_info;
+	/* pointers to per tx element info: write@xmit, read@completion */
+	union {
+		struct {
+			struct sk_buff           **skb;
+			struct mlx5e_sq_dma       *dma_fifo;
+			struct mlx5e_tx_wqe_info  *wqe_info;
+		} txq;
+		struct mlx5e_sq_wqe_info *ico_wqe;
+		struct {
+			struct mlx5e_sq_wqe_info *wqe_info;
+			struct mlx5e_dma_info    *di;
+			bool                      doorbell;
+		} xdp;
+	} db;

 	/* read only */
 	struct mlx5_wq_cyc         wq;
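The db union is a tagged union: sq->type (added further down in this diff)
selects which view is live, and only the matching accessors may be used. An
illustrative consistency helper (not part of the series) makes the pairing
explicit:

/* illustrative only: which db.* view is valid for which sq->type */
static inline void mlx5e_sq_db_check(struct mlx5e_sq *sq)
{
	switch (sq->type) {
	case MLX5E_SQ_TXQ:	/* netdev txq: skb/dma_fifo/wqe_info */
		WARN_ON(!sq->db.txq.skb || !sq->db.txq.wqe_info);
		break;
	case MLX5E_SQ_ICO:	/* internal control operations */
		WARN_ON(!sq->db.ico_wqe);
		break;
	case MLX5E_SQ_XDP:	/* XDP TX: page refs + deferred doorbell */
		WARN_ON(!sq->db.xdp.di || !sq->db.xdp.wqe_info);
		break;
	}
}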
@@ -417,8 +457,8 @@ struct mlx5e_sq {
 	struct mlx5_uar            uar;
 	struct mlx5e_channel      *channel;
 	int                        tc;
-	struct mlx5e_ico_wqe_info *ico_wqe_info;
 	u32                        rate_limit;
+	u8                         type;
 } ____cacheline_aligned_in_smp;

 static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
@@ -434,8 +474,10 @@ enum channel_flags {
 struct mlx5e_channel {
 	/* data path */
 	struct mlx5e_rq            rq;
+	struct mlx5e_sq            xdp_sq;
 	struct mlx5e_sq            sq[MLX5E_MAX_NUM_TC];
 	struct mlx5e_sq            icosq;   /* internal control operations */
+	bool                       xdp;
 	struct napi_struct         napi;
 	struct device             *pdev;
 	struct net_device         *netdev;
@@ -617,6 +659,7 @@ struct mlx5e_priv {
 	/* priv data path fields - start */
 	struct mlx5e_sq            **txq_to_sq_map;
 	int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
+	struct bpf_prog *xdp_prog;
 	/* priv data path fields - end */

 	unsigned long              state;
@@ -663,7 +706,7 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
 int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
-void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
+void mlx5e_free_sq_descs(struct mlx5e_sq *sq);
 void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
 			bool recycle);
@@ -764,7 +807,7 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
 static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
 {
-	return rq->mpwqe_mtt_offset +
+	return rq->mpwqe.mtt_offset +
 		wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
 }
@@ -65,6 +65,9 @@ struct mlx5e_sw_stats {
 	u64 rx_csum_none;
 	u64 rx_csum_complete;
 	u64 rx_csum_unnecessary_inner;
+	u64 rx_xdp_drop;
+	u64 rx_xdp_tx;
+	u64 rx_xdp_tx_full;
 	u64 tx_csum_partial;
 	u64 tx_csum_partial_inner;
 	u64 tx_queue_stopped;
@@ -100,6 +103,9 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
@@ -278,6 +284,9 @@ struct mlx5e_rq_stats {
 	u64 csum_none;
 	u64 lro_packets;
 	u64 lro_bytes;
+	u64 xdp_drop;
+	u64 xdp_tx;
+	u64 xdp_tx_full;
 	u64 wqe_err;
 	u64 mpwqe_filler;
 	u64 buff_alloc_err;
@@ -295,6 +304,9 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
@@ -52,7 +52,6 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
 	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
 	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | 0x01);

-	sq->skb[pi] = NULL;
 	sq->pc++;
 	sq->stats.nop++;
@@ -82,15 +81,17 @@ static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
 			  u32 size,
 			  enum mlx5e_dma_map_type map_type)
 {
-	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
-	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
-	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
+	u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;
+
+	sq->db.txq.dma_fifo[i].addr = addr;
+	sq->db.txq.dma_fifo[i].size = size;
+	sq->db.txq.dma_fifo[i].type = map_type;
 	sq->dma_fifo_pc++;
 }

 static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
 {
-	return &sq->dma_fifo[i & sq->dma_fifo_mask];
+	return &sq->db.txq.dma_fifo[i & sq->dma_fifo_mask];
 }

 static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
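The hoisted index i relies on the dma_fifo being a power-of-two ring, where
dma_fifo_mask == fifo_size - 1, so the AND is a cheap modulo. A
self-contained illustration of that invariant:

/* power-of-two ring indexing: (pc & mask) == (pc % size) when
 * size is a power of two and mask == size - 1
 */
#include <assert.h>

int main(void)
{
	unsigned int size = 64;		/* must be a power of two */
	unsigned int mask = size - 1;

	for (unsigned int pc = 0; pc < 1000; pc++)
		assert((pc & mask) == (pc % size));
	return 0;
}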
@@ -221,7 +222,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	u16 pi = sq->pc & wq->sz_m1;
 	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-	struct mlx5e_tx_wqe_info *wi = &sq->wqe_info[pi];
+	struct mlx5e_tx_wqe_info *wi = &sq->db.txq.wqe_info[pi];

 	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
 	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
@@ -341,7 +342,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
 	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

-	sq->skb[pi] = skb;
+	sq->db.txq.skb[pi] = skb;

 	wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
 	sq->pc += wi->num_wqebbs;
@@ -368,8 +369,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	}

 	/* fill sq edge with nops to avoid wqe wrap around */
-	while ((sq->pc & wq->sz_m1) > sq->edge)
+	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
+		sq->db.txq.skb[pi] = NULL;
 		mlx5e_send_nop(sq, false);
+	}

 	if (bf)
 		sq->bf_budget--;
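Note the ownership shift here: mlx5e_send_nop() no longer clears sq->skb[]
(that line was removed in an earlier hunk), since ICO and XDP SQs share it
but have no skb array; the TXQ-only bookkeeping now happens at this call
site. The completion side's NOP detection, visible in the mlx5e_poll_tx_cq
hunk below, keys off exactly this NULL:

	/* completion path (existing code, condensed): a NULL skb slot
	 * means the WQE was a NOP posted to fill the ring edge
	 */
	skb = sq->db.txq.skb[ci];
	if (unlikely(!skb)) {	/* nop */
		sqcc++;
		continue;
	}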
@@ -442,8 +445,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 			last_wqe = (sqcc == wqe_counter);

 			ci = sqcc & sq->wq.sz_m1;
-			skb = sq->skb[ci];
-			wi = &sq->wqe_info[ci];
+			skb = sq->db.txq.skb[ci];
+			wi = &sq->db.txq.wqe_info[ci];

 			if (unlikely(!skb)) { /* nop */
 				sqcc++;
return (i == MLX5E_TX_CQ_POLL_BUDGET); return (i == MLX5E_TX_CQ_POLL_BUDGET);
} }
void mlx5e_free_tx_descs(struct mlx5e_sq *sq) static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
{ {
struct mlx5e_tx_wqe_info *wi; struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb; struct sk_buff *skb;
@@ -501,8 +504,8 @@ void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
 	while (sq->cc != sq->pc) {
 		ci = sq->cc & sq->wq.sz_m1;
-		skb = sq->skb[ci];
-		wi = &sq->wqe_info[ci];
+		skb = sq->db.txq.skb[ci];
+		wi = &sq->db.txq.wqe_info[ci];

 		if (!skb) { /* nop */
 			sq->cc++;
@@ -520,3 +523,37 @@ void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
 		sq->cc += wi->num_wqebbs;
 	}
 }
+
+static void mlx5e_free_xdp_sq_descs(struct mlx5e_sq *sq)
+{
+	struct mlx5e_sq_wqe_info *wi;
+	struct mlx5e_dma_info *di;
+	u16 ci;
+
+	while (sq->cc != sq->pc) {
+		ci = sq->cc & sq->wq.sz_m1;
+		di = &sq->db.xdp.di[ci];
+		wi = &sq->db.xdp.wqe_info[ci];
+
+		if (wi->opcode == MLX5_OPCODE_NOP) {
+			sq->cc++;
+			continue;
+		}
+
+		sq->cc += wi->num_wqebbs;
+
+		mlx5e_page_release(&sq->channel->rq, di, false);
+	}
+}
+
+void mlx5e_free_sq_descs(struct mlx5e_sq *sq)
+{
+	switch (sq->type) {
+	case MLX5E_SQ_TXQ:
+		mlx5e_free_txq_sq_descs(sq);
+		break;
+	case MLX5E_SQ_XDP:
+		mlx5e_free_xdp_sq_descs(sq);
+		break;
+	}
+}
@@ -72,7 +72,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 	do {
 		u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
-		struct mlx5e_ico_wqe_info *icowi = &sq->ico_wqe_info[ci];
+		struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];

 		mlx5_cqwq_pop(&cq->wq);
 		sqcc += icowi->num_wqebbs;
@@ -105,6 +105,66 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 	sq->cc = sqcc;
 }

+static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq)
+{
+	struct mlx5e_sq *sq;
+	u16 sqcc;
+	int i;
+
+	sq = container_of(cq, struct mlx5e_sq, cq);
+
+	if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
+		return false;
+
+	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+	 * otherwise a cq overrun may occur
+	 */
+	sqcc = sq->cc;
+
+	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+		struct mlx5_cqe64 *cqe;
+		u16 wqe_counter;
+		bool last_wqe;
+
+		cqe = mlx5e_get_cqe(cq);
+		if (!cqe)
+			break;
+
+		mlx5_cqwq_pop(&cq->wq);
+
+		wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+		do {
+			struct mlx5e_sq_wqe_info *wi;
+			struct mlx5e_dma_info *di;
+			u16 ci;
+
+			last_wqe = (sqcc == wqe_counter);
+
+			ci = sqcc & sq->wq.sz_m1;
+			di = &sq->db.xdp.di[ci];
+			wi = &sq->db.xdp.wqe_info[ci];
+
+			if (unlikely(wi->opcode == MLX5_OPCODE_NOP)) {
+				sqcc++;
+				continue;
+			}
+
+			sqcc += wi->num_wqebbs;
+			/* Recycle RX page */
+			mlx5e_page_release(&sq->channel->rq, di, true);
+		} while (!last_wqe);
+	}
+
+	mlx5_cqwq_update_db_record(&cq->wq);
+
+	/* ensure cq space is freed before enabling more cqes */
+	wmb();
+
+	sq->cc = sqcc;
+	return (i == MLX5E_TX_CQ_POLL_BUDGET);
+}
+
 int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 {
 	struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
@@ -121,6 +181,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 	work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
 	busy |= work_done == budget;

+	if (c->xdp)
+		busy |= mlx5e_poll_xdp_tx_cq(&c->xdp_sq.cq);
+
 	mlx5e_poll_ico_cq(&c->icosq.cq);

 	busy |= mlx5e_post_rx_wqes(&c->rq);
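This NAPI hook is also where the V2 doorbell batching pays off:
mlx5e_xmit_xdp_frame() (patch 8/9 of the series, not shown in this excerpt)
only sets db.xdp.doorbell, and the RX poll flushes it once per batch instead
of once per XDP_TX packet. A hedged sketch of that flush, with
mlx5e_xmit_xdp_doorbell() as a hypothetical helper name:

/* illustrative flush, run at the end of the RX poll:
 * ring the XDP SQ doorbell at most once per NAPI batch
 */
static inline void mlx5e_xdp_sq_flush(struct mlx5e_sq *sq)
{
	if (sq->db.xdp.doorbell) {
		mlx5e_xmit_xdp_doorbell(sq);	/* hypothetical helper */
		sq->db.xdp.doorbell = false;
	}
}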