Commit 34802a42 authored by Achiad Shochat, committed by David S. Miller

net/mlx5e: Do not modify the TX SKB

If the SKB is cloned, or has an elevated users count, someone else
can be looking at it at the same time.
Signed-off-by: Achiad Shochat <achiad@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 33c15297
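
Background for the diff below: the old TX path stashed per-packet bookkeeping in skb->cb through the MLX5E_TX_SKB_CB() macro and trimmed inlined headers with skb_pull_inline(), both of which write to the SKB. When the SKB is shared (for instance pktgen transmitting with an elevated users count), the driver must treat it as read-only. The patch therefore moves the bookkeeping into a driver-owned wqe_info ring and replaces the pulls with local cursors. A minimal standalone sketch of that cursor pattern (illustrative names, not driver code):

#include <string.h>

/* Sketch only: mimics mlx5e_tx_skb_pull_inline() from the patch below.
 * The packet's own data/len fields are never touched; only local copies
 * advance, so a shared SKB is read but never written.
 */
static void pull_local(const unsigned char **data, unsigned int *len,
		       unsigned int n)
{
	*data += n;	/* advance the private cursor ...    */
	*len -= n;	/* ... and shrink the private length */
}

static unsigned int inline_headers(void *dst, const unsigned char *pkt_data,
				   unsigned int pkt_len, unsigned int ihs)
{
	const unsigned char *cursor = pkt_data;	/* local copy of skb->data */
	unsigned int remaining = pkt_len;	/* local copy of skb->len  */

	memcpy(dst, cursor, ihs);		/* read-only w.r.t. the packet */
	pull_local(&cursor, &remaining, ihs);
	return remaining;			/* bytes left for DMA mapping */
}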
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -328,14 +328,12 @@ struct mlx5e_rq {
 	struct mlx5e_priv     *priv;
 } ____cacheline_aligned_in_smp;
 
-struct mlx5e_tx_skb_cb {
+struct mlx5e_tx_wqe_info {
 	u32 num_bytes;
 	u8  num_wqebbs;
 	u8  num_dma;
 };
 
-#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
-
 enum mlx5e_dma_map_type {
 	MLX5E_DMA_MAP_SINGLE,
 	MLX5E_DMA_MAP_PAGE
@@ -371,6 +369,7 @@ struct mlx5e_sq {
 	/* pointers to per packet info: write@xmit, read@completion */
 	struct sk_buff           **skb;
 	struct mlx5e_sq_dma       *dma_fifo;
+	struct mlx5e_tx_wqe_info  *wqe_info;
 
 	/* read only */
 	struct mlx5_wq_cyc         wq;
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -507,6 +507,7 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
 
 static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
 {
+	kfree(sq->wqe_info);
 	kfree(sq->dma_fifo);
 	kfree(sq->skb);
 }
@@ -519,8 +520,10 @@ static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
 	sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
 	sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
 				    numa);
+	sq->wqe_info = kzalloc_node(wq_sz * sizeof(*sq->wqe_info), GFP_KERNEL,
+				    numa);
 
-	if (!sq->skb || !sq->dma_fifo) {
+	if (!sq->skb || !sq->dma_fifo || !sq->wqe_info) {
 		mlx5e_free_sq_db(sq);
 		return -ENOMEM;
 	}
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -92,11 +92,11 @@ static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
 	return &sq->dma_fifo[i & sq->dma_fifo_mask];
 }
 
-static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
 {
 	int i;
 
-	for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
+	for (i = 0; i < num_dma; i++) {
 		struct mlx5e_sq_dma *last_pushed_dma =
 			mlx5e_dma_get(sq, --sq->dma_fifo_pc);
 
@@ -139,19 +139,28 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
 	return MLX5E_MIN_INLINE;
 }
 
-static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
+static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
+					    unsigned int *skb_len,
+					    unsigned int len)
+{
+	*skb_len -= len;
+	*skb_data += len;
+}
+
+static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
+				     unsigned char **skb_data,
+				     unsigned int *skb_len)
 {
 	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
 	int cpy1_sz = 2 * ETH_ALEN;
 	int cpy2_sz = ihs - cpy1_sz;
 
-	skb_copy_from_linear_data(skb, vhdr, cpy1_sz);
-	skb_pull_inline(skb, cpy1_sz);
+	memcpy(vhdr, *skb_data, cpy1_sz);
+	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
 	vhdr->h_vlan_proto = skb->vlan_proto;
 	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
-	skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto,
-				  cpy2_sz);
-	skb_pull_inline(skb, cpy2_sz);
+	memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
+	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
 }
 
 static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
@@ -160,11 +169,14 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 
 	u16 pi = sq->pc & wq->sz_m1;
 	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
+	struct mlx5e_tx_wqe_info *wi   = &sq->wqe_info[pi];
 
 	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
 	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
 	struct mlx5_wqe_data_seg *dseg;
 
+	unsigned char *skb_data = skb->data;
+	unsigned int skb_len = skb->len;
 	u8  opcode = MLX5_OPCODE_SEND;
 	dma_addr_t dma_addr = 0;
 	bool bf = false;
@@ -192,7 +204,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		opcode = MLX5_OPCODE_LSO;
 		ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
 		payload_len = skb->len - ihs;
-		MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
+		wi->num_bytes = skb->len +
 				(skb_shinfo(skb)->gso_segs - 1) * ihs;
 		sq->stats.tso_packets++;
 		sq->stats.tso_bytes += payload_len;
@@ -201,16 +213,16 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		       !skb->xmit_more &&
 		       !skb_shinfo(skb)->nr_frags;
 		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
-		MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
-							ETH_ZLEN);
+		wi->num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
 	}
 
 	if (skb_vlan_tag_present(skb)) {
-		mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs);
+		mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
+				  &skb_len);
 		ihs += VLAN_HLEN;
 	} else {
-		skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
-		skb_pull_inline(skb, ihs);
+		memcpy(eseg->inline_hdr_start, skb_data, ihs);
+		mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
 	}
 
 	eseg->inline_hdr_sz = cpu_to_be16(ihs);
@@ -220,11 +232,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 			       MLX5_SEND_WQE_DS);
 	dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
 
-	MLX5E_TX_SKB_CB(skb)->num_dma = 0;
+	wi->num_dma = 0;
 
-	headlen = skb_headlen(skb);
+	headlen = skb_len - skb->data_len;
 	if (headlen) {
-		dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
+		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
 					  DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
 			goto dma_unmap_wqe_err;
@@ -234,7 +246,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		dseg->byte_count = cpu_to_be32(headlen);
 
 		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
-		MLX5E_TX_SKB_CB(skb)->num_dma++;
+		wi->num_dma++;
 
 		dseg++;
 	}
@@ -253,23 +265,22 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		dseg->byte_count = cpu_to_be32(fsz);
 
 		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
-		MLX5E_TX_SKB_CB(skb)->num_dma++;
+		wi->num_dma++;
 
 		dseg++;
 	}
 
-	ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma;
+	ds_cnt += wi->num_dma;
 
 	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
 	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
 
 	sq->skb[pi] = skb;
 
-	MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt,
-							MLX5_SEND_WQEBB_NUM_DS);
-	sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+	wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
+	sq->pc += wi->num_wqebbs;
 
-	netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);
+	netdev_tx_sent_queue(sq->txq, wi->num_bytes);
 
 	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
 		netif_tx_stop_queue(sq->txq);
@@ -280,7 +291,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		int bf_sz = 0;
 
 		if (bf && sq->uar_bf_map)
-			bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3;
+			bf_sz = wi->num_wqebbs << 3;
 
 		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
 		mlx5e_tx_notify_hw(sq, wqe, bf_sz);
@@ -297,7 +308,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 dma_unmap_wqe_err:
 	sq->stats.dropped++;
-	mlx5e_dma_unmap_wqe_err(sq, skb);
+	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
 
 	dev_kfree_skb_any(skb);
 
@@ -352,6 +363,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
 
 		do {
+			struct mlx5e_tx_wqe_info *wi;
 			struct sk_buff *skb;
 			u16 ci;
 			int j;
@@ -360,6 +372,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
 			ci = sqcc & sq->wq.sz_m1;
 			skb = sq->skb[ci];
+			wi = &sq->wqe_info[ci];
 
 			if (unlikely(!skb)) { /* nop */
 				sq->stats.nop++;
@@ -367,7 +380,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
 				continue;
 			}
 
-			for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
+			for (j = 0; j < wi->num_dma; j++) {
 				struct mlx5e_sq_dma *dma =
 					mlx5e_dma_get(sq, dma_fifo_cc++);
 
@@ -375,8 +388,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
 			}
 
 			npkts++;
-			nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes;
-			sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+			nbytes += wi->num_bytes;
+			sqcc += wi->num_wqebbs;
 			dev_kfree_skb(skb);
 		} while (!last_wqe);
 	}
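
A note on the design choice: the kernel does provide helpers to test for the sharing conditions the commit message names, and a driver that truly needed to modify an SKB could make a private copy first. That costs a copy per packet, which is why the patch instead keeps the whole TX path read-only with respect to the SKB. A hedged sketch of the avoided alternative (uses real skbuff helpers; tx_writable_skb is an illustrative name, not part of this patch):

#include <linux/skbuff.h>

/* The defensive alternative this patch avoids: take a private copy when
 * the SKB struct is shared (users > 1) or its data is shared with a clone.
 * Copying per packet is costly, so mlx5e opts to never write the SKB at all.
 */
static struct sk_buff *tx_writable_skb(struct sk_buff *skb)
{
	if (skb_shared(skb) || skb_cloned(skb))
		return skb_copy(skb, GFP_ATOMIC);	/* NULL on failure */
	return skb;
}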