Commit 98b16349 authored by Eric Dumazet, committed by David S. Miller

net/mlx4_en: Align tx path structures to cache lines

Reorganize struct mlx4_en_tx_ring to have:
- One cache line containing last_nr_txbb & cons & wake_queue, used by tx
  completion.
- One cache line containing fields dirtied by mlx4_en_xmit()
- Following part is read mostly and shared by cpus.

Align struct mlx4_en_tx_info to a cache line
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7dfa4b41
@@ -222,7 +222,7 @@ struct mlx4_en_tx_info {
 	u8 data_offset;
 	u8 inl;
 	u8 ts_requested;
-};
+} ____cacheline_aligned_in_smp;
 
 #define MLX4_EN_BIT_DESC_OWN	0x80000000
@@ -253,40 +253,46 @@ struct mlx4_en_rx_alloc {
 };
 
 struct mlx4_en_tx_ring {
+	/* cache line used and dirtied in tx completion
+	 * (mlx4_en_free_tx_buf())
+	 */
+	u32			last_nr_txbb;
+	u32			cons;
+	unsigned long		wake_queue;
+
+	/* cache line used and dirtied in mlx4_en_xmit() */
+	u32			prod ____cacheline_aligned_in_smp;
+	unsigned long		bytes;
+	unsigned long		packets;
+	unsigned long		tx_csum;
+	unsigned long		tso_packets;
+	unsigned long		xmit_more;
+	struct mlx4_bf		bf;
+	unsigned long		queue_stopped;
+
+	/* Following part should be mostly read */
+	cpumask_t		affinity_mask;
+	struct mlx4_qp		qp;
 	struct mlx4_hwq_resources wqres;
-	u32 size ; /* number of TXBBs */
+	u32			size; /* number of TXBBs */
 	u32 size_mask;
 	u16 stride;
 	u16 cqn;	/* index of port CQ associated with this ring */
-	u32 prod;
-	u32 cons;
 	u32 buf_size;
 	u32 doorbell_qpn;
 	void *buf;
 	struct mlx4_en_tx_info *tx_info;
 	u8 *bounce_buf;
-	u8 queue_index;
-	cpumask_t affinity_mask;
-	u32 last_nr_txbb;
-	struct mlx4_qp qp;
 	struct mlx4_qp_context context;
 	int qpn;
 	enum mlx4_qp_state qp_state;
-	struct mlx4_srq dummy;
-	unsigned long bytes;
-	unsigned long packets;
-	unsigned long tx_csum;
-	unsigned long queue_stopped;
-	unsigned long wake_queue;
-	unsigned long tso_packets;
-	unsigned long xmit_more;
-	struct mlx4_bf bf;
+	u8			queue_index;
 	bool bf_enabled;
 	bool bf_alloced;
 	struct netdev_queue *tx_queue;
 	int hwtstamp_tx_type;
 	int inline_thold;
-};
+} ____cacheline_aligned_in_smp;
 
 struct mlx4_en_rx_desc {
 	/* actual number of entries depends on rx ring stride */
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment