Commit ee4576fc authored by David S. Miller

Merge tag 'mlx5-updates-2019-01-25' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2019-01-25

This series provides some updates to the mlx5 driver:

From Tariq,
1) Make sure the RX packet header does not cross a page boundary.
   To avoid the crossing, use a stride size that fits the maximum
   possible header. The stride is increased from 64B to 256B.

2) CQ struct cleanup: Take CQ decompress fields into a separate structure

From Moshe,
3) Expand XPS cpumask to cover all online cpus

From Jason Gunthorpe and Tariq:
4) Compilation warning cleanup

From Or,
5) Add trace points for flow tables create/destroy

From Saeed,
6) Software stats update/folding improvements
   This also solves a compilation warning on 32-bit systems that was
   reported last release cycle by Arnd and Andrew.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 30e5c2c6 b832d4fd
@@ -258,6 +258,8 @@ const char *parse_fs_dst(struct trace_seq *p,
 	return ret;
 }
 
+EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_add_ft);
+EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_del_ft);
 EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_add_fg);
 EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_del_fg);
 EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_set_fte);
...
@@ -61,6 +61,41 @@ const char *parse_fs_dst(struct trace_seq *p,
 			 const struct mlx5_flow_destination *dst,
 			 u32 counter_id);
 
+TRACE_EVENT(mlx5_fs_add_ft,
+	    TP_PROTO(const struct mlx5_flow_table *ft),
+	    TP_ARGS(ft),
+	    TP_STRUCT__entry(
+		__field(const struct mlx5_flow_table *, ft)
+		__field(u32, id)
+		__field(u32, level)
+		__field(u32, type)
+	    ),
+	    TP_fast_assign(
+			   __entry->ft = ft;
+			   __entry->id = ft->id;
+			   __entry->level = ft->level;
+			   __entry->type = ft->type;
+	    ),
+	    TP_printk("ft=%p id=%u level=%u type=%u \n",
+		      __entry->ft, __entry->id, __entry->level, __entry->type)
+	    );
+
+TRACE_EVENT(mlx5_fs_del_ft,
+	    TP_PROTO(const struct mlx5_flow_table *ft),
+	    TP_ARGS(ft),
+	    TP_STRUCT__entry(
+		__field(const struct mlx5_flow_table *, ft)
+		__field(u32, id)
+	    ),
+	    TP_fast_assign(
+			   __entry->ft = ft;
+			   __entry->id = ft->id;
+	    ),
+	    TP_printk("ft=%p id=%u\n",
+		      __entry->ft, __entry->id)
+	    );
+
 TRACE_EVENT(mlx5_fs_add_fg,
 	    TP_PROTO(const struct mlx5_flow_group *fg),
 	    TP_ARGS(fg),
...
@@ -76,15 +76,14 @@ struct page_pool;
 #define MLX5_SKB_FRAG_SZ(len)	(SKB_DATA_ALIGN(len) +	\
 				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
+#define MLX5E_RX_MAX_HEAD (256)
+
 #define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
 	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
 #define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
 	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
-#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev)       MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
-#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
-#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
-	(cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
-	MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))
+#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
+	MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
 
 #define MLX5_MPWRQ_LOG_WQE_SZ			18
 #define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
@@ -119,8 +118,6 @@ struct page_pool;
 
 #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
 
-#define MLX5E_RX_MAX_HEAD (256)
-
 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ		(64 * 1024)
 #define MLX5E_DEFAULT_LRO_TIMEOUT		32
 #define MLX5E_LRO_TIMEOUT_ARR_SIZE		4
@@ -309,16 +306,18 @@ struct mlx5e_cq {
 	struct mlx5_core_cq        mcq;
 	struct mlx5e_channel      *channel;
 
+	/* control */
+	struct mlx5_core_dev      *mdev;
+	struct mlx5_wq_ctrl        wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_cq_decomp {
 	/* cqe decompression */
 	struct mlx5_cqe64          title;
 	struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
 	u8                         mini_arr_idx;
-	u16                        decmprs_left;
-	u16                        decmprs_wqe_counter;
-
-	/* control */
-	struct mlx5_core_dev      *mdev;
-	struct mlx5_wq_ctrl        wq_ctrl;
+	u16                        left;
+	u16                        wqe_counter;
 } ____cacheline_aligned_in_smp;
 
 struct mlx5e_tx_wqe_info {
@@ -581,6 +580,7 @@ struct mlx5e_rq {
 	struct net_device     *netdev;
 	struct mlx5e_rq_stats *stats;
 	struct mlx5e_cq        cq;
+	struct mlx5e_cq_decomp cqd;
 	struct mlx5e_page_cache page_cache;
 	struct hwtstamp_config *tstamp;
 	struct mlx5_clock     *clock;
@@ -638,6 +638,7 @@ struct mlx5e_channel {
 	struct hwtstamp_config    *tstamp;
 	int                        ix;
 	int                        cpu;
+	cpumask_var_t              xps_cpumask;
 };
 
 struct mlx5e_channels {
@@ -803,6 +804,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 
 void mlx5e_update_stats(struct mlx5e_priv *priv);
 void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
+void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
 
 void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
 int mlx5e_self_test_num(struct mlx5e_priv *priv);
...
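
To see why the new 256B stride keeps the RX header inside a single page, here is a minimal userspace sketch, assuming 4KB pages; order_base_2() is reimplemented for userspace (the kernel's lives in <linux/log2.h>):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	  4096	/* assumed page size for this sketch */
#define MLX5E_RX_MAX_HEAD 256	/* max possible header, per this patch */

/* userspace stand-in for the kernel's order_base_2() */
static unsigned int order_base_2(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int log_stride = order_base_2(MLX5E_RX_MAX_HEAD); /* 8 */
	unsigned int stride = 1u << log_stride;			    /* 256B */
	unsigned int s;

	/* every stride start is 256B-aligned, and 4096 % 256 == 0, so a
	 * maximum-size header never straddles a page boundary
	 */
	for (s = 0; s < PAGE_SIZE; s += stride)
		assert(s / PAGE_SIZE ==
		       (s + MLX5E_RX_MAX_HEAD - 1) / PAGE_SIZE);

	printf("log_stride=%u stride=%uB\n", log_stride, stride);
	return 0;
}
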
@@ -171,8 +171,7 @@ static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
 	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
 		return order_base_2(mlx5e_rx_get_linear_frag_sz(params));
 
-	return MLX5E_MPWQE_STRIDE_SZ(mdev,
-		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
+	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
 }
 
 static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
@@ -1950,6 +1949,29 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
 	return err;
 }
 
+static int mlx5e_alloc_xps_cpumask(struct mlx5e_channel *c,
+				   struct mlx5e_params *params)
+{
+	int num_comp_vectors = mlx5_comp_vectors_count(c->mdev);
+	int irq;
+
+	if (!zalloc_cpumask_var(&c->xps_cpumask, GFP_KERNEL))
+		return -ENOMEM;
+
+	for (irq = c->ix; irq < num_comp_vectors; irq += params->num_channels) {
+		int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(c->mdev, irq));
+
+		cpumask_set_cpu(cpu, c->xps_cpumask);
+	}
+
+	return 0;
+}
+
+static void mlx5e_free_xps_cpumask(struct mlx5e_channel *c)
+{
+	free_cpumask_var(c->xps_cpumask);
+}
+
 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 			      struct mlx5e_params *params,
 			      struct mlx5e_channel_param *cparam,
@@ -1982,9 +2004,12 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	c->num_tc   = params->num_tc;
 	c->xdp      = !!params->xdp_prog;
 	c->stats    = &priv->channel_stats[ix].ch;
-
 	c->irq_desc = irq_to_desc(irq);
 
+	err = mlx5e_alloc_xps_cpumask(c, params);
+	if (err)
+		goto err_free_channel;
+
 	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
 
 	err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
@@ -2067,6 +2092,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 
 err_napi_del:
 	netif_napi_del(&c->napi);
+	mlx5e_free_xps_cpumask(c);
+
+err_free_channel:
 	kvfree(c);
 
 	return err;
@@ -2079,7 +2107,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
 	for (tc = 0; tc < c->num_tc; tc++)
 		mlx5e_activate_txqsq(&c->sq[tc]);
 	mlx5e_activate_rq(&c->rq);
-	netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
+	netif_set_xps_queue(c->netdev, c->xps_cpumask, c->ix);
 }
 
 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -2107,6 +2135,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
 	mlx5e_close_tx_cqs(c);
 	mlx5e_close_cq(&c->icosq.cq);
 	netif_napi_del(&c->napi);
+	mlx5e_free_xps_cpumask(c);
 	kvfree(c);
 }
 
@@ -3492,11 +3521,32 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
 	}
 }
 
+void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
+{
+	int i;
+
+	for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) {
+		struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
+		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
+		int j;
+
+		s->rx_packets += rq_stats->packets;
+		s->rx_bytes   += rq_stats->bytes;
+
+		for (j = 0; j < priv->max_opened_tc; j++) {
+			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
+
+			s->tx_packets += sq_stats->packets;
+			s->tx_bytes   += sq_stats->bytes;
+			s->tx_dropped += sq_stats->dropped;
+		}
+	}
+}
+
 void
 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
 	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
 
@@ -3511,12 +3561,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
 		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
 	} else {
-		mlx5e_grp_sw_update_stats(priv);
-
-		stats->rx_packets = sstats->rx_packets;
-		stats->rx_bytes   = sstats->rx_bytes;
-		stats->tx_packets = sstats->tx_packets;
-		stats->tx_bytes   = sstats->tx_bytes;
-		stats->tx_dropped = sstats->tx_queue_dropped;
+		mlx5e_fold_sw_stats64(priv, stats);
 	}
 
 	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
...
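
The XPS change above spreads completion vectors over channels round-robin, so the union of all channels' XPS masks covers every vector's CPU rather than only the first num_channels CPUs. A standalone sketch of that assignment, with made-up channel/vector counts and assuming vector i is pinned to CPU i (a stand-in for what mlx5_comp_irq_get_affinity_mask() reports):

#include <stdio.h>

#define NUM_CHANNELS	 4	/* hypothetical example values */
#define NUM_COMP_VECTORS 8

int main(void)
{
	int ix, irq;

	/* channel ix serves IRQs ix, ix + NUM_CHANNELS, ... so every
	 * completion-vector CPU lands in some channel's XPS mask
	 */
	for (ix = 0; ix < NUM_CHANNELS; ix++) {
		printf("channel %d -> cpus:", ix);
		for (irq = ix; irq < NUM_COMP_VECTORS; irq += NUM_CHANNELS)
			printf(" %d", irq); /* cpu = first bit of irq's mask */
		printf("\n");
	}
	return 0;
}
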
@@ -162,27 +162,16 @@ static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
 static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
 {
 	struct mlx5e_sw_stats *s = &priv->stats.sw;
-	struct mlx5e_rq_stats *rq_stats;
-	struct mlx5e_sq_stats *sq_stats;
-	int i, j;
+	struct rtnl_link_stats64 stats64 = {};
 
 	memset(s, 0, sizeof(*s));
-	for (i = 0; i < priv->channels.num; i++) {
-		struct mlx5e_channel *c = priv->channels.c[i];
-
-		rq_stats = c->rq.stats;
-
-		s->rx_packets += rq_stats->packets;
-		s->rx_bytes   += rq_stats->bytes;
-
-		for (j = 0; j < priv->channels.params.num_tc; j++) {
-			sq_stats = c->sq[j].stats;
-
-			s->tx_packets	    += sq_stats->packets;
-			s->tx_bytes	    += sq_stats->bytes;
-			s->tx_queue_dropped += sq_stats->dropped;
-		}
-	}
+	mlx5e_fold_sw_stats64(priv, &stats64);
+
+	s->rx_packets	    = stats64.rx_packets;
+	s->rx_bytes	    = stats64.rx_bytes;
+	s->tx_packets	    = stats64.tx_packets;
+	s->tx_bytes	    = stats64.tx_bytes;
+	s->tx_queue_dropped = stats64.tx_dropped;
 }
 
 static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
@@ -195,8 +184,7 @@ static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
 		return;
 
 	mutex_lock(&priv->state_lock);
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-		mlx5e_rep_update_sw_counters(priv);
+	mlx5e_rep_update_sw_counters(priv);
 	mlx5e_rep_update_hw_counters(priv);
 	mutex_unlock(&priv->state_lock);
 
@@ -1229,17 +1217,8 @@ mlx5e_get_sw_stats64(const struct net_device *dev,
 			struct rtnl_link_stats64 *stats)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
-
-	mlx5e_rep_update_sw_counters(priv);
-
-	stats->rx_packets = sstats->rx_packets;
-	stats->rx_bytes   = sstats->rx_bytes;
-	stats->tx_packets = sstats->tx_packets;
-	stats->tx_bytes   = sstats->tx_bytes;
-	stats->tx_dropped = sstats->tx_queue_dropped;
 
+	mlx5e_fold_sw_stats64(priv, stats);
 	return 0;
 }
...
@@ -52,40 +52,45 @@ static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
 	return config->rx_filter == HWTSTAMP_FILTER_ALL;
 }
 
-static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
-				       void *data)
+static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
+				       u32 cqcc, void *data)
 {
-	u32 ci = mlx5_cqwq_ctr2ix(&cq->wq, cqcc);
+	u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
 
-	memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
+	memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
 }
 
 static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
-					 struct mlx5e_cq *cq, u32 cqcc)
+					 struct mlx5_cqwq *wq,
+					 u32 cqcc)
 {
-	mlx5e_read_cqe_slot(cq, cqcc, &cq->title);
-	cq->decmprs_left        = be32_to_cpu(cq->title.byte_cnt);
-	cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);
+	struct mlx5e_cq_decomp *cqd = &rq->cqd;
+	struct mlx5_cqe64 *title = &cqd->title;
+
+	mlx5e_read_cqe_slot(wq, cqcc, title);
+	cqd->left        = be32_to_cpu(title->byte_cnt);
+	cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
 	rq->stats->cqe_compress_blks++;
 }
 
-static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
+static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
+					    struct mlx5e_cq_decomp *cqd,
+					    u32 cqcc)
 {
-	mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr);
-	cq->mini_arr_idx = 0;
+	mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
+	cqd->mini_arr_idx = 0;
 }
 
-static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
+static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
 {
-	struct mlx5_cqwq *wq = &cq->wq;
-
+	u32 cqcc   = wq->cc;
 	u8  op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
 	u32 ci     = mlx5_cqwq_ctr2ix(wq, cqcc);
 	u32 wq_sz  = mlx5_cqwq_get_size(wq);
 	u32 ci_top = min_t(u32, wq_sz, ci + n);
 
 	for (; ci < ci_top; ci++, n--) {
-		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
+		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
 
 		cqe->op_own = op_own;
 	}
@@ -93,7 +98,7 @@ static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
 	if (unlikely(ci == wq_sz)) {
 		op_own = !op_own;
 		for (ci = 0; ci < n; ci++) {
-			struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
+			struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
 
 			cqe->op_own = op_own;
 		}
@@ -101,68 +106,79 @@ static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
 }
 
 static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
-					struct mlx5e_cq *cq, u32 cqcc)
+					struct mlx5_cqwq *wq,
+					u32 cqcc)
 {
-	cq->title.byte_cnt   = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
-	cq->title.check_sum  = cq->mini_arr[cq->mini_arr_idx].checksum;
-	cq->title.op_own    &= 0xf0;
-	cq->title.op_own    |= 0x01 & (cqcc >> cq->wq.fbc.log_sz);
-	cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter);
+	struct mlx5e_cq_decomp *cqd = &rq->cqd;
+	struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
+	struct mlx5_cqe64 *title = &cqd->title;
+
+	title->byte_cnt   = mini_cqe->byte_cnt;
+	title->check_sum  = mini_cqe->checksum;
+	title->op_own    &= 0xf0;
+	title->op_own    |= 0x01 & (cqcc >> wq->fbc.log_sz);
+	title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
 
 	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
-		cq->decmprs_wqe_counter +=
-			mpwrq_get_cqe_consumed_strides(&cq->title);
+		cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
 	else
-		cq->decmprs_wqe_counter =
-			mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cq->decmprs_wqe_counter + 1);
+		cqd->wqe_counter =
+			mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
 }
 
 static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
-						struct mlx5e_cq *cq, u32 cqcc)
+						struct mlx5_cqwq *wq,
+						u32 cqcc)
 {
-	mlx5e_decompress_cqe(rq, cq, cqcc);
-	cq->title.rss_hash_type   = 0;
-	cq->title.rss_hash_result = 0;
+	struct mlx5e_cq_decomp *cqd = &rq->cqd;
+
+	mlx5e_decompress_cqe(rq, wq, cqcc);
+	cqd->title.rss_hash_type   = 0;
+	cqd->title.rss_hash_result = 0;
 }
 
 static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
-					     struct mlx5e_cq *cq,
+					     struct mlx5_cqwq *wq,
 					     int update_owner_only,
 					     int budget_rem)
 {
-	u32 cqcc = cq->wq.cc + update_owner_only;
+	struct mlx5e_cq_decomp *cqd = &rq->cqd;
+	u32 cqcc = wq->cc + update_owner_only;
 	u32 cqe_count;
 	u32 i;
 
-	cqe_count = min_t(u32, cq->decmprs_left, budget_rem);
+	cqe_count = min_t(u32, cqd->left, budget_rem);
 
 	for (i = update_owner_only; i < cqe_count;
-	     i++, cq->mini_arr_idx++, cqcc++) {
-		if (cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
-			mlx5e_read_mini_arr_slot(cq, cqcc);
+	     i++, cqd->mini_arr_idx++, cqcc++) {
+		if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
+			mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
 
-		mlx5e_decompress_cqe_no_hash(rq, cq, cqcc);
-		rq->handle_rx_cqe(rq, &cq->title);
+		mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
+		rq->handle_rx_cqe(rq, &cqd->title);
 	}
-	mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc);
-	cq->wq.cc = cqcc;
-	cq->decmprs_left -= cqe_count;
+	mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
+	wq->cc = cqcc;
+	cqd->left -= cqe_count;
 	rq->stats->cqe_compress_pkts += cqe_count;
 
 	return cqe_count;
 }
 
 static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
-					      struct mlx5e_cq *cq,
+					      struct mlx5_cqwq *wq,
 					      int budget_rem)
 {
-	mlx5e_read_title_slot(rq, cq, cq->wq.cc);
-	mlx5e_read_mini_arr_slot(cq, cq->wq.cc + 1);
-	mlx5e_decompress_cqe(rq, cq, cq->wq.cc);
-	rq->handle_rx_cqe(rq, &cq->title);
-	cq->mini_arr_idx++;
+	struct mlx5e_cq_decomp *cqd = &rq->cqd;
+	u32 cc = wq->cc;
 
-	return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
+	mlx5e_read_title_slot(rq, wq, cc);
+	mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
+	mlx5e_decompress_cqe(rq, wq, cc);
+	rq->handle_rx_cqe(rq, &cqd->title);
+	cqd->mini_arr_idx++;
+
+	return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
 }
 
 static inline bool mlx5e_page_is_reserved(struct page *page)
@@ -369,7 +385,7 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
 static inline void
 mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
 		      struct mlx5e_dma_info *dma_info,
-		      int offset_from, int offset_to, u32 headlen)
+		      int offset_from, u32 headlen)
 {
 	const void *from = page_address(dma_info->page) + offset_from;
 	/* Aligning len to sizeof(long) optimizes memcpy performance */
@@ -377,24 +393,7 @@ mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
 	dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len,
 				DMA_FROM_DEVICE);
-	skb_copy_to_linear_data_offset(skb, offset_to, from, len);
-}
-
-static inline void
-mlx5e_copy_skb_header_mpwqe(struct device *pdev,
-			    struct sk_buff *skb,
-			    struct mlx5e_dma_info *dma_info,
-			    u32 offset, u32 headlen)
-{
-	u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
-
-	mlx5e_copy_skb_header(pdev, skb, dma_info, offset, 0, headlen_pg);
-
-	if (unlikely(offset + headlen > PAGE_SIZE)) {
-		dma_info++;
-		mlx5e_copy_skb_header(pdev, skb, dma_info, 0, headlen_pg,
-				      headlen - headlen_pg);
-	}
+	skb_copy_to_linear_data(skb, from, len);
 }
 
 static void
@@ -973,8 +972,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	}
 
 	/* copy header */
-	mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset,
-			      0, headlen);
+	mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, headlen);
 
 	/* skb linear part was allocated with headlen and aligned to long */
 	skb->tail += headlen;
 	skb->len  += headlen;
@@ -1096,8 +1094,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 		di++;
 	}
 	/* copy header */
-	mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, head_di,
-				    head_offset, headlen);
+	mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen);
 
 	/* skb linear part was allocated with headlen and aligned to long */
 	skb->tail += headlen;
 	skb->len  += headlen;
@@ -1203,16 +1200,17 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 {
 	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
+	struct mlx5_cqwq *cqwq = &cq->wq;
 	struct mlx5_cqe64 *cqe;
 	int work_done = 0;
 
 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
 		return 0;
 
-	if (cq->decmprs_left)
-		work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
+	if (rq->cqd.left)
+		work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
 
-	cqe = mlx5_cqwq_get_cqe(&cq->wq);
+	cqe = mlx5_cqwq_get_cqe(cqwq);
 	if (!cqe) {
 		if (unlikely(work_done))
 			goto out;
@@ -1222,21 +1220,21 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 	do {
 		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
 			work_done +=
-				mlx5e_decompress_cqes_start(rq, cq,
+				mlx5e_decompress_cqes_start(rq, cqwq,
 							    budget - work_done);
 			continue;
 		}
 
-		mlx5_cqwq_pop(&cq->wq);
+		mlx5_cqwq_pop(cqwq);
 
 		rq->handle_rx_cqe(rq, cqe);
-	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
 
 out:
 	if (rq->xdp_prog)
 		mlx5e_xdp_rx_poll_complete(rq);
 
-	mlx5_cqwq_update_db_record(&cq->wq);
+	mlx5_cqwq_update_db_record(cqwq);
 
 	/* ensure cq space is freed before enabling more cqes */
 	wmb();
...
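
Background for the refactor above: with CQE compression the hardware writes one full "title" CQE whose byte_cnt holds the number of compressed entries, followed by 8-byte mini CQEs carrying only per-packet fields; the driver rebuilds a full CQE per entry, now tracked in rq->cqd instead of inside mlx5e_cq. A deliberately simplified standalone sketch of that expansion follows; the struct layouts and fields are illustrative only and do not match the real mlx5_cqe64/mlx5_mini_cqe8:

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-ins; NOT the real hardware layout */
struct title_cqe {
	uint32_t byte_cnt;	/* for a compressed block: mini CQE count */
	uint16_t wqe_counter;
	uint16_t checksum;
};

struct mini_cqe {
	uint32_t byte_cnt;
	uint16_t checksum;
};

int main(void)
{
	struct title_cqe title = { .byte_cnt = 3, .wqe_counter = 100 };
	struct mini_cqe mini_arr[8] = {
		{ 1500, 0xaaaa }, { 64, 0xbbbb }, { 9000, 0xcccc },
	};
	uint32_t left = title.byte_cnt;	/* cqd->left in the driver */
	uint32_t idx = 0;		/* cqd->mini_arr_idx */

	while (left--) {
		/* rebuild a full CQE: shared fields stay in the title,
		 * per-packet fields come from the mini CQE, then the
		 * result goes to the normal RX handler
		 */
		title.checksum = mini_arr[idx].checksum;
		printf("cqe: wqe_counter=%u byte_cnt=%u csum=0x%x\n",
		       title.wqe_counter, mini_arr[idx].byte_cnt,
		       title.checksum);
		title.wqe_counter++; /* cyclic RQ: one WQE per CQE */
		idx++;
	}
	return 0;
}
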
@@ -127,9 +127,9 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
 	return idx;
 }
 
-void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
+static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 {
-	struct mlx5e_sw_stats temp, *s = &temp;
+	struct mlx5e_sw_stats *s = &priv->stats.sw;
 	int i;
 
 	memset(s, 0, sizeof(*s));
@@ -212,8 +212,6 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 			s->tx_cqes		+= sq_stats->cqes;
 		}
 	}
-
-	memcpy(&priv->stats.sw, s, sizeof(*s));
 }
 
 static const struct counter_desc q_stats_desc[] = {
...
@@ -277,7 +277,6 @@ struct mlx5e_stats_grp {
 extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
 extern const int mlx5e_num_stats_grps;
 
-void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv);
 void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv);
 
 #endif /* __MLX5_EN_STATS_H__ */
@@ -2767,14 +2767,13 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
 	return err;
 }
 
-static int
+static struct mlx5e_tc_flow *
 __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 		     struct tc_cls_flower_offload *f,
 		     u16 flow_flags,
 		     struct net_device *filter_dev,
 		     struct mlx5_eswitch_rep *in_rep,
-		     struct mlx5_core_dev *in_mdev,
-		     struct mlx5e_tc_flow **__flow)
+		     struct mlx5_core_dev *in_mdev)
 {
 	struct netlink_ext_ack *extack = f->common.extack;
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -2814,15 +2813,13 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 	if (err)
 		goto err_free;
 
-	*__flow = flow;
-
-	return 0;
+	return flow;
 
 err_free:
 	kfree(flow);
 	kvfree(parse_attr);
 out:
-	return err;
+	return ERR_PTR(err);
 }
 
 static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f,
@@ -2855,11 +2852,13 @@ static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f,
 	in_mdev = priv->mdev;
 
 	parse_attr = flow->esw_attr->parse_attr;
-	err = __mlx5e_add_fdb_flow(peer_priv, f, flow->flags,
-				   parse_attr->filter_dev,
-				   flow->esw_attr->in_rep, in_mdev, &peer_flow);
-	if (err)
+	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow->flags,
+					 parse_attr->filter_dev,
+					 flow->esw_attr->in_rep, in_mdev);
+	if (IS_ERR(peer_flow)) {
+		err = PTR_ERR(peer_flow);
 		goto out;
+	}
 
 	flow->peer_flow = peer_flow;
 	flow->flags |= MLX5E_TC_FLOW_DUP;
@@ -2885,10 +2884,10 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 	struct mlx5e_tc_flow *flow;
 	int err;
 
-	err = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
-				   in_mdev, &flow);
-	if (err)
-		goto out;
+	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
+				    in_mdev);
+	if (IS_ERR(flow))
+		return PTR_ERR(flow);
 
 	if (is_peer_flow_needed(flow)) {
 		err = mlx5e_tc_add_fdb_peer_flow(f, flow);
...
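
The en_tc.c change above swaps the int-plus-output-parameter convention for returning the flow pointer directly, with errors encoded via ERR_PTR()/IS_ERR()/PTR_ERR(). A self-contained userspace sketch of that idiom; the helpers here are simplified reimplementations of what <linux/err.h> provides:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* simplified userspace versions of the <linux/err.h> helpers */
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* the top 4095 addresses are reserved for errno values */
	return (unsigned long)p >= (unsigned long)-4095;
}

struct flow { int id; };

/* returns a valid flow, or an errno encoded in the pointer */
static struct flow *add_flow(int id)
{
	struct flow *f;

	if (id < 0)
		return ERR_PTR(-EINVAL); /* error travels in the pointer */

	f = malloc(sizeof(*f));
	if (!f)
		return ERR_PTR(-ENOMEM);
	f->id = id;
	return f;
}

int main(void)
{
	struct flow *f = add_flow(-1);

	if (IS_ERR(f)) /* no separate int return + out-param needed */
		printf("add_flow failed: %ld\n", PTR_ERR(f));

	f = add_flow(7);
	if (!IS_ERR(f)) {
		printf("flow %d added\n", f->id);
		free(f);
	}
	return 0;
}
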
@@ -397,6 +397,7 @@ static void del_hw_flow_table(struct fs_node *node)
 	fs_get_obj(ft, node);
 	dev = get_dev(&ft->node);
 	root = find_root(&ft->node);
+	trace_mlx5_fs_del_ft(ft);
 
 	if (node->active) {
 		err = root->cmds->destroy_flow_table(dev, ft);
@@ -1019,6 +1020,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
 	fs_prio->num_ft++;
 	up_write_ref_node(&fs_prio->node);
 	mutex_unlock(&root->chain_lock);
+	trace_mlx5_fs_add_ft(ft);
 	return ft;
 destroy_ft:
 	root->cmds->destroy_flow_table(root->dev, ft);
...
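
Once merged, the new flow-table tracepoints can be consumed like any other kernel event. A small userspace sketch that enables them through tracefs; the mount point is an assumption (commonly /sys/kernel/debug/tracing or /sys/kernel/tracing, and writing requires root), while the event group name mlx5 follows the header's TRACE_SYSTEM:

#include <stdio.h>

/* assumed tracefs path; adjust to your system's tracefs mount */
#define TRACE_DIR "/sys/kernel/debug/tracing"

static int enable_event(const char *event)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), TRACE_DIR "/events/%s/enable", event);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fputs("1", f);
	fclose(f);
	return 0;
}

int main(void)
{
	/* the TRACE_EVENTs added in this series */
	enable_event("mlx5/mlx5_fs_add_ft");
	enable_event("mlx5/mlx5_fs_del_ft");
	/* then read TRACE_DIR "/trace_pipe" for ft=... id=... records */
	return 0;
}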