Commit bc77b240 authored by Tariq Toukan, committed by David S. Miller

net/mlx5e: Add fragmented memory support for RX multi packet WQE

If the allocation of a linear (physically contiguous) MPWQE fails,
we fall back to allocating a fragmented MPWQE.

This is implemented via the device's UMR (User Memory Registration)
feature, which allows multiple memory fragments to be registered with
the ConnectX hardware as a single contiguous buffer.
UMR registration is an asynchronous operation and is performed via
the ICO (internal control operations) SQs.
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d3c9bc27
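The RX allocation path itself is not part of the hunks below; what follows is a
minimal sketch of the fallback the commit message describes. The helper names
mlx5e_alloc_rx_linear_mpwqe(), mlx5e_alloc_rx_fragmented_mpwqe() and
mlx5e_post_umr_wqe() are assumptions for illustration, as is rq->state holding
the MLX5E_RQ_STATE_* bits; only mlx5e_alloc_rx_mpwqe(), the new RQ state flag
and mlx5e_post_rx_fragmented_mpwqe() appear in this excerpt.

int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
	int err;

	err = mlx5e_alloc_rx_linear_mpwqe(rq, wqe, ix);	/* assumed helper */
	if (unlikely(err)) {
		/* No physically contiguous buffer: fall back to per-page
		 * fragments stitched together through a UMR mkey.
		 */
		err = mlx5e_alloc_rx_fragmented_mpwqe(rq, wqe, ix); /* assumed helper */
		if (unlikely(err))
			return err;

		/* The registration is posted on the channel's ICO SQ and
		 * completes asynchronously; stop posting RX WQEs until
		 * mlx5e_poll_ico_cq() sees the UMR completion and calls
		 * mlx5e_post_rx_fragmented_mpwqe().
		 */
		set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
		mlx5e_post_umr_wqe(rq, ix);			/* assumed helper */
		return -EBUSY;
	}

	return 0;
}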
@@ -72,6 +72,9 @@
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \
MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \
BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW))
#define MLX5_UMR_ALIGN (2048)
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
@@ -134,6 +137,13 @@ struct mlx5e_rx_wqe {
struct mlx5_wqe_data_seg data;
};
struct mlx5e_umr_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
struct mlx5_wqe_data_seg data;
};
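The new UMR WQE reuses the generic send-WQE segments: the control and UMR
control segments describe the registration request, the mkey segment carries
the mkey context being updated, and the data segment presumably points at the
MLX5_UMR_ALIGN-aligned MTT array (umr.mtt further below) that lists the pages
backing one fragmented MPWQE.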
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */
@@ -179,6 +189,7 @@ static const char vport_strings[][ETH_GSTRING_LEN] = {
"tx_queue_dropped",
"rx_wqe_err",
"rx_mpwqe_filler",
"rx_mpwqe_frag",
};
struct mlx5e_vport_stats {
@@ -221,8 +232,9 @@ struct mlx5e_vport_stats {
u64 tx_queue_dropped;
u64 rx_wqe_err;
u64 rx_mpwqe_filler;
u64 rx_mpwqe_frag;
#define NUM_VPORT_COUNTERS 36
#define NUM_VPORT_COUNTERS 37
};
static const char pport_strings[][ETH_GSTRING_LEN] = {
@@ -317,6 +329,7 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
"lro_bytes",
"wqe_err",
"mpwqe_filler",
"mpwqe_frag",
};
struct mlx5e_rq_stats {
@@ -328,7 +341,8 @@ struct mlx5e_rq_stats {
u64 lro_bytes;
u64 wqe_err;
u64 mpwqe_filler;
#define NUM_RQ_STATS 8
u64 mpwqe_frag;
#define NUM_RQ_STATS 9
};
static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
@@ -407,6 +421,7 @@ struct mlx5e_tstamp {
enum {
MLX5E_RQ_STATE_POST_WQES_ENABLE,
MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
};
struct mlx5e_cq {
@@ -434,18 +449,14 @@ struct mlx5e_dma_info {
dma_addr_t addr;
};
struct mlx5e_mpw_info {
struct mlx5e_dma_info dma_info;
u16 consumed_strides;
u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
};
struct mlx5e_rq {
/* data path */
struct mlx5_wq_ll wq;
u32 wqe_sz;
struct sk_buff **skb;
struct mlx5e_mpw_info *wqe_info;
__be32 mkey_be;
__be32 umr_mkey_be;
struct device *pdev;
struct net_device *netdev;
@@ -466,6 +477,36 @@ struct mlx5e_rq {
struct mlx5e_priv *priv;
} ____cacheline_aligned_in_smp;
struct mlx5e_umr_dma_info {
__be64 *mtt;
__be64 *mtt_no_align;
dma_addr_t mtt_addr;
struct mlx5e_dma_info *dma_info;
};
struct mlx5e_mpw_info {
union {
struct mlx5e_dma_info dma_info;
struct mlx5e_umr_dma_info umr;
};
u16 consumed_strides;
u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
void (*dma_pre_sync)(struct device *pdev,
struct mlx5e_mpw_info *wi,
u32 wqe_offset, u32 len);
void (*add_skb_frag)(struct device *pdev,
struct sk_buff *skb,
struct mlx5e_mpw_info *wi,
u32 page_idx, u32 frag_offset, u32 len);
void (*copy_skb_header)(struct device *pdev,
struct sk_buff *skb,
struct mlx5e_mpw_info *wi,
u32 page_idx, u32 offset,
u32 headlen);
void (*free_wqe)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
};
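With the union and the per-WQE ops above, the RX completion path can handle a
linear and a fragmented MPWQE through the same calls; the sketch below is an
assumption about how such a dispatch might look (only the ops fields and
rq->pdev come from this patch, the helper name and offset math are
illustrative).

static void mlx5e_mpwqe_fill_skb(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				 struct sk_buff *skb, u32 wqe_offset,
				 u32 page_idx, u32 frag_offset,
				 u32 headlen, u32 byte_cnt)
{
	/* Make the DMA'd range CPU-visible: one large buffer in the linear
	 * case, page by page in the fragmented case.
	 */
	wi->dma_pre_sync(rq->pdev, wi, wqe_offset, byte_cnt);

	/* Packet headers are copied into the skb linear area ... */
	wi->copy_skb_header(rq->pdev, skb, wi, page_idx, frag_offset, headlen);

	/* ... and the remaining payload is attached as a page fragment. */
	wi->add_skb_frag(rq->pdev, skb, wi, page_idx,
			 frag_offset + headlen, byte_cnt - headlen);
}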
struct mlx5e_tx_wqe_info {
u32 num_bytes;
u8 num_wqebbs;
@@ -658,6 +699,7 @@ struct mlx5e_priv {
u32 pdn;
u32 tdn;
struct mlx5_core_mkey mkey;
struct mlx5_core_mkey umr_mkey;
struct mlx5e_rq drop_rq;
struct mlx5e_channel **channel;
@@ -730,6 +772,21 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u16 byte_cnt,
struct mlx5e_mpw_info *wi,
struct sk_buff *skb);
void mlx5e_complete_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u16 byte_cnt,
struct mlx5e_mpw_info *wi,
struct sk_buff *skb);
void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
struct mlx5e_mpw_info *wi);
void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
struct mlx5e_mpw_info *wi);
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
void mlx5e_update_stats(struct mlx5e_priv *priv);
@@ -763,7 +820,7 @@ void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
int num_channels);
static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
struct mlx5e_tx_wqe *wqe, int bf_sz)
struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
{
u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
@@ -777,9 +834,9 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
*/
wmb();
if (bf_sz)
__iowrite64_copy(sq->uar_map + ofst, &wqe->ctrl, bf_sz);
__iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz);
else
mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL);
/* flush the write-combining mapped buffer */
wmb();
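Passing the generic control segment (instead of a struct mlx5e_tx_wqe) is what
lets non-send WQE types ring the doorbell through the same helper, e.g. the UMR
WQE posted on the ICO SQ. A minimal usage sketch, with the wrapping function
being an assumption:

static void mlx5e_notify_hw_umr(struct mlx5e_sq *sq, struct mlx5e_umr_wqe *wqe)
{
	/* Request a CQE so mlx5e_poll_ico_cq() sees MLX5_OPCODE_UMR and can
	 * resume the fragmented MPWQE; bf_sz = 0 skips the BlueFlame copy.
	 */
	wqe->ctrl.fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
}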
@@ -800,6 +857,11 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
MLX5E_MAX_NUM_CHANNELS);
}
static inline int mlx5e_get_mtt_octw(int npages)
{
return ALIGN(npages, 8) / 2;
}
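mlx5e_get_mtt_octw() converts a page count into the 16-byte octword units used
to size the mkey translation table: each MTT entry is 8 bytes (two entries per
octword), and the entry count is first padded to a multiple of 8, matching the
alignment the device expects for UMR translation lists. For example,
mlx5e_get_mtt_octw(10) = ALIGN(10, 8) / 2 = 16 / 2 = 8.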
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
......
@@ -179,6 +179,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
s->rx_csum_sw = 0;
s->rx_wqe_err = 0;
s->rx_mpwqe_filler = 0;
s->rx_mpwqe_frag = 0;
for (i = 0; i < priv->params.num_channels; i++) {
rq_stats = &priv->channel[i]->rq.stats;
@@ -190,6 +191,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
s->rx_csum_sw += rq_stats->csum_sw;
s->rx_wqe_err += rq_stats->wqe_err;
s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
for (j = 0; j < priv->params.num_tc; j++) {
sq_stats = &priv->channel[i]->sq[j].stats;
@@ -379,7 +381,6 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
for (i = 0; i < wq_sz; i++) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
wqe->data.lkey = c->mkey_be;
wqe->data.byte_count = cpu_to_be32(byte_count);
}
@@ -390,6 +391,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->channel = c;
rq->ix = c->ix;
rq->priv = c->priv;
rq->mkey_be = c->mkey_be;
rq->umr_mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
return 0;
@@ -1256,6 +1259,7 @@ static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
mlx5e_build_sq_param_common(priv, param);
MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
param->icosq = true;
}
@@ -1263,7 +1267,7 @@ static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
struct mlx5e_channel_param *cparam)
{
u8 icosq_log_wq_sz = 0;
u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
memset(cparam, 0, sizeof(*cparam));
@@ -2458,6 +2462,13 @@ void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
indirection_rqt[i] = i % num_channels;
}
static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_GEN(mdev, striding_rq) &&
MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
MLX5_CAP_ETH(mdev, reg_umr_sq);
}
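Striding RQ is now enabled only when all three capabilities are present:
striding_rq for the multi-packet WQE format itself, umr_ptr_rlky (presumably so
the UMR WQE may reference the translation list by pointer using the reserved
lkey), and reg_umr_sq so UMR registrations can be posted from a regular send
queue such as the ICO SQ.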
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
int num_channels)
@@ -2466,7 +2477,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
priv->params.log_sq_size =
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
priv->params.rq_wq_type = MLX5_CAP_GEN(mdev, striding_rq) ?
priv->params.rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) ?
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
MLX5_WQ_TYPE_LINKED_LIST;
@@ -2639,6 +2650,41 @@ static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
}
static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_create_mkey_mbox_in *in;
struct mlx5_mkey_seg *mkc;
int inlen = sizeof(*in);
u64 npages =
mlx5e_get_max_num_channels(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
int err;
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
mkc = &in->seg;
mkc->status = MLX5_MKEY_STATUS_FREE;
mkc->flags = MLX5_PERM_UMR_EN |
MLX5_PERM_LOCAL_READ |
MLX5_PERM_LOCAL_WRITE |
MLX5_ACCESS_MODE_MTT;
mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
mkc->flags_pd = cpu_to_be32(priv->pdn);
mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
mkc->log2_page_size = PAGE_SHIFT;
err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
NULL, NULL);
kvfree(in);
return err;
}
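The single UMR mkey is sized for the worst case: every channel reported by
mlx5e_get_max_num_channels() consuming up to MLX5_CHANNEL_MAX_NUM_MTTS
translation entries, i.e. the pages per MPWQE padded to a multiple of 8 times
the maximum striding-RQ depth. Each fragmented MPWQE can then take its own MTT
slice without any further mkey allocations at runtime.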
static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
struct net_device *netdev;
@@ -2688,10 +2734,16 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
goto err_dealloc_transport_domain;
}
err = mlx5e_create_umr_mkey(priv);
if (err) {
mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
goto err_destroy_mkey;
}
err = mlx5e_create_tises(priv);
if (err) {
mlx5_core_warn(mdev, "create tises failed, %d\n", err);
goto err_destroy_mkey;
goto err_destroy_umr_mkey;
}
err = mlx5e_open_drop_rq(priv);
@@ -2774,6 +2826,9 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
err_destroy_tises:
mlx5e_destroy_tises(priv);
err_destroy_umr_mkey:
mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
err_destroy_mkey:
mlx5_core_destroy_mkey(mdev, &priv->mkey);
@@ -2812,6 +2867,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
mlx5e_close_drop_rq(priv);
mlx5e_destroy_tises(priv);
mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
mlx5_core_destroy_mkey(priv->mdev, &priv->mkey);
mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
......
@@ -58,7 +58,7 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
if (notify_hw) {
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
mlx5e_tx_notify_hw(sq, wqe, 0);
mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
}
}
@@ -310,7 +310,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
bf_sz = wi->num_wqebbs << 3;
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
mlx5e_tx_notify_hw(sq, wqe, bf_sz);
mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
}
/* fill sq edge with nops to avoid wqe wrap around */
......
@@ -84,6 +84,9 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
switch (icowi->opcode) {
case MLX5_OPCODE_NOP:
break;
case MLX5_OPCODE_UMR:
mlx5e_post_rx_fragmented_mpwqe(&sq->channel->rq);
break;
default:
WARN_ONCE(true,
"mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
......
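This is the completion side of the asynchronous UMR flow: when the ICO SQ CQE
for the UMR WQE arrives, the RX WQE whose pages were just registered is finally
pushed to the hardware. The body of mlx5e_post_rx_fragmented_mpwqe() is not
part of this excerpt; the following is a hedged reconstruction from the
declarations above (rq->state holding the MLX5E_RQ_STATE_* bits is an
assumption).

void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->wq;
	struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);

	clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
	mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
	rq->stats.mpwqe_frag++;

	/* Ensure the WQE is visible to the device before the doorbell
	 * record is updated.
	 */
	wmb();
	mlx5_wq_ll_update_db_record(wq);
}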