Commit 0082dd8a authored by David S. Miller

Merge tag 'mlx5-updates-2020-07-28' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-07-28

Misc and small updates to the mlx5 driver:

1) Aya adds PCIe relaxed ordering support for mlx5 netdev queues.
2) Eran refactors the pages database to be per VF/function to speed up
   unload time.
3) Parav changes eswitch steering initialization to account for
   total_vports rather than only the active vports, and links non-uplink
   representors to the PCI device for a uniform naming scheme.

4) Tariq adds trivial RX code improvements and the missing indirect call
   wrappers (see the usage sketch after the commit metadata below).

5) Small cleanup patches
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f21bbd63 22f9d2f4
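
Item 4 above refers to the indirect call wrapper macros from
<linux/indirect_call_wrapper.h>, which this series starts using for the RX
post_wqes handlers. A minimal sketch of a typical call site, assuming
rq->post_wqes points at one of the two handlers annotated with
INDIRECT_CALLABLE_SCOPE below (the exact mlx5e_napi_poll() call may differ):

    #include <linux/indirect_call_wrapper.h>

    /* With retpolines enabled, INDIRECT_CALL_2() compares the function
     * pointer against the two expected targets and calls the match
     * directly, avoiding the retpoline cost of an indirect branch;
     * without retpolines it degrades to a plain indirect call.
     */
    busy |= INDIRECT_CALL_2(rq->post_wqes,
                            mlx5e_post_rx_mpwqes, mlx5e_post_rx_wqes,
                            rq);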
...@@ -530,6 +530,8 @@ typedef struct sk_buff * ...@@ -530,6 +530,8 @@ typedef struct sk_buff *
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq); typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
enum mlx5e_rq_flag { enum mlx5e_rq_flag {
MLX5E_RQ_FLAG_XDP_XMIT, MLX5E_RQ_FLAG_XDP_XMIT,
MLX5E_RQ_FLAG_XDP_REDIRECT, MLX5E_RQ_FLAG_XDP_REDIRECT,
...@@ -812,6 +814,13 @@ struct mlx5e_priv { ...@@ -812,6 +814,13 @@ struct mlx5e_priv {
struct mlx5e_scratchpad scratchpad; struct mlx5e_scratchpad scratchpad;
}; };
struct mlx5e_rx_handlers {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
};
extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
struct mlx5e_profile { struct mlx5e_profile {
int (*init)(struct mlx5_core_dev *mdev, int (*init)(struct mlx5_core_dev *mdev,
struct net_device *netdev, struct net_device *netdev,
...@@ -828,58 +837,17 @@ struct mlx5e_profile { ...@@ -828,58 +837,17 @@ struct mlx5e_profile {
void (*update_carrier)(struct mlx5e_priv *priv); void (*update_carrier)(struct mlx5e_priv *priv);
unsigned int (*stats_grps_num)(struct mlx5e_priv *priv); unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
mlx5e_stats_grp_t *stats_grps; mlx5e_stats_grp_t *stats_grps;
struct { const struct mlx5e_rx_handlers *rx_handlers;
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
} rx_handlers;
int max_tc; int max_tc;
u8 rq_groups; u8 rq_groups;
}; };
void mlx5e_build_ptys2ethtool_map(void); void mlx5e_build_ptys2ethtool_map(void);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev); bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
struct mlx5e_params *params); struct mlx5e_params *params);
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info,
bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s); void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
...@@ -982,8 +950,6 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, ...@@ -982,8 +950,6 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state); int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq); void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq); void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq); void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq); void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
...@@ -1008,6 +974,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev); ...@@ -1008,6 +974,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
bool enable_mc_lb); bool enable_mc_lb);
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc);
/* common netdev helpers */ /* common netdev helpers */
void mlx5e_create_q_counters(struct mlx5e_priv *priv); void mlx5e_create_q_counters(struct mlx5e_priv *priv);
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#define __MLX5_EN_TXRX_H___ #define __MLX5_EN_TXRX_H___
#include "en.h" #include "en.h"
#include <linux/indirect_call_wrapper.h>
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
...@@ -18,6 +19,33 @@ enum mlx5e_icosq_wqe_type { ...@@ -18,6 +19,33 @@ enum mlx5e_icosq_wqe_type {
#endif #endif
}; };
/* General */
void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
/* RX */
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info,
bool recycle);
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
/* TX */
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
static inline bool static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n) mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{ {
...@@ -360,7 +388,7 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, ...@@ -360,7 +388,7 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
switch (swp_spec->tun_l4_proto) { switch (swp_spec->tun_l4_proto) {
case IPPROTO_UDP: case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP; eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
/* fall through */ fallthrough;
case IPPROTO_TCP: case IPPROTO_TCP:
eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2; eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
break; break;
......
...@@ -34,7 +34,6 @@ ...@@ -34,7 +34,6 @@
#include <net/xdp_sock_drv.h> #include <net/xdp_sock_drv.h>
#include "en/xdp.h" #include "en/xdp.h"
#include "en/params.h" #include "en/params.h"
#include <linux/indirect_call_wrapper.h>
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
{ {
...@@ -153,11 +152,11 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, ...@@ -153,11 +152,11 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
return true; return true;
default: default:
bpf_warn_invalid_xdp_action(act); bpf_warn_invalid_xdp_action(act);
/* fall through */ fallthrough;
case XDP_ABORTED: case XDP_ABORTED:
xdp_abort: xdp_abort:
trace_xdp_exception(rq->netdev, prog, act); trace_xdp_exception(rq->netdev, prog, act);
/* fall through */ fallthrough;
case XDP_DROP: case XDP_DROP:
rq->stats->xdp_drop++; rq->stats->xdp_drop++;
return true; return true;
......
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
#include "setup.h" #include "setup.h"
#include "en/params.h" #include "en/params.h"
#include "en/txrx.h"
/* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may /* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
* change unexpectedly, and mlx5e has a minimum valid stride size for striding * change unexpectedly, and mlx5e has a minimum valid stride size for striding
......
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
#include "en/xdp.h" #include "en/xdp.h"
#include "en/params.h" #include "en/params.h"
#include <net/xdp_sock_drv.h> #include <net/xdp_sock_drv.h>
#include <linux/indirect_call_wrapper.h>
int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{ {
......
...@@ -47,7 +47,6 @@ ...@@ -47,7 +47,6 @@
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb, u32 *cqe_bcnt); struct sk_buff *skb, u32 *cqe_bcnt);
void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_ipsec_inverse_table_init(void); void mlx5e_ipsec_inverse_table_init(void);
bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev, bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
......
...@@ -470,7 +470,7 @@ bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *s ...@@ -470,7 +470,7 @@ bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *s
if (likely(!skb->decrypted)) if (likely(!skb->decrypted))
goto out; goto out;
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
/* fall-through */ fallthrough;
case MLX5E_KTLS_SYNC_FAIL: case MLX5E_KTLS_SYNC_FAIL:
goto err_out; goto err_out;
} }
......
...@@ -60,6 +60,16 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, ...@@ -60,6 +60,16 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
mutex_unlock(&mdev->mlx5e_res.td.list_lock); mutex_unlock(&mdev->mlx5e_res.td.list_lock);
} }
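/* Enable relaxed ordering on an mkey only when the PCIe device has relaxed
 * ordering enabled and the HCA exposes the matching read/write capabilities.
 */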
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
{
bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev);
bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read);
MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read);
MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write);
}
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
struct mlx5_core_mkey *mkey) struct mlx5_core_mkey *mkey)
{ {
...@@ -76,7 +86,7 @@ static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, ...@@ -76,7 +86,7 @@ static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA); MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
MLX5_SET(mkc, mkc, lw, 1); MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1); MLX5_SET(mkc, mkc, lr, 1);
mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
MLX5_SET(mkc, mkc, pd, pdn); MLX5_SET(mkc, mkc, pd, pdn);
MLX5_SET(mkc, mkc, length64, 1); MLX5_SET(mkc, mkc, length64, 1);
MLX5_SET(mkc, mkc, qpn, 0xffffff); MLX5_SET(mkc, mkc, qpn, 0xffffff);
......
...@@ -243,7 +243,7 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) ...@@ -243,7 +243,7 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
return MLX5E_NUM_PFLAGS; return MLX5E_NUM_PFLAGS;
case ETH_SS_TEST: case ETH_SS_TEST:
return mlx5e_self_test_num(priv); return mlx5e_self_test_num(priv);
/* fallthrough */ fallthrough;
default: default:
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
......
...@@ -45,7 +45,6 @@ ...@@ -45,7 +45,6 @@
#include "en_tc.h" #include "en_tc.h"
#include "en_rep.h" #include "en_rep.h"
#include "en_accel/ipsec.h" #include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/en_accel.h" #include "en_accel/en_accel.h"
#include "en_accel/tls.h" #include "en_accel/tls.h"
#include "accel/ipsec.h" #include "accel/ipsec.h"
...@@ -65,7 +64,6 @@ ...@@ -65,7 +64,6 @@
#include "en/hv_vhca_stats.h" #include "en/hv_vhca_stats.h"
#include "en/devlink.h" #include "en/devlink.h"
#include "lib/mlx5.h" #include "lib/mlx5.h"
#include "fpga/ipsec.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{ {
...@@ -276,7 +274,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, ...@@ -276,7 +274,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
MLX5_SET(mkc, mkc, lw, 1); MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1); MLX5_SET(mkc, mkc, lr, 1);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT); MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
MLX5_SET(mkc, mkc, qpn, 0xffffff); MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn); MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
MLX5_SET64(mkc, mkc, len, npages << page_shift); MLX5_SET64(mkc, mkc, len, npages << page_shift);
...@@ -428,29 +426,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, ...@@ -428,29 +426,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
pool_size = MLX5_MPWRQ_PAGES_PER_WQE << pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
mlx5e_mpwqe_get_log_rq_size(params, xsk); mlx5e_mpwqe_get_log_rq_size(params, xsk);
rq->post_wqes = mlx5e_post_rx_mpwqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
#ifdef CONFIG_MLX5_EN_IPSEC
if (MLX5_IPSEC_DEV(mdev)) {
err = -EINVAL;
netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
goto err_rq_wq_destroy;
}
#endif
if (!rq->handle_rx_cqe) {
err = -EINVAL;
netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
goto err_rq_wq_destroy;
}
rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
mlx5e_xsk_skb_from_cqe_mpwrq_linear :
mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
mlx5e_skb_from_cqe_mpwrq_linear :
mlx5e_skb_from_cqe_mpwrq_nonlinear;
rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
rq->mpwqe.num_strides = rq->mpwqe.num_strides =
BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk)); BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
...@@ -492,30 +467,13 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, ...@@ -492,30 +467,13 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
if (err) if (err)
goto err_free; goto err_free;
rq->post_wqes = mlx5e_post_rx_wqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
#ifdef CONFIG_MLX5_EN_IPSEC
if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
c->priv->ipsec)
rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
else
#endif
rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
if (!rq->handle_rx_cqe) {
err = -EINVAL;
netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
goto err_free;
}
rq->wqe.skb_from_cqe = xsk ?
mlx5e_xsk_skb_from_cqe_linear :
mlx5e_rx_is_linear_skb(params, NULL) ?
mlx5e_skb_from_cqe_linear :
mlx5e_skb_from_cqe_nonlinear;
rq->mkey_be = c->mkey_be; rq->mkey_be = c->mkey_be;
} }
err = mlx5e_rq_set_handlers(rq, params, xsk);
if (err)
goto err_free;
if (xsk) { if (xsk) {
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL, NULL); MEM_TYPE_XSK_BUFF_POOL, NULL);
...@@ -5288,8 +5246,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = { ...@@ -5288,8 +5246,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.update_rx = mlx5e_update_nic_rx, .update_rx = mlx5e_update_nic_rx,
.update_stats = mlx5e_update_ndo_stats, .update_stats = mlx5e_update_ndo_stats,
.update_carrier = mlx5e_update_carrier, .update_carrier = mlx5e_update_carrier,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe, .rx_handlers = &mlx5e_rx_handlers_nic,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = MLX5E_MAX_NUM_TC, .max_tc = MLX5E_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK), .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_nic_stats_grps, .stats_grps = mlx5e_nic_stats_grps,
......
...@@ -42,6 +42,7 @@ ...@@ -42,6 +42,7 @@
#include "esw/chains.h" #include "esw/chains.h"
#include "en.h" #include "en.h"
#include "en_rep.h" #include "en_rep.h"
#include "en/txrx.h"
#include "en_tc.h" #include "en_tc.h"
#include "en/rep/tc.h" #include "en/rep/tc.h"
#include "en/rep/neigh.h" #include "en/rep/neigh.h"
...@@ -699,8 +700,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) ...@@ -699,8 +700,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
struct mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_dev *mdev = priv->mdev;
SET_NETDEV_DEV(netdev, mdev->device);
if (rep->vport == MLX5_VPORT_UPLINK) { if (rep->vport == MLX5_VPORT_UPLINK) {
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep; netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
/* we want a persistent mac for the uplink rep */ /* we want a persistent mac for the uplink rep */
mlx5_query_mac_address(mdev, netdev->dev_addr); mlx5_query_mac_address(mdev, netdev->dev_addr);
...@@ -1143,8 +1144,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = { ...@@ -1143,8 +1144,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.enable = mlx5e_rep_enable, .enable = mlx5e_rep_enable,
.update_rx = mlx5e_update_rep_rx, .update_rx = mlx5e_update_rep_rx,
.update_stats = mlx5e_update_ndo_stats, .update_stats = mlx5e_update_ndo_stats,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, .rx_handlers = &mlx5e_rx_handlers_rep,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
.max_tc = 1, .max_tc = 1,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_rep_stats_grps, .stats_grps = mlx5e_rep_stats_grps,
...@@ -1163,8 +1163,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = { ...@@ -1163,8 +1163,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.update_rx = mlx5e_update_rep_rx, .update_rx = mlx5e_update_rep_rx,
.update_stats = mlx5e_update_ndo_stats, .update_stats = mlx5e_update_ndo_stats,
.update_carrier = mlx5e_update_carrier, .update_carrier = mlx5e_update_carrier,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, .rx_handlers = &mlx5e_rx_handlers_rep,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
.max_tc = MLX5E_MAX_NUM_TC, .max_tc = MLX5E_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_ul_rep_stats_grps, .stats_grps = mlx5e_ul_rep_stats_grps,
......
...@@ -41,6 +41,8 @@ ...@@ -41,6 +41,8 @@
#include "lib/port_tun.h" #include "lib/port_tun.h"
#ifdef CONFIG_MLX5_ESWITCH #ifdef CONFIG_MLX5_ESWITCH
extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep;
struct mlx5e_neigh_update_table { struct mlx5e_neigh_update_table {
struct rhashtable neigh_ht; struct rhashtable neigh_ht;
/* Save the neigh hash entries in a list in addition to the hash table /* Save the neigh hash entries in a list in addition to the hash table
...@@ -223,10 +225,6 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv); ...@@ -223,10 +225,6 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv); int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv); void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe);
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv); void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
bool mlx5e_eswitch_vf_rep(struct net_device *netdev); bool mlx5e_eswitch_vf_rep(struct net_device *netdev);
......
...@@ -34,22 +34,39 @@ ...@@ -34,22 +34,39 @@
#include <linux/ip.h> #include <linux/ip.h>
#include <linux/ipv6.h> #include <linux/ipv6.h>
#include <linux/tcp.h> #include <linux/tcp.h>
#include <linux/indirect_call_wrapper.h>
#include <net/ip6_checksum.h> #include <net/ip6_checksum.h>
#include <net/page_pool.h> #include <net/page_pool.h>
#include <net/inet_ecn.h> #include <net/inet_ecn.h>
#include "en.h" #include "en.h"
#include "en/txrx.h"
#include "en_tc.h" #include "en_tc.h"
#include "eswitch.h" #include "eswitch.h"
#include "en_rep.h" #include "en_rep.h"
#include "en/rep/tc.h" #include "en/rep/tc.h"
#include "ipoib/ipoib.h" #include "ipoib/ipoib.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"
#include "en_accel/ipsec_rxtx.h" #include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h" #include "en_accel/tls_rxtx.h"
#include "lib/clock.h" #include "lib/clock.h"
#include "en/xdp.h" #include "en/xdp.h"
#include "en/xsk/rx.h" #include "en/xsk/rx.h"
#include "en/health.h" #include "en/health.h"
#include "en/params.h"
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
.handle_rx_cqe = mlx5e_handle_rx_cqe,
.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
};
static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{ {
...@@ -370,7 +387,7 @@ static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq, ...@@ -370,7 +387,7 @@ static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
mlx5e_put_rx_frag(rq, wi, recycle); mlx5e_put_rx_frag(rq, wi, recycle);
} }
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{ {
struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix); struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
...@@ -537,14 +554,14 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) ...@@ -537,14 +554,14 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
return err; return err;
} }
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{ {
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
/* Don't recycle, this function is called on rq/netdev close */ /* Don't recycle, this function is called on rq/netdev close */
mlx5e_free_rx_mpwqe(rq, wi, false); mlx5e_free_rx_mpwqe(rq, wi, false);
} }
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{ {
struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5_wq_cyc *wq = &rq->wqe.wq;
u8 wqe_bulk; u8 wqe_bulk;
...@@ -685,7 +702,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) ...@@ -685,7 +702,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
return i; return i;
} }
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
{ {
struct mlx5e_icosq *sq = &rq->channel->icosq; struct mlx5e_icosq *sq = &rq->channel->icosq;
struct mlx5_wq_ll *wq = &rq->mpwqe.wq; struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
...@@ -1106,7 +1123,7 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom, ...@@ -1106,7 +1123,7 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
xdp->frame_sz = rq->buff.frame0_sz; xdp->frame_sz = rq->buff.frame0_sz;
} }
struct sk_buff * static struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{ {
...@@ -1146,7 +1163,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, ...@@ -1146,7 +1163,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
return skb; return skb;
} }
struct sk_buff * static struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{ {
...@@ -1201,7 +1218,7 @@ static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) ...@@ -1201,7 +1218,7 @@ static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
} }
} }
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{ {
struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi; struct mlx5e_wqe_frag_info *wi;
...@@ -1244,7 +1261,7 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) ...@@ -1244,7 +1261,7 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
} }
#ifdef CONFIG_MLX5_ESWITCH #ifdef CONFIG_MLX5_ESWITCH
void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{ {
struct net_device *netdev = rq->netdev; struct net_device *netdev = rq->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_priv *priv = netdev_priv(netdev);
...@@ -1299,8 +1316,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) ...@@ -1299,8 +1316,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
mlx5_wq_cyc_pop(wq); mlx5_wq_cyc_pop(wq);
} }
void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
struct mlx5_cqe64 *cqe)
{ {
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id); u16 wqe_id = be16_to_cpu(cqe->wqe_id);
...@@ -1358,9 +1374,14 @@ void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, ...@@ -1358,9 +1374,14 @@ void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
mlx5e_free_rx_mpwqe(rq, wi, true); mlx5e_free_rx_mpwqe(rq, wi, true);
mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
} }
const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
};
#endif #endif
struct sk_buff * static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx) u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{ {
...@@ -1406,7 +1427,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w ...@@ -1406,7 +1427,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
return skb; return skb;
} }
struct sk_buff * static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx) u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{ {
...@@ -1456,7 +1477,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, ...@@ -1456,7 +1477,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return skb; return skb;
} }
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{ {
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id); u16 wqe_id = be16_to_cpu(cqe->wqe_id);
...@@ -1652,7 +1673,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, ...@@ -1652,7 +1673,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
stats->bytes += cqe_bcnt; stats->bytes += cqe_bcnt;
} }
void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{ {
struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi; struct mlx5e_wqe_frag_info *wi;
...@@ -1688,11 +1709,15 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) ...@@ -1688,11 +1709,15 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
mlx5_wq_cyc_pop(wq); mlx5_wq_cyc_pop(wq);
} }
const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
.handle_rx_cqe = mlx5i_handle_rx_cqe,
.handle_rx_cqe_mpwqe = NULL, /* Not supported */
};
#endif /* CONFIG_MLX5_CORE_IPOIB */ #endif /* CONFIG_MLX5_CORE_IPOIB */
#ifdef CONFIG_MLX5_EN_IPSEC #ifdef CONFIG_MLX5_EN_IPSEC
void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{ {
struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi; struct mlx5e_wqe_frag_info *wi;
...@@ -1729,3 +1754,55 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) ...@@ -1729,3 +1754,55 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
} }
#endif /* CONFIG_MLX5_EN_IPSEC */ #endif /* CONFIG_MLX5_EN_IPSEC */
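/* Centralize the RQ handler selection (post_wqes, dealloc_wqe, handle_rx_cqe
 * and skb_from_cqe) that was previously open-coded in mlx5e_alloc_rq().
 */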
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
{
struct mlx5_core_dev *mdev = rq->mdev;
struct mlx5e_channel *c = rq->channel;
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
mlx5e_xsk_skb_from_cqe_mpwrq_linear :
mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
mlx5e_skb_from_cqe_mpwrq_linear :
mlx5e_skb_from_cqe_mpwrq_nonlinear;
rq->post_wqes = mlx5e_post_rx_mpwqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
#ifdef CONFIG_MLX5_EN_IPSEC
if (MLX5_IPSEC_DEV(mdev)) {
netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
return -EINVAL;
}
#endif
if (!rq->handle_rx_cqe) {
netdev_err(c->netdev, "RX handler of MPWQE RQ is not set\n");
return -EINVAL;
}
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
rq->wqe.skb_from_cqe = xsk ?
mlx5e_xsk_skb_from_cqe_linear :
mlx5e_rx_is_linear_skb(params, NULL) ?
mlx5e_skb_from_cqe_linear :
mlx5e_skb_from_cqe_nonlinear;
rq->post_wqes = mlx5e_post_rx_wqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
#ifdef CONFIG_MLX5_EN_IPSEC
if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
c->priv->ipsec)
rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
else
#endif
rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe;
if (!rq->handle_rx_cqe) {
netdev_err(c->netdev, "RX handler of RQ is not set\n");
return -EINVAL;
}
}
return 0;
}
...@@ -31,8 +31,8 @@ ...@@ -31,8 +31,8 @@
*/ */
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/indirect_call_wrapper.h>
#include "en.h" #include "en.h"
#include "en/txrx.h"
#include "en/xdp.h" #include "en/xdp.h"
#include "en/xsk/rx.h" #include "en/xsk/rx.h"
#include "en/xsk/tx.h" #include "en/xsk/tx.h"
......
...@@ -271,7 +271,6 @@ struct mlx5_eswitch { ...@@ -271,7 +271,6 @@ struct mlx5_eswitch {
struct mlx5_esw_offload offloads; struct mlx5_esw_offload offloads;
int mode; int mode;
int nvports;
u16 manager_vport; u16 manager_vport;
u16 first_host_vport; u16 first_host_vport;
struct mlx5_esw_functions esw_funcs; struct mlx5_esw_functions esw_funcs;
......
...@@ -1132,7 +1132,7 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw, ...@@ -1132,7 +1132,7 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
} }
} }
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{ {
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_flow_table_attr ft_attr = {};
...@@ -1165,7 +1165,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) ...@@ -1165,7 +1165,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
goto ns_err; goto ns_err;
} }
table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
MLX5_ESW_MISS_FLOWS + esw->total_vports; MLX5_ESW_MISS_FLOWS + esw->total_vports;
/* create the slow path fdb with encap set, so further table instances /* create the slow path fdb with encap set, so further table instances
...@@ -1202,7 +1202,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) ...@@ -1202,7 +1202,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn); MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ; ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ;
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
...@@ -1270,7 +1270,6 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) ...@@ -1270,7 +1270,6 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
if (err) if (err)
goto miss_rule_err; goto miss_rule_err;
esw->nvports = nvports;
kvfree(flow_group_in); kvfree(flow_group_in);
return 0; return 0;
...@@ -1311,7 +1310,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) ...@@ -1311,7 +1310,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
MLX5_FLOW_STEERING_MODE_DMFS); MLX5_FLOW_STEERING_MODE_DMFS);
} }
static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports) static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{ {
struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev; struct mlx5_core_dev *dev = esw->dev;
...@@ -1325,7 +1324,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports) ...@@ -1325,7 +1324,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS; ft_attr.max_fte = esw->total_vports + MLX5_ESW_MISS_FLOWS;
ft_attr.prio = 1; ft_attr.prio = 1;
ft_offloads = mlx5_create_flow_table(ns, &ft_attr); ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
...@@ -1346,14 +1345,15 @@ static void esw_destroy_offloads_table(struct mlx5_eswitch *esw) ...@@ -1346,14 +1345,15 @@ static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
mlx5_destroy_flow_table(offloads->ft_offloads); mlx5_destroy_flow_table(offloads->ft_offloads);
} }
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports) static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{ {
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *g; struct mlx5_flow_group *g;
u32 *flow_group_in; u32 *flow_group_in;
int nvports;
int err = 0; int err = 0;
nvports = nvports + MLX5_ESW_MISS_FLOWS; nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
flow_group_in = kvzalloc(inlen, GFP_KERNEL); flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in) if (!flow_group_in)
return -ENOMEM; return -ENOMEM;
...@@ -1986,15 +1986,8 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) ...@@ -1986,15 +1986,8 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
static int esw_offloads_steering_init(struct mlx5_eswitch *esw) static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{ {
int num_vfs = esw->esw_funcs.num_vfs;
int total_vports;
int err; int err;
if (mlx5_core_is_ecpf_esw_manager(esw->dev))
total_vports = esw->total_vports;
else
total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.vports.lock); mutex_init(&esw->fdb_table.offloads.vports.lock);
hash_init(esw->fdb_table.offloads.vports.table); hash_init(esw->fdb_table.offloads.vports.table);
...@@ -2003,7 +1996,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) ...@@ -2003,7 +1996,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
if (err) if (err)
goto create_acl_err; goto create_acl_err;
err = esw_create_offloads_table(esw, total_vports); err = esw_create_offloads_table(esw);
if (err) if (err)
goto create_offloads_err; goto create_offloads_err;
...@@ -2011,11 +2004,11 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) ...@@ -2011,11 +2004,11 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
if (err) if (err)
goto create_restore_err; goto create_restore_err;
err = esw_create_offloads_fdb_tables(esw, total_vports); err = esw_create_offloads_fdb_tables(esw);
if (err) if (err)
goto create_fdb_err; goto create_fdb_err;
err = esw_create_vport_rx_group(esw, total_vports); err = esw_create_vport_rx_group(esw);
if (err) if (err)
goto create_fg_err; goto create_fg_err;
...@@ -2353,7 +2346,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, ...@@ -2353,7 +2346,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
goto out; goto out;
/* fall through */ fallthrough;
case MLX5_CAP_INLINE_MODE_L2: case MLX5_CAP_INLINE_MODE_L2:
NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set"); NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
...@@ -2465,13 +2458,13 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, ...@@ -2465,13 +2458,13 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
esw->offloads.encap = encap; esw->offloads.encap = encap;
err = esw_create_offloads_fdb_tables(esw, esw->nvports); err = esw_create_offloads_fdb_tables(esw);
if (err) { if (err) {
NL_SET_ERR_MSG_MOD(extack, NL_SET_ERR_MSG_MOD(extack,
"Failed re-creating fast FDB table"); "Failed re-creating fast FDB table");
esw->offloads.encap = !encap; esw->offloads.encap = !encap;
(void)esw_create_offloads_fdb_tables(esw, esw->nvports); (void)esw_create_offloads_fdb_tables(esw);
} }
unlock: unlock:
......
...@@ -339,14 +339,14 @@ static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn, ...@@ -339,14 +339,14 @@ static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn,
switch (opcode) { switch (opcode) {
case MLX5_CQE_REQ_ERR: case MLX5_CQE_REQ_ERR:
status = ((struct mlx5_err_cqe *)cqe)->syndrome; status = ((struct mlx5_err_cqe *)cqe)->syndrome;
/* Fall through */ fallthrough;
case MLX5_CQE_REQ: case MLX5_CQE_REQ:
mlx5_fpga_conn_sq_cqe(conn, cqe, status); mlx5_fpga_conn_sq_cqe(conn, cqe, status);
break; break;
case MLX5_CQE_RESP_ERR: case MLX5_CQE_RESP_ERR:
status = ((struct mlx5_err_cqe *)cqe)->syndrome; status = ((struct mlx5_err_cqe *)cqe)->syndrome;
/* Fall through */ fallthrough;
case MLX5_CQE_RESP_SEND: case MLX5_CQE_RESP_SEND:
mlx5_fpga_conn_rq_cqe(conn, cqe, status); mlx5_fpga_conn_rq_cqe(conn, cqe, status);
break; break;
......
...@@ -464,8 +464,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = { ...@@ -464,8 +464,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.update_rx = mlx5i_update_nic_rx, .update_rx = mlx5i_update_nic_rx,
.update_stats = NULL, /* mlx5i_update_stats */ .update_stats = NULL, /* mlx5i_update_stats */
.update_carrier = NULL, /* no HW update in IB link */ .update_carrier = NULL, /* no HW update in IB link */
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe, .rx_handlers = &mlx5i_rx_handlers,
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
.max_tc = MLX5I_MAX_NUM_TC, .max_tc = MLX5I_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5i_stats_grps, .stats_grps = mlx5i_stats_grps,
......
...@@ -42,6 +42,7 @@ ...@@ -42,6 +42,7 @@
extern const struct ethtool_ops mlx5i_ethtool_ops; extern const struct ethtool_ops mlx5i_ethtool_ops;
extern const struct ethtool_ops mlx5i_pkey_ethtool_ops; extern const struct ethtool_ops mlx5i_pkey_ethtool_ops;
extern const struct mlx5e_rx_handlers mlx5i_rx_handlers;
#define MLX5_IB_GRH_BYTES 40 #define MLX5_IB_GRH_BYTES 40
#define MLX5_IPOIB_ENCAP_LEN 4 #define MLX5_IPOIB_ENCAP_LEN 4
...@@ -117,7 +118,6 @@ struct mlx5i_tx_wqe { ...@@ -117,7 +118,6 @@ struct mlx5i_tx_wqe {
void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more); struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more);
void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
#endif /* CONFIG_MLX5_CORE_IPOIB */ #endif /* CONFIG_MLX5_CORE_IPOIB */
......
...@@ -349,8 +349,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = { ...@@ -349,8 +349,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.disable = NULL, .disable = NULL,
.update_rx = mlx5i_update_nic_rx, .update_rx = mlx5i_update_nic_rx,
.update_stats = NULL, .update_stats = NULL,
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe, .rx_handlers = &mlx5i_rx_handlers,
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
.max_tc = MLX5I_MAX_NUM_TC, .max_tc = MLX5I_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
}; };
......
...@@ -198,13 +198,13 @@ static void mlx5_lag_fib_update(struct work_struct *work) ...@@ -198,13 +198,13 @@ static void mlx5_lag_fib_update(struct work_struct *work)
/* Protect internal structures from changes */ /* Protect internal structures from changes */
rtnl_lock(); rtnl_lock();
switch (fib_work->event) { switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */ case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_DEL: case FIB_EVENT_ENTRY_DEL:
mlx5_lag_fib_route_event(ldev, fib_work->event, mlx5_lag_fib_route_event(ldev, fib_work->event,
fib_work->fen_info.fi); fib_work->fen_info.fi);
fib_info_put(fib_work->fen_info.fi); fib_info_put(fib_work->fen_info.fi);
break; break;
case FIB_EVENT_NH_ADD: /* fall through */ case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL: case FIB_EVENT_NH_DEL:
fib_nh = fib_work->fnh_info.fib_nh; fib_nh = fib_work->fnh_info.fib_nh;
mlx5_lag_fib_nexthop_event(ldev, mlx5_lag_fib_nexthop_event(ldev,
...@@ -255,7 +255,7 @@ static int mlx5_lag_fib_event(struct notifier_block *nb, ...@@ -255,7 +255,7 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
return NOTIFY_DONE; return NOTIFY_DONE;
switch (event) { switch (event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */ case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_DEL: case FIB_EVENT_ENTRY_DEL:
fen_info = container_of(info, struct fib_entry_notifier_info, fen_info = container_of(info, struct fib_entry_notifier_info,
info); info);
...@@ -278,7 +278,7 @@ static int mlx5_lag_fib_event(struct notifier_block *nb, ...@@ -278,7 +278,7 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
*/ */
fib_info_hold(fib_work->fen_info.fi); fib_info_hold(fib_work->fen_info.fi);
break; break;
case FIB_EVENT_NH_ADD: /* fall through */ case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL: case FIB_EVENT_NH_DEL:
fnh_info = container_of(info, struct fib_nh_notifier_info, fnh_info = container_of(info, struct fib_nh_notifier_info,
info); info);
......
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/mlx5/driver.h> #include <linux/mlx5/driver.h>
#include <linux/xarray.h>
#include "mlx5_core.h" #include "mlx5_core.h"
#include "lib/eq.h" #include "lib/eq.h"
...@@ -73,15 +74,45 @@ enum { ...@@ -73,15 +74,45 @@ enum {
MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
}; };
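/* FW pages are now tracked in a per-function red-black tree; the tree for a
 * given function id is looked up in an xarray and created lazily on first use.
 */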
static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id)
{
struct rb_root *root;
int err;
root = xa_load(&dev->priv.page_root_xa, func_id);
if (root)
return root;
root = kzalloc(sizeof(*root), GFP_KERNEL);
if (!root)
return ERR_PTR(-ENOMEM);
err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL);
if (err) {
kfree(root);
return ERR_PTR(err);
}
*root = RB_ROOT;
return root;
}
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{ {
struct rb_root *root = &dev->priv.page_root;
struct rb_node **new = &root->rb_node;
struct rb_node *parent = NULL; struct rb_node *parent = NULL;
struct rb_root *root;
struct rb_node **new;
struct fw_page *nfp; struct fw_page *nfp;
struct fw_page *tfp; struct fw_page *tfp;
int i; int i;
root = page_root_per_func_id(dev, func_id);
if (IS_ERR(root))
return PTR_ERR(root);
new = &root->rb_node;
while (*new) { while (*new) {
parent = *new; parent = *new;
tfp = rb_entry(parent, struct fw_page, rb_node); tfp = rb_entry(parent, struct fw_page, rb_node);
...@@ -111,13 +142,20 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u ...@@ -111,13 +142,20 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
return 0; return 0;
} }
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr) static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
u32 func_id)
{ {
struct rb_root *root = &dev->priv.page_root;
struct rb_node *tmp = root->rb_node;
struct fw_page *result = NULL; struct fw_page *result = NULL;
struct rb_root *root;
struct rb_node *tmp;
struct fw_page *tfp; struct fw_page *tfp;
root = xa_load(&dev->priv.page_root_xa, func_id);
if (WARN_ON_ONCE(!root))
return NULL;
tmp = root->rb_node;
while (tmp) { while (tmp) {
tfp = rb_entry(tmp, struct fw_page, rb_node); tfp = rb_entry(tmp, struct fw_page, rb_node);
if (tfp->addr < addr) { if (tfp->addr < addr) {
...@@ -191,7 +229,13 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id) ...@@ -191,7 +229,13 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp, static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
bool in_free_list) bool in_free_list)
{ {
rb_erase(&fwp->rb_node, &dev->priv.page_root); struct rb_root *root;
root = xa_load(&dev->priv.page_root_xa, fwp->func_id);
if (WARN_ON_ONCE(!root))
return;
rb_erase(&fwp->rb_node, root);
if (in_free_list) if (in_free_list)
list_del(&fwp->list); list_del(&fwp->list);
dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK, dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK,
...@@ -200,12 +244,12 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp, ...@@ -200,12 +244,12 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
kfree(fwp); kfree(fwp);
} }
static void free_4k(struct mlx5_core_dev *dev, u64 addr) static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
{ {
struct fw_page *fwp; struct fw_page *fwp;
int n; int n;
fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK); fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id);
if (!fwp) { if (!fwp) {
mlx5_core_warn_rl(dev, "page not found\n"); mlx5_core_warn_rl(dev, "page not found\n");
return; return;
...@@ -340,7 +384,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, ...@@ -340,7 +384,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
out_4k: out_4k:
for (i--; i >= 0; i--) for (i--; i >= 0; i--)
free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i])); free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id);
out_free: out_free:
kvfree(in); kvfree(in);
if (notify_fail) if (notify_fail)
...@@ -351,16 +395,19 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, ...@@ -351,16 +395,19 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id, static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
bool ec_function) bool ec_function)
{ {
struct rb_root *root;
struct rb_node *p; struct rb_node *p;
int npages = 0; int npages = 0;
p = rb_first(&dev->priv.page_root); root = xa_load(&dev->priv.page_root_xa, func_id);
if (WARN_ON_ONCE(!root))
return;
p = rb_first(root);
while (p) { while (p) {
struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node); struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);
p = rb_next(p); p = rb_next(p);
if (fwp->func_id != func_id)
continue;
npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count); npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
free_fwp(dev, fwp, fwp->free_count); free_fwp(dev, fwp, fwp->free_count);
} }
...@@ -378,6 +425,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id, ...@@ -378,6 +425,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
static int reclaim_pages_cmd(struct mlx5_core_dev *dev, static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
u32 *in, int in_size, u32 *out, int out_size) u32 *in, int in_size, u32 *out, int out_size)
{ {
struct rb_root *root;
struct fw_page *fwp; struct fw_page *fwp;
struct rb_node *p; struct rb_node *p;
u32 func_id; u32 func_id;
...@@ -391,12 +439,14 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, ...@@ -391,12 +439,14 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
npages = MLX5_GET(manage_pages_in, in, input_num_entries); npages = MLX5_GET(manage_pages_in, in, input_num_entries);
func_id = MLX5_GET(manage_pages_in, in, function_id); func_id = MLX5_GET(manage_pages_in, in, function_id);
p = rb_first(&dev->priv.page_root); root = xa_load(&dev->priv.page_root_xa, func_id);
if (WARN_ON_ONCE(!root))
return -EEXIST;
p = rb_first(root);
while (p && i < npages) { while (p && i < npages) {
fwp = rb_entry(p, struct fw_page, rb_node); fwp = rb_entry(p, struct fw_page, rb_node);
p = rb_next(p); p = rb_next(p);
if (fwp->func_id != func_id)
continue;
MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr); MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
i++; i++;
...@@ -430,7 +480,8 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, ...@@ -430,7 +480,8 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
MLX5_SET(manage_pages_in, in, input_num_entries, npages); MLX5_SET(manage_pages_in, in, input_num_entries, npages);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function); MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
func_id, npages, outlen);
err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen); err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
if (err) { if (err) {
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err); mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
...@@ -446,7 +497,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, ...@@ -446,7 +497,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
} }
for (i = 0; i < num_claimed; i++) for (i = 0; i < num_claimed; i++)
free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i])); free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id);
if (nclaimed) if (nclaimed)
*nclaimed = num_claimed; *nclaimed = num_claimed;
...@@ -560,35 +611,49 @@ static int optimal_reclaimed_pages(void) ...@@ -560,35 +611,49 @@ static int optimal_reclaimed_pages(void)
return ret; return ret;
} }
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
struct rb_root *root, u16 func_id)
{ {
unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
struct fw_page *fwp;
struct rb_node *p;
int nclaimed = 0;
int err = 0;
do { while (!RB_EMPTY_ROOT(root)) {
p = rb_first(&dev->priv.page_root); int nclaimed;
if (p) { int err;
fwp = rb_entry(p, struct fw_page, rb_node);
err = reclaim_pages(dev, fwp->func_id, err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
optimal_reclaimed_pages(), &nclaimed, mlx5_core_is_ecpf(dev));
&nclaimed, mlx5_core_is_ecpf(dev)); if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
if (err) { err, func_id);
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", return err;
err);
return err;
}
if (nclaimed)
end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
} }
if (nclaimed)
end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
if (time_after(jiffies, end)) { if (time_after(jiffies, end)) {
mlx5_core_warn(dev, "FW did not return all pages. giving up...\n"); mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
break; break;
} }
} while (p); }
return 0;
}
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
struct rb_root *root;
unsigned long id;
void *entry;
xa_for_each(&dev->priv.page_root_xa, id, entry) {
root = entry;
mlx5_reclaim_root_pages(dev, root, id);
xa_erase(&dev->priv.page_root_xa, id);
kfree(root);
}
WARN_ON(!xa_empty(&dev->priv.page_root_xa));
WARN(dev->priv.fw_pages, WARN(dev->priv.fw_pages,
"FW pages counter is %d after reclaiming all pages\n", "FW pages counter is %d after reclaiming all pages\n",
...@@ -605,17 +670,19 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) ...@@ -605,17 +670,19 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
int mlx5_pagealloc_init(struct mlx5_core_dev *dev) int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{ {
dev->priv.page_root = RB_ROOT;
INIT_LIST_HEAD(&dev->priv.free_list); INIT_LIST_HEAD(&dev->priv.free_list);
dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator"); dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
if (!dev->priv.pg_wq) if (!dev->priv.pg_wq)
return -ENOMEM; return -ENOMEM;
xa_init(&dev->priv.page_root_xa);
return 0; return 0;
} }
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{ {
xa_destroy(&dev->priv.page_root_xa);
destroy_workqueue(dev->priv.pg_wq); destroy_workqueue(dev->priv.pg_wq);
} }
......
...@@ -395,7 +395,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, ...@@ -395,7 +395,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
/* Check that all mask fields were consumed */ /* Check that all mask fields were consumed */
for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) { for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) {
if (((u8 *)&mask)[i] != 0) { if (((u8 *)&mask)[i] != 0) {
mlx5dr_err(dmn, "Mask contains unsupported parameters\n"); mlx5dr_dbg(dmn, "Mask contains unsupported parameters\n");
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
} }
...@@ -474,14 +474,13 @@ static int dr_matcher_add_to_tbl(struct mlx5dr_matcher *matcher) ...@@ -474,14 +474,13 @@ static int dr_matcher_add_to_tbl(struct mlx5dr_matcher *matcher)
int ret; int ret;
next_matcher = NULL; next_matcher = NULL;
if (!list_empty(&tbl->matcher_list)) list_for_each_entry(tmp_matcher, &tbl->matcher_list, matcher_list) {
list_for_each_entry(tmp_matcher, &tbl->matcher_list, matcher_list) { if (tmp_matcher->prio >= matcher->prio) {
if (tmp_matcher->prio >= matcher->prio) { next_matcher = tmp_matcher;
next_matcher = tmp_matcher; break;
break;
}
first = false;
} }
first = false;
}
prev_matcher = NULL; prev_matcher = NULL;
if (next_matcher && !first) if (next_matcher && !first)
......
...@@ -574,9 +574,8 @@ void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste, ...@@ -574,9 +574,8 @@ void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste,
{ {
struct mlx5dr_rule_member *rule_mem; struct mlx5dr_rule_member *rule_mem;
if (!list_empty(&ste->rule_list)) list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list)
list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list) rule_mem->ste = new_ste;
rule_mem->ste = new_ste;
} }
static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule, static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
......
...@@ -110,7 +110,7 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev, ...@@ -110,7 +110,7 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode)) if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode))
break; break;
/* fall through */ fallthrough;
case MLX5_CAP_INLINE_MODE_L2: case MLX5_CAP_INLINE_MODE_L2:
*min_inline_mode = MLX5_INLINE_MODE_L2; *min_inline_mode = MLX5_INLINE_MODE_L2;
break; break;
......
...@@ -541,7 +541,7 @@ struct mlx5_priv { ...@@ -541,7 +541,7 @@ struct mlx5_priv {
/* pages stuff */ /* pages stuff */
struct mlx5_nb pg_nb; struct mlx5_nb pg_nb;
struct workqueue_struct *pg_wq; struct workqueue_struct *pg_wq;
struct rb_root page_root; struct xarray page_root_xa;
int fw_pages; int fw_pages;
atomic_t reg_pages; atomic_t reg_pages;
struct list_head free_list; struct list_head free_list;
......