Commit 0d5c56a2 authored by Jakub Kicinski

Merge tag 'mlx5-updates-2020-05-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-05-09

This series includes updates to the mlx5 netdev driver and bonding updates
to support getting the next active tx slave.

1) Merge commit with mlx5-next that includes the bonding updates from Maor:
   Bonding: Add support to get xmit slave
2) Maxim makes some general code improvements to the TX data path
3) Tariq makes some general code improvements to kTLS and the mlx5 accel
   layer in preparation for mlx5 TLS RX.
====================
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 2c674bec 28bff095
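
The bonding change at the heart of this series exposes the chosen transmit slave through a generic netdev helper, netdev_get_xmit_slave(), added in the netdevice.h and net/core/dev.c hunks below. As a rough caller-side sketch only (the function and variable names here are illustrative and not part of this series), a consumer such as RoCE/LAG code would resolve the slave under RCU and must not rely on any reference being taken:

	/* Illustrative caller, assuming only what the kernel-doc below states:
	 * must run under rcu_read_lock() and may get NULL back.
	 */
	static bool example_skb_maps_to_slave(struct net_device *bond_dev,
					      struct sk_buff *skb,
					      const struct net_device *candidate)
	{
		struct net_device *slave;
		bool match;

		rcu_read_lock();
		slave = netdev_get_xmit_slave(bond_dev, skb, false);
		match = (slave == candidate);
		rcu_read_unlock();

		return match;
	}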
@@ -1331,11 +1331,11 @@ static netdev_tx_t bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
 	return bond_tx_drop(bond->dev, skb);
 }
 
-netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
+				      struct sk_buff *skb)
 {
-	struct bonding *bond = netdev_priv(bond_dev);
-	struct ethhdr *eth_data;
 	struct slave *tx_slave = NULL;
+	struct ethhdr *eth_data;
 	u32 hash_index;
 
 	skb_reset_mac_header(skb);
@@ -1357,7 +1357,7 @@ netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			struct bond_up_slave *slaves;
 			unsigned int count;
 
-			slaves = rcu_dereference(bond->slave_arr);
+			slaves = rcu_dereference(bond->usable_slaves);
 			count = slaves ? READ_ONCE(slaves->count) : 0;
 			if (likely(count))
 				tx_slave = slaves->arr[hash_index %
@@ -1366,20 +1366,29 @@ netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			break;
 		}
 	}
-	return bond_do_alb_xmit(skb, bond, tx_slave);
+	return tx_slave;
 }
 
-netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
-	struct ethhdr *eth_data;
+	struct slave *tx_slave;
+
+	tx_slave = bond_xmit_tlb_slave_get(bond, skb);
+	return bond_do_alb_xmit(skb, bond, tx_slave);
+}
+
+struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
+				      struct sk_buff *skb)
+{
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
-	struct slave *tx_slave = NULL;
 	static const __be32 ip_bcast = htonl(0xffffffff);
-	int hash_size = 0;
+	struct slave *tx_slave = NULL;
+	const u8 *hash_start = NULL;
 	bool do_tx_balance = true;
+	struct ethhdr *eth_data;
 	u32 hash_index = 0;
-	const u8 *hash_start = NULL;
+	int hash_size = 0;
 
 	skb_reset_mac_header(skb);
 	eth_data = eth_hdr(skb);
@@ -1491,14 +1500,22 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			struct bond_up_slave *slaves;
 			unsigned int count;
 
-			slaves = rcu_dereference(bond->slave_arr);
+			slaves = rcu_dereference(bond->usable_slaves);
 			count = slaves ? READ_ONCE(slaves->count) : 0;
 			if (likely(count))
 				tx_slave = slaves->arr[bond_xmit_hash(bond, skb) %
 						       count];
 		}
 	}
+	return tx_slave;
+}
+
+netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+{
+	struct bonding *bond = netdev_priv(bond_dev);
+	struct slave *tx_slave = NULL;
+
+	tx_slave = bond_xmit_alb_slave_get(bond, skb);
 	return bond_do_alb_xmit(skb, bond, tx_slave);
 }
...
This diff is collapsed.
@@ -5,7 +5,6 @@
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
-#include "en.h"
 
 static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
 {
...
@@ -339,16 +339,6 @@ struct mlx5e_cq_decomp {
 	u16 wqe_counter;
 } ____cacheline_aligned_in_smp;
 
-struct mlx5e_tx_wqe_info {
-	struct sk_buff *skb;
-	u32 num_bytes;
-	u8 num_wqebbs;
-	u8 num_dma;
-#ifdef CONFIG_MLX5_EN_TLS
-	struct page *resync_dump_frag_page;
-#endif
-};
-
 enum mlx5e_dma_map_type {
 	MLX5E_DMA_MAP_SINGLE,
 	MLX5E_DMA_MAP_PAGE
@@ -370,18 +360,6 @@ enum {
 	MLX5E_SQ_STATE_PENDING_XSK_TX,
 };
 
-struct mlx5e_icosq_wqe_info {
-	u8 opcode;
-	u8 num_wqebbs;
-	/* Auxiliary data for different opcodes. */
-	union {
-		struct {
-			struct mlx5e_rq *rq;
-		} umr;
-	};
-};
-
 struct mlx5e_txqsq {
 	/* data path */
@@ -484,11 +462,6 @@ struct mlx5e_xdp_info_fifo {
 	u32 mask;
 };
 
-struct mlx5e_xdp_wqe_info {
-	u8 num_wqebbs;
-	u8 num_pkts;
-};
-
 struct mlx5e_xdp_mpwqe {
 	/* Current MPWQE session */
 	struct mlx5e_tx_wqe *wqe;
@@ -919,7 +892,7 @@ void mlx5e_build_ptys2ethtool_map(void);
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 		       struct net_device *sb_dev);
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
-netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		   struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
 void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
...
@@ -27,6 +27,11 @@
 #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
 
+enum mlx5e_icosq_wqe_type {
+	MLX5E_ICOSQ_WQE_NOP,
+	MLX5E_ICOSQ_WQE_UMR_RX,
+};
+
 static inline bool
 mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
 {
@@ -81,6 +86,16 @@ mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
 	return wqe;
 }
 
+struct mlx5e_tx_wqe_info {
+	struct sk_buff *skb;
+	u32 num_bytes;
+	u8 num_wqebbs;
+	u8 num_dma;
+#ifdef CONFIG_MLX5_EN_TLS
+	struct page *resync_dump_frag_page;
+#endif
+};
+
 static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
 {
 	struct mlx5_wq_cyc *wq = &sq->wq;
@@ -109,6 +124,18 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
 	return pi;
 }
 
+struct mlx5e_icosq_wqe_info {
+	u8 wqe_type;
+	u8 num_wqebbs;
+	/* Auxiliary data for different wqe types. */
+	union {
+		struct {
+			struct mlx5e_rq *rq;
+		} umr;
+	};
+};
+
 static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
 {
 	struct mlx5_wq_cyc *wq = &sq->wq;
@@ -125,7 +152,7 @@ static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
 		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
 		for (; wi < edge_wi; wi++) {
 			*wi = (struct mlx5e_icosq_wqe_info) {
-				.opcode = MLX5_OPCODE_NOP,
+				.wqe_type   = MLX5E_ICOSQ_WQE_NOP,
 				.num_wqebbs = 1,
 			};
 			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
...
@@ -137,6 +137,11 @@ mlx5e_xdp_no_room_for_inline_pkt(struct mlx5e_xdp_mpwqe *session)
 	       session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > MLX5E_XDP_MPW_MAX_NUM_DS;
 }
 
+struct mlx5e_xdp_wqe_info {
+	u8 num_wqebbs;
+	u8 num_pkts;
+};
+
 static inline void
 mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
 			 struct mlx5e_xdp_xmit_data *xdptxd,
...
@@ -102,33 +102,49 @@ mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
 	udp_hdr(skb)->len = htons(payload_len);
 }
 
-static inline struct sk_buff *
-mlx5e_accel_handle_tx(struct sk_buff *skb,
-		      struct mlx5e_txqsq *sq,
-		      struct net_device *dev,
-		      struct mlx5e_tx_wqe **wqe,
-		      u16 *pi)
+struct mlx5e_accel_tx_state {
+#ifdef CONFIG_MLX5_EN_TLS
+	struct mlx5e_accel_tx_tls_state tls;
+#endif
+};
+
+static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
+					struct mlx5e_txqsq *sq,
+					struct sk_buff *skb,
+					struct mlx5e_accel_tx_state *state)
 {
+	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+		mlx5e_udp_gso_handle_tx_skb(skb);
+
 #ifdef CONFIG_MLX5_EN_TLS
 	if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
-		skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi);
-		if (unlikely(!skb))
-			return NULL;
+		/* May send SKBs and WQEs. */
+		if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
+			return false;
 	}
 #endif
 
+	return true;
+}
+
+static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv,
+					 struct mlx5e_txqsq *sq,
+					 struct sk_buff *skb,
+					 struct mlx5e_tx_wqe *wqe,
+					 struct mlx5e_accel_tx_state *state)
+{
+#ifdef CONFIG_MLX5_EN_TLS
+	mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
+#endif
+
 #ifdef CONFIG_MLX5_EN_IPSEC
 	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
-		skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb);
-		if (unlikely(!skb))
-			return NULL;
+		if (unlikely(!mlx5e_ipsec_handle_tx_skb(priv, &wqe->eth, skb)))
+			return false;
 	}
 #endif
 
-	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
-		mlx5e_udp_gso_handle_tx_skb(skb);
-
-	return skb;
+	return true;
 }
 
 #endif /* __MLX5E_EN_ACCEL_H__ */
@@ -233,11 +233,10 @@ static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
 		  ntohs(mdata->content.tx.seq));
 }
 
-struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
-					  struct mlx5e_tx_wqe *wqe,
-					  struct sk_buff *skb)
+bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
+			       struct mlx5_wqe_eth_seg *eseg,
+			       struct sk_buff *skb)
 {
-	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct xfrm_offload *xo = xfrm_offload(skb);
 	struct mlx5e_ipsec_metadata *mdata;
 	struct mlx5e_ipsec_sa_entry *sa_entry;
@@ -245,7 +244,7 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
 	struct sec_path *sp;
 
 	if (!xo)
-		return skb;
+		return true;
 
 	sp = skb_sec_path(skb);
 	if (unlikely(sp->len != 1)) {
@@ -276,16 +275,16 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
 		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
 		goto drop;
 	}
-	mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
+	mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);
 	sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
 	sa_entry->set_iv_op(skb, x, xo);
 	mlx5e_ipsec_set_metadata(skb, mdata, xo);
 
-	return skb;
+	return true;
 
 drop:
 	kfree_skb(skb);
-	return NULL;
+	return false;
 }
 
 static inline struct xfrm_state *
...
@@ -52,8 +52,8 @@ void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
 			    struct xfrm_offload *xo);
 void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
 			struct xfrm_offload *xo);
-struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
-					  struct mlx5e_tx_wqe *wqe,
-					  struct sk_buff *skb);
+bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
+			       struct mlx5_wqe_eth_seg *eseg,
+			       struct sk_buff *skb);
 
 #endif /* CONFIG_MLX5_EN_IPSEC */
...
@@ -9,6 +9,7 @@
 #ifdef CONFIG_MLX5_EN_TLS
 #include <net/tls.h>
 #include "accel/tls.h"
+#include "en_accel/tls_rxtx.h"
 
 #define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
 	(offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \
@@ -95,10 +96,9 @@ mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
 void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
 void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx);
-struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
-					 struct mlx5e_txqsq *sq,
-					 struct sk_buff *skb,
-					 struct mlx5e_tx_wqe **wqe, u16 *pi);
+bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
+			      struct sk_buff *skb, int datalen,
+			      struct mlx5e_accel_tx_tls_state *state);
 void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
 					    struct mlx5e_tx_wqe_info *wi,
 					    u32 *dma_fifo_cc);
...
@@ -108,10 +108,11 @@ static void tx_fill_wi(struct mlx5e_txqsq *sq,
 {
 	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
 
-	memset(wi, 0, sizeof(*wi));
-	wi->num_wqebbs = num_wqebbs;
-	wi->num_bytes  = num_bytes;
-	wi->resync_dump_frag_page = page;
+	*wi = (struct mlx5e_tx_wqe_info) {
+		.num_wqebbs = num_wqebbs,
+		.num_bytes  = num_bytes,
+		.resync_dump_frag_page = page,
+	};
 }
 
 void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
@@ -134,14 +135,14 @@ post_static_params(struct mlx5e_txqsq *sq,
 		   struct mlx5e_ktls_offload_context_tx *priv_tx,
 		   bool fence)
 {
+	u16 pi, num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS;
 	struct mlx5e_umr_wqe *umr_wqe;
-	u16 pi;
 
-	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
 	umr_wqe = MLX5E_TLS_FETCH_UMR_WQE(sq, pi);
 	build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
-	tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
-	sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
+	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
+	sq->pc += num_wqebbs;
 }
 
 static void
@@ -149,14 +150,14 @@ post_progress_params(struct mlx5e_txqsq *sq,
 		     struct mlx5e_ktls_offload_context_tx *priv_tx,
 		     bool fence)
 {
+	u16 pi, num_wqebbs = MLX5E_KTLS_PROGRESS_WQEBBS;
 	struct mlx5e_tx_wqe *wqe;
-	u16 pi;
 
-	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
 	wqe = MLX5E_TLS_FETCH_PROGRESS_WQE(sq, pi);
 	build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
-	tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
-	sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
+	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
+	sq->pc += num_wqebbs;
 }
 
 static void
@@ -166,8 +167,6 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
 {
 	bool progress_fence = skip_static_post || !fence_first_post;
 
-	mlx5e_txqsq_get_next_pi(sq, MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS);
-
 	if (!skip_static_post)
 		post_static_params(sq, priv_tx, fence_first_post);
 
@@ -274,6 +273,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
 	int fsz;
 	u16 pi;
 
+	BUILD_BUG_ON(MLX5E_KTLS_DUMP_WQEBBS != 1);
 	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
 	wqe = MLX5E_TLS_FETCH_DUMP_WQE(sq, pi);
 
@@ -342,7 +342,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 	struct mlx5e_sq_stats *stats = sq->stats;
 	enum mlx5e_ktls_sync_retval ret;
 	struct tx_sync_info info = {};
-	u8 num_wqebbs;
 	int i = 0;
 
 	ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
@@ -371,9 +370,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 		return MLX5E_KTLS_SYNC_DONE;
 	}
 
-	num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
-	mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
-
 	for (; i < info.nr_frags; i++) {
 		unsigned int orig_fsz, frag_offset = 0, n = 0;
 		skb_frag_t *f = &info.frags[i];
@@ -413,35 +409,18 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 	return MLX5E_KTLS_SYNC_FAIL;
 }
 
-struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
-					 struct mlx5e_txqsq *sq,
-					 struct sk_buff *skb,
-					 struct mlx5e_tx_wqe **wqe, u16 *pi)
+bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
+			      struct sk_buff *skb, int datalen,
+			      struct mlx5e_accel_tx_tls_state *state)
 {
 	struct mlx5e_ktls_offload_context_tx *priv_tx;
 	struct mlx5e_sq_stats *stats = sq->stats;
-	struct mlx5_wqe_ctrl_seg *cseg;
-	struct tls_context *tls_ctx;
-	int datalen;
 	u32 seq;
 
-	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
-		goto out;
-
-	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
-	if (!datalen)
-		goto out;
-
-	tls_ctx = tls_get_ctx(skb->sk);
-	if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
-		goto err_out;
-
 	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
 
 	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
 		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
-		*pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
-		*wqe = MLX5E_TX_FETCH_WQE(sq, *pi);
 		stats->tls_ctx++;
 	}
 
@@ -452,31 +431,28 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
 		switch (ret) {
 		case MLX5E_KTLS_SYNC_DONE:
-			*pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
-			*wqe = MLX5E_TX_FETCH_WQE(sq, *pi);
 			break;
 		case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
 			if (likely(!skb->decrypted))
 				goto out;
 			WARN_ON_ONCE(1);
 			/* fall-through */
-		default: /* MLX5E_KTLS_SYNC_FAIL */
+		case MLX5E_KTLS_SYNC_FAIL:
 			goto err_out;
 		}
 	}
 
 	priv_tx->expected_seq = seq + datalen;
 
-	cseg = &(*wqe)->ctrl;
-	cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
+	state->tls_tisn = priv_tx->tisn;
 
 	stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
 	stats->tls_encrypted_bytes   += datalen;
 
 out:
-	return skb;
+	return true;
 
 err_out:
 	dev_kfree_skb_any(skb);
-	return NULL;
+	return false;
 }
@@ -184,18 +184,17 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
 	nskb->queue_mapping = skb->queue_mapping;
 }
 
-static struct sk_buff *
-mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
+static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
 				 struct mlx5e_txqsq *sq, struct sk_buff *skb,
-				 struct mlx5e_tx_wqe **wqe,
-				 u16 *pi,
 				 struct mlx5e_tls *tls)
 {
 	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
+	struct mlx5e_tx_wqe *wqe;
 	struct sync_info info;
 	struct sk_buff *nskb;
 	int linear_len = 0;
 	int headln;
+	u16 pi;
 	int i;
 
 	sq->stats->tls_ooo++;
@@ -217,7 +216,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
 	if (likely(payload <= -info.sync_len))
 		/* SKB payload doesn't require offload
 		 */
-		return skb;
+		return true;
 
 	atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
 	goto err_out;
@@ -247,21 +246,19 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
 	sq->stats->tls_resync_bytes += nskb->len;
 	mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
 				    cpu_to_be64(info.rcd_sn));
-	mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
-	*pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
-	*wqe = MLX5E_TX_FETCH_WQE(sq, *pi);
-	return skb;
+	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+	mlx5e_sq_xmit(sq, nskb, wqe, pi, true);
+
+	return true;
 
 err_out:
 	dev_kfree_skb_any(skb);
-	return NULL;
+	return false;
 }
 
-struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
-					struct mlx5e_txqsq *sq,
-					struct sk_buff *skb,
-					struct mlx5e_tx_wqe **wqe,
-					u16 *pi)
+bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+			     struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_tls_offload_context_tx *context;
@@ -270,41 +267,45 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
 	int datalen;
 	u32 skb_seq;
 
-	if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
-		skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
-		goto out;
-	}
-
 	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
-		goto out;
+		return true;
 
 	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
 	if (!datalen)
-		goto out;
+		return true;
 
 	tls_ctx = tls_get_ctx(skb->sk);
-	if (unlikely(tls_ctx->netdev != netdev))
-		goto out;
+	if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
+		goto err_out;
+
+	if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx))
+		return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
 
 	skb_seq = ntohl(tcp_hdr(skb)->seq);
 	context = mlx5e_get_tls_tx_context(tls_ctx);
 	expected_seq = context->expected_seq;
 
-	if (unlikely(expected_seq != skb_seq)) {
-		skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
-		goto out;
-	}
+	if (unlikely(expected_seq != skb_seq))
+		return mlx5e_tls_handle_ooo(context, sq, skb, priv->tls);
 
 	if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
 		atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
 		dev_kfree_skb_any(skb);
-		skb = NULL;
-		goto out;
+		return false;
 	}
 
 	context->expected_seq = skb_seq + datalen;
-out:
-	return skb;
+
+	return true;
+
+err_out:
+	dev_kfree_skb_any(skb);
+	return false;
+}
+
+void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
+			     struct mlx5e_accel_tx_tls_state *state)
+{
+	cseg->tisn = cpu_to_be32(state->tls_tisn << 8);
 }
 
 static int tls_update_resync_sn(struct net_device *netdev,
...
@@ -40,11 +40,14 @@
 #include "en.h"
 #include "en/txrx.h"
 
-struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
-					struct mlx5e_txqsq *sq,
-					struct sk_buff *skb,
-					struct mlx5e_tx_wqe **wqe,
-					u16 *pi);
+struct mlx5e_accel_tx_tls_state {
+	u32 tls_tisn;
+};
+
+bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+			     struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state);
+void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
+			     struct mlx5e_accel_tx_tls_state *state);
 
 void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
 			     u32 *cqe_bcnt);
...
@@ -1364,13 +1364,12 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
 	/* last doorbell out, godspeed .. */
 	if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
 		u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-		struct mlx5e_tx_wqe_info *wi;
 		struct mlx5e_tx_wqe *nop;
 
-		wi = &sq->db.wqe_info[pi];
+		sq->db.wqe_info[pi] = (struct mlx5e_tx_wqe_info) {
+			.num_wqebbs = 1,
+		};
 
-		memset(wi, 0, sizeof(*wi));
-		wi->num_wqebbs = 1;
 		nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
 		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
 	}
@@ -1482,20 +1481,21 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
 	/* Pre initialize fixed WQE fields */
 	for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
-		struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[i];
 		struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
 		struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
 		struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
 		struct mlx5_wqe_data_seg *dseg;
 
+		sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
+			.num_wqebbs = 1,
+			.num_pkts   = 1,
+		};
+
 		cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
 		eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
 
 		dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
 		dseg->lkey = sq->mkey_be;
-
-		wi->num_wqebbs = 1;
-		wi->num_pkts   = 1;
 	}
 }
...
@@ -505,9 +505,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 				    MLX5_OPCODE_UMR);
 	umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
 
-	sq->db.wqe_info[pi].opcode = MLX5_OPCODE_UMR;
-	sq->db.wqe_info[pi].num_wqebbs = MLX5E_UMR_WQEBBS;
-	sq->db.wqe_info[pi].umr.rq = rq;
+	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+		.wqe_type   = MLX5E_ICOSQ_WQE_UMR_RX,
+		.num_wqebbs = MLX5E_UMR_WQEBBS,
+		.umr.rq     = rq,
+	};
+
 	sq->pc += MLX5E_UMR_WQEBBS;
 
 	sq->doorbell_cseg = &umr_wqe->ctrl;
@@ -616,15 +619,18 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 				break;
 			}
 
-			if (likely(wi->opcode == MLX5_OPCODE_UMR))
+			switch (wi->wqe_type) {
+			case MLX5E_ICOSQ_WQE_UMR_RX:
 				wi->umr.rq->mpwqe.umr_completed++;
-			else if (unlikely(wi->opcode != MLX5_OPCODE_NOP))
+				break;
+			case MLX5E_ICOSQ_WQE_NOP:
+				break;
+			default:
 				netdev_WARN_ONCE(cq->channel->netdev,
-						 "Bad OPCODE in ICOSQ WQE info: 0x%x\n",
-						 wi->opcode);
+						 "Bad WQE type in ICOSQ WQE info: 0x%x\n",
+						 wi->wqe_type);
+			}
 		} while (!last_wqe);
 	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
 	sq->cc = sqcc;
...
@@ -265,7 +265,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
 }
 
-netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
-			  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
+void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+		   struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
 {
 	struct mlx5_wq_cyc *wq = &sq->wq;
@@ -373,32 +373,38 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
 			     num_dma, wi, cseg, xmit_more);
 
-	return NETDEV_TX_OK;
+	return;
 
 err_drop:
 	stats->dropped++;
 	dev_kfree_skb_any(skb);
-
-	return NETDEV_TX_OK;
 }
 
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5e_accel_tx_state accel = {};
 	struct mlx5e_tx_wqe *wqe;
 	struct mlx5e_txqsq *sq;
 	u16 pi;
 
 	sq = priv->txq2sq[skb_get_queue_mapping(skb)];
 
+	/* May send SKBs and WQEs. */
+	if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
+		goto out;
+
 	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
 	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
 
-	/* might send skbs and update wqe and pi */
-	skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
-	if (unlikely(!skb))
-		return NETDEV_TX_OK;
+	/* May update the WQE, but may not post other WQEs. */
+	if (unlikely(!mlx5e_accel_tx_finish(priv, sq, skb, wqe, &accel)))
+		goto out;
 
-	return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
+	mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
+
+out:
+	return NETDEV_TX_OK;
 }
 
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
@@ -568,9 +574,8 @@ mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
 	dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
 }
 
-netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
-			  struct mlx5_av *av, u32 dqpn, u32 dqkey,
-			  bool xmit_more)
+void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+		   struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more)
 {
 	struct mlx5i_tx_wqe *wqe;
@@ -648,12 +653,10 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
 			     num_dma, wi, cseg, xmit_more);
 
-	return NETDEV_TX_OK;
+	return;
 
 err_drop:
 	stats->dropped++;
 	dev_kfree_skb_any(skb);
-
-	return NETDEV_TX_OK;
 }
 #endif
@@ -78,8 +78,11 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
 	struct mlx5e_tx_wqe *nopwqe;
 	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
 
-	sq->db.wqe_info[pi].opcode = MLX5_OPCODE_NOP;
-	sq->db.wqe_info[pi].num_wqebbs = 1;
+	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+		.wqe_type   = MLX5E_ICOSQ_WQE_NOP,
+		.num_wqebbs = 1,
+	};
+
 	nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
 	mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
 }
...
@@ -663,7 +663,9 @@ static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
 	struct mlx5_ib_ah *mah = to_mah(address);
 	struct mlx5i_priv *ipriv = epriv->ppriv;
 
-	return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());
+	mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());
+
+	return NETDEV_TX_OK;
 }
 
 static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
...
@@ -113,9 +113,8 @@ struct mlx5i_tx_wqe {
 #define MLX5I_SQ_FETCH_WQE(sq, pi) \
 	((struct mlx5i_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5i_tx_wqe)))
 
-netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
-			  struct mlx5_av *av, u32 dqpn, u32 dqkey,
-			  bool xmit_more);
+void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+		   struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more);
 void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
...
@@ -42,7 +42,7 @@
  * Beware of lock dependencies (preferably, no locks should be acquired
  * under it).
  */
-static DEFINE_MUTEX(lag_mutex);
+static DEFINE_SPINLOCK(lag_lock);
 
 static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
 			       u8 remap_port2)
@@ -274,9 +274,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 	if (!dev0 || !dev1)
 		return;
 
-	mutex_lock(&lag_mutex);
+	spin_lock(&lag_lock);
 	tracker = ldev->tracker;
-	mutex_unlock(&lag_mutex);
+	spin_unlock(&lag_lock);
 
 	do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
@@ -458,9 +458,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
 		break;
 	}
 
-	mutex_lock(&lag_mutex);
+	spin_lock(&lag_lock);
 	ldev->tracker = tracker;
-	mutex_unlock(&lag_mutex);
+	spin_unlock(&lag_lock);
 
 	if (changed)
 		mlx5_queue_bond_work(ldev, 0);
@@ -502,7 +502,7 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
 	if (fn >= MLX5_MAX_PORTS)
 		return;
 
-	mutex_lock(&lag_mutex);
+	spin_lock(&lag_lock);
 	ldev->pf[fn].dev    = dev;
 	ldev->pf[fn].netdev = netdev;
 	ldev->tracker.netdev_state[fn].link_up = 0;
@@ -510,7 +510,7 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
 	dev->priv.lag = ldev;
 
-	mutex_unlock(&lag_mutex);
+	spin_unlock(&lag_lock);
 }
 
 static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
@@ -525,11 +525,11 @@ static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
 	if (i == MLX5_MAX_PORTS)
 		return;
 
-	mutex_lock(&lag_mutex);
+	spin_lock(&lag_lock);
 	memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
 
 	dev->priv.lag = NULL;
-	mutex_unlock(&lag_mutex);
+	spin_unlock(&lag_lock);
 }
 
 /* Must be called with intf_mutex held */
@@ -607,10 +607,10 @@ bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
 	struct mlx5_lag *ldev;
 	bool res;
 
-	mutex_lock(&lag_mutex);
+	spin_lock(&lag_lock);
 	ldev = mlx5_lag_dev_get(dev);
 	res = ldev && __mlx5_lag_is_roce(ldev);
-	mutex_unlock(&lag_mutex);
+	spin_unlock(&lag_lock);
 
 	return res;
 }
@@ -621,10 +621,10 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
 	struct mlx5_lag *ldev;
 	bool res;
 
-	mutex_lock(&lag_mutex);
+	spin_lock(&lag_lock);
 	ldev = mlx5_lag_dev_get(dev);
 	res = ldev && __mlx5_lag_is_active(ldev);
-	mutex_unlock(&lag_mutex);
+	spin_unlock(&lag_lock);
 
 	return res;
 }
@@ -635,10 +635,10 @@ bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
 	struct mlx5_lag *ldev;
 	bool res;
 
-	mutex_lock(&lag_mutex);
+	spin_lock(&lag_lock);
 	ldev = mlx5_lag_dev_get(dev);
 	res = ldev && __mlx5_lag_is_sriov(ldev);
-	mutex_unlock(&lag_mutex);
+	spin_unlock(&lag_lock);
 
 	return res;
 }
@@ -664,7 +664,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
 	struct net_device *ndev = NULL;
 	struct mlx5_lag *ldev;
 
-	mutex_lock(&lag_mutex);
+	spin_lock(&lag_lock);
 	ldev = mlx5_lag_dev_get(dev);
 
 	if (!(ldev && __mlx5_lag_is_roce(ldev)))
@@ -681,12 +681,36 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
 		dev_hold(ndev);
 
 unlock:
-	mutex_unlock(&lag_mutex);
+	spin_unlock(&lag_lock);
 
 	return ndev;
 }
 EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
 
+u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
+			   struct net_device *slave)
+{
+	struct mlx5_lag *ldev;
+	u8 port = 0;
+
+	spin_lock(&lag_lock);
+	ldev = mlx5_lag_dev_get(dev);
+	if (!(ldev && __mlx5_lag_is_roce(ldev)))
+		goto unlock;
+
+	if (ldev->pf[MLX5_LAG_P1].netdev == slave)
+		port = MLX5_LAG_P1;
+	else
+		port = MLX5_LAG_P2;
+
+	port = ldev->v2p_map[port];
+
+unlock:
+	spin_unlock(&lag_lock);
+	return port;
+}
+EXPORT_SYMBOL(mlx5_lag_get_slave_port);
+
 bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
 {
 	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev,
@@ -723,7 +747,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 	memset(values, 0, sizeof(*values) * num_counters);
 
-	mutex_lock(&lag_mutex);
+	spin_lock(&lag_lock);
 	ldev = mlx5_lag_dev_get(dev);
 	if (ldev && __mlx5_lag_is_roce(ldev)) {
 		num_ports = MLX5_MAX_PORTS;
@@ -733,6 +757,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 		num_ports = 1;
 		mdev[MLX5_LAG_P1] = dev;
 	}
+	spin_unlock(&lag_lock);
 
 	for (i = 0; i < num_ports; ++i) {
 		u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
@@ -742,14 +767,13 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 		ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in,
 					  out);
 		if (ret)
-			goto unlock;
+			goto free;
 
 		for (j = 0; j < num_counters; ++j)
 			values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
 	}
 
-unlock:
-	mutex_unlock(&lag_mutex);
+free:
 	kvfree(out);
 	return ret;
 }
...
@@ -1074,6 +1074,8 @@ bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
 struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
+			   struct net_device *slave);
 int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 				 u64 *values,
 				 int num_counters,
...
@@ -1148,6 +1148,12 @@ struct netdev_net_notifier {
  * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
  *	Called to release previously enslaved netdev.
  *
+ * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
+ *					    struct sk_buff *skb,
+ *					    bool all_slaves);
+ *	Get the xmit slave of master device. If all_slaves is true, function
+ *	assume all the slaves can transmit.
+ *
  * Feature/offload setting functions.
  * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
  *	netdev_features_t features);
@@ -1391,6 +1397,9 @@ struct net_device_ops {
 						 struct netlink_ext_ack *extack);
 	int			(*ndo_del_slave)(struct net_device *dev,
 						 struct net_device *slave_dev);
+	struct net_device*	(*ndo_get_xmit_slave)(struct net_device *dev,
+						      struct sk_buff *skb,
+						      bool all_slaves);
 	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
 						    netdev_features_t features);
 	int			(*ndo_set_features)(struct net_device *dev,
@@ -2745,6 +2754,9 @@ void netdev_freemem(struct net_device *dev);
 void synchronize_net(void);
 int init_dummy_netdev(struct net_device *dev);
 
+struct net_device *netdev_get_xmit_slave(struct net_device *dev,
+					 struct sk_buff *skb,
+					 bool all_slaves);
 struct net_device *dev_get_by_index(struct net *net, int ifindex);
 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
...
@@ -158,6 +158,10 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
 void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
 int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
 int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
+				      struct sk_buff *skb);
+struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
+				      struct sk_buff *skb);
 void bond_alb_monitor(struct work_struct *);
 int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
 void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
...
@@ -200,7 +200,8 @@ struct bonding {
 	struct   slave __rcu *curr_active_slave;
 	struct   slave __rcu *current_arp_slave;
 	struct   slave __rcu *primary_slave;
-	struct   bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
+	struct   bond_up_slave __rcu *usable_slaves;
+	struct   bond_up_slave __rcu *all_slaves;
 	bool     force_primary;
 	s32      slave_cnt; /* never change this value outside the attach/detach wrappers */
 	int     (*recv_probe)(const struct sk_buff *, struct bonding *,
...
@@ -7861,6 +7861,28 @@ void netdev_bonding_info_change(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_bonding_info_change);
 
+/**
+ * netdev_get_xmit_slave - Get the xmit slave of master device
+ * @skb: The packet
+ * @all_slaves: assume all the slaves are active
+ *
+ * The reference counters are not incremented so the caller must be
+ * careful with locks. The caller must hold RCU lock.
+ * %NULL is returned if no slave is found.
+ */
+struct net_device *netdev_get_xmit_slave(struct net_device *dev,
+					 struct sk_buff *skb,
+					 bool all_slaves)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	if (!ops->ndo_get_xmit_slave)
+		return NULL;
+	return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
+}
+EXPORT_SYMBOL(netdev_get_xmit_slave);
+
 static void netdev_adjacent_add_links(struct net_device *dev)
 {
 	struct netdev_adjacent *iter;
...
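
netdev_get_xmit_slave() above is only a thin dispatcher to the master driver's new ndo_get_xmit_slave callback. The actual bonding implementation of that callback is not shown in this view; purely for illustration (all identifiers below are hypothetical), a master device driver would wire the callback up roughly like this:

	/* Hypothetical master driver: names are made up for illustration and
	 * do not reproduce the real bonding code.
	 */
	static struct net_device *example_get_xmit_slave(struct net_device *master,
							 struct sk_buff *skb,
							 bool all_slaves)
	{
		/* Choose a slave for this skb (hash, active-backup policy, etc.);
		 * return NULL if no slave can transmit.
		 */
		return NULL;
	}

	static const struct net_device_ops example_netdev_ops = {
		.ndo_get_xmit_slave	= example_get_xmit_slave,
		/* other callbacks ... */
	};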