Commit 7d6541fb authored by David S. Miller

Merge tag 'mlx5e-updates-2018-05-14' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5e-updates-2018-05-14

Misc updates for the mlx5e netdevice driver

From Gal Pressman:
 - Remove the MLX5E_TEST_BIT macro and use the generic test_bit instead
 - Use __set_bit when possible (a short sketch of the pattern follows this message)

From Eran Ben Elisha:
  - Improve debug print on initial RX posting timeout

From Or Gerlitz:
 - Support offloaded TC flows with no matches on headers
 - mlx5e TC cleanups

Trivial cleanups from Roi, Tariq and Saeed:
  - Use bool as return type for mlx5e_xdp_handle
  - Use u8 instead of int for LRO number of segments
  - Skip redundant checks when providing NUD lastuse feedback
  - Remove redundant vport context vlan update
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7ed19eb9 0e5c04f6
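
The bit-ops conversion in this series is mechanical: open-coded "state & BIT(nr)" tests become test_bit(), and plain OR-assignments become __set_bit() where no concurrent writer can race. Below is a minimal, self-contained C sketch of the pattern, using simplified userspace stand-ins for the kernel helpers; the DEMO_* names are illustrative only and not part of the driver.

#include <stdio.h>

#define BIT(nr) (1UL << (nr))

/* Simplified userspace stand-ins for the kernel's bitops helpers.
 * In the kernel, test_bit() reads a bit from an unsigned long word,
 * and __set_bit() is the non-atomic setter, appropriate when no
 * concurrent writer can race (e.g. while a queue is being set up). */
static int test_bit(int nr, const unsigned long *addr)
{
	return (int)((*addr >> nr) & 1UL);
}

static void __set_bit(int nr, unsigned long *addr)
{
	*addr |= BIT(nr);
}

/* Illustrative flag names, not the driver's. */
enum { DEMO_RQ_STATE_ENABLED, DEMO_RQ_STATE_AM };

int main(void)
{
	unsigned long state = 0;

	/* Before: open-coded test, as MLX5E_TEST_BIT(state, nr) expanded to. */
	if (!(state & BIT(DEMO_RQ_STATE_ENABLED)))
		printf("RQ not enabled (open-coded test)\n");

	/* After: the generic helpers. */
	__set_bit(DEMO_RQ_STATE_AM, &state);
	if (test_bit(DEMO_RQ_STATE_AM, &state))
		printf("adaptive moderation flag set\n");

	return 0;
}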
@@ -288,8 +288,6 @@ enum {
 	MLX5E_RQ_STATE_AM,
 };
 
-#define MLX5E_TEST_BIT(state, nr) (state & BIT(nr))
-
 struct mlx5e_cq {
 	/* data path - accessed per cqe */
 	struct mlx5_cqwq wq;
@@ -49,7 +49,7 @@ static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
 						     u16 *pi)
 {
 #ifdef CONFIG_MLX5_EN_TLS
-	if (sq->state & BIT(MLX5E_SQ_STATE_TLS)) {
+	if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
 		skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi);
 		if (unlikely(!skb))
 			return NULL;
@@ -57,7 +57,7 @@ static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
 #endif
 
 #ifdef CONFIG_MLX5_EN_IPSEC
-	if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) {
+	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
 		skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb);
 		if (unlikely(!skb))
 			return NULL;
@@ -277,7 +277,6 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
 		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
-		mlx5e_vport_context_update_vlans(priv);
 		if (priv->fs.vlan.active_cvlans_rule[vid]) {
 			mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
 			priv->fs.vlan.active_cvlans_rule[vid] = NULL;
@@ -747,23 +747,24 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
 	mlx5_core_destroy_rq(rq->mdev, rq->rqn);
 }
 
-static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
+static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
 {
-	unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
+	unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
 	struct mlx5e_channel *c = rq->channel;
 	struct mlx5_wq_ll *wq = &rq->wq;
 	u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));
 
-	while (time_before(jiffies, exp_time)) {
+	do {
 		if (wq->cur_sz >= min_wqes)
 			return 0;
 
 		msleep(20);
-	}
+	} while (time_before(jiffies, exp_time));
+
+	netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
+		    c->ix, rq->rqn, wq->cur_sz, min_wqes);
 
-	netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
-		    rq->rqn, wq->cur_sz, min_wqes);
 	return -ETIMEDOUT;
 }
@@ -819,7 +820,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
 		goto err_destroy_rq;
 
 	if (params->rx_dim_enabled)
-		c->rq.state |= BIT(MLX5E_RQ_STATE_AM);
+		__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
 	return 0;
@@ -2128,13 +2129,11 @@ static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
 	int err = 0;
 	int i;
 
-	for (i = 0; i < chs->num; i++) {
-		err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq);
-		if (err)
-			break;
-	}
+	for (i = 0; i < chs->num; i++)
+		err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq,
+						  err ? 0 : 20000);
 
-	return err;
+	return err ? -ETIMEDOUT : 0;
 }
 
 static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
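
A note on the two hunks above: the wait helper now takes its timeout from the caller, and the caller no longer breaks on the first failing channel; once one RQ times out, the remaining RQs are polled with a zero wait, so every stuck RQ gets its own warning while the total wait stays bounded by a single 20-second budget. Below is a minimal C sketch of that accumulation pattern; check_one() and check_all() are hypothetical stand-ins, not driver code.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for mlx5e_wait_for_min_rx_wqes(): returns 0 on
 * success, -1 on timeout, and waits at most wait_ms milliseconds. */
static int check_one(int idx, int wait_ms)
{
	bool ready = (idx != 2);	/* pretend channel 2 is stuck */

	if (ready)
		return 0;
	printf("channel %d not ready (waited up to %d ms)\n", idx, wait_ms);
	return -1;
}

/* Accumulate failures instead of breaking on the first one; after the
 * first timeout, pass a zero wait so total latency stays bounded. */
static int check_all(int num)
{
	int err = 0;
	int i;

	for (i = 0; i < num; i++)
		err |= check_one(i, err ? 0 : 20000);

	return err ? -1 : 0;
}

int main(void)
{
	return check_all(4) ? 1 : 0;
}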
@@ -681,8 +681,8 @@ static int mlx5e_rep_open(struct net_device *dev)
 		goto unlock;
 
 	if (!mlx5_modify_vport_admin_state(priv->mdev,
-			MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
-			rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
+					   MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+					   rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
 		netif_carrier_on(dev);
 
 unlock:
@@ -699,8 +699,8 @@ static int mlx5e_rep_close(struct net_device *dev)
 	mutex_lock(&priv->state_lock);
 	mlx5_modify_vport_admin_state(priv->mdev,
-			MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
-			rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
+				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+				      rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
 	ret = mlx5e_close_locked(dev);
 	mutex_unlock(&priv->state_lock);
 	return ret;
@@ -450,7 +450,7 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 	struct mlx5_wq_ll *wq = &rq->wq;
 	int err;
 
-	if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
+	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
 		return false;
 
 	if (mlx5_wq_ll_is_full(wq))
@@ -508,7 +508,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
 	struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
 	struct mlx5_cqe64 *cqe;
 
-	if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
+	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
 		return;
 
 	cqe = mlx5_cqwq_get_cqe(&cq->wq);
@@ -525,7 +525,7 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
 {
 	struct mlx5_wq_ll *wq = &rq->wq;
 
-	if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
+	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
 		return false;
 
 	mlx5e_poll_ico_cq(&rq->channel->icosq.cq, rq);
@@ -681,11 +681,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 					 struct mlx5e_rq *rq,
 					 struct sk_buff *skb)
 {
+	u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
 	struct net_device *netdev = rq->netdev;
-	int lro_num_seg;
 
 	skb->mac_len = ETH_HLEN;
-	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
 	if (lro_num_seg > 1) {
 		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
 		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
@@ -808,9 +807,9 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 }
 
 /* returns true if packet was consumed by xdp */
-static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
-				   struct mlx5e_dma_info *di,
-				   void *va, u16 *rx_headroom, u32 *len)
+static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
+				    struct mlx5e_dma_info *di,
+				    void *va, u16 *rx_headroom, u32 *len)
 {
 	struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
 	struct xdp_buff xdp;
@@ -1133,7 +1132,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 	struct mlx5_cqe64 *cqe;
 	int work_done = 0;
 
-	if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
+	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
 		return 0;
 
 	if (cq->decmprs_left)
@@ -1186,7 +1185,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 	sq = container_of(cq, struct mlx5e_xdpsq, cq);
 
-	if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
+	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
 		return false;
 
 	cqe = mlx5_cqwq_get_cqe(&cq->wq);
@@ -450,7 +450,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 	sq = container_of(cq, struct mlx5e_txqsq, cq);
 
-	if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
+	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
 		return false;
 
 	cqe = mlx5_cqwq_get_cqe(&cq->wq);
@@ -48,7 +48,7 @@ static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
 {
 	struct net_dim_sample dim_sample;
 
-	if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_AM)))
+	if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
 		return;
 
 	net_dim_sample(sq->cq.event_ctr, sq->stats.packets, sq->stats.bytes,
@@ -60,7 +60,7 @@ static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
 {
 	struct net_dim_sample dim_sample;
 
-	if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_AM)))
+	if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
 		return;
 
 	net_dim_sample(rq->cq.event_ctr, rq->stats.packets, rq->stats.bytes,
@@ -227,6 +227,13 @@ enum {
 	SET_VLAN_INSERT	= BIT(1)
 };
 
+enum mlx5_flow_match_level {
+	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
+	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
+	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
+	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
+};
+
 struct mlx5_esw_flow_attr {
 	struct mlx5_eswitch_rep *in_rep;
 	struct mlx5_eswitch_rep *out_rep;
@@ -238,6 +245,7 @@ struct mlx5_esw_flow_attr {
 	bool	vlan_handled;
 	u32	encap_id;
 	u32	mod_hdr_id;
+	u8	match_level;
 	struct mlx5e_tc_flow_parse_attr *parse_attr;
 };
@@ -91,8 +91,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
 	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
 
-	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
-				      MLX5_MATCH_MISC_PARAMETERS;
+	if (attr->match_level == MLX5_MATCH_NONE)
+		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+	else
+		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
+					      MLX5_MATCH_MISC_PARAMETERS;
 
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
 		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
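
The last two hunks implement the "no matches on headers" support: a flow that matches nothing in the packet headers records MLX5_MATCH_NONE in attr->match_level, and the offload rule then enables only the misc parameters (ingress vport) in its match criteria instead of unconditionally adding outer headers. Below is a minimal C sketch of that selection; pick_criteria() and the constants are illustrative stand-ins, not the driver's API.

#include <stdio.h>

/* Illustrative values mirroring the shape of the driver enums. */
enum { MATCH_NONE, MATCH_L2, MATCH_L3, MATCH_L4 };
enum { CRIT_OUTER = 1 << 0, CRIT_MISC = 1 << 1, CRIT_INNER = 1 << 2 };

/* Hypothetical helper: source-port (misc) matching is always on for
 * offloaded rules; outer headers join only when the flow actually
 * matched on them; decap additionally enables inner headers. */
static unsigned int pick_criteria(int match_level, int decap)
{
	unsigned int crit = CRIT_MISC;

	if (match_level != MATCH_NONE)
		crit |= CRIT_OUTER;
	if (decap)
		crit |= CRIT_INNER;
	return crit;
}

int main(void)
{
	printf("wildcard flow:   0x%x\n", pick_criteria(MATCH_NONE, 0));
	printf("L3 flow + decap: 0x%x\n", pick_criteria(MATCH_L3, 1));
	return 0;
}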