Commit 292dd654 authored by David S. Miller

Merge branch 'mellanox-net'

Or Gerlitz says:

====================
mlx4 driver encapsulation/steering fixes

The 1st patch fixes a bug in the TX path that offloads the TX checksum of
(VXLAN) encapsulated TCP packets. It turns out that the bug only shows up
when the receiver runs in non-offloaded mode, so we somehow missed it so
far... please queue it for -stable >= 3.14

The 2nd patch makes sure we don't leak a steering entry on the error flow;
please queue it for 3.17-stable
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9cc233fb 571e1b2c
@@ -1173,18 +1173,24 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
 					    &mflow->reg_id[i]);
 		if (err)
-			goto err_free;
+			goto err_create_flow;
 		i++;
 	}
 	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
 		err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
 		if (err)
-			goto err_free;
+			goto err_create_flow;
+		i++;
 	}
 	return &mflow->ibflow;
+err_create_flow:
+	while (i) {
+		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]);
+		i--;
+	}
 err_free:
 	kfree(mflow);
 	return ERR_PTR(err);
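The steering fix above follows the usual unwind-on-error pattern: any flow rule that was registered before the failure is destroyed before the containing object is freed, instead of being leaked. A minimal standalone sketch of that pattern, assuming hypothetical register_rule()/unregister_rule() helpers in place of __mlx4_ib_create_flow()/__mlx4_ib_destroy_flow():

#include <stdio.h>

#define MAX_RULES 4

/* Hypothetical helpers that only illustrate the control flow. */
static int register_rule(int idx, unsigned long *id)
{
	if (idx == 2)
		return -1;	/* simulate a failure on the third rule */
	*id = 1000 + idx;
	return 0;
}

static void unregister_rule(unsigned long id)
{
	printf("unwound rule %lu\n", id);
}

static int create_rules(void)
{
	unsigned long rule_ids[MAX_RULES];
	int err = 0;
	int i = 0;

	while (i < MAX_RULES) {
		err = register_rule(i, &rule_ids[i]);
		if (err)
			goto err_create;	/* stop and unwind what was already set up */
		i++;
	}
	return 0;

err_create:
	while (i--)		/* destroy rules [0, i) in reverse order */
		unregister_rule(rule_ids[i]);
	return err;
}

int main(void)
{
	return create_rules() ? 1 : 0;
}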
@@ -836,8 +836,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * whether LSO is used */
 	tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
-		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
-							 MLX4_WQE_CTRL_TCP_UDP_CSUM);
+		if (!skb->encapsulation)
+			tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
+								 MLX4_WQE_CTRL_TCP_UDP_CSUM);
+		else
+			tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
 		ring->tx_csum++;
 	}
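The TX fix above keys off skb->encapsulation: for an encapsulated (VXLAN) packet the driver stops asking the hardware to fill in the outer UDP checksum and requests only IP checksum offload. A rough standalone sketch of that decision, with hypothetical flag names and plain structs standing in for the real skb and WQE control segment:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical offload request flags; the real driver uses
 * MLX4_WQE_CTRL_IP_CSUM and MLX4_WQE_CTRL_TCP_UDP_CSUM. */
#define CSUM_IP		0x1u
#define CSUM_TCP_UDP	0x2u

/* Minimal stand-ins for the skb and the TX descriptor control segment. */
struct packet {
	bool csum_partial;	/* checksum still to be filled in by hardware */
	bool encapsulated;	/* e.g. VXLAN tunneled */
};

struct tx_ctrl {
	uint32_t csum_flags;
};

static void set_csum_flags(const struct packet *pkt, struct tx_ctrl *ctrl)
{
	if (!pkt->csum_partial)
		return;

	if (!pkt->encapsulated)
		/* plain packet: offload both IP and TCP/UDP checksums */
		ctrl->csum_flags |= CSUM_IP | CSUM_TCP_UDP;
	else
		/* tunneled packet: request only IP checksum offload,
		 * don't ask hardware to fill the outer UDP checksum */
		ctrl->csum_flags |= CSUM_IP;
}

int main(void)
{
	struct packet vxlan = { .csum_partial = true, .encapsulated = true };
	struct tx_ctrl ctrl = { 0 };

	set_csum_flags(&vxlan, &ctrl);
	printf("csum flags: 0x%x\n", ctrl.csum_flags);
	return 0;
}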
@@ -955,6 +955,10 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
 			  cur->ib.dst_gid_msk);
 		break;
+	case MLX4_NET_TRANS_RULE_ID_VXLAN:
+		len += snprintf(buf + len, BUF_SIZE - len,
+				"VNID = %d ", be32_to_cpu(cur->vxlan.vni));
+		break;
 	case MLX4_NET_TRANS_RULE_ID_IPV6:
 		break;
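The mcg.c hunk only teaches the rule-dump helper to print the VXLAN VNI, which sits big-endian in the rule spec. A tiny sketch of the same append-to-a-bounded-buffer idiom, with an illustrative buffer size and ntohl() standing in for the kernel's be32_to_cpu():

#include <arpa/inet.h>	/* ntohl()/htonl(), user-space stand-ins for be32_to_cpu()/cpu_to_be32() */
#include <stdint.h>
#include <stdio.h>

#define DUMP_BUF_SIZE 256

int main(void)
{
	char buf[DUMP_BUF_SIZE];
	int len = 0;
	uint32_t vni_be = htonl(42);	/* VNI as it would sit in the rule spec */

	/* Append without overflowing, mirroring the snprintf(buf + len, ...) idiom. */
	len += snprintf(buf + len, DUMP_BUF_SIZE - len,
			"VNID = %u ", ntohl(vni_be));
	printf("%s\n", buf);
	return 0;
}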