Commit b2761c51 authored by Yishai Hadas, committed by Ben Hutchings

IB/mlx4: Fix error flow when sending mads under SRIOV

commit a6100603 upstream.

Fix mad send error flow to prevent double freeing address handles,
and leaking tx_ring entries when SRIOV is active.

If ib_post_send fails, the address handle pointer in the tx_ring entry
must be set to NULL (or there will be a double-free) and tx_tail must be
incremented (or there will be a leak of tx_ring entries).
The tx_ring is handled the same way in the send-completion handler.
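
Below is a minimal sketch of the rollback pattern the patch establishes, under simplified, assumed types and names (tx_queue, fake_post_send and fake_destroy_ah are hypothetical stand-ins; the driver itself works on the tunnel/sqp QP structures and calls ib_post_send and ib_destroy_ah):

/*
 * Simplified illustration only, not the actual mlx4 driver code:
 * a tx_ring slot reserved for a send is rolled back when the post
 * fails, so the completion handler and the error path never free
 * the same address handle, and no ring entry is leaked.
 */
#include <pthread.h>

#define NUM_TX_BUFS 16			/* stands in for MLX4_NUM_TUNNEL_BUFS */

struct tx_buf {
	void *ah;			/* address handle owned by this slot */
};

struct tx_queue {
	struct tx_buf ring[NUM_TX_BUFS];
	unsigned int ix_head;		/* producer index (send path) */
	unsigned int ix_tail;		/* consumer index (completion path) */
	pthread_mutex_t lock;
};

/* Hypothetical stand-ins for ib_post_send() and ib_destroy_ah(). */
static int fake_post_send(struct tx_buf *slot) { (void)slot; return 0; }
static void fake_destroy_ah(void *ah) { (void)ah; }

int send_one(struct tx_queue *q, void *ah)
{
	unsigned int ix = 0;
	int ret = 0;

	/* Reserve a ring slot under the lock, as the driver does. */
	pthread_mutex_lock(&q->lock);
	if (q->ix_head - q->ix_tail >= NUM_TX_BUFS - 1)
		ret = -1;				/* ring full */
	else
		ix = (++q->ix_head) & (NUM_TX_BUFS - 1);
	pthread_mutex_unlock(&q->lock);
	if (ret)
		goto end;

	q->ring[ix].ah = ah;				/* slot now owns the AH */

	ret = fake_post_send(&q->ring[ix]);
	if (!ret)
		return 0;	/* completion handler will release the slot */

	/*
	 * Post failed: no completion will arrive for this slot, so undo the
	 * reservation here -- advance the tail and clear the AH pointer so a
	 * later pass over the ring cannot destroy the handle a second time.
	 */
	pthread_mutex_lock(&q->lock);
	q->ix_tail++;
	pthread_mutex_unlock(&q->lock);
	q->ring[ix].ah = NULL;
end:
	fake_destroy_ah(ah);
	return ret;
}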

Fixes: 37bfc7c1 ("IB/mlx4: SR-IOV multiplex and demultiplex MADs")
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Reviewed-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
[bwh: Backported to 3.16: adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
parent 7787e63f
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -531,7 +531,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 	tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
 	spin_unlock(&tun_qp->tx_lock);
 	if (ret)
-		goto out;
+		goto end;
 
 	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
 	if (tun_qp->tx_ring[tun_tx_ix].ah)
@@ -600,9 +600,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 	wr.send_flags = IB_SEND_SIGNALED;
 
 	ret = ib_post_send(src_qp, &wr, &bad_wr);
-out:
-	if (ret)
-		ib_destroy_ah(ah);
+	if (!ret)
+		return 0;
+out:
+	spin_lock(&tun_qp->tx_lock);
+	tun_qp->tx_ix_tail++;
+	spin_unlock(&tun_qp->tx_lock);
+	tun_qp->tx_ring[tun_tx_ix].ah = NULL;
+end:
+	ib_destroy_ah(ah);
 	return ret;
 }
@@ -1256,9 +1262,15 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 	ret = ib_post_send(send_qp, &wr, &bad_wr);
+	if (!ret)
+		return 0;
+
+	spin_lock(&sqp->tx_lock);
+	sqp->tx_ix_tail++;
+	spin_unlock(&sqp->tx_lock);
+	sqp->tx_ring[wire_tx_ix].ah = NULL;
 out:
-	if (ret)
-		ib_destroy_ah(ah);
+	ib_destroy_ah(ah);
 	return ret;
 }