Commit 5e4a7ccc authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Fix extra deref in rxe_rcv_mcast_pkt()

rxe_rcv_mcast_pkt() dropped a reference to ib_device even when no error
occurred, causing an underflow on the reference counter.  This code is
cleaned up to be clearer and easier to read.

Fixes: 899aba89 ("RDMA/rxe: Fix FIXME in rxe_udp_encap_recv()")
Link: https://lore.kernel.org/r/20210304192048.2958-1-rpearson@hpe.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 21e27ac8
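
The bug described above is easiest to see as a reference-count imbalance: the handler issued one put too many on the success path. Below is a minimal, standalone C sketch of that imbalance and of the early-return shape of the fix. dev_ref, dev_get(), dev_put(), deliver_and_consume() and the two handler variants are invented stand-ins for illustration only; this is not the kernel code.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    static int dev_ref = 1;         /* stands in for the ib_device reference counter */

    static void dev_get(void) { dev_ref++; }
    static void dev_put(void) { assert(dev_ref > 0); dev_ref--; }

    /* stands in for rxe_rcv_pkt(): the consumer owns the skb and drops its ref */
    static void deliver_and_consume(void) { dev_put(); }

    /* old flow: the final put runs unconditionally, even when the packet was
     * handed to the last QP and the consumer has already dropped the ref
     */
    static void handler_old(bool delivered_to_last_qp)
    {
            if (delivered_to_last_qp)
                    deliver_and_consume();

            dev_put();              /* one put too many in the success case */
    }

    /* fixed flow: return early once the packet has been consumed, so the
     * final put only runs on the error path
     */
    static void handler_new(bool delivered_to_last_qp)
    {
            if (delivered_to_last_qp) {
                    deliver_and_consume();
                    return;
            }
            dev_put();
    }

    int main(void)
    {
            dev_get();              /* ref taken when the packet arrived */
            handler_new(true);
            printf("fixed flow: dev_ref = %d (back to where it started)\n", dev_ref);

            dev_get();
            handler_old(true);      /* counter ends one lower than it started */
            printf("old flow:   dev_ref = %d (one below the baseline)\n", dev_ref);
            return 0;
    }

The shape of the fix is the same as in the patch below: once ownership of the skb (and of its device reference) has been handed to a consumer, the cleanup path has to be skipped entirely rather than trusted to do nothing.
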
@@ -237,8 +237,6 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 	struct rxe_mc_elem *mce;
 	struct rxe_qp *qp;
 	union ib_gid dgid;
-	struct sk_buff *per_qp_skb;
-	struct rxe_pkt_info *per_qp_pkt;
 	int err;
 
 	if (skb->protocol == htons(ETH_P_IP))
@@ -250,10 +248,15 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 	/* lookup mcast group corresponding to mgid, takes a ref */
 	mcg = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid);
 	if (!mcg)
-		goto err1;	/* mcast group not registered */
+		goto drop;	/* mcast group not registered */
 
 	spin_lock_bh(&mcg->mcg_lock);
 
+	/* this is unreliable datagram service so we let
+	 * failures to deliver a multicast packet to a
+	 * single QP happen and just move on and try
+	 * the rest of them on the list
+	 */
 	list_for_each_entry(mce, &mcg->qp_list, qp_list) {
 		qp = mce->qp;
 
@@ -266,39 +269,47 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 		if (err)
 			continue;
 
-		/* for all but the last qp create a new clone of the
-		 * skb and pass to the qp. If an error occurs in the
-		 * checks for the last qp in the list we need to
-		 * free the skb since it hasn't been passed on to
-		 * rxe_rcv_pkt() which would free it later.
+		/* for all but the last QP create a new clone of the
+		 * skb and pass to the QP. Pass the original skb to
+		 * the last QP in the list.
 		 */
 		if (mce->qp_list.next != &mcg->qp_list) {
-			per_qp_skb = skb_clone(skb, GFP_ATOMIC);
-			if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
-				kfree_skb(per_qp_skb);
-				continue;
-			}
-		} else {
-			per_qp_skb = skb;
-			/* show we have consumed the skb */
-			skb = NULL;
-		}
+			struct sk_buff *cskb;
+			struct rxe_pkt_info *cpkt;
 
-		if (unlikely(!per_qp_skb))
-			continue;
+			cskb = skb_clone(skb, GFP_ATOMIC);
+			if (unlikely(!cskb))
+				continue;
 
-		per_qp_pkt = SKB_TO_PKT(per_qp_skb);
-		per_qp_pkt->qp = qp;
-		rxe_add_ref(qp);
-		rxe_rcv_pkt(per_qp_pkt, per_qp_skb);
+			if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
+				kfree_skb(cskb);
+				break;
+			}
+
+			cpkt = SKB_TO_PKT(cskb);
+			cpkt->qp = qp;
+			rxe_add_ref(qp);
+			rxe_rcv_pkt(cpkt, cskb);
+		} else {
+			pkt->qp = qp;
+			rxe_add_ref(qp);
+			rxe_rcv_pkt(pkt, skb);
+			skb = NULL;	/* mark consumed */
+		}
 	}
 
 	spin_unlock_bh(&mcg->mcg_lock);
 
 	rxe_drop_ref(mcg);	/* drop ref from rxe_pool_get_key. */
 
-err1:
-	/* free skb if not consumed */
+	if (likely(!skb))
+		return;
+
+	/* This only occurs if one of the checks fails on the last
+	 * QP in the list above
+	 */
+drop:
 	kfree_skb(skb);
 	ib_device_put(&rxe->ib_dev);
 }
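
For reference, here is a standalone sketch of the delivery pattern the new code uses: clone the packet for every receiver except the last, hand the original to the last one, and free the original yourself only if it was never handed off. The packet struct, clone_packet(), deliver() and the receiver count are invented for illustration; this is not the kernel code.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct packet {
            char payload[64];
    };

    static struct packet *clone_packet(const struct packet *pkt)
    {
            struct packet *copy = malloc(sizeof(*copy));

            if (copy)
                    memcpy(copy, pkt, sizeof(*copy));
            return copy;
    }

    /* the consumer owns (and frees) whatever packet it is given */
    static void deliver(int receiver, struct packet *pkt)
    {
            printf("receiver %d got: %s\n", receiver, pkt->payload);
            free(pkt);
    }

    int main(void)
    {
            struct packet *pkt = malloc(sizeof(*pkt));
            int nreceivers = 3;
            int i;

            if (!pkt)
                    return 1;
            snprintf(pkt->payload, sizeof(pkt->payload), "multicast payload");

            for (i = 0; i < nreceivers; i++) {
                    if (i < nreceivers - 1) {
                            /* all but the last receiver get a clone; a failed
                             * clone is simply skipped, mirroring the unreliable
                             * datagram semantics noted in the patch comment
                             */
                            struct packet *copy = clone_packet(pkt);

                            if (!copy)
                                    continue;
                            deliver(i, copy);
                    } else {
                            /* the last receiver consumes the original */
                            deliver(i, pkt);
                            pkt = NULL;     /* mark consumed */
                    }
            }

            /* only non-NULL here if the original was never handed off */
            free(pkt);
            return 0;
    }

Handing the original to the last receiver saves one copy and leaves exactly one owner responsible for it, which is why the patch only needs the trailing if (likely(!skb)) check to cover the case where the checks failed for the last QP and the original was never passed on.
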