Commit 6ea0032f authored by David S. Miller

Merge branch 'reduce-open-coded-skb-next-access-for-gso-segment-walking'

Jason A. Donenfeld says:

====================
reduce open coded skb->next access for gso segment walking

This patchset introduces the skb_list_walk_safe helper macro, in order
to add some sanity to the myriad ways drivers have of walking through gso
segments. The goal is to reduce future bugs commonly caused by open
coding these sorts of things, and to make it easier in the future to
swap out the underlying list representation.

This first patch series addresses the easy cases of drivers iterating
over the list returned by skb_gso_segment, for drivers that live in
drivers/net/*. There are still other use cases to tackle later in
net/*, and once this low-hanging fruit is taken care of, I imagine
there are more subtle cases of gso segment walking that aren't just a
direct return value from skb_gso_segment; eventually those will have
to be tackled as well. This series is the first step in that direction.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 542d3065 66de4b17
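Most of the hunks below replace variants of the same hand-rolled walk. As a minimal sketch of the conversion (drv_xmit_one() is a hypothetical stand-in for each driver's real transmit call, not a function from the commit):

#include <linux/skbuff.h>

static void drv_xmit_one(struct sk_buff *skb);	/* hypothetical transmit helper */

/* Before: open-coded walk over the list returned by skb_gso_segment(),
 * advancing the head pointer by hand and unlinking each segment.
 */
static void drv_xmit_segs_open_coded(struct sk_buff *segs)
{
	struct sk_buff *nskb;

	while (segs) {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;	/* unlink before handing off */
		drv_xmit_one(nskb);
	}
}

/* After: skb_list_walk_safe() caches the next pointer before the body
 * runs, so the body may unlink, requeue, or free the current segment.
 */
static void drv_xmit_segs(struct sk_buff *segs)
{
	struct sk_buff *seg, *next;

	skb_list_walk_safe(segs, seg, next) {
		skb_mark_not_on_list(seg);	/* sets seg->next = NULL */
		drv_xmit_one(seg);
	}
}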
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7874,8 +7874,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
 		       struct netdev_queue *txq, struct sk_buff *skb)
 {
-	struct sk_buff *segs, *nskb;
 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
+	struct sk_buff *segs, *seg, *next;
 
 	/* Estimate the number of fragments in the worst case */
 	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
@@ -7898,12 +7898,10 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
 	if (IS_ERR(segs) || !segs)
 		goto tg3_tso_bug_end;
 
-	do {
-		nskb = segs;
-		segs = segs->next;
-		nskb->next = NULL;
-		tg3_start_xmit(nskb, tp->dev);
-	} while (segs);
+	skb_list_walk_safe(segs, seg, next) {
+		skb_mark_not_on_list(seg);
+		tg3_start_xmit(seg, tp->dev);
+	}
 
 tg3_tso_bug_end:
 	dev_consume_skb_any(skb);
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2892,7 +2892,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
 static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
 				   struct net_device *dev)
 {
-	struct sk_buff *segs, *curr;
+	struct sk_buff *segs, *curr, *next;
 	struct myri10ge_priv *mgp = netdev_priv(dev);
 	struct myri10ge_slice_state *ss;
 	netdev_tx_t status;
@@ -2901,10 +2901,8 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
 	if (IS_ERR(segs))
 		goto drop;
 
-	while (segs) {
-		curr = segs;
-		segs = segs->next;
-		curr->next = NULL;
+	skb_list_walk_safe(segs, curr, next) {
+		skb_mark_not_on_list(curr);
 		status = myri10ge_xmit(curr, dev);
 		if (status != 0) {
 			dev_kfree_skb_any(curr);
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -307,12 +307,9 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
 	dev_consume_skb_any(skb);
 	skb = segments;
 
-	while (skb) {
-		next = skb->next;
-		skb->next = NULL;
-
+	skb_list_walk_safe(skb, skb, next) {
+		skb_mark_not_on_list(skb);
 		efx_enqueue_skb(tx_queue, skb);
-		skb = next;
 	}
 
 	return 0;
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1223,7 +1223,7 @@ vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
 {
 	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
 	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-	struct sk_buff *segs;
+	struct sk_buff *segs, *curr, *next;
 	int maclen, datalen;
 	int status;
 	int gso_size, gso_type, gso_segs;
@@ -1282,11 +1282,8 @@ vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
 	skb_reset_mac_header(skb);
 
 	status = 0;
-	while (segs) {
-		struct sk_buff *curr = segs;
-
-		segs = segs->next;
-		curr->next = NULL;
+	skb_list_walk_safe(segs, curr, next) {
+		skb_mark_not_on_list(curr);
 		if (port->tso && curr->len > dev->mtu) {
 			skb_shinfo(curr)->gso_size = gso_size;
 			skb_shinfo(curr)->gso_type = gso_type;
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -341,6 +341,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
 	features |= tap->tap_features;
 	if (netif_needs_gso(skb, features)) {
 		struct sk_buff *segs = __skb_gso_segment(skb, features, false);
+		struct sk_buff *next;
 
 		if (IS_ERR(segs))
 			goto drop;
@@ -352,16 +353,13 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
 		}
 
 		consume_skb(skb);
-		while (segs) {
-			struct sk_buff *nskb = segs->next;
-
-			segs->next = NULL;
-			if (ptr_ring_produce(&q->ring, segs)) {
-				kfree_skb(segs);
-				kfree_skb_list(nskb);
+		skb_list_walk_safe(segs, skb, next) {
+			skb_mark_not_on_list(skb);
+			if (ptr_ring_produce(&q->ring, skb)) {
+				kfree_skb(skb);
+				kfree_skb_list(next);
 				break;
 			}
-			segs = nskb;
 		}
 	} else {
 		/* If we receive a partial checksum and the tap side
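One subtlety in the tap.c hunk above: because the macro has already cached next by the time the body runs, an early break can still dispose of the unsent remainder in a single call. A sketch of that abort pattern, assuming a hypothetical wrapper tap_queue_segs() (the ring and helper names beyond ptr_ring_produce(), kfree_skb(), and kfree_skb_list() are illustrative, not from the commit):

static void tap_queue_segs(struct ptr_ring *ring, struct sk_buff *segs)
{
	struct sk_buff *skb, *next;

	skb_list_walk_safe(segs, skb, next) {
		skb_mark_not_on_list(skb);
		if (ptr_ring_produce(ring, skb)) {	/* ring full: bail out */
			kfree_skb(skb);			/* the segment that failed */
			kfree_skb_list(next);		/* the not-yet-visited tail */
			break;
		}
	}
}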
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1897,8 +1897,8 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
 {
 	if (skb_shinfo(skb)->gso_size) {
 		netdev_features_t features = tp->netdev->features;
+		struct sk_buff *segs, *seg, *next;
 		struct sk_buff_head seg_list;
-		struct sk_buff *segs, *nskb;
 
 		features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
 		segs = skb_gso_segment(skb, features);
@@ -1907,12 +1907,10 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
 
 		__skb_queue_head_init(&seg_list);
 
-		do {
-			nskb = segs;
-			segs = segs->next;
-			nskb->next = NULL;
-			__skb_queue_tail(&seg_list, nskb);
-		} while (segs);
+		skb_list_walk_safe(segs, seg, next) {
+			skb_mark_not_on_list(seg);
+			__skb_queue_tail(&seg_list, seg);
+		}
 
 		skb_queue_splice(&seg_list, list);
 		dev_kfree_skb(skb);
--- a/drivers/net/wireguard/device.h
+++ b/drivers/net/wireguard/device.h
@@ -62,12 +62,4 @@ struct wg_device {
 int wg_device_init(void);
 void wg_device_uninit(void);
 
-/* Later after the dust settles, this can be moved into include/linux/skbuff.h,
- * where virtually all code that deals with GSO segs can benefit, around ~30
- * drivers as of writing.
- */
-#define skb_list_walk_safe(first, skb, next) \
-	for (skb = first, next = skb->next; skb; \
-	     skb = next, next = skb ? skb->next : NULL)
-
 #endif /* _WG_DEVICE_H */
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -847,10 +847,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
 	else if (next)
 		consume_skb(skb);
 
-	while (next) {
-		tmp = next;
-		next = tmp->next;
-
+	skb_list_walk_safe(next, tmp, next) {
 		memcpy(tmp->cb, cb, sizeof(tmp->cb));
 		/*
 		 * Compute the length of all the data added for the A-MSDU.
@@ -880,9 +877,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
 			skb_shinfo(tmp)->gso_size = 0;
 		}
 
-		tmp->prev = NULL;
-		tmp->next = NULL;
-
+		skb_mark_not_on_list(tmp);
 		__skb_queue_tail(mpdus_skb, tmp);
 		i++;
 	}
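A detail worth noting in the iwlwifi hunk above: the list head variable next is reused as the macro's own cursor, skb_list_walk_safe(next, tmp, next). This is well-defined because the comma-sequenced initializer reads the old head into tmp before advancing next. A sketch under that assumption (queue_amsdu_segs() is a hypothetical wrapper, not a function from the commit):

static void queue_amsdu_segs(struct sk_buff_head *mpdus_skb, struct sk_buff *next)
{
	struct sk_buff *tmp;

	/* The initializer expands, simplified, to
	 *     tmp = next, next = tmp ? tmp->next : NULL
	 * so tmp is loaded from the old value of 'next' before 'next'
	 * is advanced; aliasing the two variables is therefore fine.
	 */
	skb_list_walk_safe(next, tmp, next) {
		skb_mark_not_on_list(tmp);
		__skb_queue_tail(mpdus_skb, tmp);
	}
}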
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1478,6 +1478,11 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb)
 	skb->next = NULL;
 }
 
+/* Iterate through singly-linked GSO fragments of an skb. */
+#define skb_list_walk_safe(first, skb, next) \
+	for ((skb) = (first), (next) = (skb) ? (skb)->next : NULL; (skb); \
+	     (skb) = (next), (next) = (skb) ? (skb)->next : NULL)
+
 static inline void skb_list_del_init(struct sk_buff *skb)
 {
 	__list_del_entry(&skb->list);
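Compared with the WireGuard-local version removed from device.h above, the macro landing in skbuff.h parenthesizes its arguments and NULL-checks first in the initializer, so an empty list is handled gracefully; the old macro computed skb->next unconditionally and would dereference a NULL head. A small fragment illustrating the difference:

	struct sk_buff *segs = NULL, *seg, *next;

	/* With the skbuff.h macro, a NULL head simply skips the body;
	 * the old device.h macro would have dereferenced segs->next here.
	 */
	skb_list_walk_safe(segs, seg, next) {
		skb_mark_not_on_list(seg);	/* never reached for an empty list */
	}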