Commit 18de1e51 authored by Eric Dumazet, committed by David S. Miller

gve: add gve_features_check()

It is suboptimal to attempt skb linearization from ndo_start_xmit()
if a gso skb has pathological layout, or if host stack does not have
access to the payload (TCP direct). Linearization of large skbs
can also fail under memory pressure.

We should instead have an ndo_features_check() so that we can
fallback to GSO, which is supported even for TCP direct,
and generally much more efficient (no payload copy).
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Bailey Forrest <bcf@google.com>
Cc: Willem de Bruijn <willemb@google.com>
Cc: Jeroen de Borst <jeroendb@google.com>
Cc: Praveen Kaligineedi <pkaligineedi@google.com>
Cc: Shailend Chand <shailend@google.com>
Cc: Ziwei Xiao <ziweixiao@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 380b50ae
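
For context on the mechanism this commit relies on: before handing an skb to the driver, the core TX path (validate_xmit_skb()) computes the effective feature set via netif_skb_features(), which calls the driver's .ndo_features_check hook; if the returned mask has the GSO bits cleared, the stack segments the skb in software with skb_gso_segment() and only then calls ndo_start_xmit(). The sketch below is a minimal user-space model of that decision, not kernel code; features_check(), gso_ok_for_hw() and the one-bit FAKE_GSO_MASK are hypothetical stand-ins for the driver hook, gve_can_send_tso() and NETIF_F_GSO_MASK.

/*
 * Minimal user-space model of the software-GSO fallback this commit enables.
 * All names here are illustrative stand-ins, not the kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_GSO_MASK 0x1u	/* stand-in for NETIF_F_GSO_MASK */

/* Stand-in for gve_can_send_tso(): can the HW segment this GSO skb? */
static bool gso_ok_for_hw(bool pathological_layout)
{
	return !pathological_layout;
}

/* Stand-in for the driver's .ndo_features_check hook: clear the GSO
 * bit when the hardware cannot take this particular GSO packet. */
static unsigned int features_check(unsigned int features, bool is_gso,
				   bool pathological_layout)
{
	if (is_gso && !gso_ok_for_hw(pathological_layout))
		return features & ~FAKE_GSO_MASK;
	return features;
}

int main(void)
{
	/* A GSO packet the HW cannot handle: the hook masks the GSO bit,
	 * so the core would segment in software instead of the driver
	 * attempting skb_linearize() from ndo_start_xmit(). */
	unsigned int eff = features_check(FAKE_GSO_MASK, true, true);

	printf("software GSO fallback: %s\n",
	       (eff & FAKE_GSO_MASK) ? "no" : "yes");
	return 0;
}
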
@@ -33,6 +33,9 @@
 #define GVE_DEALLOCATE_COMPL_TIMEOUT 60
 
 netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev);
+netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
+					 struct net_device *dev,
+					 netdev_features_t features);
 bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
 int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
 int gve_tx_alloc_rings_dqo(struct gve_priv *priv);
@@ -79,6 +79,18 @@ static int gve_verify_driver_compatibility(struct gve_priv *priv)
 	return err;
 }
 
+static netdev_features_t gve_features_check(struct sk_buff *skb,
+					    struct net_device *dev,
+					    netdev_features_t features)
+{
+	struct gve_priv *priv = netdev_priv(dev);
+
+	if (!gve_is_gqi(priv))
+		return gve_features_check_dqo(skb, dev, features);
+
+	return features;
+}
+
 static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct gve_priv *priv = netdev_priv(dev);
@@ -1879,6 +1891,7 @@ static int gve_set_features(struct net_device *netdev,
 
 static const struct net_device_ops gve_netdev_ops = {
 	.ndo_start_xmit		=	gve_start_xmit,
+	.ndo_features_check	=	gve_features_check,
 	.ndo_open		=	gve_open,
 	.ndo_stop		=	gve_close,
 	.ndo_get_stats64	=	gve_get_stats,
@@ -843,6 +843,16 @@ static bool gve_can_send_tso(const struct sk_buff *skb)
 	return true;
 }
 
+netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
+					 struct net_device *dev,
+					 netdev_features_t features)
+{
+	if (skb_is_gso(skb) && !gve_can_send_tso(skb))
+		return features & ~NETIF_F_GSO_MASK;
+
+	return features;
+}
+
 /* Attempt to transmit specified SKB.
  *
  * Returns 0 if the SKB was transmitted or dropped.
@@ -854,11 +864,10 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
 	int num_buffer_descs;
 	int total_num_descs;
 
-	if (tx->dqo.qpl) {
-		if (skb_is_gso(skb))
-			if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
-				goto drop;
+	if (skb_is_gso(skb) && unlikely(ipv6_hopopt_jumbo_remove(skb)))
+		goto drop;
 
+	if (tx->dqo.qpl) {
 		/* We do not need to verify the number of buffers used per
 		 * packet or per segment in case of TSO as with 2K size buffers
 		 * none of the TX packet rules would be violated.
@@ -868,24 +877,8 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
 		 */
 		num_buffer_descs = DIV_ROUND_UP(skb->len, GVE_TX_BUF_SIZE_DQO);
 	} else {
-		if (skb_is_gso(skb)) {
-			/* If TSO doesn't meet HW requirements, attempt to linearize the
-			 * packet.
-			 */
-			if (unlikely(!gve_can_send_tso(skb) &&
-				     skb_linearize(skb) < 0)) {
-				net_err_ratelimited("%s: Failed to transmit TSO packet\n",
-						    priv->dev->name);
-				goto drop;
-			}
-
-			if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
-				goto drop;
-
-			num_buffer_descs = gve_num_buffer_descs_needed(skb);
-		} else {
-			num_buffer_descs = gve_num_buffer_descs_needed(skb);
-
+		num_buffer_descs = gve_num_buffer_descs_needed(skb);
+		if (!skb_is_gso(skb)) {
 			if (unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) {
 				if (unlikely(skb_linearize(skb) < 0))
 					goto drop;
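
With the hook in place, gve_try_tx_skb() no longer tries to linearize oversized GSO skbs itself: the ipv6_hopopt_jumbo_remove() check runs once for every GSO skb before the QPL/non-QPL split, and skb_linearize() remains only as a fallback for non-GSO skbs whose buffer-descriptor count exceeds GVE_TX_MAX_DATA_DESCS. GSO skbs that fail gve_can_send_tso() are instead caught earlier by gve_features_check_dqo() masking NETIF_F_GSO_MASK, so the stack falls back to software GSO rather than copying the payload in the driver.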