Commit e524a6a9 authored by Jakub Kicinski, committed by David S. Miller

nfp: parse metadata prepend before XDP runs

Calling memcpy to shift metadata out of the way for XDP to run
seems like overkill.  The most common metadata contents are
8 bytes containing type and flow hash.  Simply parse the metadata
before we run XDP.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5cd4fbea
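
The core idea is small enough to show in isolation. Below is a minimal user-space sketch of the approach, a hedged illustration rather than driver code: the struct and function names, the RSS and hash-type constant values, and the use of ntohl() in place of get_unaligned_be32() are stand-ins. The common 8-byte prepend of big-endian hash type plus flow hash is decoded into a small struct before XDP would touch the buffer, and the values are applied to the skb only after one is built, so nothing has to be copied out of the way.

/*
 * Illustrative mock only -- constant values and names are assumptions,
 * not the driver's definitions.
 */
#include <arpa/inet.h>		/* ntohl(), standing in for get_unaligned_be32() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HASH_TYPE_L3	2	/* stand-in for PKT_HASH_TYPE_L3 */
#define HASH_TYPE_L4	3	/* stand-in for PKT_HASH_TYPE_L4 */

struct meta_parsed {		/* mirrors struct nfp_meta_parsed from the diff */
	uint32_t hash_type;
	uint32_t hash;
	uint32_t mark;
};

/* Decode the common prepend: __be32 hash type followed by __be32 flow hash. */
static void parse_hash_prepend(const void *data, struct meta_parsed *meta)
{
	uint32_t raw[2];
	uint32_t rss_type;

	memcpy(raw, data, sizeof(raw));	/* unaligned-safe read of the prepend */
	rss_type = ntohl(raw[0]);

	/* 1..3 stand in for the L3-only RSS types (IPV4/IPV6/IPV6_EX). */
	meta->hash_type = (rss_type >= 1 && rss_type <= 3) ? HASH_TYPE_L3
							   : HASH_TYPE_L4;
	meta->hash = ntohl(raw[1]);
}

int main(void)
{
	/* Fake RX fragment: an 8-byte metadata prepend ahead of packet data. */
	uint8_t frag[8] = { 0, 0, 0, 2, 0xde, 0xad, 0xbe, 0xef };
	struct meta_parsed meta;

	memset(&meta, 0, sizeof(meta));	/* as the RX loop now does up front */
	parse_hash_prepend(frag, &meta);

	/* ...XDP would run here, on a buffer that was never shuffled... */

	printf("hash_type=%u hash=0x%08x mark=%u\n",
	       meta.hash_type, meta.hash, meta.mark);
	return 0;
}

With the parse done up front, the later xdp_adjust_head handling no longer needs to preserve the prepend bytes at all, which is what allows the memcpy into meta_prepend[] to be dropped in the diff below.
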
@@ -284,6 +284,12 @@ struct nfp_net_rx_desc {
 #define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
 
+struct nfp_meta_parsed {
+	u32 hash_type;
+	u32 hash;
+	u32 mark;
+};
+
 struct nfp_net_rx_hash {
 	__be32 hash_type;
 	__be32 hash;
......
@@ -1402,8 +1402,9 @@ static void nfp_net_rx_csum(struct nfp_net_dp *dp,
 	}
 }
 
-static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
-			     unsigned int type, __be32 *hash)
+static void
+nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
+		 unsigned int type, __be32 *hash)
 {
 	if (!(netdev->features & NETIF_F_RXHASH))
 		return;
@@ -1412,16 +1413,18 @@ static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
 	case NFP_NET_RSS_IPV4:
 	case NFP_NET_RSS_IPV6:
 	case NFP_NET_RSS_IPV6_EX:
-		skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L3);
+		meta->hash_type = PKT_HASH_TYPE_L3;
 		break;
 	default:
-		skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L4);
+		meta->hash_type = PKT_HASH_TYPE_L4;
 		break;
 	}
+
+	meta->hash = get_unaligned_be32(hash);
 }
 
 static void
-nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb,
+nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
 		      void *data, struct nfp_net_rx_desc *rxd)
 {
 	struct nfp_net_rx_hash *rx_hash = data;
@@ -1429,12 +1432,12 @@ nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb,
 	if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
 		return;
 
-	nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type),
+	nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
 			 &rx_hash->hash);
 }
 
 static void *
-nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
+nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
 		   void *data, int meta_len)
 {
 	u32 meta_info;
@@ -1446,13 +1449,13 @@ nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
 		switch (meta_info & NFP_NET_META_FIELD_MASK) {
 		case NFP_NET_META_HASH:
 			meta_info >>= NFP_NET_META_FIELD_SIZE;
-			nfp_net_set_hash(netdev, skb,
+			nfp_net_set_hash(netdev, meta,
 					 meta_info & NFP_NET_META_FIELD_MASK,
 					 (__be32 *)data);
 			data += 4;
 			break;
 		case NFP_NET_META_MARK:
-			skb->mark = get_unaligned_be32(data);
+			meta->mark = get_unaligned_be32(data);
 			data += 4;
 			break;
 		default:
@@ -1587,12 +1590,11 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 	while (pkts_polled < budget) {
 		unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
-		u8 meta_prepend[NFP_NET_MAX_PREPEND];
 		struct nfp_net_rx_buf *rxbuf;
 		struct nfp_net_rx_desc *rxd;
+		struct nfp_meta_parsed meta;
 		dma_addr_t new_dma_addr;
 		void *new_frag;
-		u8 *meta;
 
 		idx = rx_ring->rd_p & (rx_ring->cnt - 1);
@@ -1605,6 +1607,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 		 */
 		dma_rmb();
 
+		memset(&meta, 0, sizeof(meta));
+
 		rx_ring->rd_p++;
 		pkts_polled++;
@@ -1638,9 +1642,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 		r_vec->rx_bytes += pkt_len;
 		u64_stats_update_end(&r_vec->rx_sync);
 
-		/* Pointer to start of metadata */
-		meta = rxbuf->frag + meta_off;
-
 		if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
 			     (dp->rx_offset && meta_len > dp->rx_offset))) {
 			nn_dp_warn(dp, "oversized RX packet metadata %u\n",
@@ -1652,6 +1653,23 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 		nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
 					data_len);
 
+		if (!dp->chained_metadata_format) {
+			nfp_net_set_hash_desc(dp->netdev, &meta,
+					      rxbuf->frag + meta_off, rxd);
+		} else if (meta_len) {
+			void *end;
+
+			end = nfp_net_parse_meta(dp->netdev, &meta,
+						 rxbuf->frag + meta_off,
+						 meta_len);
+			if (unlikely(end != rxbuf->frag + pkt_off)) {
+				nn_dp_warn(dp, "invalid RX packet metadata\n");
+				nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
+						NULL);
+				continue;
+			}
+		}
+
 		if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
 				  dp->bpf_offload_xdp)) {
 			unsigned int dma_off;
@@ -1660,12 +1678,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 			hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
 
-			/* Move prepend out of the way */
-			if (xdp_prog->xdp_adjust_head) {
-				memcpy(meta_prepend, meta, meta_len);
-				meta = meta_prepend;
-			}
-
 			act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start,
 					      &pkt_off, &pkt_len);
 			switch (act) {
@@ -1709,19 +1721,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 		skb_reserve(skb, pkt_off);
 		skb_put(skb, pkt_len);
 
-		if (!dp->chained_metadata_format) {
-			nfp_net_set_hash_desc(dp->netdev, skb, meta, rxd);
-		} else if (meta_len) {
-			void *end;
-
-			end = nfp_net_parse_meta(dp->netdev, skb, meta,
-						 meta_len);
-			if (unlikely(end != meta + meta_len)) {
-				nn_dp_warn(dp, "invalid RX packet metadata\n");
-				nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
-				continue;
-			}
-		}
+		skb->mark = meta.mark;
+		skb_set_hash(skb, meta.hash, meta.hash_type);
 
 		skb_record_rx_queue(skb, rx_ring->idx);
 		skb->protocol = eth_type_trans(skb, dp->netdev);
......