Commit bfb564e7 authored by Krishna Kumar, committed by David S. Miller

core: Factor out flow calculation from get_rps_cpu

Factor out flow calculation code from get_rps_cpu, since other
functions can use the same code.

Revisions:

v2 (Ben): Separate flow calculation out and use it in select queue.
v3 (Arnd): Don't re-implement MIN.
v4 (Changli): skb->data points to the ethernet header in macvtap, so
	add a fast path. Tested macvtap with this patch.
v5 (Changli):
	- Cache skb->rxhash in skb_get_rxhash
	- macvtap may not have pow(2) queues, so change the code for
	  queue selection (see the sketch after this list).
    (Arnd):
	- Use the first available queue if all else fails.
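
The queue-selection point comes down to mask versus modulo: hash & (n - 1) only spreads flows correctly when the queue count n is a power of two, while a plain modulo works for any count. A minimal sketch of that logic together with the first-available-queue fallback (illustrative helper, not the actual macvtap code):

/* Illustrative only: pick a queue from a flow hash without assuming
 * a power-of-two queue count.  Not the macvtap implementation. */
static unsigned int example_pick_queue(u32 rxhash, unsigned int numqueues)
{
	if (!rxhash || !numqueues)
		return 0;	/* all else failed: first available queue */

	/* rxhash & (numqueues - 1) would require numqueues to be a
	 * power of two; modulo is correct for any count. */
	return rxhash % numqueues;
}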
Signed-off-by: Krishna Kumar <krkumar2@in.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6891dd25
include/linux/skbuff.h
@@ -558,6 +558,15 @@ extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
 				    unsigned int to, struct ts_config *config,
 				    struct ts_state *state);
 
+extern __u32 __skb_get_rxhash(struct sk_buff *skb);
+static inline __u32 skb_get_rxhash(struct sk_buff *skb)
+{
+	if (!skb->rxhash)
+		skb->rxhash = __skb_get_rxhash(skb);
+
+	return skb->rxhash;
+}
+
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 {
...
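
The skb_get_rxhash() wrapper added above computes the hash at most once per skb: rxhash == 0 doubles as the "not yet computed" sentinel, which is why __skb_get_rxhash() in the dev.c hunk below maps a legitimately zero hash to 1. A small sketch of the resulting contract (illustrative function, not part of the patch):

/* Illustrative only: the first call may parse headers and cache the
 * result in skb->rxhash; later calls are a plain field load. */
static void example_rxhash_contract(struct sk_buff *skb)
{
	__u32 first = skb_get_rxhash(skb);	/* may compute the hash */
	__u32 second = skb_get_rxhash(skb);	/* cache hit */

	BUG_ON(first != second);
	/* first == 0 only when no flow hash could be derived */
}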
net/core/dev.c
@@ -2259,69 +2259,41 @@ static inline void ____napi_schedule(struct softnet_data *sd,
 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
 }
 
-#ifdef CONFIG_RPS
-
-/* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
-EXPORT_SYMBOL(rps_sock_flow_table);
-
 /*
- * get_rps_cpu is called from netif_receive_skb and returns the target
- * CPU from the RPS map of the receiving queue for a given skb.
- * rcu_read_lock must be held on entry.
+ * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
+ * and src/dst port numbers. Returns a non-zero hash number on success
+ * and 0 on failure.
  */
-static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
-		       struct rps_dev_flow **rflowp)
+__u32 __skb_get_rxhash(struct sk_buff *skb)
 {
+	int nhoff, hash = 0;
 	struct ipv6hdr *ip6;
 	struct iphdr *ip;
-	struct netdev_rx_queue *rxqueue;
-	struct rps_map *map;
-	struct rps_dev_flow_table *flow_table;
-	struct rps_sock_flow_table *sock_flow_table;
-	int cpu = -1;
 	u8 ip_proto;
-	u16 tcpu;
 	u32 addr1, addr2, ihl;
 	union {
 		u32 v32;
 		u16 v16[2];
 	} ports;
 
-	if (skb_rx_queue_recorded(skb)) {
-		u16 index = skb_get_rx_queue(skb);
-		if (unlikely(index >= dev->num_rx_queues)) {
-			WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
-				"on queue %u, but number of RX queues is %u\n",
-				dev->name, index, dev->num_rx_queues);
-			goto done;
-		}
-		rxqueue = dev->_rx + index;
-	} else
-		rxqueue = dev->_rx;
-
-	if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
-		goto done;
-
-	if (skb->rxhash)
-		goto got_hash; /* Skip hash computation on packet header */
+	nhoff = skb_network_offset(skb);
 
 	switch (skb->protocol) {
 	case __constant_htons(ETH_P_IP):
-		if (!pskb_may_pull(skb, sizeof(*ip)))
+		if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
 			goto done;
 
-		ip = (struct iphdr *) skb->data;
+		ip = (struct iphdr *) skb->data + nhoff;
 		ip_proto = ip->protocol;
 		addr1 = (__force u32) ip->saddr;
 		addr2 = (__force u32) ip->daddr;
 		ihl = ip->ihl;
 		break;
 	case __constant_htons(ETH_P_IPV6):
-		if (!pskb_may_pull(skb, sizeof(*ip6)))
+		if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
 			goto done;
 
-		ip6 = (struct ipv6hdr *) skb->data;
+		ip6 = (struct ipv6hdr *) skb->data + nhoff;
 		ip_proto = ip6->nexthdr;
 		addr1 = (__force u32) ip6->saddr.s6_addr32[3];
 		addr2 = (__force u32) ip6->daddr.s6_addr32[3];
@@ -2330,6 +2302,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	default:
 		goto done;
 	}
+
 	switch (ip_proto) {
 	case IPPROTO_TCP:
 	case IPPROTO_UDP:
@@ -2338,8 +2311,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	case IPPROTO_AH:
 	case IPPROTO_SCTP:
 	case IPPROTO_UDPLITE:
-		if (pskb_may_pull(skb, (ihl * 4) + 4)) {
-			ports.v32 = * (__force u32 *) (skb->data + (ihl * 4));
+		if (pskb_may_pull(skb, (ihl * 4) + 4 + nhoff)) {
+			ports.v32 = * (__force u32 *) (skb->data + nhoff +
+							(ihl * 4));
 			if (ports.v16[1] < ports.v16[0])
 				swap(ports.v16[0], ports.v16[1]);
 			break;
@@ -2352,11 +2326,55 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	/* get a consistent hash (same value on both flow directions) */
 	if (addr2 < addr1)
 		swap(addr1, addr2);
-	skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
-	if (!skb->rxhash)
-		skb->rxhash = 1;
 
-got_hash:
+	hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
+	if (!hash)
+		hash = 1;
+
+done:
+	return hash;
+}
+EXPORT_SYMBOL(__skb_get_rxhash);
+
+#ifdef CONFIG_RPS
+
+/* One global table that all flow-based protocols share. */
+struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
+EXPORT_SYMBOL(rps_sock_flow_table);
+
+/*
+ * get_rps_cpu is called from netif_receive_skb and returns the target
+ * CPU from the RPS map of the receiving queue for a given skb.
+ * rcu_read_lock must be held on entry.
+ */
+static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+		       struct rps_dev_flow **rflowp)
+{
+	struct netdev_rx_queue *rxqueue;
+	struct rps_map *map;
+	struct rps_dev_flow_table *flow_table;
+	struct rps_sock_flow_table *sock_flow_table;
+	int cpu = -1;
+	u16 tcpu;
+
+	if (skb_rx_queue_recorded(skb)) {
+		u16 index = skb_get_rx_queue(skb);
+		if (unlikely(index >= dev->num_rx_queues)) {
+			WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
+				"on queue %u, but number of RX queues is %u\n",
+				dev->name, index, dev->num_rx_queues);
+			goto done;
+		}
+		rxqueue = dev->_rx + index;
+	} else
+		rxqueue = dev->_rx;
+
+	if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
+		goto done;
+
+	if (!skb_get_rxhash(skb))
+		goto done;
+
 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
 	if (flow_table && sock_flow_table) {
...
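
The two swap() calls in the hash path make the result direction-invariant: the address pair and the packed port pair are put into canonical order before hashing, so the A->B and B->A traffic of one connection map to the same value. A standalone sketch of that canonicalisation, using the same jhash_3words() interface as the kernel (illustrative, not part of the patch):

/* Illustrative only: canonical flow key so both directions of a
 * connection produce the same hash. */
static u32 example_flow_hash(u32 addr1, u32 addr2, u16 port1, u16 port2,
			     u32 hashrnd)
{
	union {
		u32 v32;
		u16 v16[2];
	} ports;

	ports.v16[0] = port1;
	ports.v16[1] = port2;
	if (ports.v16[1] < ports.v16[0])	/* order the port pair */
		swap(ports.v16[0], ports.v16[1]);
	if (addr2 < addr1)			/* order the address pair */
		swap(addr1, addr2);

	return jhash_3words(addr1, addr2, ports.v32, hashrnd);
}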