Commit 3958afa1 authored by Tom Herbert, committed by David S. Miller

net: Change skb_get_rxhash to skb_get_hash

Change the name of the function as part of making the hash in skbuff a generic property, not just one for the receive path.
Signed-off-by: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1aee6cc2
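Illustration (not part of the commit): since the hash is no longer treated as an rx-only property, transmit-path code can consume it directly, as the tun and fq hunks below already do. A minimal sketch under that assumption; the helper pick_tx_queue() and its queue count are hypothetical, introduced only for this example:

#include <linux/skbuff.h>

/* Hypothetical tx-side consumer, modeled on the tun_select_queue() hunk
 * below: pick a transmit queue from the (now generically named) skb hash.
 */
static u16 pick_tx_queue(struct sk_buff *skb, unsigned int numqueues)
{
	u32 hash = skb_get_hash(skb);	/* was skb_get_rxhash() before this commit */

	if (!hash)			/* zero means no valid flow hash */
		return 0;

	return hash % numqueues;
}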
@@ -224,7 +224,7 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
 		goto out;
 
 	/* Check if we can use flow to select a queue */
-	rxq = skb_get_rxhash(skb);
+	rxq = skb_get_hash(skb);
 	if (rxq) {
 		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
 		goto out;
@@ -358,7 +358,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
 	rcu_read_lock();
 	numqueues = ACCESS_ONCE(tun->numqueues);
 
-	txq = skb_get_rxhash(skb);
+	txq = skb_get_hash(skb);
 	if (txq) {
 		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
 		if (e)
@@ -1146,7 +1146,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	skb_reset_network_header(skb);
 	skb_probe_transport_header(skb, 0);
 
-	rxhash = skb_get_rxhash(skb);
+	rxhash = skb_get_hash(skb);
 	netif_rx_ni(skb);
 
 	tun->dev->stats.rx_packets++;
@@ -1405,7 +1405,7 @@ __be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
 	unsigned int range = (port_max - port_min) + 1;
 	u32 hash;
 
-	hash = skb_get_rxhash(skb);
+	hash = skb_get_hash(skb);
 	if (!hash)
 		hash = jhash(skb->data, 2 * ETH_ALEN,
 			     (__force u32) skb->protocol);
@@ -703,11 +703,11 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
 			   unsigned int to, struct ts_config *config,
 			   struct ts_state *state);
 
-void __skb_get_rxhash(struct sk_buff *skb);
-static inline __u32 skb_get_rxhash(struct sk_buff *skb)
+void __skb_get_hash(struct sk_buff *skb);
+static inline __u32 skb_get_hash(struct sk_buff *skb)
 {
 	if (!skb->l4_rxhash)
-		__skb_get_rxhash(skb);
+		__skb_get_hash(skb);
 
 	return skb->rxhash;
 }
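As the inline wrapper above shows, the hash is computed lazily: skb_get_hash() only runs the flow dissector (via __skb_get_hash()) when no L4 hash has been recorded, and otherwise returns the cached skb->rxhash. A hedged usage sketch, modeled on the macvtap hunk's rxq % numvtaps pattern earlier in the diff; the helper, bucket count, and counter array are hypothetical:

#include <linux/skbuff.h>

/* Hypothetical consumer: spread flows over a small bucket array.
 * Calling skb_get_hash() repeatedly stays cheap because the result is
 * cached in skb->rxhash after the first computation.
 */
#define NBUCKETS 16

static void account_flow(struct sk_buff *skb, unsigned long counters[NBUCKETS])
{
	u32 hash = skb_get_hash(skb);

	if (!hash)		/* flow could not be dissected */
		return;

	counters[hash % NBUCKETS]++;
}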
@@ -3006,7 +3006,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	}
 
 	skb_reset_network_header(skb);
-	if (!skb_get_rxhash(skb))
+	if (!skb_get_hash(skb))
 		goto done;
 
 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
@@ -3151,7 +3151,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
 	rcu_read_lock();
 	fl = rcu_dereference(sd->flow_limit);
 	if (fl) {
-		new_flow = skb_get_rxhash(skb) & (fl->num_buckets - 1);
+		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
 		old_flow = fl->history[fl->history_head];
 		fl->history[fl->history_head] = new_flow;
@@ -202,12 +202,12 @@ static __always_inline u32 __flow_hash_1word(u32 a)
 }
 
 /*
- * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
+ * __skb_get_hash: calculate a flow hash based on src/dst addresses
  * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
  * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb
  * if hash is a canonical 4-tuple hash over transport ports.
  */
-void __skb_get_rxhash(struct sk_buff *skb)
+void __skb_get_hash(struct sk_buff *skb)
 {
 	struct flow_keys keys;
 	u32 hash;
@@ -234,7 +234,7 @@ void __skb_get_rxhash(struct sk_buff *skb)
 
 	skb->rxhash = hash;
 }
-EXPORT_SYMBOL(__skb_get_rxhash);
+EXPORT_SYMBOL(__skb_get_hash);
 
 /*
  * Returns a Tx hash based on the given packet descriptor a Tx queues' number
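The comment above defines the contract: a zero return means the flow could not be dissected. Callers that need some hash anyway fall back to hashing raw header bytes, exactly as the vxlan_src_port() hunk earlier in this diff does. A small sketch of that fallback pattern; only the wrapper function name is invented here:

#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/if_ether.h>

/* Hypothetical wrapper mirroring the vxlan fallback: if no flow hash is
 * available, hash the two MAC addresses at the start of the frame instead.
 */
static u32 hash_with_mac_fallback(struct sk_buff *skb)
{
	u32 hash = skb_get_hash(skb);

	if (!hash)
		hash = jhash(skb->data, 2 * ETH_ALEN,
			     (__force u32) skb->protocol);
	return hash;
}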
@@ -963,7 +963,7 @@ static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
 			struct tpacket3_hdr *ppd)
 {
-	ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
+	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
 }
 
 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
@@ -1295,7 +1295,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
 			if (!skb)
 				return 0;
 		}
-		skb_get_rxhash(skb);
+		skb_get_hash(skb);
 		idx = fanout_demux_hash(f, skb, num);
 		break;
 	case PACKET_FANOUT_LB:
@@ -220,7 +220,7 @@ static u32 flow_get_vlan_tag(const struct sk_buff *skb)
 
 static u32 flow_get_rxhash(struct sk_buff *skb)
 {
-	return skb_get_rxhash(skb);
+	return skb_get_hash(skb);
 }
 
 static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
@@ -222,7 +222,7 @@ META_COLLECTOR(int_maclen)
 
 META_COLLECTOR(int_rxhash)
 {
-	dst->value = skb_get_rxhash(skb);
+	dst->value = skb_get_hash(skb);
 }
 
 /**************************************************************************
@@ -226,7 +226,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
 		/* By forcing low order bit to 1, we make sure to not
 		 * collide with a local flow (socket pointers are word aligned)
 		 */
-		sk = (struct sock *)(skb_get_rxhash(skb) | 1L);
+		sk = (struct sock *)(skb_get_hash(skb) | 1L);
 	}
 
 	root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)];
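The comment in the fq hunk above explains the trick: fq keys flows by socket pointer, and socket pointers are word aligned, so their low bit is always clear; a key derived from the skb hash with bit 0 forced to 1 therefore can never collide with a real socket. A sketch of that idea; the helper name is hypothetical:

#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical illustration of the fq key trick: socket pointers are word
 * aligned (bit 0 clear), hash-derived keys always have bit 0 set.
 */
static unsigned long flow_key(struct sk_buff *skb, struct sock *sk)
{
	if (sk)
		return (unsigned long)sk;			/* bit 0 == 0 */

	return (unsigned long)(skb_get_hash(skb) | 1UL);	/* bit 0 == 1 */
}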