Commit 12b16dad authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

filter: optimize accesses to ancillary data

We can translate pseudo load instructions at filter check time to
dedicated instructions to speed up filtering and avoid one switch().
libpcap currently uses SKF_AD_PROTOCOL, but custom filters probably use
other ancillary accesses.

Note : I made the assumption that ancillary data is always accessed with
BPF_LD|BPF_?|BPF_ABS instructions, not with BPF_LD|BPF_?|BPF_IND ones
(offset given by the K constant, not by K + X register)

On x86_64, this saves a few bytes of text :

# size net/core/filter.o.*
   text	   data	    bss	    dec	    hex	filename
   4864	      0	      0	   4864	   1300	net/core/filter.o.new
   4944	      0	      0	   4944	   1350	net/core/filter.o.old
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent cb8f4048
...@@ -85,6 +85,17 @@ enum { ...@@ -85,6 +85,17 @@ enum {
BPF_S_JMP_JGT_X, BPF_S_JMP_JGT_X,
BPF_S_JMP_JSET_K, BPF_S_JMP_JSET_K,
BPF_S_JMP_JSET_X, BPF_S_JMP_JSET_X,
/* Ancillary data */
BPF_S_ANC_PROTOCOL,
BPF_S_ANC_PKTTYPE,
BPF_S_ANC_IFINDEX,
BPF_S_ANC_NLATTR,
BPF_S_ANC_NLATTR_NEST,
BPF_S_ANC_MARK,
BPF_S_ANC_QUEUE,
BPF_S_ANC_HATYPE,
BPF_S_ANC_RXHASH,
BPF_S_ANC_CPU,
}; };
/* No hurry in this branch */ /* No hurry in this branch */
...@@ -107,11 +118,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k, ...@@ -107,11 +118,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
{ {
if (k >= 0) if (k >= 0)
return skb_header_pointer(skb, k, size, buffer); return skb_header_pointer(skb, k, size, buffer);
else { return __load_pointer(skb, k, size);
if (k >= SKF_AD_OFF)
return NULL;
return __load_pointer(skb, k, size);
}
} }
/** /**
...@@ -269,7 +276,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb, ...@@ -269,7 +276,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
A = get_unaligned_be32(ptr); A = get_unaligned_be32(ptr);
continue; continue;
} }
break; return 0;
case BPF_S_LD_H_ABS: case BPF_S_LD_H_ABS:
k = K; k = K;
load_h: load_h:
...@@ -278,7 +285,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb, ...@@ -278,7 +285,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
A = get_unaligned_be16(ptr); A = get_unaligned_be16(ptr);
continue; continue;
} }
break; return 0;
case BPF_S_LD_B_ABS: case BPF_S_LD_B_ABS:
k = K; k = K;
load_b: load_b:
...@@ -287,7 +294,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb, ...@@ -287,7 +294,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
A = *(u8 *)ptr; A = *(u8 *)ptr;
continue; continue;
} }
break; return 0;
case BPF_S_LD_W_LEN: case BPF_S_LD_W_LEN:
A = skb->len; A = skb->len;
continue; continue;
...@@ -338,45 +345,35 @@ unsigned int sk_run_filter(const struct sk_buff *skb, ...@@ -338,45 +345,35 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
case BPF_S_STX: case BPF_S_STX:
mem[K] = X; mem[K] = X;
continue; continue;
default: case BPF_S_ANC_PROTOCOL:
WARN_ON(1);
return 0;
}
/*
* Handle ancillary data, which are impossible
* (or very difficult) to get parsing packet contents.
*/
switch (k-SKF_AD_OFF) {
case SKF_AD_PROTOCOL:
A = ntohs(skb->protocol); A = ntohs(skb->protocol);
continue; continue;
case SKF_AD_PKTTYPE: case BPF_S_ANC_PKTTYPE:
A = skb->pkt_type; A = skb->pkt_type;
continue; continue;
case SKF_AD_IFINDEX: case BPF_S_ANC_IFINDEX:
if (!skb->dev) if (!skb->dev)
return 0; return 0;
A = skb->dev->ifindex; A = skb->dev->ifindex;
continue; continue;
case SKF_AD_MARK: case BPF_S_ANC_MARK:
A = skb->mark; A = skb->mark;
continue; continue;
case SKF_AD_QUEUE: case BPF_S_ANC_QUEUE:
A = skb->queue_mapping; A = skb->queue_mapping;
continue; continue;
case SKF_AD_HATYPE: case BPF_S_ANC_HATYPE:
if (!skb->dev) if (!skb->dev)
return 0; return 0;
A = skb->dev->type; A = skb->dev->type;
continue; continue;
case SKF_AD_RXHASH: case BPF_S_ANC_RXHASH:
A = skb->rxhash; A = skb->rxhash;
continue; continue;
case SKF_AD_CPU: case BPF_S_ANC_CPU:
A = raw_smp_processor_id(); A = raw_smp_processor_id();
continue; continue;
case SKF_AD_NLATTR: { case BPF_S_ANC_NLATTR: {
struct nlattr *nla; struct nlattr *nla;
if (skb_is_nonlinear(skb)) if (skb_is_nonlinear(skb))
...@@ -392,7 +389,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb, ...@@ -392,7 +389,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
A = 0; A = 0;
continue; continue;
} }
case SKF_AD_NLATTR_NEST: { case BPF_S_ANC_NLATTR_NEST: {
struct nlattr *nla; struct nlattr *nla;
if (skb_is_nonlinear(skb)) if (skb_is_nonlinear(skb))
...@@ -412,6 +409,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb, ...@@ -412,6 +409,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
continue; continue;
} }
default: default:
WARN_ON(1);
return 0; return 0;
} }
} }
...@@ -600,6 +598,24 @@ int sk_chk_filter(struct sock_filter *filter, int flen) ...@@ -600,6 +598,24 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
pc + ftest->jf + 1 >= flen) pc + ftest->jf + 1 >= flen)
return -EINVAL; return -EINVAL;
break; break;
case BPF_S_LD_W_ABS:
case BPF_S_LD_H_ABS:
case BPF_S_LD_B_ABS:
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
code = BPF_S_ANC_##CODE; \
break
switch (ftest->k) {
ANCILLARY(PROTOCOL);
ANCILLARY(PKTTYPE);
ANCILLARY(IFINDEX);
ANCILLARY(NLATTR);
ANCILLARY(NLATTR_NEST);
ANCILLARY(MARK);
ANCILLARY(QUEUE);
ANCILLARY(HATYPE);
ANCILLARY(RXHASH);
ANCILLARY(CPU);
}
} }
ftest->code = code; ftest->code = code;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment