Commit 69dda13f authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2019-05-13

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix out of bounds backwards jumps due to a bug in dead code
   removal, from Daniel.

2) Fix libbpf users by detecting unsupported BTF kernel features
   and sanitize them before load, from Andrii.

3) Fix undefined behavior in narrow load handling of context
   fields, from Krzesimir.

4) Various BPF uapi header doc/man page fixes, from Quentin.

5) Misc .gitignore fixups to exclude built files, from Kelsey.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3285a9aa e2f7fc0a
...@@ -629,7 +629,7 @@ union bpf_attr { ...@@ -629,7 +629,7 @@ union bpf_attr {
* **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
* **->swhash** and *skb*\ **->l4hash** to 0). * **->swhash** and *skb*\ **->l4hash** to 0).
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -654,7 +654,7 @@ union bpf_attr { ...@@ -654,7 +654,7 @@ union bpf_attr {
* flexibility and can handle sizes larger than 2 or 4 for the * flexibility and can handle sizes larger than 2 or 4 for the
* checksum to update. * checksum to update.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -686,7 +686,7 @@ union bpf_attr { ...@@ -686,7 +686,7 @@ union bpf_attr {
* flexibility and can handle sizes larger than 2 or 4 for the * flexibility and can handle sizes larger than 2 or 4 for the
* checksum to update. * checksum to update.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -741,7 +741,7 @@ union bpf_attr { ...@@ -741,7 +741,7 @@ union bpf_attr {
* efficient, but it is handled through an action code where the * efficient, but it is handled through an action code where the
* redirection happens only after the eBPF program has returned. * redirection happens only after the eBPF program has returned.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -806,7 +806,7 @@ union bpf_attr { ...@@ -806,7 +806,7 @@ union bpf_attr {
* **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
* be **ETH_P_8021Q**. * be **ETH_P_8021Q**.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -818,7 +818,7 @@ union bpf_attr { ...@@ -818,7 +818,7 @@ union bpf_attr {
* Description * Description
* Pop a VLAN header from the packet associated to *skb*. * Pop a VLAN header from the packet associated to *skb*.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -1168,7 +1168,7 @@ union bpf_attr { ...@@ -1168,7 +1168,7 @@ union bpf_attr {
* All values for *flags* are reserved for future usage, and must * All values for *flags* are reserved for future usage, and must
* be left at zero. * be left at zero.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -1281,7 +1281,7 @@ union bpf_attr { ...@@ -1281,7 +1281,7 @@ union bpf_attr {
* implicitly linearizes, unclones and drops offloads from the * implicitly linearizes, unclones and drops offloads from the
* *skb*. * *skb*.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -1317,7 +1317,7 @@ union bpf_attr { ...@@ -1317,7 +1317,7 @@ union bpf_attr {
* **bpf_skb_pull_data()** to effectively unclone the *skb* from * **bpf_skb_pull_data()** to effectively unclone the *skb* from
* the very beginning in case it is indeed cloned. * the very beginning in case it is indeed cloned.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -1369,7 +1369,7 @@ union bpf_attr { ...@@ -1369,7 +1369,7 @@ union bpf_attr {
* All values for *flags* are reserved for future usage, and must * All values for *flags* are reserved for future usage, and must
* be left at zero. * be left at zero.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -1384,7 +1384,7 @@ union bpf_attr { ...@@ -1384,7 +1384,7 @@ union bpf_attr {
* can be used to prepare the packet for pushing or popping * can be used to prepare the packet for pushing or popping
* headers. * headers.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -1518,20 +1518,20 @@ union bpf_attr { ...@@ -1518,20 +1518,20 @@ union bpf_attr {
* * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size. * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
* Adjusting mss in this way is not allowed for datagrams. * Adjusting mss in this way is not allowed for datagrams.
* *
* * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 **: * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**,
* * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 **: * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**:
* Any new space is reserved to hold a tunnel header. * Any new space is reserved to hold a tunnel header.
* Configure skb offsets and other fields accordingly. * Configure skb offsets and other fields accordingly.
* *
* * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE **: * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**,
* * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP **: * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**:
* Use with ENCAP_L3 flags to further specify the tunnel type. * Use with ENCAP_L3 flags to further specify the tunnel type.
* *
* * **BPF_F_ADJ_ROOM_ENCAP_L2(len) **: * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*):
* Use with ENCAP_L3/L4 flags to further specify the tunnel * Use with ENCAP_L3/L4 flags to further specify the tunnel
* type; **len** is the length of the inner MAC header. * type; *len* is the length of the inner MAC header.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -1610,7 +1610,7 @@ union bpf_attr { ...@@ -1610,7 +1610,7 @@ union bpf_attr {
* more flexibility as the user is free to store whatever meta * more flexibility as the user is free to store whatever meta
* data they need. * data they need.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -1852,7 +1852,7 @@ union bpf_attr { ...@@ -1852,7 +1852,7 @@ union bpf_attr {
* copied if necessary (i.e. if data was not linear and if start * copied if necessary (i.e. if data was not linear and if start
* and end pointers do not point to the same chunk). * and end pointers do not point to the same chunk).
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -1886,7 +1886,7 @@ union bpf_attr { ...@@ -1886,7 +1886,7 @@ union bpf_attr {
* only possible to shrink the packet as of this writing, * only possible to shrink the packet as of this writing,
* therefore *delta* must be a negative integer. * therefore *delta* must be a negative integer.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -2061,18 +2061,18 @@ union bpf_attr { ...@@ -2061,18 +2061,18 @@ union bpf_attr {
* **BPF_LWT_ENCAP_IP** * **BPF_LWT_ENCAP_IP**
* IP encapsulation (GRE/GUE/IPIP/etc). The outer header * IP encapsulation (GRE/GUE/IPIP/etc). The outer header
* must be IPv4 or IPv6, followed by zero or more * must be IPv4 or IPv6, followed by zero or more
* additional headers, up to LWT_BPF_MAX_HEADROOM total * additional headers, up to **LWT_BPF_MAX_HEADROOM**
* bytes in all prepended headers. Please note that * total bytes in all prepended headers. Please note that
* if skb_is_gso(skb) is true, no more than two headers * if **skb_is_gso**\ (*skb*) is true, no more than two
* can be prepended, and the inner header, if present, * headers can be prepended, and the inner header, if
* should be either GRE or UDP/GUE. * present, should be either GRE or UDP/GUE.
* *
* BPF_LWT_ENCAP_SEG6*** types can be called by bpf programs of * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs
* type BPF_PROG_TYPE_LWT_IN; BPF_LWT_ENCAP_IP type can be called * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can
* by bpf programs of types BPF_PROG_TYPE_LWT_IN and * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and
* BPF_PROG_TYPE_LWT_XMIT. * **BPF_PROG_TYPE_LWT_XMIT**.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -2087,7 +2087,7 @@ union bpf_attr { ...@@ -2087,7 +2087,7 @@ union bpf_attr {
* inside the outermost IPv6 Segment Routing Header can be * inside the outermost IPv6 Segment Routing Header can be
* modified through this helper. * modified through this helper.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -2103,7 +2103,7 @@ union bpf_attr { ...@@ -2103,7 +2103,7 @@ union bpf_attr {
* after the segments are accepted. *delta* can be as well * after the segments are accepted. *delta* can be as well
* positive (growing) as negative (shrinking). * positive (growing) as negative (shrinking).
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -2126,13 +2126,13 @@ union bpf_attr { ...@@ -2126,13 +2126,13 @@ union bpf_attr {
* Type of *param*: **int**. * Type of *param*: **int**.
* **SEG6_LOCAL_ACTION_END_B6** * **SEG6_LOCAL_ACTION_END_B6**
* End.B6 action: Endpoint bound to an SRv6 policy. * End.B6 action: Endpoint bound to an SRv6 policy.
* Type of param: **struct ipv6_sr_hdr**. * Type of *param*: **struct ipv6_sr_hdr**.
* **SEG6_LOCAL_ACTION_END_B6_ENCAP** * **SEG6_LOCAL_ACTION_END_B6_ENCAP**
* End.B6.Encap action: Endpoint bound to an SRv6 * End.B6.Encap action: Endpoint bound to an SRv6
* encapsulation policy. * encapsulation policy.
* Type of param: **struct ipv6_sr_hdr**. * Type of *param*: **struct ipv6_sr_hdr**.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -2285,7 +2285,8 @@ union bpf_attr { ...@@ -2285,7 +2285,8 @@ union bpf_attr {
* Return * Return
* Pointer to **struct bpf_sock**, or **NULL** in case of failure. * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
* For sockets with reuseport option, the **struct bpf_sock** * For sockets with reuseport option, the **struct bpf_sock**
* result is from **reuse->socks**\ [] using the hash of the tuple. * result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple.
* *
* struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description * Description
...@@ -2321,7 +2322,8 @@ union bpf_attr { ...@@ -2321,7 +2322,8 @@ union bpf_attr {
* Return * Return
* Pointer to **struct bpf_sock**, or **NULL** in case of failure. * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
* For sockets with reuseport option, the **struct bpf_sock** * For sockets with reuseport option, the **struct bpf_sock**
* result is from **reuse->socks**\ [] using the hash of the tuple. * result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple.
* *
* int bpf_sk_release(struct bpf_sock *sock) * int bpf_sk_release(struct bpf_sock *sock)
* Description * Description
...@@ -2490,31 +2492,34 @@ union bpf_attr { ...@@ -2490,31 +2492,34 @@ union bpf_attr {
* network namespace *netns*. The return value must be checked, * network namespace *netns*. The return value must be checked,
* and if non-**NULL**, released via **bpf_sk_release**\ (). * and if non-**NULL**, released via **bpf_sk_release**\ ().
* *
* This function is identical to bpf_sk_lookup_tcp, except that it * This function is identical to **bpf_sk_lookup_tcp**\ (), except
* also returns timewait or request sockets. Use bpf_sk_fullsock * that it also returns timewait or request sockets. Use
* or bpf_tcp_socket to access the full structure. * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
* full structure.
* *
* This helper is available only if the kernel was compiled with * This helper is available only if the kernel was compiled with
* **CONFIG_NET** configuration option. * **CONFIG_NET** configuration option.
* Return * Return
* Pointer to **struct bpf_sock**, or **NULL** in case of failure. * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
* For sockets with reuseport option, the **struct bpf_sock** * For sockets with reuseport option, the **struct bpf_sock**
* result is from **reuse->socks**\ [] using the hash of the tuple. * result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple.
* *
* int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
* Description * Description
* Check whether iph and th contain a valid SYN cookie ACK for * Check whether *iph* and *th* contain a valid SYN cookie ACK for
* the listening socket in sk. * the listening socket in *sk*.
* *
* iph points to the start of the IPv4 or IPv6 header, while * *iph* points to the start of the IPv4 or IPv6 header, while
* iph_len contains sizeof(struct iphdr) or sizeof(struct ip6hdr). * *iph_len* contains **sizeof**\ (**struct iphdr**) or
* **sizeof**\ (**struct ip6hdr**).
* *
* th points to the start of the TCP header, while th_len contains * *th* points to the start of the TCP header, while *th_len*
* sizeof(struct tcphdr). * contains **sizeof**\ (**struct tcphdr**).
* *
* Return * Return
* 0 if iph and th are a valid SYN cookie ACK, or a negative error * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
* otherwise. * error otherwise.
* *
* int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
* Description * Description
...@@ -2592,17 +2597,17 @@ union bpf_attr { ...@@ -2592,17 +2597,17 @@ union bpf_attr {
* and save the result in *res*. * and save the result in *res*.
* *
* The string may begin with an arbitrary amount of white space * The string may begin with an arbitrary amount of white space
* (as determined by isspace(3)) followed by a single optional '-' * (as determined by **isspace**\ (3)) followed by a single
* sign. * optional '**-**' sign.
* *
* Five least significant bits of *flags* encode base, other bits * Five least significant bits of *flags* encode base, other bits
* are currently unused. * are currently unused.
* *
* Base must be either 8, 10, 16 or 0 to detect it automatically * Base must be either 8, 10, 16 or 0 to detect it automatically
* similar to user space strtol(3). * similar to user space **strtol**\ (3).
* Return * Return
* Number of characters consumed on success. Must be positive but * Number of characters consumed on success. Must be positive but
* no more than buf_len. * no more than *buf_len*.
* *
* **-EINVAL** if no valid digits were found or unsupported base * **-EINVAL** if no valid digits were found or unsupported base
* was provided. * was provided.
...@@ -2616,16 +2621,16 @@ union bpf_attr { ...@@ -2616,16 +2621,16 @@ union bpf_attr {
* given base and save the result in *res*. * given base and save the result in *res*.
* *
* The string may begin with an arbitrary amount of white space * The string may begin with an arbitrary amount of white space
* (as determined by isspace(3)). * (as determined by **isspace**\ (3)).
* *
* Five least significant bits of *flags* encode base, other bits * Five least significant bits of *flags* encode base, other bits
* are currently unused. * are currently unused.
* *
* Base must be either 8, 10, 16 or 0 to detect it automatically * Base must be either 8, 10, 16 or 0 to detect it automatically
* similar to user space strtoul(3). * similar to user space **strtoul**\ (3).
* Return * Return
* Number of characters consumed on success. Must be positive but * Number of characters consumed on success. Must be positive but
* no more than buf_len. * no more than *buf_len*.
* *
* **-EINVAL** if no valid digits were found or unsupported base * **-EINVAL** if no valid digits were found or unsupported base
* was provided. * was provided.
...@@ -2634,26 +2639,26 @@ union bpf_attr { ...@@ -2634,26 +2639,26 @@ union bpf_attr {
* *
* void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags) * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags)
* Description * Description
* Get a bpf-local-storage from a sk. * Get a bpf-local-storage from a *sk*.
* *
* Logically, it could be thought of getting the value from * Logically, it could be thought of getting the value from
* a *map* with *sk* as the **key**. From this * a *map* with *sk* as the **key**. From this
* perspective, the usage is not much different from * perspective, the usage is not much different from
* **bpf_map_lookup_elem(map, &sk)** except this * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this
* helper enforces the key must be a **bpf_fullsock()** * helper enforces the key must be a full socket and the map must
* and the map must be a BPF_MAP_TYPE_SK_STORAGE also. * be a **BPF_MAP_TYPE_SK_STORAGE** also.
* *
* Underneath, the value is stored locally at *sk* instead of * Underneath, the value is stored locally at *sk* instead of
* the map. The *map* is used as the bpf-local-storage **type**. * the *map*. The *map* is used as the bpf-local-storage
* The bpf-local-storage **type** (i.e. the *map*) is searched * "type". The bpf-local-storage "type" (i.e. the *map*) is
* against all bpf-local-storages residing at sk. * searched against all bpf-local-storages residing at *sk*.
* *
* An optional *flags* (BPF_SK_STORAGE_GET_F_CREATE) can be * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
* used such that a new bpf-local-storage will be * used such that a new bpf-local-storage will be
* created if one does not exist. *value* can be used * created if one does not exist. *value* can be used
* together with BPF_SK_STORAGE_GET_F_CREATE to specify * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify
* the initial value of a bpf-local-storage. If *value* is * the initial value of a bpf-local-storage. If *value* is
* NULL, the new bpf-local-storage will be zero initialized. * **NULL**, the new bpf-local-storage will be zero initialized.
* Return * Return
* A bpf-local-storage pointer is returned on success. * A bpf-local-storage pointer is returned on success.
* *
...@@ -2662,7 +2667,7 @@ union bpf_attr { ...@@ -2662,7 +2667,7 @@ union bpf_attr {
* *
* int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
* Description * Description
* Delete a bpf-local-storage from a sk. * Delete a bpf-local-storage from a *sk*.
* Return * Return
* 0 on success. * 0 on success.
* *
......
...@@ -338,7 +338,7 @@ int bpf_prog_calc_tag(struct bpf_prog *fp) ...@@ -338,7 +338,7 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
} }
static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old, static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
s32 end_new, u32 curr, const bool probe_pass) s32 end_new, s32 curr, const bool probe_pass)
{ {
const s64 imm_min = S32_MIN, imm_max = S32_MAX; const s64 imm_min = S32_MIN, imm_max = S32_MAX;
s32 delta = end_new - end_old; s32 delta = end_new - end_old;
...@@ -356,7 +356,7 @@ static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old, ...@@ -356,7 +356,7 @@ static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
} }
static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old, static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
s32 end_new, u32 curr, const bool probe_pass) s32 end_new, s32 curr, const bool probe_pass)
{ {
const s32 off_min = S16_MIN, off_max = S16_MAX; const s32 off_min = S16_MIN, off_max = S16_MAX;
s32 delta = end_new - end_old; s32 delta = end_new - end_old;
......
...@@ -7599,7 +7599,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) ...@@ -7599,7 +7599,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
insn->dst_reg, insn->dst_reg,
shift); shift);
insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
(1 << size * 8) - 1); (1ULL << size * 8) - 1);
} }
} }
......
#!/usr/bin/python3 #!/usr/bin/python3
# SPDX-License-Identifier: GPL-2.0-only # SPDX-License-Identifier: GPL-2.0-only
# #
# Copyright (C) 2018 Netronome Systems, Inc. # Copyright (C) 2018-2019 Netronome Systems, Inc.
# In case user attempts to run with Python 2. # In case user attempts to run with Python 2.
from __future__ import print_function from __future__ import print_function
...@@ -39,7 +39,7 @@ class Helper(object): ...@@ -39,7 +39,7 @@ class Helper(object):
Break down helper function protocol into smaller chunks: return type, Break down helper function protocol into smaller chunks: return type,
name, distincts arguments. name, distincts arguments.
""" """
arg_re = re.compile('((const )?(struct )?(\w+|...))( (\**)(\w+))?$') arg_re = re.compile('((\w+ )*?(\w+|...))( (\**)(\w+))?$')
res = {} res = {}
proto_re = re.compile('(.+) (\**)(\w+)\(((([^,]+)(, )?){1,5})\)$') proto_re = re.compile('(.+) (\**)(\w+)\(((([^,]+)(, )?){1,5})\)$')
...@@ -54,8 +54,8 @@ class Helper(object): ...@@ -54,8 +54,8 @@ class Helper(object):
capture = arg_re.match(a) capture = arg_re.match(a)
res['args'].append({ res['args'].append({
'type' : capture.group(1), 'type' : capture.group(1),
'star' : capture.group(6), 'star' : capture.group(5),
'name' : capture.group(7) 'name' : capture.group(6)
}) })
return res return res
......
...@@ -629,7 +629,7 @@ union bpf_attr { ...@@ -629,7 +629,7 @@ union bpf_attr {
* **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
* **->swhash** and *skb*\ **->l4hash** to 0). * **->swhash** and *skb*\ **->l4hash** to 0).
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -654,7 +654,7 @@ union bpf_attr { ...@@ -654,7 +654,7 @@ union bpf_attr {
* flexibility and can handle sizes larger than 2 or 4 for the * flexibility and can handle sizes larger than 2 or 4 for the
* checksum to update. * checksum to update.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -686,7 +686,7 @@ union bpf_attr { ...@@ -686,7 +686,7 @@ union bpf_attr {
* flexibility and can handle sizes larger than 2 or 4 for the * flexibility and can handle sizes larger than 2 or 4 for the
* checksum to update. * checksum to update.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -741,7 +741,7 @@ union bpf_attr { ...@@ -741,7 +741,7 @@ union bpf_attr {
* efficient, but it is handled through an action code where the * efficient, but it is handled through an action code where the
* redirection happens only after the eBPF program has returned. * redirection happens only after the eBPF program has returned.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -806,7 +806,7 @@ union bpf_attr { ...@@ -806,7 +806,7 @@ union bpf_attr {
* **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
* be **ETH_P_8021Q**. * be **ETH_P_8021Q**.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -818,7 +818,7 @@ union bpf_attr { ...@@ -818,7 +818,7 @@ union bpf_attr {
* Description * Description
* Pop a VLAN header from the packet associated to *skb*. * Pop a VLAN header from the packet associated to *skb*.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -1168,7 +1168,7 @@ union bpf_attr { ...@@ -1168,7 +1168,7 @@ union bpf_attr {
* All values for *flags* are reserved for future usage, and must * All values for *flags* are reserved for future usage, and must
* be left at zero. * be left at zero.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -1281,7 +1281,7 @@ union bpf_attr { ...@@ -1281,7 +1281,7 @@ union bpf_attr {
* implicitly linearizes, unclones and drops offloads from the * implicitly linearizes, unclones and drops offloads from the
* *skb*. * *skb*.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -1317,7 +1317,7 @@ union bpf_attr { ...@@ -1317,7 +1317,7 @@ union bpf_attr {
* **bpf_skb_pull_data()** to effectively unclone the *skb* from * **bpf_skb_pull_data()** to effectively unclone the *skb* from
* the very beginning in case it is indeed cloned. * the very beginning in case it is indeed cloned.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -1369,7 +1369,7 @@ union bpf_attr { ...@@ -1369,7 +1369,7 @@ union bpf_attr {
* All values for *flags* are reserved for future usage, and must * All values for *flags* are reserved for future usage, and must
* be left at zero. * be left at zero.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -1384,7 +1384,7 @@ union bpf_attr { ...@@ -1384,7 +1384,7 @@ union bpf_attr {
* can be used to prepare the packet for pushing or popping * can be used to prepare the packet for pushing or popping
* headers. * headers.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -1518,20 +1518,20 @@ union bpf_attr { ...@@ -1518,20 +1518,20 @@ union bpf_attr {
* * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size. * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
* Adjusting mss in this way is not allowed for datagrams. * Adjusting mss in this way is not allowed for datagrams.
* *
* * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 **: * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**,
* * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 **: * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**:
* Any new space is reserved to hold a tunnel header. * Any new space is reserved to hold a tunnel header.
* Configure skb offsets and other fields accordingly. * Configure skb offsets and other fields accordingly.
* *
* * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE **: * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**,
* * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP **: * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**:
* Use with ENCAP_L3 flags to further specify the tunnel type. * Use with ENCAP_L3 flags to further specify the tunnel type.
* *
* * **BPF_F_ADJ_ROOM_ENCAP_L2(len) **: * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*):
* Use with ENCAP_L3/L4 flags to further specify the tunnel * Use with ENCAP_L3/L4 flags to further specify the tunnel
* type; **len** is the length of the inner MAC header. * type; *len* is the length of the inner MAC header.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -1610,7 +1610,7 @@ union bpf_attr { ...@@ -1610,7 +1610,7 @@ union bpf_attr {
* more flexibility as the user is free to store whatever meta * more flexibility as the user is free to store whatever meta
* data they need. * data they need.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -1852,7 +1852,7 @@ union bpf_attr { ...@@ -1852,7 +1852,7 @@ union bpf_attr {
* copied if necessary (i.e. if data was not linear and if start * copied if necessary (i.e. if data was not linear and if start
* and end pointers do not point to the same chunk). * and end pointers do not point to the same chunk).
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -1886,7 +1886,7 @@ union bpf_attr { ...@@ -1886,7 +1886,7 @@ union bpf_attr {
* only possible to shrink the packet as of this writing, * only possible to shrink the packet as of this writing,
* therefore *delta* must be a negative integer. * therefore *delta* must be a negative integer.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -2061,18 +2061,18 @@ union bpf_attr { ...@@ -2061,18 +2061,18 @@ union bpf_attr {
* **BPF_LWT_ENCAP_IP** * **BPF_LWT_ENCAP_IP**
* IP encapsulation (GRE/GUE/IPIP/etc). The outer header * IP encapsulation (GRE/GUE/IPIP/etc). The outer header
* must be IPv4 or IPv6, followed by zero or more * must be IPv4 or IPv6, followed by zero or more
* additional headers, up to LWT_BPF_MAX_HEADROOM total * additional headers, up to **LWT_BPF_MAX_HEADROOM**
* bytes in all prepended headers. Please note that * total bytes in all prepended headers. Please note that
* if skb_is_gso(skb) is true, no more than two headers * if **skb_is_gso**\ (*skb*) is true, no more than two
* can be prepended, and the inner header, if present, * headers can be prepended, and the inner header, if
* should be either GRE or UDP/GUE. * present, should be either GRE or UDP/GUE.
* *
* BPF_LWT_ENCAP_SEG6*** types can be called by bpf programs of * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs
* type BPF_PROG_TYPE_LWT_IN; BPF_LWT_ENCAP_IP type can be called * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can
* by bpf programs of types BPF_PROG_TYPE_LWT_IN and * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and
* BPF_PROG_TYPE_LWT_XMIT. * **BPF_PROG_TYPE_LWT_XMIT**.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -2087,7 +2087,7 @@ union bpf_attr { ...@@ -2087,7 +2087,7 @@ union bpf_attr {
* inside the outermost IPv6 Segment Routing Header can be * inside the outermost IPv6 Segment Routing Header can be
* modified through this helper. * modified through this helper.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -2103,7 +2103,7 @@ union bpf_attr { ...@@ -2103,7 +2103,7 @@ union bpf_attr {
* after the segments are accepted. *delta* can be as well * after the segments are accepted. *delta* can be as well
* positive (growing) as negative (shrinking). * positive (growing) as negative (shrinking).
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -2126,13 +2126,13 @@ union bpf_attr { ...@@ -2126,13 +2126,13 @@ union bpf_attr {
* Type of *param*: **int**. * Type of *param*: **int**.
* **SEG6_LOCAL_ACTION_END_B6** * **SEG6_LOCAL_ACTION_END_B6**
* End.B6 action: Endpoint bound to an SRv6 policy. * End.B6 action: Endpoint bound to an SRv6 policy.
* Type of param: **struct ipv6_sr_hdr**. * Type of *param*: **struct ipv6_sr_hdr**.
* **SEG6_LOCAL_ACTION_END_B6_ENCAP** * **SEG6_LOCAL_ACTION_END_B6_ENCAP**
* End.B6.Encap action: Endpoint bound to an SRv6 * End.B6.Encap action: Endpoint bound to an SRv6
* encapsulation policy. * encapsulation policy.
* Type of param: **struct ipv6_sr_hdr**. * Type of *param*: **struct ipv6_sr_hdr**.
* *
* A call to this helper is susceptible to change the underlaying * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers * packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be * previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with * performed again, if the helper is used in combination with
...@@ -2285,7 +2285,8 @@ union bpf_attr { ...@@ -2285,7 +2285,8 @@ union bpf_attr {
* Return * Return
* Pointer to **struct bpf_sock**, or **NULL** in case of failure. * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
* For sockets with reuseport option, the **struct bpf_sock** * For sockets with reuseport option, the **struct bpf_sock**
* result is from **reuse->socks**\ [] using the hash of the tuple. * result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple.
* *
* struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description * Description
...@@ -2321,7 +2322,8 @@ union bpf_attr { ...@@ -2321,7 +2322,8 @@ union bpf_attr {
* Return * Return
* Pointer to **struct bpf_sock**, or **NULL** in case of failure. * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
* For sockets with reuseport option, the **struct bpf_sock** * For sockets with reuseport option, the **struct bpf_sock**
* result is from **reuse->socks**\ [] using the hash of the tuple. * result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple.
* *
* int bpf_sk_release(struct bpf_sock *sock) * int bpf_sk_release(struct bpf_sock *sock)
* Description * Description
...@@ -2490,31 +2492,34 @@ union bpf_attr { ...@@ -2490,31 +2492,34 @@ union bpf_attr {
* network namespace *netns*. The return value must be checked, * network namespace *netns*. The return value must be checked,
* and if non-**NULL**, released via **bpf_sk_release**\ (). * and if non-**NULL**, released via **bpf_sk_release**\ ().
* *
* This function is identical to bpf_sk_lookup_tcp, except that it * This function is identical to **bpf_sk_lookup_tcp**\ (), except
* also returns timewait or request sockets. Use bpf_sk_fullsock * that it also returns timewait or request sockets. Use
* or bpf_tcp_socket to access the full structure. * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
* full structure.
* *
* This helper is available only if the kernel was compiled with * This helper is available only if the kernel was compiled with
* **CONFIG_NET** configuration option. * **CONFIG_NET** configuration option.
* Return * Return
* Pointer to **struct bpf_sock**, or **NULL** in case of failure. * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
* For sockets with reuseport option, the **struct bpf_sock** * For sockets with reuseport option, the **struct bpf_sock**
* result is from **reuse->socks**\ [] using the hash of the tuple. * result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple.
* *
* int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
* Description * Description
* Check whether iph and th contain a valid SYN cookie ACK for * Check whether *iph* and *th* contain a valid SYN cookie ACK for
* the listening socket in sk. * the listening socket in *sk*.
* *
* iph points to the start of the IPv4 or IPv6 header, while * *iph* points to the start of the IPv4 or IPv6 header, while
* iph_len contains sizeof(struct iphdr) or sizeof(struct ip6hdr). * *iph_len* contains **sizeof**\ (**struct iphdr**) or
* **sizeof**\ (**struct ip6hdr**).
* *
* th points to the start of the TCP header, while th_len contains * *th* points to the start of the TCP header, while *th_len*
* sizeof(struct tcphdr). * contains **sizeof**\ (**struct tcphdr**).
* *
* Return * Return
* 0 if iph and th are a valid SYN cookie ACK, or a negative error * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
* otherwise. * error otherwise.
* *
* int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
* Description * Description
...@@ -2592,17 +2597,17 @@ union bpf_attr { ...@@ -2592,17 +2597,17 @@ union bpf_attr {
* and save the result in *res*. * and save the result in *res*.
* *
* The string may begin with an arbitrary amount of white space * The string may begin with an arbitrary amount of white space
* (as determined by isspace(3)) followed by a single optional '-' * (as determined by **isspace**\ (3)) followed by a single
* sign. * optional '**-**' sign.
* *
* Five least significant bits of *flags* encode base, other bits * Five least significant bits of *flags* encode base, other bits
* are currently unused. * are currently unused.
* *
* Base must be either 8, 10, 16 or 0 to detect it automatically * Base must be either 8, 10, 16 or 0 to detect it automatically
* similar to user space strtol(3). * similar to user space **strtol**\ (3).
* Return * Return
* Number of characters consumed on success. Must be positive but * Number of characters consumed on success. Must be positive but
* no more than buf_len. * no more than *buf_len*.
* *
* **-EINVAL** if no valid digits were found or unsupported base * **-EINVAL** if no valid digits were found or unsupported base
* was provided. * was provided.
...@@ -2616,16 +2621,16 @@ union bpf_attr { ...@@ -2616,16 +2621,16 @@ union bpf_attr {
* given base and save the result in *res*. * given base and save the result in *res*.
* *
* The string may begin with an arbitrary amount of white space * The string may begin with an arbitrary amount of white space
* (as determined by isspace(3)). * (as determined by **isspace**\ (3)).
* *
* Five least significant bits of *flags* encode base, other bits * Five least significant bits of *flags* encode base, other bits
* are currently unused. * are currently unused.
* *
* Base must be either 8, 10, 16 or 0 to detect it automatically * Base must be either 8, 10, 16 or 0 to detect it automatically
* similar to user space strtoul(3). * similar to user space **strtoul**\ (3).
* Return * Return
* Number of characters consumed on success. Must be positive but * Number of characters consumed on success. Must be positive but
* no more than buf_len. * no more than *buf_len*.
* *
* **-EINVAL** if no valid digits were found or unsupported base * **-EINVAL** if no valid digits were found or unsupported base
* was provided. * was provided.
...@@ -2634,26 +2639,26 @@ union bpf_attr { ...@@ -2634,26 +2639,26 @@ union bpf_attr {
* *
* void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags) * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags)
* Description * Description
* Get a bpf-local-storage from a sk. * Get a bpf-local-storage from a *sk*.
* *
* Logically, it could be thought of getting the value from * Logically, it could be thought of getting the value from
* a *map* with *sk* as the **key**. From this * a *map* with *sk* as the **key**. From this
* perspective, the usage is not much different from * perspective, the usage is not much different from
* **bpf_map_lookup_elem(map, &sk)** except this * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this
* helper enforces the key must be a **bpf_fullsock()** * helper enforces the key must be a full socket and the map must
* and the map must be a BPF_MAP_TYPE_SK_STORAGE also. * be a **BPF_MAP_TYPE_SK_STORAGE** also.
* *
* Underneath, the value is stored locally at *sk* instead of * Underneath, the value is stored locally at *sk* instead of
* the map. The *map* is used as the bpf-local-storage **type**. * the *map*. The *map* is used as the bpf-local-storage
* The bpf-local-storage **type** (i.e. the *map*) is searched * "type". The bpf-local-storage "type" (i.e. the *map*) is
* against all bpf-local-storages residing at sk. * searched against all bpf-local-storages residing at *sk*.
* *
* An optional *flags* (BPF_SK_STORAGE_GET_F_CREATE) can be * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
* used such that a new bpf-local-storage will be * used such that a new bpf-local-storage will be
* created if one does not exist. *value* can be used * created if one does not exist. *value* can be used
* together with BPF_SK_STORAGE_GET_F_CREATE to specify * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify
* the initial value of a bpf-local-storage. If *value* is * the initial value of a bpf-local-storage. If *value* is
* NULL, the new bpf-local-storage will be zero initialized. * **NULL**, the new bpf-local-storage will be zero initialized.
* Return * Return
* A bpf-local-storage pointer is returned on success. * A bpf-local-storage pointer is returned on success.
* *
...@@ -2662,7 +2667,7 @@ union bpf_attr { ...@@ -2662,7 +2667,7 @@ union bpf_attr {
* *
* int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
* Description * Description
* Delete a bpf-local-storage from a sk. * Delete a bpf-local-storage from a *sk*.
* Return * Return
* 0 on success. * 0 on success.
* *
......
...@@ -44,6 +44,7 @@ ...@@ -44,6 +44,7 @@
#include "btf.h" #include "btf.h"
#include "str_error.h" #include "str_error.h"
#include "libbpf_util.h" #include "libbpf_util.h"
#include "libbpf_internal.h"
#ifndef EM_BPF #ifndef EM_BPF
#define EM_BPF 247 #define EM_BPF 247
...@@ -128,6 +129,10 @@ struct bpf_capabilities { ...@@ -128,6 +129,10 @@ struct bpf_capabilities {
__u32 name:1; __u32 name:1;
/* v5.2: kernel support for global data sections. */ /* v5.2: kernel support for global data sections. */
__u32 global_data:1; __u32 global_data:1;
/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
__u32 btf_func:1;
/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
__u32 btf_datasec:1;
}; };
/* /*
...@@ -1021,6 +1026,74 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx) ...@@ -1021,6 +1026,74 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
return false; return false;
} }
/*
 * Rewrite BTF kinds the running kernel does not support into older,
 * semantically-close kinds so btf__load() can still succeed:
 *
 *  - without DATASEC support: VAR becomes a plain 32-bit INT, and
 *    DATASEC becomes a STRUCT whose members mirror the section's
 *    variables ('.' in the section name is mapped to '_', since
 *    struct names may not contain dots);
 *  - without FUNC support: FUNC_PROTO becomes an ENUM with the same
 *    vlen, and FUNC becomes a TYPEDEF.
 *
 * Mutates obj->btf in place. No-op when there is no BTF or when the
 * kernel supports both feature groups.
 */
static void bpf_object__sanitize_btf(struct bpf_object *obj)
{
	bool has_datasec = obj->caps.btf_datasec;
	bool has_func = obj->caps.btf_func;
	struct btf *btf = obj->btf;
	struct btf_type *t;
	int i, j, vlen;
	__u16 kind;

	if (!obj->btf || (has_func && has_datasec))
		return;

	/* type ID 0 is the implicit void type; real types start at 1 */
	for (i = 1; i <= btf__get_nr_types(btf); i++) {
		t = (struct btf_type *)btf__type_by_id(btf, i);
		kind = BTF_INFO_KIND(t->info);

		if (!has_datasec && kind == BTF_KIND_VAR) {
			/* replace VAR with INT */
			t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
			t->size = sizeof(int);
			/* INT metadata word right after btf_type:
			 * no encoding flags, bit offset 0, 32 bits */
			*(int *)(t+1) = BTF_INT_ENC(0, 0, 32);
		} else if (!has_datasec && kind == BTF_KIND_DATASEC) {
			/* replace DATASEC with STRUCT */
			/* v and m alias the same trailing array; see the
			 * ordering note in the loop below */
			struct btf_var_secinfo *v = (void *)(t + 1);
			struct btf_member *m = (void *)(t + 1);
			struct btf_type *vt;
			char *name;

			/* section names like ".data" are not valid struct
			 * names; rewrite '.' to '_' in the string section */
			name = (char *)btf__name_by_offset(btf, t->name_off);
			while (*name) {
				if (*name == '.')
					*name = '_';
				name++;
			}

			vlen = BTF_INFO_VLEN(t->info);
			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
			for (j = 0; j < vlen; j++, v++, m++) {
				/* order of field assignments is important:
				 * m and v overlap in memory, so each secinfo
				 * field must be consumed before the member
				 * write that overlaps it clobbers it */
				m->offset = v->offset * 8;
				m->type = v->type;
				/* preserve variable name as member name */
				vt = (void *)btf__type_by_id(btf, v->type);
				m->name_off = vt->name_off;
			}
		} else if (!has_func && kind == BTF_KIND_FUNC_PROTO) {
			/* replace FUNC_PROTO with ENUM */
			vlen = BTF_INFO_VLEN(t->info);
			t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
			t->size = sizeof(__u32); /* kernel enforced */
		} else if (!has_func && kind == BTF_KIND_FUNC) {
			/* replace FUNC with TYPEDEF */
			t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
		}
	}
}
/*
 * Drop the .BTF.ext section entirely when the kernel lacks
 * BTF_KIND_FUNC support: func_info/line_info in .BTF.ext reference
 * FUNC types, which bpf_object__sanitize_btf() rewrites away.
 */
static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
{
	if (!obj->btf_ext || obj->caps.btf_func)
		return;

	btf_ext__free(obj->btf_ext);
	obj->btf_ext = NULL;
}
static int bpf_object__elf_collect(struct bpf_object *obj, int flags) static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
{ {
Elf *elf = obj->efile.elf; Elf *elf = obj->efile.elf;
...@@ -1164,8 +1237,10 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags) ...@@ -1164,8 +1237,10 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
obj->btf = NULL; obj->btf = NULL;
} else { } else {
err = btf__finalize_data(obj, obj->btf); err = btf__finalize_data(obj, obj->btf);
if (!err) if (!err) {
bpf_object__sanitize_btf(obj);
err = btf__load(obj->btf); err = btf__load(obj->btf);
}
if (err) { if (err) {
pr_warning("Error finalizing and loading %s into kernel: %d. Ignored and continue.\n", pr_warning("Error finalizing and loading %s into kernel: %d. Ignored and continue.\n",
BTF_ELF_SEC, err); BTF_ELF_SEC, err);
...@@ -1187,6 +1262,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags) ...@@ -1187,6 +1262,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
BTF_EXT_ELF_SEC, BTF_EXT_ELF_SEC,
PTR_ERR(obj->btf_ext)); PTR_ERR(obj->btf_ext));
obj->btf_ext = NULL; obj->btf_ext = NULL;
} else {
bpf_object__sanitize_btf_ext(obj);
} }
} }
} }
...@@ -1556,12 +1633,63 @@ bpf_object__probe_global_data(struct bpf_object *obj) ...@@ -1556,12 +1633,63 @@ bpf_object__probe_global_data(struct bpf_object *obj)
return 0; return 0;
} }
/*
 * Probe whether the kernel's BTF loader accepts BTF_KIND_FUNC /
 * BTF_KIND_FUNC_PROTO by loading a minimal raw BTF blob encoding
 * "void x(int a) {}". Records the result in obj->caps.btf_func.
 *
 * Returns 0 once the probe has run (whether or not the feature is
 * present), or a negative error from libbpf__probe_raw_btf().
 */
static int bpf_object__probe_btf_func(struct bpf_object *obj)
{
	/* string section offsets: 1="int", 5="x", 7="a" */
	const char strs[] = "\0int\0x\0a";
	/* void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* FUNC_PROTO */                                /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x */                                    /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
	};
	int ret;

	ret = libbpf__probe_raw_btf((char *)types, sizeof(types),
				    strs, sizeof(strs));
	if (ret > 0)
		obj->caps.btf_func = 1;

	return ret < 0 ? ret : 0;
}
/*
 * Probe whether the kernel's BTF loader accepts BTF_KIND_VAR /
 * BTF_KIND_DATASEC by loading a minimal raw BTF blob encoding
 * "static int a;" placed in a ".data" section. Records the result
 * in obj->caps.btf_datasec.
 *
 * Returns 0 once the probe has run (whether or not the feature is
 * present), or a negative error from libbpf__probe_raw_btf().
 */
static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
{
	/* string section offsets: 1="x", 3=".data" */
	const char strs[] = "\0x\0.data";
	/* static int a; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* VAR x */                                     /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC val */                               /* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};
	int ret;

	ret = libbpf__probe_raw_btf((char *)types, sizeof(types),
				    strs, sizeof(strs));
	if (ret > 0)
		obj->caps.btf_datasec = 1;

	return ret < 0 ? ret : 0;
}
static int static int
bpf_object__probe_caps(struct bpf_object *obj) bpf_object__probe_caps(struct bpf_object *obj)
{ {
int (*probe_fn[])(struct bpf_object *obj) = { int (*probe_fn[])(struct bpf_object *obj) = {
bpf_object__probe_name, bpf_object__probe_name,
bpf_object__probe_global_data, bpf_object__probe_global_data,
bpf_object__probe_btf_func,
bpf_object__probe_btf_datasec,
}; };
int i, ret; int i, ret;
......
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * Internal libbpf helpers.
 *
 * Copyright (c) 2019 Facebook
 */
#ifndef __LIBBPF_LIBBPF_INTERNAL_H
#define __LIBBPF_LIBBPF_INTERNAL_H

/* Helpers for hand-encoding raw BTF type data as flat __u32 arrays,
 * mirroring the on-disk struct btf_type layout. */

/* Pack kind, kind_flag and member/param count (vlen) into the single
 * 32-bit "info" word of a BTF type. */
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
	((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
/* Emit the three leading __u32 words common to every BTF type entry:
 * string-table offset of the name, info word, and size-or-type word. */
#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
/* Pack the extra descriptor word that follows a BTF_KIND_INT entry. */
#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
	((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
/* Emit a complete BTF_KIND_INT type: common header plus int descriptor. */
#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
	BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
	BTF_INT_ENC(encoding, bits_offset, bits)
/* Emit one struct/union member record. */
#define BTF_MEMBER_ENC(name, type, bits_offset) (name), (type), (bits_offset)
/* Emit one function-prototype parameter record. */
#define BTF_PARAM_ENC(name, type) (name), (type)
/* Emit one variable-in-section record for a BTF_KIND_DATASEC entry. */
#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)

/* Build a BTF blob from the given raw type data and string section and
 * try to load it into the kernel.  Callers treat the result as: 1 if the
 * kernel accepted the BTF, 0 if the load was rejected, negative error on
 * internal failure. */
int libbpf__probe_raw_btf(const char *raw_types, size_t types_len,
			  const char *str_sec, size_t str_len);

#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include "bpf.h" #include "bpf.h"
#include "libbpf.h" #include "libbpf.h"
#include "libbpf_internal.h"
static bool grep(const char *buffer, const char *pattern) static bool grep(const char *buffer, const char *pattern)
{ {
...@@ -132,21 +133,43 @@ bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex) ...@@ -132,21 +133,43 @@ bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
return errno != EINVAL && errno != EOPNOTSUPP; return errno != EINVAL && errno != EOPNOTSUPP;
} }
static int load_btf(void) int libbpf__probe_raw_btf(const char *raw_types, size_t types_len,
const char *str_sec, size_t str_len)
{ {
#define BTF_INFO_ENC(kind, kind_flag, vlen) \ struct btf_header hdr = {
((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN)) .magic = BTF_MAGIC,
#define BTF_TYPE_ENC(name, info, size_or_type) \ .version = BTF_VERSION,
(name), (info), (size_or_type) .hdr_len = sizeof(struct btf_header),
#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \ .type_len = types_len,
((encoding) << 24 | (bits_offset) << 16 | (nr_bits)) .str_off = types_len,
#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \ .str_len = str_len,
BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \ };
BTF_INT_ENC(encoding, bits_offset, bits) int btf_fd, btf_len;
#define BTF_MEMBER_ENC(name, type, bits_offset) \ __u8 *raw_btf;
(name), (type), (bits_offset)
btf_len = hdr.hdr_len + hdr.type_len + hdr.str_len;
const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l"; raw_btf = malloc(btf_len);
if (!raw_btf)
return -ENOMEM;
memcpy(raw_btf, &hdr, sizeof(hdr));
memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);
btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false);
if (btf_fd < 0) {
free(raw_btf);
return 0;
}
close(btf_fd);
free(raw_btf);
return 1;
}
static int load_sk_storage_btf(void)
{
const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l";
/* struct bpf_spin_lock { /* struct bpf_spin_lock {
* int val; * int val;
* }; * };
...@@ -155,7 +178,7 @@ static int load_btf(void) ...@@ -155,7 +178,7 @@ static int load_btf(void)
* struct bpf_spin_lock l; * struct bpf_spin_lock l;
* }; * };
*/ */
__u32 btf_raw_types[] = { __u32 types[] = {
/* int */ /* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* struct bpf_spin_lock */ /* [2] */ /* struct bpf_spin_lock */ /* [2] */
...@@ -166,23 +189,9 @@ static int load_btf(void) ...@@ -166,23 +189,9 @@ static int load_btf(void)
BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */ BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */ BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
}; };
struct btf_header btf_hdr = {
.magic = BTF_MAGIC,
.version = BTF_VERSION,
.hdr_len = sizeof(struct btf_header),
.type_len = sizeof(btf_raw_types),
.str_off = sizeof(btf_raw_types),
.str_len = sizeof(btf_str_sec),
};
__u8 raw_btf[sizeof(struct btf_header) + sizeof(btf_raw_types) +
sizeof(btf_str_sec)];
memcpy(raw_btf, &btf_hdr, sizeof(btf_hdr));
memcpy(raw_btf + sizeof(btf_hdr), btf_raw_types, sizeof(btf_raw_types));
memcpy(raw_btf + sizeof(btf_hdr) + sizeof(btf_raw_types),
btf_str_sec, sizeof(btf_str_sec));
return bpf_load_btf(raw_btf, sizeof(raw_btf), 0, 0, 0); return libbpf__probe_raw_btf((char *)types, sizeof(types),
strs, sizeof(strs));
} }
bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex) bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
...@@ -222,7 +231,7 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex) ...@@ -222,7 +231,7 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
value_size = 8; value_size = 8;
max_entries = 0; max_entries = 0;
map_flags = BPF_F_NO_PREALLOC; map_flags = BPF_F_NO_PREALLOC;
btf_fd = load_btf(); btf_fd = load_sk_storage_btf();
if (btf_fd < 0) if (btf_fd < 0)
return false; return false;
break; break;
......
...@@ -32,3 +32,5 @@ test_tcpnotify_user ...@@ -32,3 +32,5 @@ test_tcpnotify_user
test_libbpf test_libbpf
test_tcp_check_syncookie_user test_tcp_check_syncookie_user
alu32 alu32
libbpf.pc
libbpf.so.*
...@@ -178,3 +178,198 @@ ...@@ -178,3 +178,198 @@
.result_unpriv = REJECT, .result_unpriv = REJECT,
.result = ACCEPT, .result = ACCEPT,
}, },
/*
 * Regression tests for verifier dead-code elimination: per the fix these
 * accompany, removing unreachable instructions must correctly rewrite
 * the offsets of surviving jumps and pseudo calls, including backwards
 * jumps across the removed region.
 */
{
	/* Fall-through path jumps over "r0 = 2; exit", takes the JNE into
	 * a trailing backwards jump that must still land on that skipped
	 * block; the run of no-op jumps in between is dead code. */
	"jump test 6",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_MOV64_IMM(BPF_REG_1, 2),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_1, 16),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -20),
	},
	.result = ACCEPT,
	.retval = 2,
},
{
	/* Same shape with register-vs-immediate JNE and a block of dead
	 * "r0 = 42" stores; the backwards jump must reach "r0 = 3; exit". */
	"jump test 7",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 16),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_JMP_IMM(BPF_JA, 0, 0, -20),
	},
	.result = ACCEPT,
	.retval = 3,
},
{
	/* As test 7 but with a register-register JNE. */
	"jump test 8",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_MOV64_IMM(BPF_REG_1, 2),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_1, 16),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_JMP_IMM(BPF_JA, 0, 0, -20),
	},
	.result = ACCEPT,
	.retval = 3,
},
{
	/* A backwards pseudo call (src_reg == 1) across the dead block
	 * must be rejected as an out-of-range jump rather than allowed
	 * to escape the program after dead code is removed. */
	"jump/call test 9",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 16),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -20),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "jump out of range from insn 1 to 4",
},
{
	/* Forward pseudo call into a block whose subprog does not end in
	 * exit/jmp must be rejected. */
	"jump/call test 10",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 16),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -20),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = REJECT,
	.errstr = "last insn is not an exit or jmp",
},
{
	/* Valid multi-subprog variant: forward call into a well-formed
	 * subprog containing dead code and a backwards call; must still
	 * verify and return 3 after dead code removal. */
	"jump/call test 11",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_MOV64_IMM(BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 26),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -31),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 3,
},
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment