Commit f9810d56 authored by Daniel Borkmann, committed by Luis Henriques

netlink, mmap: fix edge-case leakages in nf queue zero-copy

commit 6bb0fef4 upstream.

When the netlink mmap receive side is the consumer of nf queue data,
it can happen in some edge cases that we write skb shared info into
the user space mmap buffer:

Assume a possible rx ring frame size of only 4096, and the network skb,
which is being zero-copied into the netlink skb, contains page frags
with an overall skb->len larger than the linear part of the netlink
skb.

skb_zerocopy(), which is generic and thus not aware of the fact that
shared info cannot be accessed for such skbs, then tries to write and
fill frags, thus leaking kernel data/pointers and in some corner cases
possibly writing out of bounds of the mmap area (when filling the
last slot in the ring buffer this way).
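
For illustration, skb_zerocopy(to, from, len, hlen) roughly behaves
as in the following condensed sketch (not the verbatim
net/core/skbuff.c code): up to hlen bytes are copied into the linear
area, the remainder is attached by reference through the destination's
shared info:

    /* Condensed sketch of skb_zerocopy(), not verbatim: */
    skb_copy_bits(from, 0, skb_put(to, hlen), hlen); /* linear part */
    for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
            /* frags are referenced, not copied ... */
            skb_shinfo(to)->frags[i] = skb_shinfo(from)->frags[i];
            skb_frag_ref(to, i);
    }
    /* ... but skb_shinfo(to) sits at to->end, which for an mmap
     * netlink skb points right at the next ring slot.
     */
    skb_shinfo(to)->nr_frags = i;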

The end result: the ring buffer slot is then of status
NL_MMAP_STATUS_VALID, has an advertised length larger than 4096, where
the linear part is visible at the slot beginning, and the leaked
sizeof(struct skb_shared_info) bytes have been written to the beginning
of the next slot (also corrupting that slot's struct nl_mmap_hdr header
incl. status etc.), since skb->end points to skb->data +
ring->frame_size - NL_MMAP_HDRLEN.
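
To make that concrete, here is the slot arithmetic spelled out
(illustrative sketch using the 4096-byte frame size from above):

    /* Illustrative arithmetic, derived from the description above: */
    frame      = ring_base + n * frame_size;  /* current slot       */
    next_frame = frame + frame_size;          /* next slot's header */

    skb->data  = frame + NL_MMAP_HDRLEN;
    skb->end   = skb->data + frame_size - NL_MMAP_HDRLEN;
               /* == frame + frame_size == next_frame */

    /* skb_shinfo(skb) is located at skb->end, so filling shared info
     * writes sizeof(struct skb_shared_info) bytes of kernel data over
     * next_frame's struct nl_mmap_hdr.
     */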

The fix adds __netlink_alloc_skb() and lets it take the actually
needed linear room for the network skb + meta data into account. This
is completely irrelevant for non-mmaped netlink sockets, but in case
mmap sockets are used, it can then be decided whether the available
skb_tailroom() is really large enough for the buffer, or whether an
internal fallback to a normal alloc_skb() is needed.
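
The resulting check boils down to the following (condensed from the
af_netlink.c hunk below):

    /* A ring slot cannot carry non-linear parts, so the full linear
     * size must fit into the frame.
     */
    linear_size = size + ldiff;
    if (ring->frame_size - NL_MMAP_HDRLEN < linear_size)
            goto out_put;   /* doesn't fit: fall back to alloc_skb() */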

From the nf queue side, the information whether the destination port
is an mmap RX ring is not really available without an extra
port-to-socket lookup, thus it can only be determined in lower layers,
i.e. when __netlink_alloc_skb() is called, which checks for this
internally. I chose to add the extra ldiff parameter as mmap will then
still work: we have data_len and hlen in nfqnl_build_packet_message();
data_len is the full length (capped at queue->copy_range) for
skb_zerocopy(), and hlen is the possible part of data_len that needs
to be copied into the linear area; the rem_len variable indicates the
needed remaining linear mmap space.
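
As a purely hypothetical example with made-up numbers: if
queue->copy_range caps data_len at 6000 and hlen comes out as 2000,
then

    data_len = 6000;            /* full length for skb_zerocopy()   */
    hlen     = 2000;            /* part copied into the linear area */
    rem_len  = data_len - hlen; /* 4000 extra linear bytes needed   */

and an mmap ring with 4096-byte frames cannot take the message, so
__netlink_alloc_skb() falls back to a normal skb.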

The only other workaround internal to nf queue would be to cap
data_len to skb_tailroom() after allocation time iff we deal with an
mmap skb, but that would 1) expose the fact that we use an mmap skb to
upper layers, and 2) trim the skb where we otherwise could just have
moved the full skb into the normal receive queue.

After the patch, the frame in my test case doesn't fit into the ring
slot anymore and therefore shows NL_MMAP_STATUS_COPY; a full skb
carries all the data and needs to be picked up via recv().
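
A ring consumer deals with such frames as sketched below, following
the netlink mmap RX scheme; read_rx_frame() is a hypothetical helper
and error handling is omitted:

    struct nl_mmap_hdr *hdr = read_rx_frame(ring); /* hypothetical */

    switch (hdr->nm_status) {
    case NL_MMAP_STATUS_VALID:
            /* payload sits in the ring frame itself */
            handle((char *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
            break;
    case NL_MMAP_STATUS_COPY:
            /* frame too small: message was queued, fetch via recv() */
            len = recv(fd, buf, sizeof(buf), 0);
            handle(buf, len);
            break;
    }
    hdr->nm_status = NL_MMAP_STATUS_UNUSED; /* return frame to kernel */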

Fixes: 3ab1f683 ("nfnetlink: add support for memory mapped netlink")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
[ luis: backported to 3.16: adjusted context ]
Signed-off-by: Luis Henriques <luis.henriques@canonical.com>
parent 9a8359ae
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -66,8 +66,17 @@ extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
 extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
 extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
 extern int netlink_has_listeners(struct sock *sk, unsigned int group);
-extern struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
-                                         u32 dst_portid, gfp_t gfp_mask);
+
+extern struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
+                                           unsigned int ldiff, u32 dst_portid,
+                                           gfp_t gfp_mask);
+static inline struct sk_buff *
+netlink_alloc_skb(struct sock *ssk, unsigned int size, u32 dst_portid,
+                  gfp_t gfp_mask)
+{
+       return __netlink_alloc_skb(ssk, size, 0, dst_portid, gfp_mask);
+}
+
 extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
 extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
                              __u32 group, gfp_t allocation);
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -284,7 +284,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                            __be32 **packet_id_ptr)
 {
        size_t size;
-       size_t data_len = 0, cap_len = 0;
+       size_t data_len = 0, cap_len = 0, rem_len = 0;
        unsigned int hlen = 0;
        struct sk_buff *skb;
        struct nlattr *nla;
@@ -341,6 +341,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                hlen = min_t(unsigned int, hlen, data_len);
                size += sizeof(struct nlattr) + hlen;
                cap_len = entskb->len;
+               rem_len = data_len - hlen;
                break;
        }
 
@@ -352,7 +353,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                        + nla_total_size(sizeof(u_int32_t)));   /* gid */
        }
 
-       skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
-                                 GFP_ATOMIC);
+       skb = __netlink_alloc_skb(net->nfnl, size, rem_len, queue->peer_portid,
+                                 GFP_ATOMIC);
        if (!skb) {
                skb_tx_error(entskb);
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1840,15 +1840,16 @@ int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(netlink_unicast);
 
-struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
-                                  u32 dst_portid, gfp_t gfp_mask)
+struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
+                                    unsigned int ldiff, u32 dst_portid,
+                                    gfp_t gfp_mask)
 {
 #ifdef CONFIG_NETLINK_MMAP
+       unsigned int maxlen, linear_size;
        struct sock *sk = NULL;
        struct sk_buff *skb;
        struct netlink_ring *ring;
        struct nl_mmap_hdr *hdr;
-       unsigned int maxlen;
 
        sk = netlink_getsockbyportid(ssk, dst_portid);
        if (IS_ERR(sk))
@@ -1859,7 +1860,11 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
        if (ring->pg_vec == NULL)
                goto out_put;
 
-       if (ring->frame_size - NL_MMAP_HDRLEN < size)
+       /* We need to account the full linear size needed as a ring
+        * slot cannot have non-linear parts.
+        */
+       linear_size = size + ldiff;
+       if (ring->frame_size - NL_MMAP_HDRLEN < linear_size)
                goto out_put;
 
        skb = alloc_skb_head(gfp_mask);
@@ -1873,13 +1878,14 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
 
        /* check again under lock */
        maxlen = ring->frame_size - NL_MMAP_HDRLEN;
-       if (maxlen < size)
+       if (maxlen < linear_size)
                goto out_free;
 
        netlink_forward_ring(ring);
        hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
        if (hdr == NULL)
                goto err2;
+
        netlink_ring_setup_skb(skb, sk, ring, hdr);
        netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
        atomic_inc(&ring->pending);
@@ -1905,7 +1911,7 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
 #endif
        return alloc_skb(size, gfp_mask);
 }
-EXPORT_SYMBOL_GPL(netlink_alloc_skb);
+EXPORT_SYMBOL_GPL(__netlink_alloc_skb);
 
 int netlink_has_listeners(struct sock *sk, unsigned int group)
 {