Commit e889eb17 authored by Paolo Abeni

Merge tag 'nf-24-05-29' of git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf

Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for net:

Patch #1 addresses a syzbot report that nf_reinject() could be called
         without rcu_read_lock() when flushing pending packets on
         nfnetlink queue removal, from Eric Dumazet.

Patch #2 flushes the ipset list:set when canceling garbage collection to
         drop references to other lists and fix a race, from Jozsef Kadlecsik.

Patch #3 restores q-in-q matching with nft_payload by reverting
         f6ae9f12 ("netfilter: nft_payload: add C-VLAN support").

Patch #4 fixes vlan mangling when the vlan tag is offloaded to skbuff
         metadata; without this patch, nft_payload corrupts packets in
         this case.

Patch #5 fixes a possible NULL dereference in tproxy when no IP address
         is found on the netdevice, reported by syzbot, patch from
         Florian Westphal.

Patch #6 removes a superfluous restriction which prevents loose fib
         lookups from input and forward hooks, from Eric Garver.

My assessment is that patches #1, #2 and #5 address possible kernel
crashes; everything else in this batch fixes broken features.

netfilter pull request 24-05-29

* tag 'nf-24-05-29' of git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf:
  netfilter: nft_fib: allow from forward/input without iif selector
  netfilter: tproxy: bail out if IP has been disabled on the device
  netfilter: nft_payload: skbuff vlan metadata mangle support
  netfilter: nft_payload: restore vlan q-in-q match support
  netfilter: ipset: Add list flush to cancel_gc
  netfilter: nfnetlink_queue: acquire rcu_read_lock() in instance_destroy_rcu()
====================

Link: https://lore.kernel.org/r/20240528225519.1155786-1-pablo@netfilter.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 2dc8b1e7 e8ded22e

@@ -58,6 +58,8 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
         laddr = 0;
         indev = __in_dev_get_rcu(skb->dev);
+        if (!indev)
+                return daddr;
 
         in_dev_for_each_ifa_rcu(ifa, indev) {
                 if (ifa->ifa_flags & IFA_F_SECONDARY)

@@ -549,6 +549,9 @@ list_set_cancel_gc(struct ip_set *set)
         if (SET_WITH_TIMEOUT(set))
                 timer_shutdown_sync(&map->gc);
+
+        /* Flush list to drop references to other ipsets */
+        list_set_flush(set);
 }
 
 static const struct ip_set_type_variant set_variant = {

@@ -169,7 +169,9 @@ instance_destroy_rcu(struct rcu_head *head)
         struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
                                                    rcu);
 
+        rcu_read_lock();
         nfqnl_flush(inst, NULL, 0);
+        rcu_read_unlock();
         kfree(inst);
         module_put(THIS_MODULE);
 }

@@ -35,11 +35,9 @@ int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
         switch (priv->result) {
         case NFT_FIB_RESULT_OIF:
         case NFT_FIB_RESULT_OIFNAME:
-                hooks = (1 << NF_INET_PRE_ROUTING);
-                if (priv->flags & NFTA_FIB_F_IIF) {
-                        hooks |= (1 << NF_INET_LOCAL_IN) |
-                                 (1 << NF_INET_FORWARD);
-                }
+                hooks = (1 << NF_INET_PRE_ROUTING) |
+                        (1 << NF_INET_LOCAL_IN) |
+                        (1 << NF_INET_FORWARD);
                 break;
         case NFT_FIB_RESULT_ADDRTYPE:
                 if (priv->flags & NFTA_FIB_F_IIF)
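
For context, a minimal userspace sketch (not part of the patch) of what the relaxed
validation above amounts to: fib expressions with an oif/oifname result are now accepted
from prerouting, input and forward even when the NFTA_FIB_F_IIF selector is absent. The
hook numbers below mirror enum nf_inet_hooks from the UAPI netfilter headers; the program
only reproduces the hook-mask arithmetic.

#include <stdio.h>

/* Mirrors the first three values of enum nf_inet_hooks (uapi/linux/netfilter.h). */
enum { NF_INET_PRE_ROUTING = 0, NF_INET_LOCAL_IN = 1, NF_INET_FORWARD = 2 };

int main(void)
{
        /* Before the patch: only prerouting unless NFTA_FIB_F_IIF was set. */
        unsigned int old_mask_without_iif = 1 << NF_INET_PRE_ROUTING;

        /* After the patch: loose (iif-less) lookups also validate in input/forward. */
        unsigned int new_mask = (1 << NF_INET_PRE_ROUTING) |
                                (1 << NF_INET_LOCAL_IN) |
                                (1 << NF_INET_FORWARD);

        printf("old hook mask without iif: 0x%x\n", old_mask_without_iif);
        printf("new hook mask:             0x%x\n", new_mask);
        return 0;
}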

@@ -45,36 +45,27 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
         int mac_off = skb_mac_header(skb) - skb->data;
         u8 *vlanh, *dst_u8 = (u8 *) d;
         struct vlan_ethhdr veth;
-        u8 vlan_hlen = 0;
-
-        if ((skb->protocol == htons(ETH_P_8021AD) ||
-             skb->protocol == htons(ETH_P_8021Q)) &&
-            offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
-                vlan_hlen += VLAN_HLEN;
 
         vlanh = (u8 *) &veth;
-        if (offset < VLAN_ETH_HLEN + vlan_hlen) {
+        if (offset < VLAN_ETH_HLEN) {
                 u8 ethlen = len;
 
-                if (vlan_hlen &&
-                    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
-                        return false;
-                else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
+                if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
                         return false;
 
-                if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
-                        ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;
+                if (offset + len > VLAN_ETH_HLEN)
+                        ethlen -= offset + len - VLAN_ETH_HLEN;
 
-                memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
+                memcpy(dst_u8, vlanh + offset, ethlen);
 
                 len -= ethlen;
                 if (len == 0)
                         return true;
 
                 dst_u8 += ethlen;
-                offset = ETH_HLEN + vlan_hlen;
+                offset = ETH_HLEN;
         } else {
-                offset -= VLAN_HLEN + vlan_hlen;
+                offset -= VLAN_HLEN;
         }
 
         return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;

@@ -154,12 +145,12 @@ int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
         return pkt->inneroff;
 }
 
-static bool nft_payload_need_vlan_copy(const struct nft_payload *priv)
+static bool nft_payload_need_vlan_adjust(u32 offset, u32 len)
 {
-        unsigned int len = priv->offset + priv->len;
+        unsigned int boundary = offset + len;
 
         /* data past ether src/dst requested, copy needed */
-        if (len > offsetof(struct ethhdr, h_proto))
+        if (boundary > offsetof(struct ethhdr, h_proto))
                 return true;
 
         return false;

@@ -183,7 +174,7 @@ void nft_payload_eval(const struct nft_expr *expr,
                         goto err;
 
                 if (skb_vlan_tag_present(skb) &&
-                    nft_payload_need_vlan_copy(priv)) {
+                    nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
                         if (!nft_payload_copy_vlan(dest, skb,
                                                    priv->offset, priv->len))
                                 goto err;

@@ -810,21 +801,79 @@ struct nft_payload_set {
         u8                      csum_flags;
 };
 
+/* This is not struct vlan_hdr. */
+struct nft_payload_vlan_hdr {
+        __be16  h_vlan_proto;
+        __be16  h_vlan_TCI;
+};
+
+static bool
+nft_payload_set_vlan(const u32 *src, struct sk_buff *skb, u8 offset, u8 len,
+                     int *vlan_hlen)
+{
+        struct nft_payload_vlan_hdr *vlanh;
+        __be16 vlan_proto;
+        u16 vlan_tci;
+
+        if (offset >= offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto)) {
+                *vlan_hlen = VLAN_HLEN;
+                return true;
+        }
+
+        switch (offset) {
+        case offsetof(struct vlan_ethhdr, h_vlan_proto):
+                if (len == 2) {
+                        vlan_proto = nft_reg_load_be16(src);
+                        skb->vlan_proto = vlan_proto;
+                } else if (len == 4) {
+                        vlanh = (struct nft_payload_vlan_hdr *)src;
+                        __vlan_hwaccel_put_tag(skb, vlanh->h_vlan_proto,
+                                               ntohs(vlanh->h_vlan_TCI));
+                } else {
+                        return false;
+                }
+                break;
+        case offsetof(struct vlan_ethhdr, h_vlan_TCI):
+                if (len != 2)
+                        return false;
+
+                vlan_tci = ntohs(nft_reg_load_be16(src));
+                skb->vlan_tci = vlan_tci;
+                break;
+        default:
+                return false;
+        }
+
+        return true;
+}
+
 static void nft_payload_set_eval(const struct nft_expr *expr,
                                  struct nft_regs *regs,
                                  const struct nft_pktinfo *pkt)
 {
         const struct nft_payload_set *priv = nft_expr_priv(expr);
-        struct sk_buff *skb = pkt->skb;
         const u32 *src = &regs->data[priv->sreg];
-        int offset, csum_offset;
+        int offset, csum_offset, vlan_hlen = 0;
+        struct sk_buff *skb = pkt->skb;
         __wsum fsum, tsum;
 
         switch (priv->base) {
         case NFT_PAYLOAD_LL_HEADER:
                 if (!skb_mac_header_was_set(skb))
                         goto err;
-                offset = skb_mac_header(skb) - skb->data;
+
+                if (skb_vlan_tag_present(skb) &&
+                    nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
+                        if (!nft_payload_set_vlan(src, skb,
+                                                  priv->offset, priv->len,
+                                                  &vlan_hlen))
+                                goto err;
+
+                        if (!vlan_hlen)
+                                return;
+                }
+
+                offset = skb_mac_header(skb) - skb->data - vlan_hlen;
                 break;
         case NFT_PAYLOAD_NETWORK_HEADER:
                 offset = skb_network_offset(skb);
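
To make the VLAN offset handling above easier to follow, here is a hedged, standalone
userspace sketch (not kernel code, not part of this series) that reproduces the offset
arithmetic: which link-layer payload offsets land in the VLAN header rebuilt from skb
metadata, how a read spanning VLAN_ETH_HLEN is split in the nft_payload_copy_vlan() path,
and why writes at offsets 12, 14 and 16 correspond to h_vlan_proto, h_vlan_TCI and the
encapsulated protocol, the offsets nft_payload_set_vlan() dispatches on. The struct below
only mirrors the layout of struct vlan_ethhdr; the constants mirror ETH_HLEN, VLAN_HLEN
and VLAN_ETH_HLEN.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define ETH_ALEN        6
#define ETH_HLEN        14                        /* untagged ethernet header */
#define VLAN_HLEN       4                         /* one 802.1Q/802.1ad tag   */
#define VLAN_ETH_HLEN   (ETH_HLEN + VLAN_HLEN)    /* 18: single-tagged header */

/* Layout mirror of struct vlan_ethhdr, for illustration only. */
struct vlan_ethhdr_mirror {
        uint8_t  h_dest[ETH_ALEN];                /* offset  0 */
        uint8_t  h_source[ETH_ALEN];              /* offset  6 */
        uint16_t h_vlan_proto;                    /* offset 12 */
        uint16_t h_vlan_TCI;                      /* offset 14 */
        uint16_t h_vlan_encapsulated_proto;       /* offset 16 */
};

/* Reproduce how a link-layer read is split between the VLAN header rebuilt
 * from skb metadata and the untagged data still in the skb. */
static void split_read(unsigned int offset, unsigned int len)
{
        unsigned int ethlen = 0, skb_len = len, skb_off = offset - VLAN_HLEN;

        if (offset < VLAN_ETH_HLEN) {
                ethlen = len;
                if (offset + len > VLAN_ETH_HLEN)
                        ethlen -= offset + len - VLAN_ETH_HLEN;
                skb_len = len - ethlen;
                skb_off = ETH_HLEN;               /* untagged payload resumes here */
        }

        printf("read offset %2u len %2u -> %u byte(s) from rebuilt header, "
               "%u byte(s) from skb data at offset %u\n",
               offset, len, ethlen, skb_len, skb_len ? skb_off : 0);
}

int main(void)
{
        /* The offsets the write path dispatches on. */
        printf("h_vlan_proto at %zu, h_vlan_TCI at %zu, encapsulated proto at %zu\n",
               offsetof(struct vlan_ethhdr_mirror, h_vlan_proto),
               offsetof(struct vlan_ethhdr_mirror, h_vlan_TCI),
               offsetof(struct vlan_ethhdr_mirror, h_vlan_encapsulated_proto));

        split_read(0, 12);      /* dst+src MAC: entirely from the rebuilt header */
        split_read(12, 8);      /* spans the tag: 6 bytes header, 2 bytes skb    */
        split_read(20, 4);      /* past the tagged header: skb data only         */
        return 0;
}

Note the boundary that nft_payload_need_vlan_adjust() keys on: a read or write confined to
the first 12 bytes (destination and source MAC) never touches the tag, so no adjustment is
needed there.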