Commit 4679f4f1 authored by Jakub Kicinski

Merge tag 'nf-next-24-02-21' of https://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next

Florian Westphal says:

====================
netfilter updates for net-next

1. Prefer KMEM_CACHE() macro to create kmem caches, from Kunwu Chan.
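
For reference, KMEM_CACHE() derives the cache name, object size and
alignment from the struct type itself, so conversions like this one
are mechanical. A minimal before/after sketch (cachep is a
placeholder variable, not from the patch):

    /* before: every kmem_cache_create() argument spelled out by hand */
    cachep = kmem_cache_create("nf_conntrack_expect",
                               sizeof(struct nf_conntrack_expect),
                               0, 0, NULL);

    /* after: KMEM_CACHE(__struct, __flags) expands to
     * kmem_cache_create(#__struct, sizeof(struct __struct),
     *                   __alignof__(struct __struct), (__flags), NULL)
     */
    cachep = KMEM_CACHE(nf_conntrack_expect, 0);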

Patches 2 and 3 consolidate nf_log NULL checks and introduce
extra boundary checks on family and type to make it clear that no
out-of-bounds access will happen.  No in-tree user currently passes
such values, but that's not clear from looking at the function.
From Pablo Neira Ayuso.
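
The added checks are plain bounds validation on the two indices used
for the per-family logger table, so a caller passing an unvalidated
family or type now gets -EINVAL instead of an out-of-bounds read.
The shape of the added code (see the nf_log hunk below):

    /* loggers[] is indexed as loggers[pf][type] */
    if (pf >= ARRAY_SIZE(loggers))
            return -EINVAL;

    if (type >= NF_LOG_TYPE_MAX)
            return -EINVAL;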

Patch 4, also from Pablo, gets rid of an unneeded conditional in
the nft_osf init function.

Patch 5, from myself, fixes erroneous Kconfig dependencies that
came in an earlier net-next pull request. This should get rid
of the xtables related build failure reports.

Patches 6 to 10 are an update to nftables' concatenated-ranges
set type to speed up element insertions.  This series also
compacts a few data structures and cleans up a few oddities such
as reliance on ZERO_SIZE_PTR when asking to allocate a set with
no elements. From myself.

Patch 11 moves the nf_reinject function from the netfilter core
(vmlinux) into the nfnetlink_queue backend, the only place it is
called from. Also from myself.

Patch 12, from Kees Cook, switches xtables' compat layer to use
unsafe_memcpy() because xt_entry_target cannot easily be converted
to a real flexible array (it is UAPI and used inside other structs).
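
For context, unsafe_memcpy() behaves exactly like memcpy() but takes
a mandatory justification argument and opts the call site out of the
FORTIFY_SOURCE bounds check, which would otherwise see the 0-sized
destination and flag every copy as an overflow. A sketch with a
hypothetical UAPI-style struct (not xt_entry_target itself):

    struct old_hdr {
            __u16 size;
            char data[0];   /* FORTIFY sees a zero-byte destination */
    };

    /* data[0] cannot become a flexible array data[] here: the struct
     * is UAPI and may be embedded inside other structs, where a
     * flexible array member is invalid; unsafe_memcpy() keeps the
     * copy and records the reason inline */
    unsafe_memcpy(hdr->data, src, len,
                  /* UAPI 0-sized destination */);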

* tag 'nf-next-24-02-21' of https://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next:
  netfilter: x_tables: Use unsafe_memcpy() for 0-sized destination
  netfilter: move nf_reinject into nfnetlink_queue modules
  netfilter: nft_set_pipapo: use GFP_KERNEL for insertions
  netfilter: nft_set_pipapo: speed up bulk element insertions
  netfilter: nft_set_pipapo: shrink data structures
  netfilter: nft_set_pipapo: do not rely on ZERO_SIZE_PTR
  netfilter: nft_set_pipapo: constify lookup fn args where possible
  netfilter: xtables: fix up kconfig dependencies
  netfilter: nft_osf: simplify init path
  netfilter: nf_log: validate nf_logger_find_get()
  netfilter: nf_log: consolidate check for NULL logger in lookup function
  netfilter: expect: Simplify the allocation of slab caches in nf_conntrack_expect_init
====================

Link: https://lore.kernel.org/r/20240221112637.5396-1-fw@strlen.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 3e7a0dcc 26f4dac1
@@ -370,7 +370,6 @@ __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
 			    u_int8_t protocol, unsigned short family);
 int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
 	     bool strict, unsigned short family);
-int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);
 
 #include <net/flow.h>
@@ -35,7 +35,6 @@ struct nf_queue_handler {
 void nf_register_queue_handler(const struct nf_queue_handler *qh);
 void nf_unregister_queue_handler(void);
-void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
 bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
 void nf_queue_entry_free(struct nf_queue_entry *entry);
@@ -217,7 +217,7 @@ config IP_NF_NAT
 	default m if NETFILTER_ADVANCED=n
 	select NF_NAT
 	select NETFILTER_XT_NAT
-	select IP6_NF_IPTABLES_LEGACY
+	select IP_NF_IPTABLES_LEGACY
 	help
 	  This enables the `nat' table in iptables. This allows masquerading,
 	  port forwarding and other forms of full Network Address Port
@@ -329,6 +329,7 @@ config NFT_COMPAT_ARP
 config IP_NF_ARPFILTER
 	tristate "arptables-legacy packet filtering support"
 	select IP_NF_ARPTABLES
+	depends on NETFILTER_XTABLES
 	help
 	  ARP packet filtering defines a table `filter', which has a series of
 	  rules for simple ARP packet filtering at local input and
@@ -722,9 +722,7 @@ int nf_conntrack_expect_init(void)
 		nf_ct_expect_hsize = 1;
 	}
 	nf_ct_expect_max = nf_ct_expect_hsize * 4;
-	nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
-					sizeof(struct nf_conntrack_expect),
-					0, 0, NULL);
+	nf_ct_expect_cachep = KMEM_CACHE(nf_conntrack_expect, 0);
 	if (!nf_ct_expect_cachep)
 		return -ENOMEM;
@@ -31,10 +31,10 @@ static struct nf_logger *__find_logger(int pf, const char *str_logger)
 	int i;
 
 	for (i = 0; i < NF_LOG_TYPE_MAX; i++) {
-		if (loggers[pf][i] == NULL)
+		log = nft_log_dereference(loggers[pf][i]);
+		if (!log)
 			continue;
 
-		log = nft_log_dereference(loggers[pf][i]);
 		if (!strncasecmp(str_logger, log->name, strlen(log->name)))
 			return log;
 	}
@@ -156,6 +156,11 @@ int nf_logger_find_get(int pf, enum nf_log_type type)
 	struct nf_logger *logger;
 	int ret = -ENOENT;
 
+	if (pf >= ARRAY_SIZE(loggers))
+		return -EINVAL;
+
+	if (type >= NF_LOG_TYPE_MAX)
+		return -EINVAL;
+
 	if (pf == NFPROTO_INET) {
 		ret = nf_logger_find_get(NFPROTO_IPV4, type);
 		if (ret < 0)
@@ -248,109 +248,3 @@ int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nf_queue);
-
-static unsigned int nf_iterate(struct sk_buff *skb,
-			       struct nf_hook_state *state,
-			       const struct nf_hook_entries *hooks,
-			       unsigned int *index)
-{
-	const struct nf_hook_entry *hook;
-	unsigned int verdict, i = *index;
-
-	while (i < hooks->num_hook_entries) {
-		hook = &hooks->hooks[i];
-repeat:
-		verdict = nf_hook_entry_hookfn(hook, skb, state);
-		if (verdict != NF_ACCEPT) {
-			*index = i;
-			if (verdict != NF_REPEAT)
-				return verdict;
-			goto repeat;
-		}
-		i++;
-	}
-
-	*index = i;
-	return NF_ACCEPT;
-}
-
-static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
-{
-	switch (pf) {
-#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
-	case NFPROTO_BRIDGE:
-		return rcu_dereference(net->nf.hooks_bridge[hooknum]);
-#endif
-	case NFPROTO_IPV4:
-		return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
-	case NFPROTO_IPV6:
-		return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
-	default:
-		WARN_ON_ONCE(1);
-		return NULL;
-	}
-
-	return NULL;
-}
-
-/* Caller must hold rcu read-side lock */
-void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
-{
-	const struct nf_hook_entry *hook_entry;
-	const struct nf_hook_entries *hooks;
-	struct sk_buff *skb = entry->skb;
-	const struct net *net;
-	unsigned int i;
-	int err;
-	u8 pf;
-
-	net = entry->state.net;
-	pf = entry->state.pf;
-
-	hooks = nf_hook_entries_head(net, pf, entry->state.hook);
-
-	i = entry->hook_index;
-	if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
-		kfree_skb(skb);
-		nf_queue_entry_free(entry);
-		return;
-	}
-
-	hook_entry = &hooks->hooks[i];
-
-	/* Continue traversal iff userspace said ok... */
-	if (verdict == NF_REPEAT)
-		verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);
-
-	if (verdict == NF_ACCEPT) {
-		if (nf_reroute(skb, entry) < 0)
-			verdict = NF_DROP;
-	}
-
-	if (verdict == NF_ACCEPT) {
-next_hook:
-		++i;
-		verdict = nf_iterate(skb, &entry->state, hooks, &i);
-	}
-
-	switch (verdict & NF_VERDICT_MASK) {
-	case NF_ACCEPT:
-	case NF_STOP:
-		local_bh_disable();
-		entry->state.okfn(entry->state.net, entry->state.sk, skb);
-		local_bh_enable();
-		break;
-	case NF_QUEUE:
-		err = nf_queue(skb, &entry->state, i, verdict);
-		if (err == 1)
-			goto next_hook;
-		break;
-	case NF_STOLEN:
-		break;
-	default:
-		kfree_skb(skb);
-	}
-
-	nf_queue_entry_free(entry);
-}
-EXPORT_SYMBOL(nf_reinject);
@@ -225,6 +225,148 @@ find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
 	return entry;
 }
 
+static unsigned int nf_iterate(struct sk_buff *skb,
+			       struct nf_hook_state *state,
+			       const struct nf_hook_entries *hooks,
+			       unsigned int *index)
+{
+	const struct nf_hook_entry *hook;
+	unsigned int verdict, i = *index;
+
+	while (i < hooks->num_hook_entries) {
+		hook = &hooks->hooks[i];
+repeat:
+		verdict = nf_hook_entry_hookfn(hook, skb, state);
+		if (verdict != NF_ACCEPT) {
+			*index = i;
+			if (verdict != NF_REPEAT)
+				return verdict;
+			goto repeat;
+		}
+		i++;
+	}
+
+	*index = i;
+	return NF_ACCEPT;
+}
+
+static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
+{
+	switch (pf) {
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+	case NFPROTO_BRIDGE:
+		return rcu_dereference(net->nf.hooks_bridge[hooknum]);
+#endif
+	case NFPROTO_IPV4:
+		return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
+	case NFPROTO_IPV6:
+		return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
+	default:
+		WARN_ON_ONCE(1);
+		return NULL;
+	}
+
+	return NULL;
+}
+
+static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
+{
+#ifdef CONFIG_INET
+	const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
+
+	if (entry->state.hook == NF_INET_LOCAL_OUT) {
+		const struct iphdr *iph = ip_hdr(skb);
+
+		if (!(iph->tos == rt_info->tos &&
+		      skb->mark == rt_info->mark &&
+		      iph->daddr == rt_info->daddr &&
+		      iph->saddr == rt_info->saddr))
+			return ip_route_me_harder(entry->state.net, entry->state.sk,
+						  skb, RTN_UNSPEC);
+	}
+#endif
+	return 0;
+}
+
+static int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
+{
+	const struct nf_ipv6_ops *v6ops;
+	int ret = 0;
+
+	switch (entry->state.pf) {
+	case AF_INET:
+		ret = nf_ip_reroute(skb, entry);
+		break;
+	case AF_INET6:
+		v6ops = rcu_dereference(nf_ipv6_ops);
+		if (v6ops)
+			ret = v6ops->reroute(skb, entry);
+		break;
+	}
+
+	return ret;
+}
+
+/* caller must hold rcu read-side lock */
+static void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
+{
+	const struct nf_hook_entry *hook_entry;
+	const struct nf_hook_entries *hooks;
+	struct sk_buff *skb = entry->skb;
+	const struct net *net;
+	unsigned int i;
+	int err;
+	u8 pf;
+
+	net = entry->state.net;
+	pf = entry->state.pf;
+
+	hooks = nf_hook_entries_head(net, pf, entry->state.hook);
+
+	i = entry->hook_index;
+	if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
+		kfree_skb_reason(skb, SKB_DROP_REASON_NETFILTER_DROP);
+		nf_queue_entry_free(entry);
+		return;
+	}
+
+	hook_entry = &hooks->hooks[i];
+
+	/* Continue traversal iff userspace said ok... */
+	if (verdict == NF_REPEAT)
+		verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);
+
+	if (verdict == NF_ACCEPT) {
+		if (nf_reroute(skb, entry) < 0)
+			verdict = NF_DROP;
+	}
+
+	if (verdict == NF_ACCEPT) {
+next_hook:
+		++i;
+		verdict = nf_iterate(skb, &entry->state, hooks, &i);
+	}
+
+	switch (verdict & NF_VERDICT_MASK) {
+	case NF_ACCEPT:
+	case NF_STOP:
+		local_bh_disable();
+		entry->state.okfn(entry->state.net, entry->state.sk, skb);
+		local_bh_enable();
+		break;
+	case NF_QUEUE:
+		err = nf_queue(skb, &entry->state, i, verdict);
+		if (err == 1)
+			goto next_hook;
+		break;
+	case NF_STOLEN:
+		break;
+	default:
+		kfree_skb(skb);
+	}
+
+	nf_queue_entry_free(entry);
+}
+
 static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 {
 	const struct nf_ct_hook *ct_hook;
@@ -63,7 +63,6 @@ static int nft_osf_init(const struct nft_ctx *ctx,
 {
 	struct nft_osf *priv = nft_expr_priv(expr);
 	u32 flags;
-	int err;
 	u8 ttl;
 
 	if (!tb[NFTA_OSF_DREG])
@@ -83,13 +82,9 @@ static int nft_osf_init(const struct nft_ctx *ctx,
 		priv->flags = flags;
 	}
 
-	err = nft_parse_register_store(ctx, tb[NFTA_OSF_DREG], &priv->dreg,
+	return nft_parse_register_store(ctx, tb[NFTA_OSF_DREG], &priv->dreg,
 				       NULL, NFT_DATA_VALUE,
 				       NFT_OSF_MAXGENRELEN);
-	if (err < 0)
-		return err;
-
-	return 0;
 }
 
 static int nft_osf_dump(struct sk_buff *skb,
[one large diff is collapsed in this view and not shown]
@@ -70,15 +70,9 @@
 #define NFT_PIPAPO_ALIGN_HEADROOM \
 	(NFT_PIPAPO_ALIGN - ARCH_KMALLOC_MINALIGN)
 #define NFT_PIPAPO_LT_ALIGN(lt)	(PTR_ALIGN((lt), NFT_PIPAPO_ALIGN))
-#define NFT_PIPAPO_LT_ASSIGN(field, x)				\
-	do {							\
-		(field)->lt_aligned = NFT_PIPAPO_LT_ALIGN(x);	\
-		(field)->lt = (x);				\
-	} while (0)
 #else
 #define NFT_PIPAPO_ALIGN_HEADROOM	0
 #define NFT_PIPAPO_LT_ALIGN(lt)		(lt)
-#define NFT_PIPAPO_LT_ASSIGN(field, x)	((field)->lt = (x))
 #endif /* NFT_PIPAPO_ALIGN */
 
 #define nft_pipapo_for_each_field(field, index, match) \
@@ -110,22 +104,20 @@ union nft_pipapo_map_bucket {
 /**
  * struct nft_pipapo_field - Lookup, mapping tables and related data for a field
- * @groups:	Amount of bit groups
  * @rules:	Number of inserted rules
  * @bsize:	Size of each bucket in lookup table, in longs
+ * @rules_alloc: Number of allocated rules, always >= rules
+ * @groups:	Amount of bit groups
  * @bb:		Number of bits grouped together in lookup table buckets
  * @lt:		Lookup table: 'groups' rows of buckets
- * @lt_aligned:	Version of @lt aligned to NFT_PIPAPO_ALIGN bytes
  * @mt:		Mapping table: one bucket per rule
  */
 struct nft_pipapo_field {
-	int groups;
-	unsigned long rules;
-	size_t bsize;
-	int bb;
-#ifdef NFT_PIPAPO_ALIGN
-	unsigned long *lt_aligned;
-#endif
+	unsigned int rules;
+	unsigned int bsize;
+	unsigned int rules_alloc;
+	u8 groups;
+	u8 bb;
 	unsigned long *lt;
 	union nft_pipapo_map_bucket *mt;
 };
@@ -145,15 +137,15 @@ struct nft_pipapo_scratch {
 /**
  * struct nft_pipapo_match - Data used for lookup and matching
  * @field_count:	Amount of fields in set
- * @scratch:		Preallocated per-CPU maps for partial matching results
  * @bsize_max:		Maximum lookup table bucket size of all fields, in longs
+ * @scratch:		Preallocated per-CPU maps for partial matching results
  * @rcu:		Matching data is swapped on commits
  * @f:			Fields, with lookup and mapping tables
  */
 struct nft_pipapo_match {
-	int field_count;
+	u8 field_count;
+	unsigned int bsize_max;
 	struct nft_pipapo_scratch * __percpu *scratch;
-	size_t bsize_max;
 	struct rcu_head rcu;
 	struct nft_pipapo_field f[] __counted_by(field_count);
 };
@@ -186,8 +178,9 @@ struct nft_pipapo_elem {
 	struct nft_set_ext ext;
 };
 
-int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
-		  union nft_pipapo_map_bucket *mt, bool match_only);
+int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
+		  unsigned long *dst,
+		  const union nft_pipapo_map_bucket *mt, bool match_only);
 
 /**
  * pipapo_and_field_buckets_4bit() - Intersect 4-bit buckets
@@ -195,7 +188,7 @@ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
  * @dst: Area to store result
  * @data: Input data selecting table buckets
  */
-static inline void pipapo_and_field_buckets_4bit(struct nft_pipapo_field *f,
+static inline void pipapo_and_field_buckets_4bit(const struct nft_pipapo_field *f,
 						 unsigned long *dst,
 						 const u8 *data)
 {
@@ -223,7 +216,7 @@ static inline void pipapo_and_field_buckets_4bit(struct nft_pipapo_field *f,
  * @dst: Area to store result
  * @data: Input data selecting table buckets
  */
-static inline void pipapo_and_field_buckets_8bit(struct nft_pipapo_field *f,
+static inline void pipapo_and_field_buckets_8bit(const struct nft_pipapo_field *f,
 						 unsigned long *dst,
 						 const u8 *data)
 {
@@ -212,8 +212,9 @@ static int nft_pipapo_avx2_refill(int offset, unsigned long *map,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_4b_2(unsigned long *map, unsigned long *fill,
-					struct nft_pipapo_field *f, int offset,
-					const u8 *pkt, bool first, bool last)
+					const struct nft_pipapo_field *f,
+					int offset, const u8 *pkt,
+					bool first, bool last)
 {
 	int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
 	u8 pg[2] = { pkt[0] >> 4, pkt[0] & 0xf };
@@ -274,8 +275,9 @@ static int nft_pipapo_avx2_lookup_4b_2(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_4b_4(unsigned long *map, unsigned long *fill,
-					struct nft_pipapo_field *f, int offset,
-					const u8 *pkt, bool first, bool last)
+					const struct nft_pipapo_field *f,
+					int offset, const u8 *pkt,
+					bool first, bool last)
 {
 	int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
 	u8 pg[4] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf };
@@ -350,8 +352,9 @@ static int nft_pipapo_avx2_lookup_4b_4(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_4b_8(unsigned long *map, unsigned long *fill,
-					struct nft_pipapo_field *f, int offset,
-					const u8 *pkt, bool first, bool last)
+					const struct nft_pipapo_field *f,
+					int offset, const u8 *pkt,
+					bool first, bool last)
 {
 	u8 pg[8] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
 		     pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
@@ -445,8 +448,9 @@ static int nft_pipapo_avx2_lookup_4b_8(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_4b_12(unsigned long *map, unsigned long *fill,
-					struct nft_pipapo_field *f, int offset,
-					const u8 *pkt, bool first, bool last)
+					const struct nft_pipapo_field *f,
+					int offset, const u8 *pkt,
+					bool first, bool last)
 {
 	u8 pg[12] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
 		      pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
@@ -534,8 +538,9 @@ static int nft_pipapo_avx2_lookup_4b_12(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_4b_32(unsigned long *map, unsigned long *fill,
-					struct nft_pipapo_field *f, int offset,
-					const u8 *pkt, bool first, bool last)
+					const struct nft_pipapo_field *f,
+					int offset, const u8 *pkt,
+					bool first, bool last)
 {
 	u8 pg[32] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
 		      pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
@@ -669,8 +674,9 @@ static int nft_pipapo_avx2_lookup_4b_32(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_8b_1(unsigned long *map, unsigned long *fill,
-					struct nft_pipapo_field *f, int offset,
-					const u8 *pkt, bool first, bool last)
+					const struct nft_pipapo_field *f,
+					int offset, const u8 *pkt,
+					bool first, bool last)
 {
 	int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
 	unsigned long *lt = f->lt, bsize = f->bsize;
@@ -726,8 +732,9 @@ static int nft_pipapo_avx2_lookup_8b_1(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_8b_2(unsigned long *map, unsigned long *fill,
-					struct nft_pipapo_field *f, int offset,
-					const u8 *pkt, bool first, bool last)
+					const struct nft_pipapo_field *f,
+					int offset, const u8 *pkt,
+					bool first, bool last)
 {
 	int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
 	unsigned long *lt = f->lt, bsize = f->bsize;
@@ -790,8 +797,9 @@ static int nft_pipapo_avx2_lookup_8b_2(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_8b_4(unsigned long *map, unsigned long *fill,
-					struct nft_pipapo_field *f, int offset,
-					const u8 *pkt, bool first, bool last)
+					const struct nft_pipapo_field *f,
+					int offset, const u8 *pkt,
+					bool first, bool last)
 {
 	int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
 	unsigned long *lt = f->lt, bsize = f->bsize;
@@ -865,8 +873,9 @@ static int nft_pipapo_avx2_lookup_8b_4(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
-					struct nft_pipapo_field *f, int offset,
-					const u8 *pkt, bool first, bool last)
+					const struct nft_pipapo_field *f,
+					int offset, const u8 *pkt,
+					bool first, bool last)
 {
 	int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
 	unsigned long *lt = f->lt, bsize = f->bsize;
@@ -950,8 +959,9 @@ static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
-					struct nft_pipapo_field *f, int offset,
-					const u8 *pkt, bool first, bool last)
+					const struct nft_pipapo_field *f,
+					int offset, const u8 *pkt,
+					bool first, bool last)
 {
 	int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
 	unsigned long *lt = f->lt, bsize = f->bsize;
@@ -1042,8 +1052,9 @@ static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
  * word index to be checked next (i.e. first filled word).
  */
 static int nft_pipapo_avx2_lookup_slow(unsigned long *map, unsigned long *fill,
-					struct nft_pipapo_field *f, int offset,
-					const u8 *pkt, bool first, bool last)
+					const struct nft_pipapo_field *f,
+					int offset, const u8 *pkt,
+					bool first, bool last)
 {
 	unsigned long bsize = f->bsize;
 	int i, ret = -1, b;
@@ -1119,9 +1130,9 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
 	struct nft_pipapo *priv = nft_set_priv(set);
 	struct nft_pipapo_scratch *scratch;
 	u8 genmask = nft_genmask_cur(net);
+	const struct nft_pipapo_match *m;
+	const struct nft_pipapo_field *f;
 	const u8 *rp = (const u8 *)key;
-	struct nft_pipapo_match *m;
-	struct nft_pipapo_field *f;
 	unsigned long *res, *fill;
 	bool map_index;
 	int i, ret = 0;
@@ -179,43 +179,6 @@ int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
 }
 EXPORT_SYMBOL_GPL(nf_route);
 
-static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
-{
-#ifdef CONFIG_INET
-	const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
-
-	if (entry->state.hook == NF_INET_LOCAL_OUT) {
-		const struct iphdr *iph = ip_hdr(skb);
-
-		if (!(iph->tos == rt_info->tos &&
-		      skb->mark == rt_info->mark &&
-		      iph->daddr == rt_info->daddr &&
-		      iph->saddr == rt_info->saddr))
-			return ip_route_me_harder(entry->state.net, entry->state.sk,
-						  skb, RTN_UNSPEC);
-	}
-#endif
-	return 0;
-}
-
-int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
-{
-	const struct nf_ipv6_ops *v6ops;
-	int ret = 0;
-
-	switch (entry->state.pf) {
-	case AF_INET:
-		ret = nf_ip_reroute(skb, entry);
-		break;
-	case AF_INET6:
-		v6ops = rcu_dereference(nf_ipv6_ops);
-		if (v6ops)
-			ret = v6ops->reroute(skb, entry);
-		break;
-	}
-
-	return ret;
-}
-
 /* Only get and check the lengths, not do any hop-by-hop stuff. */
 int nf_ip6_check_hbh_len(struct sk_buff *skb, u32 *plen)
 {
@@ -1142,7 +1142,8 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
 	if (target->compat_from_user)
 		target->compat_from_user(t->data, ct->data);
 	else
-		memcpy(t->data, ct->data, tsize - sizeof(*ct));
+		unsafe_memcpy(t->data, ct->data, tsize - sizeof(*ct),
+			      /* UAPI 0-sized destination */);
 
 	tsize += off;
 	t->u.user.target_size = tsize;