Commit 14d3692f authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
The following patchset contains relevant updates for the Netfilter
tree; they are:

* Enhancements for ipset: add the counter extension for sets; this
  information can be used from the iptables set match to change the
  matching behaviour. For this, Jozsef had to add the extension
  infrastructure and move the existing timeout support onto it.
  This also includes a change in net/sched/em_ipset to adapt it to
  the new extension structure.

* Enhancements for performance boosting in nfnetlink_queue: add new
  configuration flags that allow user-space to receive big packets (GRO)
  and to disable checksum calculation. These were proposed by Eric
  Dumazet during the Netfilter Workshop 2013 in Copenhagen, and Florian
  Westphal was kind enough to find the time to materialize the proposal.

* A sparse fix from Simon: he noticed the issue in the SCTP NAT helper,
  and the fix required a change in the interface of sctp_end_cksum.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 674853b2 eee1d5a1
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
 *                         Patrick Schaaf <bof@bof.de>
 *                         Martin Josefsson <gandalf@wlug.westbo.se>
- * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
@@ -47,10 +47,36 @@ enum ip_set_feature {
	IPSET_DUMP_LAST = (1 << IPSET_DUMP_LAST_FLAG),
};
/* Set extensions */
enum ip_set_extension {
IPSET_EXT_NONE = 0,
IPSET_EXT_BIT_TIMEOUT = 1,
IPSET_EXT_TIMEOUT = (1 << IPSET_EXT_BIT_TIMEOUT),
IPSET_EXT_BIT_COUNTER = 2,
IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER),
};
/* Extension offsets */
enum ip_set_offset {
IPSET_OFFSET_TIMEOUT = 0,
IPSET_OFFSET_COUNTER,
IPSET_OFFSET_MAX,
};
#define SET_WITH_TIMEOUT(s) ((s)->extensions & IPSET_EXT_TIMEOUT)
#define SET_WITH_COUNTER(s) ((s)->extensions & IPSET_EXT_COUNTER)
struct ip_set_ext {
unsigned long timeout;
u64 packets;
u64 bytes;
};
struct ip_set;

typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
-			   u32 timeout, u32 flags);
+			   const struct ip_set_ext *ext,
+			   struct ip_set_ext *mext, u32 cmdflags);

/* Kernel API function options */
struct ip_set_adt_opt {
@@ -58,7 +84,7 @@ struct ip_set_adt_opt {
	u8 dim;			/* Dimension of match/target */
	u8 flags;		/* Direction and negation flags */
	u32 cmdflags;		/* Command-like flags */
-	u32 timeout;		/* Timeout value */
+	struct ip_set_ext ext;	/* Extensions */
};

/* Set type, variant-specific part */
@@ -69,7 +95,7 @@ struct ip_set_type_variant {
	 * positive for matching element */
	int (*kadt)(struct ip_set *set, const struct sk_buff *skb,
		    const struct xt_action_param *par,
-		    enum ipset_adt adt, const struct ip_set_adt_opt *opt);
+		    enum ipset_adt adt, struct ip_set_adt_opt *opt);

	/* Userspace: test/add/del entries
	 * returns negative error code,
@@ -151,10 +177,76 @@ struct ip_set {
	u8 family;
	/* The type revision */
	u8 revision;
	/* Extensions */
	u8 extensions;
	/* The type specific data */
	void *data;
};
struct ip_set_counter {
atomic64_t bytes;
atomic64_t packets;
};
static inline void
ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
{
atomic64_add((long long)bytes, &(counter)->bytes);
}
static inline void
ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
{
atomic64_add((long long)packets, &(counter)->packets);
}
static inline u64
ip_set_get_bytes(const struct ip_set_counter *counter)
{
return (u64)atomic64_read(&(counter)->bytes);
}
static inline u64
ip_set_get_packets(const struct ip_set_counter *counter)
{
return (u64)atomic64_read(&(counter)->packets);
}
static inline void
ip_set_update_counter(struct ip_set_counter *counter,
const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
{
if (ext->packets != ULLONG_MAX &&
!(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
ip_set_add_bytes(ext->bytes, counter);
ip_set_add_packets(ext->packets, counter);
}
if (flags & IPSET_FLAG_MATCH_COUNTERS) {
mext->packets = ip_set_get_packets(counter);
mext->bytes = ip_set_get_bytes(counter);
}
}
static inline bool
ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter)
{
return nla_put_net64(skb, IPSET_ATTR_BYTES,
cpu_to_be64(ip_set_get_bytes(counter))) ||
nla_put_net64(skb, IPSET_ATTR_PACKETS,
cpu_to_be64(ip_set_get_packets(counter)));
}
static inline void
ip_set_init_counter(struct ip_set_counter *counter,
const struct ip_set_ext *ext)
{
if (ext->bytes != ULLONG_MAX)
atomic64_set(&(counter)->bytes, (long long)(ext->bytes));
if (ext->packets != ULLONG_MAX)
atomic64_set(&(counter)->packets, (long long)(ext->packets));
}
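For orientation, here is a minimal sketch (not part of the patch) of how these counter helpers are meant to compose in a set type's test path; the element type my_elem and the function my_test are hypothetical names used only for illustration.

/* Illustrative sketch only; my_elem and my_test are hypothetical. */
struct my_elem {
	struct ip_set_counter counter;
};

static int
my_test(struct my_elem *e, const struct ip_set_ext *ext,
	struct ip_set_ext *mext, u32 cmdflags)
{
	/* Kernel packet path (IP_SET_INIT_KEXT): ext->packets == 1 and
	 * ext->bytes == skb->len, so the per-element counters are bumped
	 * unless IPSET_FLAG_SKIP_COUNTER_UPDATE is set.
	 * Userspace test path (IP_SET_INIT_UEXT): both fields are
	 * ULLONG_MAX, so ip_set_update_counter() leaves them untouched. */
	ip_set_update_counter(&e->counter, ext, mext, cmdflags);

	/* With IPSET_FLAG_MATCH_COUNTERS, mext now carries the current
	 * packet/byte counts for the caller (e.g. the set match) to compare. */
	return 1;
}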
/* register and unregister set references */
extern ip_set_id_t ip_set_get_byname(const char *name, struct ip_set **set);
extern void ip_set_put_byindex(ip_set_id_t index);
@@ -167,19 +259,21 @@ extern void ip_set_nfnl_put(ip_set_id_t index);
extern int ip_set_add(ip_set_id_t id, const struct sk_buff *skb,
		      const struct xt_action_param *par,
-		      const struct ip_set_adt_opt *opt);
+		      struct ip_set_adt_opt *opt);

extern int ip_set_del(ip_set_id_t id, const struct sk_buff *skb,
		      const struct xt_action_param *par,
-		      const struct ip_set_adt_opt *opt);
+		      struct ip_set_adt_opt *opt);

extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb,
		       const struct xt_action_param *par,
-		       const struct ip_set_adt_opt *opt);
+		       struct ip_set_adt_opt *opt);

/* Utility functions */
extern void *ip_set_alloc(size_t size);
extern void ip_set_free(void *members);
extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext *ext);
static inline int
ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
@@ -200,6 +294,14 @@ ip_set_eexist(int ret, u32 flags)
	return ret == -IPSET_ERR_EXIST && (flags & IPSET_FLAG_EXIST);
}
/* Match elements marked with nomatch */
static inline bool
ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt)
{
return adt == IPSET_TEST &&
ret == -ENOTEMPTY && ((flags >> 16) & IPSET_FLAG_NOMATCH);
}
/* Check the NLA_F_NET_BYTEORDER flag */
static inline bool
ip_set_attr_netorder(struct nlattr *tb[], int type)
@@ -284,4 +386,14 @@ bitmap_bytes(u32 a, u32 b)
	return 4 * ((((b - a + 8) / 8) + 3) / 4);
}
#include <linux/netfilter/ipset/ip_set_timeout.h>
#define IP_SET_INIT_KEXT(skb, opt, map) \
{ .bytes = (skb)->len, .packets = 1, \
.timeout = ip_set_adt_opt_timeout(opt, map) }
#define IP_SET_INIT_UEXT(map) \
{ .bytes = ULLONG_MAX, .packets = ULLONG_MAX, \
.timeout = (map)->timeout }
#endif /*_IP_SET_H */
@@ -5,6 +5,12 @@
#define IPSET_BITMAP_MAX_RANGE	0x0000FFFF
enum {
IPSET_ADD_FAILED = 1,
IPSET_ADD_STORE_PLAIN_TIMEOUT,
IPSET_ADD_START_STORED_TIMEOUT,
};
/* Common functions */
static inline u32
...
#ifndef _IP_SET_TIMEOUT_H
#define _IP_SET_TIMEOUT_H

-/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
@@ -17,13 +17,14 @@
 #define IPSET_GC_PERIOD(timeout) \
 	((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1)

-/* Set is defined without timeout support: timeout value may be 0 */
-#define IPSET_NO_TIMEOUT	UINT_MAX
-
-#define with_timeout(timeout)	((timeout) != IPSET_NO_TIMEOUT)
-
-#define opt_timeout(opt, map) \
-	(with_timeout((opt)->timeout) ? (opt)->timeout : (map)->timeout)
+/* Entry is set with no timeout value */
+#define IPSET_ELEM_PERMANENT	0
+
+/* Set is defined with timeout support: timeout value may be 0 */
+#define IPSET_NO_TIMEOUT	UINT_MAX
+
+#define ip_set_adt_opt_timeout(opt, map) \
+	((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (map)->timeout)

static inline unsigned int
ip_set_timeout_uget(struct nlattr *tb)
@@ -38,61 +39,6 @@ ip_set_timeout_uget(struct nlattr *tb)
	return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout;
}
#ifdef IP_SET_BITMAP_TIMEOUT
/* Bitmap specific timeout constants and macros for the entries */
/* Bitmap entry is unset */
#define IPSET_ELEM_UNSET 0
/* Bitmap entry is set with no timeout value */
#define IPSET_ELEM_PERMANENT (UINT_MAX/2)
static inline bool
ip_set_timeout_test(unsigned long timeout)
{
return timeout != IPSET_ELEM_UNSET &&
(timeout == IPSET_ELEM_PERMANENT ||
time_is_after_jiffies(timeout));
}
static inline bool
ip_set_timeout_expired(unsigned long timeout)
{
return timeout != IPSET_ELEM_UNSET &&
timeout != IPSET_ELEM_PERMANENT &&
time_is_before_jiffies(timeout);
}
static inline unsigned long
ip_set_timeout_set(u32 timeout)
{
unsigned long t;
if (!timeout)
return IPSET_ELEM_PERMANENT;
t = msecs_to_jiffies(timeout * 1000) + jiffies;
if (t == IPSET_ELEM_UNSET || t == IPSET_ELEM_PERMANENT)
/* Bingo! */
t++;
return t;
}
static inline u32
ip_set_timeout_get(unsigned long timeout)
{
return timeout == IPSET_ELEM_PERMANENT ? 0 :
jiffies_to_msecs(timeout - jiffies)/1000;
}
#else
/* Hash specific timeout constants and macros for the entries */
/* Hash entry is set with no timeout value */
#define IPSET_ELEM_PERMANENT 0
static inline bool
ip_set_timeout_test(unsigned long timeout)
{
@@ -101,36 +47,32 @@ ip_set_timeout_test(unsigned long timeout)
}

static inline bool
-ip_set_timeout_expired(unsigned long timeout)
+ip_set_timeout_expired(unsigned long *timeout)
{
-	return timeout != IPSET_ELEM_PERMANENT &&
-	       time_is_before_jiffies(timeout);
+	return *timeout != IPSET_ELEM_PERMANENT &&
+	       time_is_before_jiffies(*timeout);
}

-static inline unsigned long
-ip_set_timeout_set(u32 timeout)
+static inline void
+ip_set_timeout_set(unsigned long *timeout, u32 t)
{
-	unsigned long t;
-
-	if (!timeout)
-		return IPSET_ELEM_PERMANENT;
-
-	t = msecs_to_jiffies(timeout * 1000) + jiffies;
-	if (t == IPSET_ELEM_PERMANENT)
-		/* Bingo! :-) */
-		t++;
-
-	return t;
+	if (!t) {
+		*timeout = IPSET_ELEM_PERMANENT;
+		return;
+	}
+
+	*timeout = msecs_to_jiffies(t * 1000) + jiffies;
+	if (*timeout == IPSET_ELEM_PERMANENT)
+		/* Bingo! :-) */
+		(*timeout)--;
}

static inline u32
-ip_set_timeout_get(unsigned long timeout)
+ip_set_timeout_get(unsigned long *timeout)
{
-	return timeout == IPSET_ELEM_PERMANENT ? 0 :
-		jiffies_to_msecs(timeout - jiffies)/1000;
+	return *timeout == IPSET_ELEM_PERMANENT ? 0 :
+		jiffies_to_msecs(*timeout - jiffies)/1000;
}

-#endif /* ! IP_SET_BITMAP_TIMEOUT */
-
#endif /* __KERNEL__ */

#endif /* _IP_SET_TIMEOUT_H */
@@ -41,4 +41,13 @@ do { \
	to = from | ~ip_set_hostmask(cidr); \
} while (0)
static inline void
ip6_netmask(union nf_inet_addr *ip, u8 prefix)
{
ip->ip6[0] &= ip_set_netmask6(prefix)[0];
ip->ip6[1] &= ip_set_netmask6(prefix)[1];
ip->ip6[2] &= ip_set_netmask6(prefix)[2];
ip->ip6[3] &= ip_set_netmask6(prefix)[3];
}
#endif /*_PFXLEN_H */
@@ -9,10 +9,13 @@ struct nf_queue_entry {
	struct nf_hook_ops	*elem;
	u_int8_t		pf;
	u16			size; /* sizeof(entry) + saved route keys */
	unsigned int		hook;
	struct net_device	*indev;
	struct net_device	*outdev;
	int			(*okfn)(struct sk_buff *);
-	/* extra space to store route keys */
};

#define nf_queue_entry_reroute(x) ((void *)x + sizeof(struct nf_queue_entry))
@@ -27,4 +30,7 @@ void nf_register_queue_handler(const struct nf_queue_handler *qh);
void nf_unregister_queue_handler(void);
extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);

bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
void nf_queue_entry_release_refs(struct nf_queue_entry *entry);

#endif /* _NF_QUEUE_H */
@@ -77,7 +77,7 @@ static inline __u32 sctp_update_cksum(__u8 *buffer, __u16 length, __u32 crc32)
	return sctp_crc32c(crc32, buffer, length);
}

-static inline __le32 sctp_end_cksum(__be32 crc32)
+static inline __le32 sctp_end_cksum(__u32 crc32)
{
	return cpu_to_le32(~crc32);
}
...@@ -108,6 +108,8 @@ enum { ...@@ -108,6 +108,8 @@ enum {
IPSET_ATTR_CIDR2, IPSET_ATTR_CIDR2,
IPSET_ATTR_IP2_TO, IPSET_ATTR_IP2_TO,
IPSET_ATTR_IFACE, IPSET_ATTR_IFACE,
IPSET_ATTR_BYTES,
IPSET_ATTR_PACKETS,
__IPSET_ATTR_ADT_MAX, __IPSET_ATTR_ADT_MAX,
}; };
#define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1) #define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1)
...@@ -137,12 +139,13 @@ enum ipset_errno { ...@@ -137,12 +139,13 @@ enum ipset_errno {
IPSET_ERR_REFERENCED, IPSET_ERR_REFERENCED,
IPSET_ERR_IPADDR_IPV4, IPSET_ERR_IPADDR_IPV4,
IPSET_ERR_IPADDR_IPV6, IPSET_ERR_IPADDR_IPV6,
IPSET_ERR_COUNTER,
/* Type specific error codes */ /* Type specific error codes */
IPSET_ERR_TYPE_SPECIFIC = 4352, IPSET_ERR_TYPE_SPECIFIC = 4352,
}; };
/* Flags at command level */ /* Flags at command level or match/target flags, lower half of cmdattrs*/
enum ipset_cmd_flags { enum ipset_cmd_flags {
IPSET_FLAG_BIT_EXIST = 0, IPSET_FLAG_BIT_EXIST = 0,
IPSET_FLAG_EXIST = (1 << IPSET_FLAG_BIT_EXIST), IPSET_FLAG_EXIST = (1 << IPSET_FLAG_BIT_EXIST),
...@@ -150,10 +153,20 @@ enum ipset_cmd_flags { ...@@ -150,10 +153,20 @@ enum ipset_cmd_flags {
IPSET_FLAG_LIST_SETNAME = (1 << IPSET_FLAG_BIT_LIST_SETNAME), IPSET_FLAG_LIST_SETNAME = (1 << IPSET_FLAG_BIT_LIST_SETNAME),
IPSET_FLAG_BIT_LIST_HEADER = 2, IPSET_FLAG_BIT_LIST_HEADER = 2,
IPSET_FLAG_LIST_HEADER = (1 << IPSET_FLAG_BIT_LIST_HEADER), IPSET_FLAG_LIST_HEADER = (1 << IPSET_FLAG_BIT_LIST_HEADER),
IPSET_FLAG_CMD_MAX = 15, /* Lower half */ IPSET_FLAG_BIT_SKIP_COUNTER_UPDATE = 3,
IPSET_FLAG_SKIP_COUNTER_UPDATE =
(1 << IPSET_FLAG_BIT_SKIP_COUNTER_UPDATE),
IPSET_FLAG_BIT_SKIP_SUBCOUNTER_UPDATE = 4,
IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE =
(1 << IPSET_FLAG_BIT_SKIP_SUBCOUNTER_UPDATE),
IPSET_FLAG_BIT_MATCH_COUNTERS = 5,
IPSET_FLAG_MATCH_COUNTERS = (1 << IPSET_FLAG_BIT_MATCH_COUNTERS),
IPSET_FLAG_BIT_RETURN_NOMATCH = 7,
IPSET_FLAG_RETURN_NOMATCH = (1 << IPSET_FLAG_BIT_RETURN_NOMATCH),
IPSET_FLAG_CMD_MAX = 15,
}; };
/* Flags at CADT attribute level */ /* Flags at CADT attribute level, upper half of cmdattrs */
enum ipset_cadt_flags { enum ipset_cadt_flags {
IPSET_FLAG_BIT_BEFORE = 0, IPSET_FLAG_BIT_BEFORE = 0,
IPSET_FLAG_BEFORE = (1 << IPSET_FLAG_BIT_BEFORE), IPSET_FLAG_BEFORE = (1 << IPSET_FLAG_BIT_BEFORE),
...@@ -161,7 +174,9 @@ enum ipset_cadt_flags { ...@@ -161,7 +174,9 @@ enum ipset_cadt_flags {
IPSET_FLAG_PHYSDEV = (1 << IPSET_FLAG_BIT_PHYSDEV), IPSET_FLAG_PHYSDEV = (1 << IPSET_FLAG_BIT_PHYSDEV),
IPSET_FLAG_BIT_NOMATCH = 2, IPSET_FLAG_BIT_NOMATCH = 2,
IPSET_FLAG_NOMATCH = (1 << IPSET_FLAG_BIT_NOMATCH), IPSET_FLAG_NOMATCH = (1 << IPSET_FLAG_BIT_NOMATCH),
IPSET_FLAG_CADT_MAX = 15, /* Upper half */ IPSET_FLAG_BIT_WITH_COUNTERS = 3,
IPSET_FLAG_WITH_COUNTERS = (1 << IPSET_FLAG_BIT_WITH_COUNTERS),
IPSET_FLAG_CADT_MAX = 15,
}; };
/* Commands with settype-specific attributes */ /* Commands with settype-specific attributes */
...@@ -190,6 +205,7 @@ enum ip_set_dim { ...@@ -190,6 +205,7 @@ enum ip_set_dim {
* If changed, new revision of iptables match/target is required. * If changed, new revision of iptables match/target is required.
*/ */
IPSET_DIM_MAX = 6, IPSET_DIM_MAX = 6,
/* Backward compatibility: set match revision 2 */
IPSET_BIT_RETURN_NOMATCH = 7, IPSET_BIT_RETURN_NOMATCH = 7,
}; };
...@@ -202,6 +218,18 @@ enum ip_set_kopt { ...@@ -202,6 +218,18 @@ enum ip_set_kopt {
IPSET_RETURN_NOMATCH = (1 << IPSET_BIT_RETURN_NOMATCH), IPSET_RETURN_NOMATCH = (1 << IPSET_BIT_RETURN_NOMATCH),
}; };
enum {
IPSET_COUNTER_NONE = 0,
IPSET_COUNTER_EQ,
IPSET_COUNTER_NE,
IPSET_COUNTER_LT,
IPSET_COUNTER_GT,
};
struct ip_set_counter_match {
__u8 op;
__u64 value;
};
/* Interface to iptables/ip6tables */ /* Interface to iptables/ip6tables */
......
...@@ -45,6 +45,7 @@ enum nfqnl_attr_type { ...@@ -45,6 +45,7 @@ enum nfqnl_attr_type {
NFQA_CT, /* nf_conntrack_netlink.h */ NFQA_CT, /* nf_conntrack_netlink.h */
NFQA_CT_INFO, /* enum ip_conntrack_info */ NFQA_CT_INFO, /* enum ip_conntrack_info */
NFQA_CAP_LEN, /* __u32 length of captured packet */ NFQA_CAP_LEN, /* __u32 length of captured packet */
NFQA_SKB_INFO, /* __u32 skb meta information */
__NFQA_MAX __NFQA_MAX
}; };
@@ -96,6 +97,13 @@ enum nfqnl_attr_config {
/* Flags for NFQA_CFG_FLAGS */
#define NFQA_CFG_F_FAIL_OPEN			(1 << 0)
#define NFQA_CFG_F_CONNTRACK			(1 << 1)
-#define NFQA_CFG_F_MAX				(1 << 2)
+#define NFQA_CFG_F_GSO				(1 << 2)
+#define NFQA_CFG_F_MAX				(1 << 3)
/* flags for NFQA_SKB_INFO */
/* packet appears to have wrong checksums, but they are ok */
#define NFQA_SKB_CSUMNOTREADY (1 << 0)
/* packet is GSO (i.e., exceeds device mtu) */
#define NFQA_SKB_GSO (1 << 1)
#endif /* _NFNETLINK_QUEUE_H */
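As a rough illustration of how user-space could opt in to the new queue flag (a sketch, not part of this patchset; it assumes libnetfilter_queue's nfq_set_queue_flags() helper is available in the library version in use):

/* Sketch: request GSO passthrough on an nfnetlink_queue handle. */
#include <stdint.h>
#include <libnetfilter_queue/libnetfilter_queue.h>
#include <linux/netfilter/nfnetlink_queue.h>

static int enable_big_packets(struct nfq_q_handle *qh)
{
	uint32_t mask  = NFQA_CFG_F_GSO;
	uint32_t flags = NFQA_CFG_F_GSO;

	/* With NFQA_CFG_F_GSO set, the kernel no longer segments GSO
	 * packets before queueing and marks them with NFQA_SKB_GSO;
	 * packets with unfinished checksums carry NFQA_SKB_CSUMNOTREADY. */
	return nfq_set_queue_flags(qh, mask, flags);
}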
...@@ -62,4 +62,13 @@ struct xt_set_info_target_v2 { ...@@ -62,4 +62,13 @@ struct xt_set_info_target_v2 {
__u32 timeout; __u32 timeout;
}; };
/* Revision 3 match */
struct xt_set_info_match_v3 {
struct xt_set_info match_set;
struct ip_set_counter_match packets;
struct ip_set_counter_match bytes;
__u32 flags;
};
#endif /*_XT_SET_H*/
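To make the intent of the revision 3 match concrete, here is a hedged sketch of how a counter constraint from struct ip_set_counter_match could be evaluated against the packet/byte counts returned via IPSET_FLAG_MATCH_COUNTERS; the helper name match_counter is an assumption for illustration, not code from this patchset.

/* Sketch only: compare a returned counter value (mext.packets or
 * mext.bytes) against the user-supplied constraint. */
static bool
match_counter(u64 counter, const struct ip_set_counter_match *info)
{
	switch (info->op) {
	case IPSET_COUNTER_NONE:
		return true;			/* no constraint given */
	case IPSET_COUNTER_EQ:
		return counter == info->value;
	case IPSET_COUNTER_NE:
		return counter != info->value;
	case IPSET_COUNTER_LT:
		return counter < info->value;
	case IPSET_COUNTER_GT:
		return counter > info->value;
	}
	return false;
}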
/* Copyright (C) 2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __IP_SET_BITMAP_IP_GEN_H
#define __IP_SET_BITMAP_IP_GEN_H
#define CONCAT(a, b) a##b
#define TOKEN(a,b) CONCAT(a, b)
#define mtype_do_test TOKEN(MTYPE, _do_test)
#define mtype_gc_test TOKEN(MTYPE, _gc_test)
#define mtype_is_filled TOKEN(MTYPE, _is_filled)
#define mtype_do_add TOKEN(MTYPE, _do_add)
#define mtype_do_del TOKEN(MTYPE, _do_del)
#define mtype_do_list TOKEN(MTYPE, _do_list)
#define mtype_do_head TOKEN(MTYPE, _do_head)
#define mtype_adt_elem TOKEN(MTYPE, _adt_elem)
#define mtype_add_timeout TOKEN(MTYPE, _add_timeout)
#define mtype_gc_init TOKEN(MTYPE, _gc_init)
#define mtype_kadt TOKEN(MTYPE, _kadt)
#define mtype_uadt TOKEN(MTYPE, _uadt)
#define mtype_destroy TOKEN(MTYPE, _destroy)
#define mtype_flush TOKEN(MTYPE, _flush)
#define mtype_head TOKEN(MTYPE, _head)
#define mtype_same_set TOKEN(MTYPE, _same_set)
#define mtype_elem TOKEN(MTYPE, _elem)
#define mtype_test TOKEN(MTYPE, _test)
#define mtype_add TOKEN(MTYPE, _add)
#define mtype_del TOKEN(MTYPE, _del)
#define mtype_list TOKEN(MTYPE, _list)
#define mtype_gc TOKEN(MTYPE, _gc)
#define mtype MTYPE
#define ext_timeout(e, m) \
(unsigned long *)((e) + (m)->offset[IPSET_OFFSET_TIMEOUT])
#define ext_counter(e, m) \
(struct ip_set_counter *)((e) + (m)->offset[IPSET_OFFSET_COUNTER])
#define get_ext(map, id) ((map)->extensions + (map)->dsize * (id))
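For readers new to the offset scheme, a small hypothetical helper (not in the patch) showing how an element's extension fields are located; it assumes a set created with both timeout and counter support, where the offsets were filled from struct bitmap_ipct_elem in the create function.

/* Sketch: how dsize and offset[] resolve an element's extensions. */
static inline struct ip_set_counter *
example_get_counter(struct mtype *map, u16 id)
{
	/* Each element owns map->dsize bytes inside map->extensions ... */
	void *x = get_ext(map, id);

	/* ... and the counter lives at the per-set recorded offset,
	 * e.g. offsetof(struct bitmap_ipct_elem, counter). */
	return ext_counter(x, map);
}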
static void
mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
{
struct mtype *map = set->data;
init_timer(&map->gc);
map->gc.data = (unsigned long) set;
map->gc.function = gc;
map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
add_timer(&map->gc);
}
static void
mtype_destroy(struct ip_set *set)
{
struct mtype *map = set->data;
if (SET_WITH_TIMEOUT(set))
del_timer_sync(&map->gc);
ip_set_free(map->members);
if (map->dsize)
ip_set_free(map->extensions);
kfree(map);
set->data = NULL;
}
static void
mtype_flush(struct ip_set *set)
{
struct mtype *map = set->data;
memset(map->members, 0, map->memsize);
}
static int
mtype_head(struct ip_set *set, struct sk_buff *skb)
{
const struct mtype *map = set->data;
struct nlattr *nested;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
goto nla_put_failure;
if (mtype_do_head(skb, map) ||
nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) +
map->memsize +
map->dsize * map->elements)) ||
(SET_WITH_TIMEOUT(set) &&
nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) ||
(SET_WITH_COUNTER(set) &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS,
htonl(IPSET_FLAG_WITH_COUNTERS))))
goto nla_put_failure;
ipset_nest_end(skb, nested);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static int
mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
{
struct mtype *map = set->data;
const struct mtype_adt_elem *e = value;
void *x = get_ext(map, e->id);
int ret = mtype_do_test(e, map);
if (ret <= 0)
return ret;
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(x, map)))
return 0;
if (SET_WITH_COUNTER(set))
ip_set_update_counter(ext_counter(x, map), ext, mext, flags);
return 1;
}
static int
mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
{
struct mtype *map = set->data;
const struct mtype_adt_elem *e = value;
void *x = get_ext(map, e->id);
int ret = mtype_do_add(e, map, flags);
if (ret == IPSET_ADD_FAILED) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(x, map)))
ret = 0;
else if (!(flags & IPSET_FLAG_EXIST))
return -IPSET_ERR_EXIST;
}
if (SET_WITH_TIMEOUT(set))
#ifdef IP_SET_BITMAP_STORED_TIMEOUT
mtype_add_timeout(ext_timeout(x, map), e, ext, map, ret);
#else
ip_set_timeout_set(ext_timeout(x, map), ext->timeout);
#endif
if (SET_WITH_COUNTER(set))
ip_set_init_counter(ext_counter(x, map), ext);
return 0;
}
static int
mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
{
struct mtype *map = set->data;
const struct mtype_adt_elem *e = value;
const void *x = get_ext(map, e->id);
if (mtype_do_del(e, map) ||
(SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(x, map))))
return -IPSET_ERR_EXIST;
return 0;
}
static int
mtype_list(const struct ip_set *set,
struct sk_buff *skb, struct netlink_callback *cb)
{
struct mtype *map = set->data;
struct nlattr *adt, *nested;
void *x;
u32 id, first = cb->args[2];
adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
if (!adt)
return -EMSGSIZE;
for (; cb->args[2] < map->elements; cb->args[2]++) {
id = cb->args[2];
x = get_ext(map, id);
if (!test_bit(id, map->members) ||
(SET_WITH_TIMEOUT(set) &&
#ifdef IP_SET_BITMAP_STORED_TIMEOUT
mtype_is_filled((const struct mtype_elem *) x) &&
#endif
ip_set_timeout_expired(ext_timeout(x, map))))
continue;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested) {
if (id == first) {
nla_nest_cancel(skb, adt);
return -EMSGSIZE;
} else
goto nla_put_failure;
}
if (mtype_do_list(skb, map, id))
goto nla_put_failure;
if (SET_WITH_TIMEOUT(set)) {
#ifdef IP_SET_BITMAP_STORED_TIMEOUT
if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_stored(map, id,
ext_timeout(x, map)))))
goto nla_put_failure;
#else
if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(
ext_timeout(x, map)))))
goto nla_put_failure;
#endif
}
if (SET_WITH_COUNTER(set) &&
ip_set_put_counter(skb, ext_counter(x, map)))
goto nla_put_failure;
ipset_nest_end(skb, nested);
}
ipset_nest_end(skb, adt);
/* Set listing finished */
cb->args[2] = 0;
return 0;
nla_put_failure:
nla_nest_cancel(skb, nested);
ipset_nest_end(skb, adt);
if (unlikely(id == first)) {
cb->args[2] = 0;
return -EMSGSIZE;
}
return 0;
}
static void
mtype_gc(unsigned long ul_set)
{
struct ip_set *set = (struct ip_set *) ul_set;
struct mtype *map = set->data;
const void *x;
u32 id;
/* We run parallel with other readers (test element)
* but adding/deleting new entries is locked out */
read_lock_bh(&set->lock);
for (id = 0; id < map->elements; id++)
if (mtype_gc_test(id, map)) {
x = get_ext(map, id);
if (ip_set_timeout_expired(ext_timeout(x, map)))
clear_bit(id, map->members);
}
read_unlock_bh(&set->lock);
map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
add_timer(&map->gc);
}
static const struct ip_set_type_variant mtype = {
.kadt = mtype_kadt,
.uadt = mtype_uadt,
.adt = {
[IPSET_ADD] = mtype_add,
[IPSET_DEL] = mtype_del,
[IPSET_TEST] = mtype_test,
},
.destroy = mtype_destroy,
.flush = mtype_flush,
.head = mtype_head,
.list = mtype_list,
.same_set = mtype_same_set,
};
#endif /* __IP_SET_BITMAP_IP_GEN_H */
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu> /* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
* Patrick Schaaf <bof@bof.de> * Patrick Schaaf <bof@bof.de>
* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
...@@ -24,31 +24,37 @@ ...@@ -24,31 +24,37 @@
#include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_bitmap.h> #include <linux/netfilter/ipset/ip_set_bitmap.h>
#define IP_SET_BITMAP_TIMEOUT
#include <linux/netfilter/ipset/ip_set_timeout.h>
#define REVISION_MIN 0 #define REVISION_MIN 0
#define REVISION_MAX 0 #define REVISION_MAX 1 /* Counter support added */
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
IP_SET_MODULE_DESC("bitmap:ip", REVISION_MIN, REVISION_MAX); IP_SET_MODULE_DESC("bitmap:ip", REVISION_MIN, REVISION_MAX);
MODULE_ALIAS("ip_set_bitmap:ip"); MODULE_ALIAS("ip_set_bitmap:ip");
#define MTYPE bitmap_ip
/* Type structure */ /* Type structure */
struct bitmap_ip { struct bitmap_ip {
void *members; /* the set members */ void *members; /* the set members */
void *extensions; /* data extensions */
u32 first_ip; /* host byte order, included in range */ u32 first_ip; /* host byte order, included in range */
u32 last_ip; /* host byte order, included in range */ u32 last_ip; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */ u32 elements; /* number of max elements in the set */
u32 hosts; /* number of hosts in a subnet */ u32 hosts; /* number of hosts in a subnet */
size_t memsize; /* members size */ size_t memsize; /* members size */
size_t dsize; /* extensions struct size */
size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
u8 netmask; /* subnet netmask */ u8 netmask; /* subnet netmask */
u32 timeout; /* timeout parameter */ u32 timeout; /* timeout parameter */
struct timer_list gc; /* garbage collection */ struct timer_list gc; /* garbage collection */
}; };
/* Base variant */ /* ADT structure for generic function args */
struct bitmap_ip_adt_elem {
u16 id;
};
static inline u32 static inline u32
ip_to_id(const struct bitmap_ip *m, u32 ip) ip_to_id(const struct bitmap_ip *m, u32 ip)
...@@ -56,188 +62,67 @@ ip_to_id(const struct bitmap_ip *m, u32 ip) ...@@ -56,188 +62,67 @@ ip_to_id(const struct bitmap_ip *m, u32 ip)
return ((ip & ip_set_hostmask(m->netmask)) - m->first_ip)/m->hosts; return ((ip & ip_set_hostmask(m->netmask)) - m->first_ip)/m->hosts;
} }
static int /* Common functions */
bitmap_ip_test(struct ip_set *set, void *value, u32 timeout, u32 flags)
{
const struct bitmap_ip *map = set->data;
u16 id = *(u16 *)value;
return !!test_bit(id, map->members);
}
static int static inline int
bitmap_ip_add(struct ip_set *set, void *value, u32 timeout, u32 flags) bitmap_ip_do_test(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map)
{ {
struct bitmap_ip *map = set->data; return !!test_bit(e->id, map->members);
u16 id = *(u16 *)value;
if (test_and_set_bit(id, map->members))
return -IPSET_ERR_EXIST;
return 0;
} }
static int static inline int
bitmap_ip_del(struct ip_set *set, void *value, u32 timeout, u32 flags) bitmap_ip_gc_test(u16 id, const struct bitmap_ip *map)
{ {
struct bitmap_ip *map = set->data; return !!test_bit(id, map->members);
u16 id = *(u16 *)value;
if (!test_and_clear_bit(id, map->members))
return -IPSET_ERR_EXIST;
return 0;
}
static int
bitmap_ip_list(const struct ip_set *set,
struct sk_buff *skb, struct netlink_callback *cb)
{
const struct bitmap_ip *map = set->data;
struct nlattr *atd, *nested;
u32 id, first = cb->args[2];
atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
if (!atd)
return -EMSGSIZE;
for (; cb->args[2] < map->elements; cb->args[2]++) {
id = cb->args[2];
if (!test_bit(id, map->members))
continue;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested) {
if (id == first) {
nla_nest_cancel(skb, atd);
return -EMSGSIZE;
} else
goto nla_put_failure;
}
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
htonl(map->first_ip + id * map->hosts)))
goto nla_put_failure;
ipset_nest_end(skb, nested);
}
ipset_nest_end(skb, atd);
/* Set listing finished */
cb->args[2] = 0;
return 0;
nla_put_failure:
nla_nest_cancel(skb, nested);
ipset_nest_end(skb, atd);
if (unlikely(id == first)) {
cb->args[2] = 0;
return -EMSGSIZE;
}
return 0;
} }
/* Timeout variant */ static inline int
bitmap_ip_do_add(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map,
static int u32 flags)
bitmap_ip_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags)
{ {
const struct bitmap_ip *map = set->data; return !!test_and_set_bit(e->id, map->members);
const unsigned long *members = map->members;
u16 id = *(u16 *)value;
return ip_set_timeout_test(members[id]);
} }
static int static inline int
bitmap_ip_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags) bitmap_ip_do_del(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map)
{ {
struct bitmap_ip *map = set->data; return !test_and_clear_bit(e->id, map->members);
unsigned long *members = map->members;
u16 id = *(u16 *)value;
if (ip_set_timeout_test(members[id]) && !(flags & IPSET_FLAG_EXIST))
return -IPSET_ERR_EXIST;
members[id] = ip_set_timeout_set(timeout);
return 0;
} }
static int static inline int
bitmap_ip_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags) bitmap_ip_do_list(struct sk_buff *skb, const struct bitmap_ip *map, u32 id)
{ {
struct bitmap_ip *map = set->data; return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
unsigned long *members = map->members; htonl(map->first_ip + id * map->hosts));
u16 id = *(u16 *)value;
int ret = -IPSET_ERR_EXIST;
if (ip_set_timeout_test(members[id]))
ret = 0;
members[id] = IPSET_ELEM_UNSET;
return ret;
} }
static int static inline int
bitmap_ip_tlist(const struct ip_set *set, bitmap_ip_do_head(struct sk_buff *skb, const struct bitmap_ip *map)
struct sk_buff *skb, struct netlink_callback *cb)
{ {
const struct bitmap_ip *map = set->data; return nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
struct nlattr *adt, *nested; nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
u32 id, first = cb->args[2]; (map->netmask != 32 &&
const unsigned long *members = map->members; nla_put_u8(skb, IPSET_ATTR_NETMASK, map->netmask));
adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
if (!adt)
return -EMSGSIZE;
for (; cb->args[2] < map->elements; cb->args[2]++) {
id = cb->args[2];
if (!ip_set_timeout_test(members[id]))
continue;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested) {
if (id == first) {
nla_nest_cancel(skb, adt);
return -EMSGSIZE;
} else
goto nla_put_failure;
}
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
htonl(map->first_ip + id * map->hosts)) ||
nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(members[id]))))
goto nla_put_failure;
ipset_nest_end(skb, nested);
}
ipset_nest_end(skb, adt);
/* Set listing finished */
cb->args[2] = 0;
return 0;
nla_put_failure:
nla_nest_cancel(skb, nested);
ipset_nest_end(skb, adt);
if (unlikely(id == first)) {
cb->args[2] = 0;
return -EMSGSIZE;
}
return 0;
} }
static int static int
bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb, bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt) enum ipset_adt adt, struct ip_set_adt_opt *opt)
{ {
struct bitmap_ip *map = set->data; struct bitmap_ip *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct bitmap_ip_adt_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
u32 ip; u32 ip;
ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC)); ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
if (ip < map->first_ip || ip > map->last_ip) if (ip < map->first_ip || ip > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE; return -IPSET_ERR_BITMAP_RANGE;
ip = ip_to_id(map, ip); e.id = ip_to_id(map, ip);
return adtfn(set, &ip, opt_timeout(opt, map), opt->cmdflags); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
} }
static int static int
...@@ -246,33 +131,31 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -246,33 +131,31 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
{ {
struct bitmap_ip *map = set->data; struct bitmap_ip *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
u32 timeout = map->timeout; u32 ip, ip_to;
u32 ip, ip_to, id; struct bitmap_ip_adt_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
int ret = 0; int ret = 0;
if (unlikely(!tb[IPSET_ATTR_IP] || if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
ip_set_get_extensions(set, tb, &ext);
if (ret) if (ret)
return ret; return ret;
if (ip < map->first_ip || ip > map->last_ip) if (ip < map->first_ip || ip > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE; return -IPSET_ERR_BITMAP_RANGE;
if (tb[IPSET_ATTR_TIMEOUT]) {
if (!with_timeout(map->timeout))
return -IPSET_ERR_TIMEOUT;
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (adt == IPSET_TEST) { if (adt == IPSET_TEST) {
id = ip_to_id(map, ip); e.id = ip_to_id(map, ip);
return adtfn(set, &id, timeout, flags); return adtfn(set, &e, &ext, &ext, flags);
} }
if (tb[IPSET_ATTR_IP_TO]) { if (tb[IPSET_ATTR_IP_TO]) {
...@@ -297,8 +180,8 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -297,8 +180,8 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
return -IPSET_ERR_BITMAP_RANGE; return -IPSET_ERR_BITMAP_RANGE;
for (; !before(ip_to, ip); ip += map->hosts) { for (; !before(ip_to, ip); ip += map->hosts) {
id = ip_to_id(map, ip); e.id = ip_to_id(map, ip);
ret = adtfn(set, &id, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
...@@ -308,54 +191,6 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[], ...@@ -308,54 +191,6 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
return ret; return ret;
} }
static void
bitmap_ip_destroy(struct ip_set *set)
{
struct bitmap_ip *map = set->data;
if (with_timeout(map->timeout))
del_timer_sync(&map->gc);
ip_set_free(map->members);
kfree(map);
set->data = NULL;
}
static void
bitmap_ip_flush(struct ip_set *set)
{
struct bitmap_ip *map = set->data;
memset(map->members, 0, map->memsize);
}
static int
bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
{
const struct bitmap_ip *map = set->data;
struct nlattr *nested;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
goto nla_put_failure;
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
(map->netmask != 32 &&
nla_put_u8(skb, IPSET_ATTR_NETMASK, map->netmask)) ||
nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) + map->memsize)) ||
(with_timeout(map->timeout) &&
nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
goto nla_put_failure;
ipset_nest_end(skb, nested);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static bool static bool
bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b) bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b)
{ {
...@@ -365,70 +200,35 @@ bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b) ...@@ -365,70 +200,35 @@ bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b)
return x->first_ip == y->first_ip && return x->first_ip == y->first_ip &&
x->last_ip == y->last_ip && x->last_ip == y->last_ip &&
x->netmask == y->netmask && x->netmask == y->netmask &&
x->timeout == y->timeout; x->timeout == y->timeout &&
a->extensions == b->extensions;
} }
static const struct ip_set_type_variant bitmap_ip = { /* Plain variant */
.kadt = bitmap_ip_kadt,
.uadt = bitmap_ip_uadt, struct bitmap_ip_elem {
.adt = {
[IPSET_ADD] = bitmap_ip_add,
[IPSET_DEL] = bitmap_ip_del,
[IPSET_TEST] = bitmap_ip_test,
},
.destroy = bitmap_ip_destroy,
.flush = bitmap_ip_flush,
.head = bitmap_ip_head,
.list = bitmap_ip_list,
.same_set = bitmap_ip_same_set,
}; };
static const struct ip_set_type_variant bitmap_tip = { /* Timeout variant */
.kadt = bitmap_ip_kadt,
.uadt = bitmap_ip_uadt, struct bitmap_ipt_elem {
.adt = { unsigned long timeout;
[IPSET_ADD] = bitmap_ip_tadd,
[IPSET_DEL] = bitmap_ip_tdel,
[IPSET_TEST] = bitmap_ip_ttest,
},
.destroy = bitmap_ip_destroy,
.flush = bitmap_ip_flush,
.head = bitmap_ip_head,
.list = bitmap_ip_tlist,
.same_set = bitmap_ip_same_set,
}; };
static void /* Plain variant with counter */
bitmap_ip_gc(unsigned long ul_set)
{
struct ip_set *set = (struct ip_set *) ul_set;
struct bitmap_ip *map = set->data;
unsigned long *table = map->members;
u32 id;
/* We run parallel with other readers (test element)
* but adding/deleting new entries is locked out */
read_lock_bh(&set->lock);
for (id = 0; id < map->elements; id++)
if (ip_set_timeout_expired(table[id]))
table[id] = IPSET_ELEM_UNSET;
read_unlock_bh(&set->lock);
map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
add_timer(&map->gc);
}
static void struct bitmap_ipc_elem {
bitmap_ip_gc_init(struct ip_set *set) struct ip_set_counter counter;
{ };
struct bitmap_ip *map = set->data;
init_timer(&map->gc); /* Timeout variant with counter */
map->gc.data = (unsigned long) set;
map->gc.function = bitmap_ip_gc; struct bitmap_ipct_elem {
map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ; unsigned long timeout;
add_timer(&map->gc); struct ip_set_counter counter;
} };
#include "ip_set_bitmap_gen.h"
/* Create bitmap:ip type of sets */ /* Create bitmap:ip type of sets */
...@@ -440,6 +240,13 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map, ...@@ -440,6 +240,13 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
map->members = ip_set_alloc(map->memsize); map->members = ip_set_alloc(map->memsize);
if (!map->members) if (!map->members)
return false; return false;
if (map->dsize) {
map->extensions = ip_set_alloc(map->dsize * elements);
if (!map->extensions) {
kfree(map->members);
return false;
}
}
map->first_ip = first_ip; map->first_ip = first_ip;
map->last_ip = last_ip; map->last_ip = last_ip;
map->elements = elements; map->elements = elements;
...@@ -457,13 +264,14 @@ static int ...@@ -457,13 +264,14 @@ static int
bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags) bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
{ {
struct bitmap_ip *map; struct bitmap_ip *map;
u32 first_ip, last_ip, hosts; u32 first_ip, last_ip, hosts, cadt_flags = 0;
u64 elements; u64 elements;
u8 netmask = 32; u8 netmask = 32;
int ret; int ret;
if (unlikely(!tb[IPSET_ATTR_IP] || if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip); ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
...@@ -526,8 +334,45 @@ bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags) ...@@ -526,8 +334,45 @@ bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (!map) if (!map)
return -ENOMEM; return -ENOMEM;
if (tb[IPSET_ATTR_TIMEOUT]) { map->memsize = bitmap_bytes(0, elements - 1);
map->memsize = elements * sizeof(unsigned long); set->variant = &bitmap_ip;
if (tb[IPSET_ATTR_CADT_FLAGS])
cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
set->extensions |= IPSET_EXT_COUNTER;
if (tb[IPSET_ATTR_TIMEOUT]) {
map->dsize = sizeof(struct bitmap_ipct_elem);
map->offset[IPSET_OFFSET_TIMEOUT] =
offsetof(struct bitmap_ipct_elem, timeout);
map->offset[IPSET_OFFSET_COUNTER] =
offsetof(struct bitmap_ipct_elem, counter);
if (!init_map_ip(set, map, first_ip, last_ip,
elements, hosts, netmask)) {
kfree(map);
return -ENOMEM;
}
map->timeout = ip_set_timeout_uget(
tb[IPSET_ATTR_TIMEOUT]);
set->extensions |= IPSET_EXT_TIMEOUT;
bitmap_ip_gc_init(set, bitmap_ip_gc);
} else {
map->dsize = sizeof(struct bitmap_ipc_elem);
map->offset[IPSET_OFFSET_COUNTER] =
offsetof(struct bitmap_ipc_elem, counter);
if (!init_map_ip(set, map, first_ip, last_ip,
elements, hosts, netmask)) {
kfree(map);
return -ENOMEM;
}
}
} else if (tb[IPSET_ATTR_TIMEOUT]) {
map->dsize = sizeof(struct bitmap_ipt_elem);
map->offset[IPSET_OFFSET_TIMEOUT] =
offsetof(struct bitmap_ipt_elem, timeout);
if (!init_map_ip(set, map, first_ip, last_ip, if (!init_map_ip(set, map, first_ip, last_ip,
elements, hosts, netmask)) { elements, hosts, netmask)) {
...@@ -536,19 +381,16 @@ bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags) ...@@ -536,19 +381,16 @@ bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
} }
map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = &bitmap_tip; set->extensions |= IPSET_EXT_TIMEOUT;
bitmap_ip_gc_init(set); bitmap_ip_gc_init(set, bitmap_ip_gc);
} else { } else {
map->memsize = bitmap_bytes(0, elements - 1); map->dsize = 0;
if (!init_map_ip(set, map, first_ip, last_ip, if (!init_map_ip(set, map, first_ip, last_ip,
elements, hosts, netmask)) { elements, hosts, netmask)) {
kfree(map); kfree(map);
return -ENOMEM; return -ENOMEM;
} }
set->variant = &bitmap_ip;
} }
return 0; return 0;
} }
...@@ -568,6 +410,7 @@ static struct ip_set_type bitmap_ip_type __read_mostly = { ...@@ -568,6 +410,7 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
[IPSET_ATTR_CIDR] = { .type = NLA_U8 }, [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_NETMASK] = { .type = NLA_U8 }, [IPSET_ATTR_NETMASK] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
}, },
.adt_policy = { .adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP] = { .type = NLA_NESTED },
...@@ -575,6 +418,8 @@ static struct ip_set_type bitmap_ip_type __read_mostly = { ...@@ -575,6 +418,8 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
[IPSET_ATTR_CIDR] = { .type = NLA_U8 }, [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
}, },
.me = THIS_MODULE, .me = THIS_MODULE,
}; };
......
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu> /* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
* Patrick Schaaf <bof@bof.de> * Patrick Schaaf <bof@bof.de>
* Martin Josefsson <gandalf@wlug.westbo.se> * Martin Josefsson <gandalf@wlug.westbo.se>
* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
...@@ -23,344 +23,208 @@ ...@@ -23,344 +23,208 @@
#include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_bitmap.h> #include <linux/netfilter/ipset/ip_set_bitmap.h>
#define REVISION_MIN 0 #define REVISION_MIN 0
#define REVISION_MAX 0 #define REVISION_MAX 1 /* Counter support added */
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
IP_SET_MODULE_DESC("bitmap:ip,mac", REVISION_MIN, REVISION_MAX); IP_SET_MODULE_DESC("bitmap:ip,mac", REVISION_MIN, REVISION_MAX);
MODULE_ALIAS("ip_set_bitmap:ip,mac"); MODULE_ALIAS("ip_set_bitmap:ip,mac");
#define MTYPE bitmap_ipmac
#define IP_SET_BITMAP_STORED_TIMEOUT
enum { enum {
MAC_EMPTY, /* element is not set */
MAC_FILLED, /* element is set with MAC */
MAC_UNSET, /* element is set, without MAC */ MAC_UNSET, /* element is set, without MAC */
MAC_FILLED, /* element is set with MAC */
}; };
/* Type structure */ /* Type structure */
struct bitmap_ipmac { struct bitmap_ipmac {
void *members; /* the set members */ void *members; /* the set members */
void *extensions; /* MAC + data extensions */
u32 first_ip; /* host byte order, included in range */ u32 first_ip; /* host byte order, included in range */
u32 last_ip; /* host byte order, included in range */ u32 last_ip; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
u32 timeout; /* timeout value */ u32 timeout; /* timeout value */
struct timer_list gc; /* garbage collector */ struct timer_list gc; /* garbage collector */
size_t memsize; /* members size */
size_t dsize; /* size of element */ size_t dsize; /* size of element */
size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
}; };
/* ADT structure for generic function args */ /* ADT structure for generic function args */
struct ipmac { struct bitmap_ipmac_adt_elem {
u32 id; /* id in array */ u16 id;
unsigned char *ether; /* ethernet address */ unsigned char *ether;
}; };
/* Member element without and with timeout */ struct bitmap_ipmac_elem {
struct ipmac_elem {
unsigned char ether[ETH_ALEN]; unsigned char ether[ETH_ALEN];
unsigned char match; unsigned char filled;
} __attribute__ ((aligned)); } __attribute__ ((aligned));
struct ipmac_telem { static inline u32
unsigned char ether[ETH_ALEN]; ip_to_id(const struct bitmap_ipmac *m, u32 ip)
unsigned char match;
unsigned long timeout;
} __attribute__ ((aligned));
static inline void *
bitmap_ipmac_elem(const struct bitmap_ipmac *map, u32 id)
{ {
return (void *)((char *)map->members + id * map->dsize); return ip - m->first_ip;
} }
static inline bool static inline struct bitmap_ipmac_elem *
bitmap_timeout(const struct bitmap_ipmac *map, u32 id) get_elem(void *extensions, u16 id, size_t dsize)
{ {
const struct ipmac_telem *elem = bitmap_ipmac_elem(map, id); return (struct bitmap_ipmac_elem *)(extensions + id * dsize);
return ip_set_timeout_test(elem->timeout);
} }
static inline bool /* Common functions */
bitmap_expired(const struct bitmap_ipmac *map, u32 id)
{
const struct ipmac_telem *elem = bitmap_ipmac_elem(map, id);
return ip_set_timeout_expired(elem->timeout);
}
static inline int static inline int
bitmap_ipmac_exist(const struct ipmac_telem *elem) bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
{ const struct bitmap_ipmac *map)
return elem->match == MAC_UNSET ||
(elem->match == MAC_FILLED &&
!ip_set_timeout_expired(elem->timeout));
}
/* Base variant */
static int
bitmap_ipmac_test(struct ip_set *set, void *value, u32 timeout, u32 flags)
{
const struct bitmap_ipmac *map = set->data;
const struct ipmac *data = value;
const struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
switch (elem->match) {
case MAC_UNSET:
/* Trigger kernel to fill out the ethernet address */
return -EAGAIN;
case MAC_FILLED:
return data->ether == NULL ||
ether_addr_equal(data->ether, elem->ether);
}
return 0;
}
static int
bitmap_ipmac_add(struct ip_set *set, void *value, u32 timeout, u32 flags)
{
struct bitmap_ipmac *map = set->data;
const struct ipmac *data = value;
struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
switch (elem->match) {
case MAC_UNSET:
if (!data->ether)
/* Already added without ethernet address */
return -IPSET_ERR_EXIST;
/* Fill the MAC address */
memcpy(elem->ether, data->ether, ETH_ALEN);
elem->match = MAC_FILLED;
break;
case MAC_FILLED:
return -IPSET_ERR_EXIST;
case MAC_EMPTY:
if (data->ether) {
memcpy(elem->ether, data->ether, ETH_ALEN);
elem->match = MAC_FILLED;
} else
elem->match = MAC_UNSET;
}
return 0;
}
static int
bitmap_ipmac_del(struct ip_set *set, void *value, u32 timeout, u32 flags)
{ {
struct bitmap_ipmac *map = set->data; const struct bitmap_ipmac_elem *elem;
const struct ipmac *data = value;
struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
if (elem->match == MAC_EMPTY)
return -IPSET_ERR_EXIST;
elem->match = MAC_EMPTY; if (!test_bit(e->id, map->members))
return 0;
return 0; elem = get_elem(map->extensions, e->id, map->dsize);
if (elem->filled == MAC_FILLED)
return e->ether == NULL ||
ether_addr_equal(e->ether, elem->ether);
/* Trigger kernel to fill out the ethernet address */
return -EAGAIN;
} }
static int static inline int
bitmap_ipmac_list(const struct ip_set *set, bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map)
struct sk_buff *skb, struct netlink_callback *cb)
{ {
const struct bitmap_ipmac *map = set->data; const struct bitmap_ipmac_elem *elem;
const struct ipmac_elem *elem;
struct nlattr *atd, *nested;
u32 id, first = cb->args[2];
u32 last = map->last_ip - map->first_ip;
atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
if (!atd)
return -EMSGSIZE;
for (; cb->args[2] <= last; cb->args[2]++) {
id = cb->args[2];
elem = bitmap_ipmac_elem(map, id);
if (elem->match == MAC_EMPTY)
continue;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested) {
if (id == first) {
nla_nest_cancel(skb, atd);
return -EMSGSIZE;
} else
goto nla_put_failure;
}
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
htonl(map->first_ip + id)) ||
(elem->match == MAC_FILLED &&
nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN,
elem->ether)))
goto nla_put_failure;
ipset_nest_end(skb, nested);
}
ipset_nest_end(skb, atd);
/* Set listing finished */
cb->args[2] = 0;
return 0;
nla_put_failure: if (!test_bit(id, map->members))
nla_nest_cancel(skb, nested); return 0;
ipset_nest_end(skb, atd); elem = get_elem(map->extensions, id, map->dsize);
if (unlikely(id == first)) { /* Timer not started for the incomplete elements */
cb->args[2] = 0; return elem->filled == MAC_FILLED;
return -EMSGSIZE;
}
return 0;
} }
/* Timeout variant */ static inline int
bitmap_ipmac_is_filled(const struct bitmap_ipmac_elem *elem)
static int
bitmap_ipmac_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags)
{ {
const struct bitmap_ipmac *map = set->data; return elem->filled == MAC_FILLED;
const struct ipmac *data = value;
const struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
switch (elem->match) {
case MAC_UNSET:
/* Trigger kernel to fill out the ethernet address */
return -EAGAIN;
case MAC_FILLED:
return (data->ether == NULL ||
ether_addr_equal(data->ether, elem->ether)) &&
!bitmap_expired(map, data->id);
}
return 0;
} }
static int static inline int
bitmap_ipmac_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags) bitmap_ipmac_add_timeout(unsigned long *timeout,
const struct bitmap_ipmac_adt_elem *e,
const struct ip_set_ext *ext,
struct bitmap_ipmac *map, int mode)
{ {
struct bitmap_ipmac *map = set->data; u32 t = ext->timeout;
const struct ipmac *data = value;
struct ipmac_telem *elem = bitmap_ipmac_elem(map, data->id);
bool flag_exist = flags & IPSET_FLAG_EXIST;
switch (elem->match) { if (mode == IPSET_ADD_START_STORED_TIMEOUT) {
case MAC_UNSET: if (t == map->timeout)
if (!(data->ether || flag_exist))
/* Already added without ethernet address */
return -IPSET_ERR_EXIST;
/* Fill the MAC address and activate the timer */
memcpy(elem->ether, data->ether, ETH_ALEN);
elem->match = MAC_FILLED;
if (timeout == map->timeout)
/* Timeout was not specified, get stored one */ /* Timeout was not specified, get stored one */
timeout = elem->timeout; t = *timeout;
elem->timeout = ip_set_timeout_set(timeout); ip_set_timeout_set(timeout, t);
break; } else {
case MAC_FILLED:
if (!(bitmap_expired(map, data->id) || flag_exist))
return -IPSET_ERR_EXIST;
/* Fall through */
case MAC_EMPTY:
if (data->ether) {
memcpy(elem->ether, data->ether, ETH_ALEN);
elem->match = MAC_FILLED;
} else
elem->match = MAC_UNSET;
/* If MAC is unset yet, we store plain timeout value /* If MAC is unset yet, we store plain timeout value
* because the timer is not activated yet * because the timer is not activated yet
* and we can reuse it later when MAC is filled out, * and we can reuse it later when MAC is filled out,
* possibly by the kernel */ * possibly by the kernel */
elem->timeout = data->ether ? ip_set_timeout_set(timeout) if (e->ether)
: timeout; ip_set_timeout_set(timeout, t);
break; else
*timeout = t;
} }
return 0; return 0;
} }
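
/*
 * [Illustrative sketch, not part of this patch.]  The add_timeout helper
 * above keeps the plain timeout value while the MAC address of an entry is
 * still unknown, and only arms a real deadline once the entry is complete.
 * The user-space model below mimics that idea; all demo_* names are
 * hypothetical and the deadline arithmetic is simplified.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct demo_entry {
	bool mac_known;
	unsigned long timeout;	/* plain seconds or absolute deadline */
};

static void demo_store_timeout(struct demo_entry *e, unsigned long seconds)
{
	if (e->mac_known)
		/* entry complete: arm an absolute deadline */
		e->timeout = (unsigned long)time(NULL) + seconds;
	else
		/* keep the plain value, reused later when the MAC arrives */
		e->timeout = seconds;
}

int main(void)
{
	struct demo_entry pending  = { .mac_known = false };
	struct demo_entry complete = { .mac_known = true };

	demo_store_timeout(&pending, 600);
	demo_store_timeout(&complete, 600);
	printf("pending stores %lu, complete stores deadline %lu\n",
	       pending.timeout, complete.timeout);
	return 0;
}
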
static int static inline int
bitmap_ipmac_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags) bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
struct bitmap_ipmac *map, u32 flags)
{ {
struct bitmap_ipmac *map = set->data; struct bitmap_ipmac_elem *elem;
const struct ipmac *data = value;
struct ipmac_telem *elem = bitmap_ipmac_elem(map, data->id); elem = get_elem(map->extensions, e->id, map->dsize);
if (test_and_set_bit(e->id, map->members)) {
if (elem->filled == MAC_FILLED) {
if (e->ether && (flags & IPSET_FLAG_EXIST))
memcpy(elem->ether, e->ether, ETH_ALEN);
return IPSET_ADD_FAILED;
} else if (!e->ether)
/* Already added without ethernet address */
return IPSET_ADD_FAILED;
/* Fill the MAC address and trigger the timer activation */
memcpy(elem->ether, e->ether, ETH_ALEN);
elem->filled = MAC_FILLED;
return IPSET_ADD_START_STORED_TIMEOUT;
} else if (e->ether) {
/* We can store MAC too */
memcpy(elem->ether, e->ether, ETH_ALEN);
elem->filled = MAC_FILLED;
return 0;
} else {
elem->filled = MAC_UNSET;
/* MAC is not stored yet, don't start timer */
return IPSET_ADD_STORE_PLAIN_TIMEOUT;
}
}
if (elem->match == MAC_EMPTY || bitmap_expired(map, data->id)) static inline int
return -IPSET_ERR_EXIST; bitmap_ipmac_do_del(const struct bitmap_ipmac_adt_elem *e,
struct bitmap_ipmac *map)
{
return !test_and_clear_bit(e->id, map->members);
}
elem->match = MAC_EMPTY; static inline unsigned long
ip_set_timeout_stored(struct bitmap_ipmac *map, u32 id, unsigned long *timeout)
{
const struct bitmap_ipmac_elem *elem =
get_elem(map->extensions, id, map->dsize);
return 0; return elem->filled == MAC_FILLED ? ip_set_timeout_get(timeout) :
*timeout;
} }
static int static inline int
bitmap_ipmac_tlist(const struct ip_set *set, bitmap_ipmac_do_list(struct sk_buff *skb, const struct bitmap_ipmac *map,
struct sk_buff *skb, struct netlink_callback *cb) u32 id)
{ {
const struct bitmap_ipmac *map = set->data; const struct bitmap_ipmac_elem *elem =
const struct ipmac_telem *elem; get_elem(map->extensions, id, map->dsize);
struct nlattr *atd, *nested;
u32 id, first = cb->args[2];
u32 timeout, last = map->last_ip - map->first_ip;
atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
if (!atd)
return -EMSGSIZE;
for (; cb->args[2] <= last; cb->args[2]++) {
id = cb->args[2];
elem = bitmap_ipmac_elem(map, id);
if (!bitmap_ipmac_exist(elem))
continue;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested) {
if (id == first) {
nla_nest_cancel(skb, atd);
return -EMSGSIZE;
} else
goto nla_put_failure;
}
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
htonl(map->first_ip + id)) ||
(elem->match == MAC_FILLED &&
nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN,
elem->ether)))
goto nla_put_failure;
timeout = elem->match == MAC_UNSET ? elem->timeout
: ip_set_timeout_get(elem->timeout);
if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout)))
goto nla_put_failure;
ipset_nest_end(skb, nested);
}
ipset_nest_end(skb, atd);
/* Set listing finished */
cb->args[2] = 0;
return 0; return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
htonl(map->first_ip + id)) ||
(elem->filled == MAC_FILLED &&
nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, elem->ether));
}
nla_put_failure: static inline int
nla_nest_cancel(skb, nested); bitmap_ipmac_do_head(struct sk_buff *skb, const struct bitmap_ipmac *map)
ipset_nest_end(skb, atd); {
if (unlikely(id == first)) { return nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
cb->args[2] = 0; nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
return -EMSGSIZE;
}
return 0;
} }
static int static int
bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb, bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt) enum ipset_adt adt, struct ip_set_adt_opt *opt)
{ {
struct bitmap_ipmac *map = set->data; struct bitmap_ipmac *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct ipmac data; struct bitmap_ipmac_adt_elem e = {};
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
u32 ip;
/* MAC can be src only */ /* MAC can be src only */
if (!(opt->flags & IPSET_DIM_TWO_SRC)) if (!(opt->flags & IPSET_DIM_TWO_SRC))
return 0; return 0;
data.id = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC)); ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
if (data.id < map->first_ip || data.id > map->last_ip) if (ip < map->first_ip || ip > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE; return -IPSET_ERR_BITMAP_RANGE;
/* Backward compatibility: we don't check the second flag */ /* Backward compatibility: we don't check the second flag */
...@@ -368,10 +232,10 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
(skb_mac_header(skb) + ETH_HLEN) > skb->data) (skb_mac_header(skb) + ETH_HLEN) > skb->data)
return -EINVAL; return -EINVAL;
data.id -= map->first_ip; e.id = ip_to_id(map, ip);
data.ether = eth_hdr(skb)->h_source; e.ether = eth_hdr(skb)->h_source;
return adtfn(set, &data, opt_timeout(opt, map), opt->cmdflags); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
} }
static int static int
...@@ -380,91 +244,39 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
{ {
const struct bitmap_ipmac *map = set->data; const struct bitmap_ipmac *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct ipmac data; struct bitmap_ipmac_adt_elem e = {};
u32 timeout = map->timeout; struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
u32 ip;
int ret = 0; int ret = 0;
if (unlikely(!tb[IPSET_ATTR_IP] || if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &data.id); ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
ip_set_get_extensions(set, tb, &ext);
if (ret) if (ret)
return ret; return ret;
if (data.id < map->first_ip || data.id > map->last_ip) if (ip < map->first_ip || ip > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE; return -IPSET_ERR_BITMAP_RANGE;
e.id = ip_to_id(map, ip);
if (tb[IPSET_ATTR_ETHER]) if (tb[IPSET_ATTR_ETHER])
data.ether = nla_data(tb[IPSET_ATTR_ETHER]); e.ether = nla_data(tb[IPSET_ATTR_ETHER]);
else else
data.ether = NULL; e.ether = NULL;
if (tb[IPSET_ATTR_TIMEOUT]) {
if (!with_timeout(map->timeout))
return -IPSET_ERR_TIMEOUT;
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
data.id -= map->first_ip;
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret; return ip_set_eexist(ret, flags) ? 0 : ret;
} }
static void
bitmap_ipmac_destroy(struct ip_set *set)
{
struct bitmap_ipmac *map = set->data;
if (with_timeout(map->timeout))
del_timer_sync(&map->gc);
ip_set_free(map->members);
kfree(map);
set->data = NULL;
}
static void
bitmap_ipmac_flush(struct ip_set *set)
{
struct bitmap_ipmac *map = set->data;
memset(map->members, 0,
(map->last_ip - map->first_ip + 1) * map->dsize);
}
static int
bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
{
const struct bitmap_ipmac *map = set->data;
struct nlattr *nested;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
goto nla_put_failure;
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) +
((map->last_ip - map->first_ip + 1) *
map->dsize))) ||
(with_timeout(map->timeout) &&
nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
goto nla_put_failure;
ipset_nest_end(skb, nested);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static bool static bool
bitmap_ipmac_same_set(const struct ip_set *a, const struct ip_set *b) bitmap_ipmac_same_set(const struct ip_set *a, const struct ip_set *b)
{ {
...@@ -473,85 +285,64 @@ bitmap_ipmac_same_set(const struct ip_set *a, const struct ip_set *b)
return x->first_ip == y->first_ip && return x->first_ip == y->first_ip &&
x->last_ip == y->last_ip && x->last_ip == y->last_ip &&
x->timeout == y->timeout; x->timeout == y->timeout &&
a->extensions == b->extensions;
} }
static const struct ip_set_type_variant bitmap_ipmac = { /* Plain variant */
.kadt = bitmap_ipmac_kadt,
.uadt = bitmap_ipmac_uadt,
.adt = {
[IPSET_ADD] = bitmap_ipmac_add,
[IPSET_DEL] = bitmap_ipmac_del,
[IPSET_TEST] = bitmap_ipmac_test,
},
.destroy = bitmap_ipmac_destroy,
.flush = bitmap_ipmac_flush,
.head = bitmap_ipmac_head,
.list = bitmap_ipmac_list,
.same_set = bitmap_ipmac_same_set,
};
static const struct ip_set_type_variant bitmap_tipmac = { /* Timeout variant */
.kadt = bitmap_ipmac_kadt,
.uadt = bitmap_ipmac_uadt, struct bitmap_ipmact_elem {
.adt = { struct {
[IPSET_ADD] = bitmap_ipmac_tadd, unsigned char ether[ETH_ALEN];
[IPSET_DEL] = bitmap_ipmac_tdel, unsigned char filled;
[IPSET_TEST] = bitmap_ipmac_ttest, } __attribute__ ((aligned));
}, unsigned long timeout;
.destroy = bitmap_ipmac_destroy,
.flush = bitmap_ipmac_flush,
.head = bitmap_ipmac_head,
.list = bitmap_ipmac_tlist,
.same_set = bitmap_ipmac_same_set,
}; };
static void /* Plain variant with counter */
bitmap_ipmac_gc(unsigned long ul_set)
{
struct ip_set *set = (struct ip_set *) ul_set;
struct bitmap_ipmac *map = set->data;
struct ipmac_telem *elem;
u32 id, last = map->last_ip - map->first_ip;
/* We run parallel with other readers (test element)
* but adding/deleting new entries is locked out */
read_lock_bh(&set->lock);
for (id = 0; id <= last; id++) {
elem = bitmap_ipmac_elem(map, id);
if (elem->match == MAC_FILLED &&
ip_set_timeout_expired(elem->timeout))
elem->match = MAC_EMPTY;
}
read_unlock_bh(&set->lock);
map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ; struct bitmap_ipmacc_elem {
add_timer(&map->gc); struct {
} unsigned char ether[ETH_ALEN];
unsigned char filled;
} __attribute__ ((aligned));
struct ip_set_counter counter;
};
static void /* Timeout variant with counter */
bitmap_ipmac_gc_init(struct ip_set *set)
{
struct bitmap_ipmac *map = set->data;
init_timer(&map->gc); struct bitmap_ipmacct_elem {
map->gc.data = (unsigned long) set; struct {
map->gc.function = bitmap_ipmac_gc; unsigned char ether[ETH_ALEN];
map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ; unsigned char filled;
add_timer(&map->gc); } __attribute__ ((aligned));
} unsigned long timeout;
struct ip_set_counter counter;
};
#include "ip_set_bitmap_gen.h"
/* Create bitmap:ip,mac type of sets */ /* Create bitmap:ip,mac type of sets */
static bool static bool
init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map, init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
u32 first_ip, u32 last_ip) u32 first_ip, u32 last_ip, u32 elements)
{ {
map->members = ip_set_alloc((last_ip - first_ip + 1) * map->dsize); map->members = ip_set_alloc((last_ip - first_ip + 1) * map->dsize);
if (!map->members) if (!map->members)
return false; return false;
if (map->dsize) {
map->extensions = ip_set_alloc(map->dsize * elements);
if (!map->extensions) {
kfree(map->members);
return false;
}
}
map->first_ip = first_ip; map->first_ip = first_ip;
map->last_ip = last_ip; map->last_ip = last_ip;
map->elements = elements;
map->timeout = IPSET_NO_TIMEOUT; map->timeout = IPSET_NO_TIMEOUT;
set->data = map; set->data = map;
...@@ -564,13 +355,14 @@ static int
bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[], bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
u32 flags) u32 flags)
{ {
u32 first_ip, last_ip; u32 first_ip, last_ip, cadt_flags = 0;
u64 elements; u64 elements;
struct bitmap_ipmac *map; struct bitmap_ipmac *map;
int ret; int ret;
if (unlikely(!tb[IPSET_ATTR_IP] || if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip); ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
...@@ -605,28 +397,59 @@ bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
if (!map) if (!map)
return -ENOMEM; return -ENOMEM;
if (tb[IPSET_ATTR_TIMEOUT]) { map->memsize = bitmap_bytes(0, elements - 1);
map->dsize = sizeof(struct ipmac_telem); set->variant = &bitmap_ipmac;
if (tb[IPSET_ATTR_CADT_FLAGS])
cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
set->extensions |= IPSET_EXT_COUNTER;
if (tb[IPSET_ATTR_TIMEOUT]) {
map->dsize = sizeof(struct bitmap_ipmacct_elem);
map->offset[IPSET_OFFSET_TIMEOUT] =
offsetof(struct bitmap_ipmacct_elem, timeout);
map->offset[IPSET_OFFSET_COUNTER] =
offsetof(struct bitmap_ipmacct_elem, counter);
if (!init_map_ipmac(set, map, first_ip, last_ip,
elements)) {
kfree(map);
return -ENOMEM;
}
map->timeout = ip_set_timeout_uget(
tb[IPSET_ATTR_TIMEOUT]);
set->extensions |= IPSET_EXT_TIMEOUT;
bitmap_ipmac_gc_init(set, bitmap_ipmac_gc);
} else {
map->dsize = sizeof(struct bitmap_ipmacc_elem);
map->offset[IPSET_OFFSET_COUNTER] =
offsetof(struct bitmap_ipmacc_elem, counter);
if (!init_map_ipmac(set, map, first_ip, last_ip,
elements)) {
kfree(map);
return -ENOMEM;
}
}
} else if (tb[IPSET_ATTR_TIMEOUT]) {
map->dsize = sizeof(struct bitmap_ipmact_elem);
map->offset[IPSET_OFFSET_TIMEOUT] =
offsetof(struct bitmap_ipmact_elem, timeout);
if (!init_map_ipmac(set, map, first_ip, last_ip)) { if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
kfree(map); kfree(map);
return -ENOMEM; return -ENOMEM;
} }
map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->extensions |= IPSET_EXT_TIMEOUT;
set->variant = &bitmap_tipmac; bitmap_ipmac_gc_init(set, bitmap_ipmac_gc);
bitmap_ipmac_gc_init(set);
} else { } else {
map->dsize = sizeof(struct ipmac_elem); map->dsize = sizeof(struct bitmap_ipmac_elem);
if (!init_map_ipmac(set, map, first_ip, last_ip)) { if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
kfree(map); kfree(map);
return -ENOMEM; return -ENOMEM;
} }
set->variant = &bitmap_ipmac; set->variant = &bitmap_ipmac;
} }
return 0; return 0;
} }
...@@ -645,6 +468,7 @@ static struct ip_set_type bitmap_ipmac_type = {
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 }, [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
}, },
.adt_policy = { .adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP] = { .type = NLA_NESTED },
...@@ -652,6 +476,8 @@ static struct ip_set_type bitmap_ipmac_type = {
.len = ETH_ALEN }, .len = ETH_ALEN },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
}, },
.me = THIS_MODULE, .me = THIS_MODULE,
}; };
...
/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> /* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
...@@ -19,205 +19,94 @@
#include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_bitmap.h> #include <linux/netfilter/ipset/ip_set_bitmap.h>
#include <linux/netfilter/ipset/ip_set_getport.h> #include <linux/netfilter/ipset/ip_set_getport.h>
#define IP_SET_BITMAP_TIMEOUT
#include <linux/netfilter/ipset/ip_set_timeout.h>
#define REVISION_MIN 0 #define REVISION_MIN 0
#define REVISION_MAX 0 #define REVISION_MAX 1 /* Counter support added */
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
IP_SET_MODULE_DESC("bitmap:port", REVISION_MIN, REVISION_MAX); IP_SET_MODULE_DESC("bitmap:port", REVISION_MIN, REVISION_MAX);
MODULE_ALIAS("ip_set_bitmap:port"); MODULE_ALIAS("ip_set_bitmap:port");
#define MTYPE bitmap_port
/* Type structure */ /* Type structure */
struct bitmap_port { struct bitmap_port {
void *members; /* the set members */ void *members; /* the set members */
void *extensions; /* data extensions */
u16 first_port; /* host byte order, included in range */ u16 first_port; /* host byte order, included in range */
u16 last_port; /* host byte order, included in range */ u16 last_port; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
size_t memsize; /* members size */ size_t memsize; /* members size */
size_t dsize; /* extensions struct size */
size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
u32 timeout; /* timeout parameter */ u32 timeout; /* timeout parameter */
struct timer_list gc; /* garbage collection */ struct timer_list gc; /* garbage collection */
}; };
/* Base variant */ /* ADT structure for generic function args */
struct bitmap_port_adt_elem {
u16 id;
};
static int static inline u16
bitmap_port_test(struct ip_set *set, void *value, u32 timeout, u32 flags) port_to_id(const struct bitmap_port *m, u16 port)
{ {
const struct bitmap_port *map = set->data; return port - m->first_port;
u16 id = *(u16 *)value;
return !!test_bit(id, map->members);
} }
static int /* Common functions */
bitmap_port_add(struct ip_set *set, void *value, u32 timeout, u32 flags)
{
struct bitmap_port *map = set->data;
u16 id = *(u16 *)value;
if (test_and_set_bit(id, map->members))
return -IPSET_ERR_EXIST;
return 0;
}
static int static inline int
bitmap_port_del(struct ip_set *set, void *value, u32 timeout, u32 flags) bitmap_port_do_test(const struct bitmap_port_adt_elem *e,
const struct bitmap_port *map)
{ {
struct bitmap_port *map = set->data; return !!test_bit(e->id, map->members);
u16 id = *(u16 *)value;
if (!test_and_clear_bit(id, map->members))
return -IPSET_ERR_EXIST;
return 0;
} }
static int static inline int
bitmap_port_list(const struct ip_set *set, bitmap_port_gc_test(u16 id, const struct bitmap_port *map)
struct sk_buff *skb, struct netlink_callback *cb)
{ {
const struct bitmap_port *map = set->data; return !!test_bit(id, map->members);
struct nlattr *atd, *nested;
u16 id, first = cb->args[2];
u16 last = map->last_port - map->first_port;
atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
if (!atd)
return -EMSGSIZE;
for (; cb->args[2] <= last; cb->args[2]++) {
id = cb->args[2];
if (!test_bit(id, map->members))
continue;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested) {
if (id == first) {
nla_nest_cancel(skb, atd);
return -EMSGSIZE;
} else
goto nla_put_failure;
}
if (nla_put_net16(skb, IPSET_ATTR_PORT,
htons(map->first_port + id)))
goto nla_put_failure;
ipset_nest_end(skb, nested);
}
ipset_nest_end(skb, atd);
/* Set listing finished */
cb->args[2] = 0;
return 0;
nla_put_failure:
nla_nest_cancel(skb, nested);
ipset_nest_end(skb, atd);
if (unlikely(id == first)) {
cb->args[2] = 0;
return -EMSGSIZE;
}
return 0;
} }
/* Timeout variant */ static inline int
bitmap_port_do_add(const struct bitmap_port_adt_elem *e,
static int struct bitmap_port *map, u32 flags)
bitmap_port_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags)
{ {
const struct bitmap_port *map = set->data; return !!test_and_set_bit(e->id, map->members);
const unsigned long *members = map->members;
u16 id = *(u16 *)value;
return ip_set_timeout_test(members[id]);
} }
static int static inline int
bitmap_port_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags) bitmap_port_do_del(const struct bitmap_port_adt_elem *e,
struct bitmap_port *map)
{ {
struct bitmap_port *map = set->data; return !test_and_clear_bit(e->id, map->members);
unsigned long *members = map->members;
u16 id = *(u16 *)value;
if (ip_set_timeout_test(members[id]) && !(flags & IPSET_FLAG_EXIST))
return -IPSET_ERR_EXIST;
members[id] = ip_set_timeout_set(timeout);
return 0;
} }
static int static inline int
bitmap_port_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags) bitmap_port_do_list(struct sk_buff *skb, const struct bitmap_port *map, u32 id)
{ {
struct bitmap_port *map = set->data; return nla_put_net16(skb, IPSET_ATTR_PORT,
unsigned long *members = map->members; htons(map->first_port + id));
u16 id = *(u16 *)value;
int ret = -IPSET_ERR_EXIST;
if (ip_set_timeout_test(members[id]))
ret = 0;
members[id] = IPSET_ELEM_UNSET;
return ret;
} }
static int static inline int
bitmap_port_tlist(const struct ip_set *set, bitmap_port_do_head(struct sk_buff *skb, const struct bitmap_port *map)
struct sk_buff *skb, struct netlink_callback *cb)
{ {
const struct bitmap_port *map = set->data; return nla_put_net16(skb, IPSET_ATTR_PORT, htons(map->first_port)) ||
struct nlattr *adt, *nested; nla_put_net16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
u16 id, first = cb->args[2];
u16 last = map->last_port - map->first_port;
const unsigned long *members = map->members;
adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
if (!adt)
return -EMSGSIZE;
for (; cb->args[2] <= last; cb->args[2]++) {
id = cb->args[2];
if (!ip_set_timeout_test(members[id]))
continue;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested) {
if (id == first) {
nla_nest_cancel(skb, adt);
return -EMSGSIZE;
} else
goto nla_put_failure;
}
if (nla_put_net16(skb, IPSET_ATTR_PORT,
htons(map->first_port + id)) ||
nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(members[id]))))
goto nla_put_failure;
ipset_nest_end(skb, nested);
}
ipset_nest_end(skb, adt);
/* Set listing finished */
cb->args[2] = 0;
return 0;
nla_put_failure:
nla_nest_cancel(skb, nested);
ipset_nest_end(skb, adt);
if (unlikely(id == first)) {
cb->args[2] = 0;
return -EMSGSIZE;
}
return 0;
} }
static int static int
bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb, bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt) enum ipset_adt adt, struct ip_set_adt_opt *opt)
{ {
struct bitmap_port *map = set->data; struct bitmap_port *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct bitmap_port_adt_elem e = {};
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
__be16 __port; __be16 __port;
u16 port = 0; u16 port = 0;
...@@ -230,9 +119,9 @@ bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb,
if (port < map->first_port || port > map->last_port) if (port < map->first_port || port > map->last_port)
return -IPSET_ERR_BITMAP_RANGE; return -IPSET_ERR_BITMAP_RANGE;
port -= map->first_port; e.id = port_to_id(map, port);
return adtfn(set, &port, opt_timeout(opt, map), opt->cmdflags); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
} }
static int static int
...@@ -241,14 +130,17 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
{ {
struct bitmap_port *map = set->data; struct bitmap_port *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
u32 timeout = map->timeout; struct bitmap_port_adt_elem e = {};
struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
u32 port; /* wraparound */ u32 port; /* wraparound */
u16 id, port_to; u16 port_to;
int ret = 0; int ret = 0;
if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
...@@ -257,16 +149,13 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
port = ip_set_get_h16(tb[IPSET_ATTR_PORT]); port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
if (port < map->first_port || port > map->last_port) if (port < map->first_port || port > map->last_port)
return -IPSET_ERR_BITMAP_RANGE; return -IPSET_ERR_BITMAP_RANGE;
ret = ip_set_get_extensions(set, tb, &ext);
if (tb[IPSET_ATTR_TIMEOUT]) { if (ret)
if (!with_timeout(map->timeout)) return ret;
return -IPSET_ERR_TIMEOUT;
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (adt == IPSET_TEST) { if (adt == IPSET_TEST) {
id = port - map->first_port; e.id = port_to_id(map, port);
return adtfn(set, &id, timeout, flags); return adtfn(set, &e, &ext, &ext, flags);
} }
if (tb[IPSET_ATTR_PORT_TO]) { if (tb[IPSET_ATTR_PORT_TO]) {
...@@ -283,8 +172,8 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
return -IPSET_ERR_BITMAP_RANGE; return -IPSET_ERR_BITMAP_RANGE;
for (; port <= port_to; port++) { for (; port <= port_to; port++) {
id = port - map->first_port; e.id = port_to_id(map, port);
ret = adtfn(set, &id, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
...@@ -294,52 +183,6 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
return ret; return ret;
} }
static void
bitmap_port_destroy(struct ip_set *set)
{
struct bitmap_port *map = set->data;
if (with_timeout(map->timeout))
del_timer_sync(&map->gc);
ip_set_free(map->members);
kfree(map);
set->data = NULL;
}
static void
bitmap_port_flush(struct ip_set *set)
{
struct bitmap_port *map = set->data;
memset(map->members, 0, map->memsize);
}
static int
bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
{
const struct bitmap_port *map = set->data;
struct nlattr *nested;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
goto nla_put_failure;
if (nla_put_net16(skb, IPSET_ATTR_PORT, htons(map->first_port)) ||
nla_put_net16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port)) ||
nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) + map->memsize)) ||
(with_timeout(map->timeout) &&
nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
goto nla_put_failure;
ipset_nest_end(skb, nested);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static bool static bool
bitmap_port_same_set(const struct ip_set *a, const struct ip_set *b) bitmap_port_same_set(const struct ip_set *a, const struct ip_set *b)
{ {
...@@ -348,71 +191,35 @@ bitmap_port_same_set(const struct ip_set *a, const struct ip_set *b)
return x->first_port == y->first_port && return x->first_port == y->first_port &&
x->last_port == y->last_port && x->last_port == y->last_port &&
x->timeout == y->timeout; x->timeout == y->timeout &&
a->extensions == b->extensions;
} }
static const struct ip_set_type_variant bitmap_port = { /* Plain variant */
.kadt = bitmap_port_kadt,
.uadt = bitmap_port_uadt, struct bitmap_port_elem {
.adt = {
[IPSET_ADD] = bitmap_port_add,
[IPSET_DEL] = bitmap_port_del,
[IPSET_TEST] = bitmap_port_test,
},
.destroy = bitmap_port_destroy,
.flush = bitmap_port_flush,
.head = bitmap_port_head,
.list = bitmap_port_list,
.same_set = bitmap_port_same_set,
}; };
static const struct ip_set_type_variant bitmap_tport = { /* Timeout variant */
.kadt = bitmap_port_kadt,
.uadt = bitmap_port_uadt, struct bitmap_portt_elem {
.adt = { unsigned long timeout;
[IPSET_ADD] = bitmap_port_tadd,
[IPSET_DEL] = bitmap_port_tdel,
[IPSET_TEST] = bitmap_port_ttest,
},
.destroy = bitmap_port_destroy,
.flush = bitmap_port_flush,
.head = bitmap_port_head,
.list = bitmap_port_tlist,
.same_set = bitmap_port_same_set,
}; };
static void /* Plain variant with counter */
bitmap_port_gc(unsigned long ul_set)
{
struct ip_set *set = (struct ip_set *) ul_set;
struct bitmap_port *map = set->data;
unsigned long *table = map->members;
u32 id; /* wraparound */
u16 last = map->last_port - map->first_port;
/* We run parallel with other readers (test element)
* but adding/deleting new entries is locked out */
read_lock_bh(&set->lock);
for (id = 0; id <= last; id++)
if (ip_set_timeout_expired(table[id]))
table[id] = IPSET_ELEM_UNSET;
read_unlock_bh(&set->lock);
map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
add_timer(&map->gc);
}
static void struct bitmap_portc_elem {
bitmap_port_gc_init(struct ip_set *set) struct ip_set_counter counter;
{ };
struct bitmap_port *map = set->data;
init_timer(&map->gc); /* Timeout variant with counter */
map->gc.data = (unsigned long) set;
map->gc.function = bitmap_port_gc; struct bitmap_portct_elem {
map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ; unsigned long timeout;
add_timer(&map->gc); struct ip_set_counter counter;
} };
#include "ip_set_bitmap_gen.h"
/* Create bitmap:ip type of sets */ /* Create bitmap:ip type of sets */
...@@ -423,6 +230,13 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
map->members = ip_set_alloc(map->memsize); map->members = ip_set_alloc(map->memsize);
if (!map->members) if (!map->members)
return false; return false;
if (map->dsize) {
map->extensions = ip_set_alloc(map->dsize * map->elements);
if (!map->extensions) {
kfree(map->members);
return false;
}
}
map->first_port = first_port; map->first_port = first_port;
map->last_port = last_port; map->last_port = last_port;
map->timeout = IPSET_NO_TIMEOUT; map->timeout = IPSET_NO_TIMEOUT;
...@@ -434,15 +248,16 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
} }
static int static int
bitmap_port_create(struct ip_set *set, struct nlattr *tb[], bitmap_port_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
u32 flags)
{ {
struct bitmap_port *map; struct bitmap_port *map;
u16 first_port, last_port; u16 first_port, last_port;
u32 cadt_flags = 0;
if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
first_port = ip_set_get_h16(tb[IPSET_ATTR_PORT]); first_port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
...@@ -458,28 +273,56 @@ bitmap_port_create(struct ip_set *set, struct nlattr *tb[],
if (!map) if (!map)
return -ENOMEM; return -ENOMEM;
if (tb[IPSET_ATTR_TIMEOUT]) { map->elements = last_port - first_port + 1;
map->memsize = (last_port - first_port + 1) map->memsize = map->elements * sizeof(unsigned long);
* sizeof(unsigned long); set->variant = &bitmap_port;
if (tb[IPSET_ATTR_CADT_FLAGS])
cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
set->extensions |= IPSET_EXT_COUNTER;
if (tb[IPSET_ATTR_TIMEOUT]) {
map->dsize = sizeof(struct bitmap_portct_elem);
map->offset[IPSET_OFFSET_TIMEOUT] =
offsetof(struct bitmap_portct_elem, timeout);
map->offset[IPSET_OFFSET_COUNTER] =
offsetof(struct bitmap_portct_elem, counter);
if (!init_map_port(set, map, first_port, last_port)) {
kfree(map);
return -ENOMEM;
}
map->timeout =
ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->extensions |= IPSET_EXT_TIMEOUT;
bitmap_port_gc_init(set, bitmap_port_gc);
} else {
map->dsize = sizeof(struct bitmap_portc_elem);
map->offset[IPSET_OFFSET_COUNTER] =
offsetof(struct bitmap_portc_elem, counter);
if (!init_map_port(set, map, first_port, last_port)) {
kfree(map);
return -ENOMEM;
}
}
} else if (tb[IPSET_ATTR_TIMEOUT]) {
map->dsize = sizeof(struct bitmap_portt_elem);
map->offset[IPSET_OFFSET_TIMEOUT] =
offsetof(struct bitmap_portt_elem, timeout);
if (!init_map_port(set, map, first_port, last_port)) { if (!init_map_port(set, map, first_port, last_port)) {
kfree(map); kfree(map);
return -ENOMEM; return -ENOMEM;
} }
map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = &bitmap_tport; set->extensions |= IPSET_EXT_TIMEOUT;
bitmap_port_gc_init(set, bitmap_port_gc);
bitmap_port_gc_init(set);
} else { } else {
map->memsize = bitmap_bytes(0, last_port - first_port); map->dsize = 0;
pr_debug("memsize: %zu\n", map->memsize);
if (!init_map_port(set, map, first_port, last_port)) { if (!init_map_port(set, map, first_port, last_port)) {
kfree(map); kfree(map);
return -ENOMEM; return -ENOMEM;
} }
set->variant = &bitmap_port;
} }
return 0; return 0;
} }
...@@ -497,12 +340,15 @@ static struct ip_set_type bitmap_port_type = {
[IPSET_ATTR_PORT] = { .type = NLA_U16 }, [IPSET_ATTR_PORT] = { .type = NLA_U16 },
[IPSET_ATTR_PORT_TO] = { .type = NLA_U16 }, [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
}, },
.adt_policy = { .adt_policy = {
[IPSET_ATTR_PORT] = { .type = NLA_U16 }, [IPSET_ATTR_PORT] = { .type = NLA_U16 },
[IPSET_ATTR_PORT_TO] = { .type = NLA_U16 }, [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
}, },
.me = THIS_MODULE, .me = THIS_MODULE,
}; };
...
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu> /* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
* Patrick Schaaf <bof@bof.de> * Patrick Schaaf <bof@bof.de>
* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
...@@ -315,6 +315,29 @@ ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
} }
EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6); EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
int
ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext *ext)
{
if (tb[IPSET_ATTR_TIMEOUT]) {
if (!(set->extensions & IPSET_EXT_TIMEOUT))
return -IPSET_ERR_TIMEOUT;
ext->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (tb[IPSET_ATTR_BYTES] || tb[IPSET_ATTR_PACKETS]) {
if (!(set->extensions & IPSET_EXT_COUNTER))
return -IPSET_ERR_COUNTER;
if (tb[IPSET_ATTR_BYTES])
ext->bytes = be64_to_cpu(nla_get_be64(
tb[IPSET_ATTR_BYTES]));
if (tb[IPSET_ATTR_PACKETS])
ext->packets = be64_to_cpu(nla_get_be64(
tb[IPSET_ATTR_PACKETS]));
}
return 0;
}
EXPORT_SYMBOL_GPL(ip_set_get_extensions);
/* /*
* Creating/destroying/renaming/swapping affect the existence and * Creating/destroying/renaming/swapping affect the existence and
* the properties of a set. All of these can be executed from userspace * the properties of a set. All of these can be executed from userspace
...@@ -365,8 +388,7 @@ ip_set_rcu_get(ip_set_id_t index)
int int
ip_set_test(ip_set_id_t index, const struct sk_buff *skb, ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par, struct ip_set_adt_opt *opt)
const struct ip_set_adt_opt *opt)
{ {
struct ip_set *set = ip_set_rcu_get(index); struct ip_set *set = ip_set_rcu_get(index);
int ret = 0; int ret = 0;
...@@ -391,7 +413,7 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
ret = 1; ret = 1;
} else { } else {
/* --return-nomatch: invert matched element */ /* --return-nomatch: invert matched element */
if ((opt->flags & IPSET_RETURN_NOMATCH) && if ((opt->cmdflags & IPSET_FLAG_RETURN_NOMATCH) &&
(set->type->features & IPSET_TYPE_NOMATCH) && (set->type->features & IPSET_TYPE_NOMATCH) &&
(ret > 0 || ret == -ENOTEMPTY)) (ret > 0 || ret == -ENOTEMPTY))
ret = -ret; ret = -ret;
...@@ -404,8 +426,7 @@ EXPORT_SYMBOL_GPL(ip_set_test);
int int
ip_set_add(ip_set_id_t index, const struct sk_buff *skb, ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par, struct ip_set_adt_opt *opt)
const struct ip_set_adt_opt *opt)
{ {
struct ip_set *set = ip_set_rcu_get(index); struct ip_set *set = ip_set_rcu_get(index);
int ret; int ret;
...@@ -427,8 +448,7 @@ EXPORT_SYMBOL_GPL(ip_set_add);
int int
ip_set_del(ip_set_id_t index, const struct sk_buff *skb, ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par, struct ip_set_adt_opt *opt)
const struct ip_set_adt_opt *opt)
{ {
struct ip_set *set = ip_set_rcu_get(index); struct ip_set *set = ip_set_rcu_get(index);
int ret = 0; int ret = 0;
...
#ifndef _IP_SET_AHASH_H /* Copyright (C) 2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
#define _IP_SET_AHASH_H *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _IP_SET_HASH_GEN_H
#define _IP_SET_HASH_GEN_H
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <linux/jhash.h> #include <linux/jhash.h>
#include <linux/netfilter/ipset/ip_set_timeout.h> #include <linux/netfilter/ipset/ip_set_timeout.h>
#ifndef rcu_dereference_bh
#define rcu_dereference_bh(p) rcu_dereference(p)
#endif
#define CONCAT(a, b, c) a##b##c #define CONCAT(a, b) a##b
#define TOKEN(a, b, c) CONCAT(a, b, c) #define TOKEN(a, b) CONCAT(a, b)
#define type_pf_next TOKEN(TYPE, PF, _elem)
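
/*
 * [Illustrative sketch, not part of this patch.]  Why the extra level of
 * indirection in TOKEN()/CONCAT(): pasting directly with ## would glue the
 * literal argument tokens together, while routing through a second macro
 * lets arguments such as MTYPE expand first.  Hypothetical DEMO_* names:
 */
#include <stdio.h>

#define DEMO_PASTE(a, b)  a##b			/* pastes the literal tokens   */
#define DEMO_CONCAT(a, b) a##b
#define DEMO_TOKEN(a, b)  DEMO_CONCAT(a, b)	/* expands args, then pastes   */

#define DEMO_MTYPE demo_hash_ip4

int DEMO_TOKEN(DEMO_MTYPE, _revision) = 1;	/* -> int demo_hash_ip4_revision */
/* int DEMO_PASTE(DEMO_MTYPE, _revision);	   would declare DEMO_MTYPE_revision */

int main(void)
{
	printf("%d\n", demo_hash_ip4_revision);
	return 0;
}
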
/* Hashing which uses arrays to resolve clashing. The hash table is resized /* Hashing which uses arrays to resolve clashing. The hash table is resized
* (doubled) when searching becomes too long. * (doubled) when searching becomes too long.
...@@ -71,34 +79,12 @@ struct htable {
#define hbucket(h, i) (&((h)->bucket[i])) #define hbucket(h, i) (&((h)->bucket[i]))
/* Book-keeping of the prefixes added to the set */ /* Book-keeping of the prefixes added to the set */
struct ip_set_hash_nets { struct net_prefixes {
u8 cidr; /* the different cidr values in the set */ u8 cidr; /* the different cidr values in the set */
u32 nets; /* number of elements per cidr */ u32 nets; /* number of elements per cidr */
}; };
/* The generic ip_set hash structure */ /* Compute the hash table size */
struct ip_set_hash {
struct htable *table; /* the hash table */
u32 maxelem; /* max elements in the hash */
u32 elements; /* current element (vs timeout) */
u32 initval; /* random jhash init value */
u32 timeout; /* timeout value, if enabled */
struct timer_list gc; /* garbage collection when timeout enabled */
struct type_pf_next next; /* temporary storage for uadd */
#ifdef IP_SET_HASH_WITH_MULTI
u8 ahash_max; /* max elements in an array block */
#endif
#ifdef IP_SET_HASH_WITH_NETMASK
u8 netmask; /* netmask value for subnets to store */
#endif
#ifdef IP_SET_HASH_WITH_RBTREE
struct rb_root rbtree;
#endif
#ifdef IP_SET_HASH_WITH_NETS
struct ip_set_hash_nets nets[0]; /* book-keeping of prefixes */
#endif
};
static size_t static size_t
htable_size(u8 hbits) htable_size(u8 hbits)
{ {
...@@ -128,25 +114,190 @@ htable_bits(u32 hashsize)
return bits; return bits;
} }
/* Destroy the hashtable part of the set */
static void
ahash_destroy(struct htable *t)
{
struct hbucket *n;
u32 i;
for (i = 0; i < jhash_size(t->htable_bits); i++) {
n = hbucket(t, i);
if (n->size)
/* FIXME: use slab cache */
kfree(n->value);
}
ip_set_free(t);
}
static int
hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
{
if (n->pos >= n->size) {
void *tmp;
if (n->size >= ahash_max)
/* Trigger rehashing */
return -EAGAIN;
tmp = kzalloc((n->size + AHASH_INIT_SIZE) * dsize,
GFP_ATOMIC);
if (!tmp)
return -ENOMEM;
if (n->size) {
memcpy(tmp, n->value, n->size * dsize);
kfree(n->value);
}
n->value = tmp;
n->size += AHASH_INIT_SIZE;
}
return 0;
}
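
/*
 * [Illustrative sketch, not part of this patch.]  hbucket_elem_add() above
 * grows a clash-resolution array in fixed-size steps and reports -EAGAIN
 * once the per-bucket cap is reached, so the caller can trigger a resize.
 * A user-space model of that policy (hypothetical demo_* names, calloc in
 * place of kzalloc):
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_STEP 4

struct demo_bucket {
	unsigned int pos, size;
	void *value;
};

static int demo_bucket_make_room(struct demo_bucket *n, unsigned int max,
				 size_t dsize)
{
	void *tmp;

	if (n->pos < n->size)
		return 0;		/* free slot already available */
	if (n->size >= max)
		return -EAGAIN;		/* bucket full: caller must rehash */
	tmp = calloc(n->size + DEMO_STEP, dsize);
	if (!tmp)
		return -ENOMEM;
	if (n->size) {
		memcpy(tmp, n->value, (size_t)n->size * dsize);
		free(n->value);
	}
	n->value = tmp;
	n->size += DEMO_STEP;
	return 0;
}

int main(void)
{
	struct demo_bucket b = { 0, 0, NULL };
	int i, ret = 0;

	for (i = 0; i < 20 && ret == 0; i++) {
		ret = demo_bucket_make_room(&b, 8, sizeof(int));
		if (ret == 0)
			((int *)b.value)[b.pos++] = i;
	}
	printf("stored %u elements, last ret %d\n", b.pos, ret);
	free(b.value);
	return 0;
}
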
#ifdef IP_SET_HASH_WITH_NETS #ifdef IP_SET_HASH_WITH_NETS
#ifdef IP_SET_HASH_WITH_NETS_PACKED #ifdef IP_SET_HASH_WITH_NETS_PACKED
/* When cidr is packed with nomatch, cidr - 1 is stored in the entry */ /* When cidr is packed with nomatch, cidr - 1 is stored in the entry */
#define CIDR(cidr) (cidr + 1) #define CIDR(cidr) (cidr + 1)
#else #else
#define CIDR(cidr) (cidr) #define CIDR(cidr) (cidr)
#endif #endif
#define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128) #define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128)
#ifdef IP_SET_HASH_WITH_MULTI #ifdef IP_SET_HASH_WITH_MULTI
#define NETS_LENGTH(family) (SET_HOST_MASK(family) + 1) #define NETS_LENGTH(family) (SET_HOST_MASK(family) + 1)
#else #else
#define NETS_LENGTH(family) SET_HOST_MASK(family) #define NETS_LENGTH(family) SET_HOST_MASK(family)
#endif #endif
#else
#define NETS_LENGTH(family) 0
#endif /* IP_SET_HASH_WITH_NETS */
#define ext_timeout(e, h) \
(unsigned long *)(((void *)(e)) + (h)->offset[IPSET_OFFSET_TIMEOUT])
#define ext_counter(e, h) \
(struct ip_set_counter *)(((void *)(e)) + (h)->offset[IPSET_OFFSET_COUNTER])
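
/*
 * [Illustrative sketch, not part of this patch.]  The ext_timeout()/
 * ext_counter() macros above locate an extension inside an element blob via
 * a per-set offset table that is filled with offsetof() at create time, so
 * the generic code never needs to know the concrete element struct.  A
 * compact user-space model (hypothetical demo_* names):
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum { DEMO_OFF_TIMEOUT, DEMO_OFF_COUNTER, DEMO_OFF_MAX };

struct demo_counter { uint64_t bytes, packets; };

/* One possible element layout: key + timeout + counter */
struct demo_elem_tc {
	uint32_t key;
	unsigned long timeout;
	struct demo_counter counter;
};

struct demo_set {
	size_t dsize;			/* element blob size */
	size_t offset[DEMO_OFF_MAX];	/* extension offsets */
};

#define demo_ext_timeout(e, s) \
	((unsigned long *)((char *)(e) + (s)->offset[DEMO_OFF_TIMEOUT]))
#define demo_ext_counter(e, s) \
	((struct demo_counter *)((char *)(e) + (s)->offset[DEMO_OFF_COUNTER]))

int main(void)
{
	struct demo_set set = {
		.dsize = sizeof(struct demo_elem_tc),
		.offset = {
			[DEMO_OFF_TIMEOUT] = offsetof(struct demo_elem_tc, timeout),
			[DEMO_OFF_COUNTER] = offsetof(struct demo_elem_tc, counter),
		},
	};
	struct demo_elem_tc e = { .key = 1 };

	*demo_ext_timeout(&e, &set) = 600;
	demo_ext_counter(&e, &set)->packets = 42;
	printf("timeout=%lu packets=%llu\n", *demo_ext_timeout(&e, &set),
	       (unsigned long long)demo_ext_counter(&e, &set)->packets);
	return 0;
}
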
#endif /* _IP_SET_HASH_GEN_H */
/* Family dependent templates */
#undef ahash_data
#undef mtype_data_equal
#undef mtype_do_data_match
#undef mtype_data_set_flags
#undef mtype_data_reset_flags
#undef mtype_data_netmask
#undef mtype_data_list
#undef mtype_data_next
#undef mtype_elem
#undef mtype_add_cidr
#undef mtype_del_cidr
#undef mtype_ahash_memsize
#undef mtype_flush
#undef mtype_destroy
#undef mtype_gc_init
#undef mtype_same_set
#undef mtype_kadt
#undef mtype_uadt
#undef mtype
#undef mtype_add
#undef mtype_del
#undef mtype_test_cidrs
#undef mtype_test
#undef mtype_expire
#undef mtype_resize
#undef mtype_head
#undef mtype_list
#undef mtype_gc
#undef mtype_gc_init
#undef mtype_variant
#undef mtype_data_match
#undef HKEY
#define mtype_data_equal TOKEN(MTYPE, _data_equal)
#ifdef IP_SET_HASH_WITH_NETS
#define mtype_do_data_match TOKEN(MTYPE, _do_data_match)
#else
#define mtype_do_data_match(d) 1
#endif
#define mtype_data_set_flags TOKEN(MTYPE, _data_set_flags)
#define mtype_data_reset_flags TOKEN(MTYPE, _data_reset_flags)
#define mtype_data_netmask TOKEN(MTYPE, _data_netmask)
#define mtype_data_list TOKEN(MTYPE, _data_list)
#define mtype_data_next TOKEN(MTYPE, _data_next)
#define mtype_elem TOKEN(MTYPE, _elem)
#define mtype_add_cidr TOKEN(MTYPE, _add_cidr)
#define mtype_del_cidr TOKEN(MTYPE, _del_cidr)
#define mtype_ahash_memsize TOKEN(MTYPE, _ahash_memsize)
#define mtype_flush TOKEN(MTYPE, _flush)
#define mtype_destroy TOKEN(MTYPE, _destroy)
#define mtype_gc_init TOKEN(MTYPE, _gc_init)
#define mtype_same_set TOKEN(MTYPE, _same_set)
#define mtype_kadt TOKEN(MTYPE, _kadt)
#define mtype_uadt TOKEN(MTYPE, _uadt)
#define mtype MTYPE
#define mtype_elem TOKEN(MTYPE, _elem)
#define mtype_add TOKEN(MTYPE, _add)
#define mtype_del TOKEN(MTYPE, _del)
#define mtype_test_cidrs TOKEN(MTYPE, _test_cidrs)
#define mtype_test TOKEN(MTYPE, _test)
#define mtype_expire TOKEN(MTYPE, _expire)
#define mtype_resize TOKEN(MTYPE, _resize)
#define mtype_head TOKEN(MTYPE, _head)
#define mtype_list TOKEN(MTYPE, _list)
#define mtype_gc TOKEN(MTYPE, _gc)
#define mtype_variant TOKEN(MTYPE, _variant)
#define mtype_data_match TOKEN(MTYPE, _data_match)
#ifndef HKEY_DATALEN
#define HKEY_DATALEN sizeof(struct mtype_elem)
#endif
#define HKEY(data, initval, htable_bits) \
(jhash2((u32 *)(data), HKEY_DATALEN/sizeof(u32), initval) \
& jhash_mask(htable_bits))
#ifndef htype
#define htype HTYPE
/* The generic hash structure */
struct htype {
struct htable *table; /* the hash table */
u32 maxelem; /* max elements in the hash */
u32 elements; /* current element (vs timeout) */
u32 initval; /* random jhash init value */
u32 timeout; /* timeout value, if enabled */
size_t dsize; /* data struct size */
size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
struct timer_list gc; /* garbage collection when timeout enabled */
struct mtype_elem next; /* temporary storage for uadd */
#ifdef IP_SET_HASH_WITH_MULTI
u8 ahash_max; /* max elements in an array block */
#endif
#ifdef IP_SET_HASH_WITH_NETMASK
u8 netmask; /* netmask value for subnets to store */
#endif
#ifdef IP_SET_HASH_WITH_RBTREE
struct rb_root rbtree;
#endif
#ifdef IP_SET_HASH_WITH_NETS
struct net_prefixes nets[0]; /* book-keeping of prefixes */
#endif
};
#endif
#ifdef IP_SET_HASH_WITH_NETS
/* Network cidr size book keeping when the hash stores different /* Network cidr size book keeping when the hash stores different
* sized networks */ * sized networks */
static void static void
add_cidr(struct ip_set_hash *h, u8 cidr, u8 nets_length) mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length)
{ {
int i, j; int i, j;
...@@ -172,7 +323,7 @@ add_cidr(struct ip_set_hash *h, u8 cidr, u8 nets_length)
} }
static void static void
del_cidr(struct ip_set_hash *h, u8 cidr, u8 nets_length) mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length)
{ {
u8 i, j; u8 i, j;
...@@ -188,51 +339,32 @@ del_cidr(struct ip_set_hash *h, u8 cidr, u8 nets_length)
h->nets[j].nets = h->nets[j + 1].nets; h->nets[j].nets = h->nets[j + 1].nets;
} }
} }
#else
#define NETS_LENGTH(family) 0
#endif #endif
/* Destroy the hashtable part of the set */
static void
ahash_destroy(struct htable *t)
{
struct hbucket *n;
u32 i;
for (i = 0; i < jhash_size(t->htable_bits); i++) {
n = hbucket(t, i);
if (n->size)
/* FIXME: use slab cache */
kfree(n->value);
}
ip_set_free(t);
}
/* Calculate the actual memory size of the set data */ /* Calculate the actual memory size of the set data */
static size_t static size_t
ahash_memsize(const struct ip_set_hash *h, size_t dsize, u8 nets_length) mtype_ahash_memsize(const struct htype *h, u8 nets_length)
{ {
u32 i; u32 i;
struct htable *t = h->table; struct htable *t = h->table;
size_t memsize = sizeof(*h) size_t memsize = sizeof(*h)
+ sizeof(*t) + sizeof(*t)
#ifdef IP_SET_HASH_WITH_NETS #ifdef IP_SET_HASH_WITH_NETS
+ sizeof(struct ip_set_hash_nets) * nets_length + sizeof(struct net_prefixes) * nets_length
#endif #endif
+ jhash_size(t->htable_bits) * sizeof(struct hbucket); + jhash_size(t->htable_bits) * sizeof(struct hbucket);
for (i = 0; i < jhash_size(t->htable_bits); i++) for (i = 0; i < jhash_size(t->htable_bits); i++)
memsize += t->bucket[i].size * dsize; memsize += t->bucket[i].size * h->dsize;
return memsize; return memsize;
} }
/* Flush a hash type of set: destroy all elements */ /* Flush a hash type of set: destroy all elements */
static void static void
ip_set_hash_flush(struct ip_set *set) mtype_flush(struct ip_set *set)
{ {
struct ip_set_hash *h = set->data; struct htype *h = set->data;
struct htable *t = h->table; struct htable *t = h->table;
struct hbucket *n; struct hbucket *n;
u32 i; u32 i;
...@@ -246,7 +378,7 @@ ip_set_hash_flush(struct ip_set *set)
} }
} }
#ifdef IP_SET_HASH_WITH_NETS #ifdef IP_SET_HASH_WITH_NETS
memset(h->nets, 0, sizeof(struct ip_set_hash_nets) memset(h->nets, 0, sizeof(struct net_prefixes)
* NETS_LENGTH(set->family)); * NETS_LENGTH(set->family));
#endif #endif
h->elements = 0; h->elements = 0;
...@@ -254,11 +386,11 @@ ip_set_hash_flush(struct ip_set *set)
/* Destroy a hash type of set */ /* Destroy a hash type of set */
static void static void
ip_set_hash_destroy(struct ip_set *set) mtype_destroy(struct ip_set *set)
{ {
struct ip_set_hash *h = set->data; struct htype *h = set->data;
if (with_timeout(h->timeout)) if (set->extensions & IPSET_EXT_TIMEOUT)
del_timer_sync(&h->gc); del_timer_sync(&h->gc);
ahash_destroy(h->table); ahash_destroy(h->table);
...@@ -270,127 +402,127 @@ ip_set_hash_destroy(struct ip_set *set)
set->data = NULL; set->data = NULL;
} }
#endif /* _IP_SET_AHASH_H */ static void
mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
{
struct htype *h = set->data;
#ifndef HKEY_DATALEN init_timer(&h->gc);
#define HKEY_DATALEN sizeof(struct type_pf_elem) h->gc.data = (unsigned long) set;
#endif h->gc.function = gc;
h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
add_timer(&h->gc);
pr_debug("gc initialized, run in every %u\n",
IPSET_GC_PERIOD(h->timeout));
}
#define HKEY(data, initval, htable_bits) \ static bool
(jhash2((u32 *)(data), HKEY_DATALEN/sizeof(u32), initval) \ mtype_same_set(const struct ip_set *a, const struct ip_set *b)
& jhash_mask(htable_bits)) {
const struct htype *x = a->data;
const struct htype *y = b->data;
/* Type/family dependent function prototypes */ /* Resizing changes htable_bits, so we ignore it */
return x->maxelem == y->maxelem &&
#define type_pf_data_equal TOKEN(TYPE, PF, _data_equal) x->timeout == y->timeout &&
#define type_pf_data_isnull TOKEN(TYPE, PF, _data_isnull) #ifdef IP_SET_HASH_WITH_NETMASK
#define type_pf_data_copy TOKEN(TYPE, PF, _data_copy) x->netmask == y->netmask &&
#define type_pf_data_zero_out TOKEN(TYPE, PF, _data_zero_out)
#define type_pf_data_netmask TOKEN(TYPE, PF, _data_netmask)
#define type_pf_data_list TOKEN(TYPE, PF, _data_list)
#define type_pf_data_tlist TOKEN(TYPE, PF, _data_tlist)
#define type_pf_data_next TOKEN(TYPE, PF, _data_next)
#define type_pf_data_flags TOKEN(TYPE, PF, _data_flags)
#define type_pf_data_reset_flags TOKEN(TYPE, PF, _data_reset_flags)
#ifdef IP_SET_HASH_WITH_NETS
#define type_pf_data_match TOKEN(TYPE, PF, _data_match)
#else
#define type_pf_data_match(d) 1
#endif #endif
a->extensions == b->extensions;
#define type_pf_elem TOKEN(TYPE, PF, _elem) }
#define type_pf_telem TOKEN(TYPE, PF, _telem)
#define type_pf_data_timeout TOKEN(TYPE, PF, _data_timeout)
#define type_pf_data_expired TOKEN(TYPE, PF, _data_expired)
#define type_pf_data_timeout_set TOKEN(TYPE, PF, _data_timeout_set)
#define type_pf_elem_add TOKEN(TYPE, PF, _elem_add)
#define type_pf_add TOKEN(TYPE, PF, _add)
#define type_pf_del TOKEN(TYPE, PF, _del)
#define type_pf_test_cidrs TOKEN(TYPE, PF, _test_cidrs)
#define type_pf_test TOKEN(TYPE, PF, _test)
#define type_pf_elem_tadd TOKEN(TYPE, PF, _elem_tadd)
#define type_pf_del_telem TOKEN(TYPE, PF, _ahash_del_telem)
#define type_pf_expire TOKEN(TYPE, PF, _expire)
#define type_pf_tadd TOKEN(TYPE, PF, _tadd)
#define type_pf_tdel TOKEN(TYPE, PF, _tdel)
#define type_pf_ttest_cidrs TOKEN(TYPE, PF, _ahash_ttest_cidrs)
#define type_pf_ttest TOKEN(TYPE, PF, _ahash_ttest)
#define type_pf_resize TOKEN(TYPE, PF, _resize)
#define type_pf_tresize TOKEN(TYPE, PF, _tresize)
#define type_pf_flush ip_set_hash_flush
#define type_pf_destroy ip_set_hash_destroy
#define type_pf_head TOKEN(TYPE, PF, _head)
#define type_pf_list TOKEN(TYPE, PF, _list)
#define type_pf_tlist TOKEN(TYPE, PF, _tlist)
#define type_pf_same_set TOKEN(TYPE, PF, _same_set)
#define type_pf_kadt TOKEN(TYPE, PF, _kadt)
#define type_pf_uadt TOKEN(TYPE, PF, _uadt)
#define type_pf_gc TOKEN(TYPE, PF, _gc)
#define type_pf_gc_init TOKEN(TYPE, PF, _gc_init)
#define type_pf_variant TOKEN(TYPE, PF, _variant)
#define type_pf_tvariant TOKEN(TYPE, PF, _tvariant)
/* Flavour without timeout */
/* Get the ith element from the array block n */ /* Get the ith element from the array block n */
#define ahash_data(n, i) \ #define ahash_data(n, i, dsize) \
((struct type_pf_elem *)((n)->value) + (i)) ((struct mtype_elem *)((n)->value + ((i) * (dsize))))
/* Add an element to the hash table when resizing the set: /* Delete expired elements from the hashtable */
* we spare the maintenance of the internal counters. */ static void
static int mtype_expire(struct htype *h, u8 nets_length, size_t dsize)
type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value,
u8 ahash_max, u32 cadt_flags)
{ {
struct type_pf_elem *data; struct htable *t = h->table;
struct hbucket *n;
if (n->pos >= n->size) { struct mtype_elem *data;
void *tmp; u32 i;
int j;
if (n->size >= ahash_max)
/* Trigger rehashing */
return -EAGAIN;
tmp = kzalloc((n->size + AHASH_INIT_SIZE) for (i = 0; i < jhash_size(t->htable_bits); i++) {
* sizeof(struct type_pf_elem), n = hbucket(t, i);
GFP_ATOMIC); for (j = 0; j < n->pos; j++) {
if (!tmp) data = ahash_data(n, j, dsize);
return -ENOMEM; if (ip_set_timeout_expired(ext_timeout(data, h))) {
if (n->size) { pr_debug("expired %u/%u\n", i, j);
memcpy(tmp, n->value, #ifdef IP_SET_HASH_WITH_NETS
sizeof(struct type_pf_elem) * n->size); mtype_del_cidr(h, CIDR(data->cidr),
nets_length);
#endif
if (j != n->pos - 1)
/* Not last one */
memcpy(data,
ahash_data(n, n->pos - 1, dsize),
dsize);
n->pos--;
h->elements--;
}
}
if (n->pos + AHASH_INIT_SIZE < n->size) {
void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
* dsize,
GFP_ATOMIC);
if (!tmp)
/* Still try to delete expired elements */
continue;
n->size -= AHASH_INIT_SIZE;
memcpy(tmp, n->value, n->size * dsize);
kfree(n->value); kfree(n->value);
n->value = tmp;
} }
n->value = tmp;
n->size += AHASH_INIT_SIZE;
} }
data = ahash_data(n, n->pos++); }
type_pf_data_copy(data, value);
#ifdef IP_SET_HASH_WITH_NETS static void
/* Resizing won't overwrite stored flags */ mtype_gc(unsigned long ul_set)
if (cadt_flags) {
type_pf_data_flags(data, cadt_flags); struct ip_set *set = (struct ip_set *) ul_set;
#endif struct htype *h = set->data;
return 0;
pr_debug("called\n");
write_lock_bh(&set->lock);
mtype_expire(h, NETS_LENGTH(set->family), h->dsize);
write_unlock_bh(&set->lock);
h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
add_timer(&h->gc);
} }
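
/*
 * [Illustrative sketch, not part of this patch.]  mtype_expire() above
 * removes a timed-out element without leaving a hole: the last element of
 * the bucket array is copied over the expired slot and the fill counter is
 * decremented.  The simplified user-space loop below (hypothetical demo_*
 * names) re-checks the moved slot before advancing:
 */
#include <stdio.h>
#include <string.h>

struct demo_elem { int key; unsigned long deadline; };

static void demo_expire(struct demo_elem *arr, unsigned int *pos,
			unsigned long now)
{
	unsigned int j;

	for (j = 0; j < *pos; ) {
		if (arr[j].deadline <= now) {
			if (j != *pos - 1)	/* not the last one */
				memcpy(&arr[j], &arr[*pos - 1], sizeof(arr[j]));
			(*pos)--;		/* re-check the moved element */
		} else {
			j++;
		}
	}
}

int main(void)
{
	struct demo_elem bucket[4] = {
		{ 1, 10 }, { 2, 5 }, { 3, 20 }, { 4, 1 },
	};
	unsigned int pos = 4, j;

	demo_expire(bucket, &pos, 7);	/* expire deadlines <= 7 */
	for (j = 0; j < pos; j++)
		printf("key %d deadline %lu\n", bucket[j].key, bucket[j].deadline);
	return 0;
}
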
/* Resize a hash: create a new hash table with doubling the hashsize /* Resize a hash: create a new hash table with doubling the hashsize
* and inserting the elements to it. Repeat until we succeed or * and inserting the elements to it. Repeat until we succeed or
* fail due to memory pressures. */ * fail due to memory pressures. */
static int static int
type_pf_resize(struct ip_set *set, bool retried) mtype_resize(struct ip_set *set, bool retried)
{ {
struct ip_set_hash *h = set->data; struct htype *h = set->data;
struct htable *t, *orig = h->table; struct htable *t, *orig = h->table;
u8 htable_bits = orig->htable_bits; u8 htable_bits = orig->htable_bits;
struct type_pf_elem *data; #ifdef IP_SET_HASH_WITH_NETS
u8 flags;
#endif
struct mtype_elem *data;
struct mtype_elem *d;
struct hbucket *n, *m; struct hbucket *n, *m;
u32 i, j, flags = 0; u32 i, j;
int ret; int ret;
/* Try to cleanup once */
if (SET_WITH_TIMEOUT(set) && !retried) {
i = h->elements;
write_lock_bh(&set->lock);
mtype_expire(set->data, NETS_LENGTH(set->family),
h->dsize);
write_unlock_bh(&set->lock);
if (h->elements < i)
return 0;
}
retry: retry:
ret = 0; ret = 0;
htable_bits++; htable_bits++;
@@ -412,16 +544,16 @@ type_pf_resize(struct ip_set *set, bool retried)
for (i = 0; i < jhash_size(orig->htable_bits); i++) { for (i = 0; i < jhash_size(orig->htable_bits); i++) {
n = hbucket(orig, i); n = hbucket(orig, i);
for (j = 0; j < n->pos; j++) { for (j = 0; j < n->pos; j++) {
data = ahash_data(n, j); data = ahash_data(n, j, h->dsize);
#ifdef IP_SET_HASH_WITH_NETS #ifdef IP_SET_HASH_WITH_NETS
flags = 0; flags = 0;
type_pf_data_reset_flags(data, &flags); mtype_data_reset_flags(data, &flags);
#endif #endif
m = hbucket(t, HKEY(data, h->initval, htable_bits)); m = hbucket(t, HKEY(data, h->initval, htable_bits));
ret = type_pf_elem_add(m, data, AHASH_MAX(h), flags); ret = hbucket_elem_add(m, AHASH_MAX(h), h->dsize);
if (ret < 0) { if (ret < 0) {
#ifdef IP_SET_HASH_WITH_NETS #ifdef IP_SET_HASH_WITH_NETS
type_pf_data_flags(data, flags); mtype_data_reset_flags(data, &flags);
#endif #endif
read_unlock_bh(&set->lock); read_unlock_bh(&set->lock);
ahash_destroy(t); ahash_destroy(t);
@@ -429,6 +561,11 @@ type_pf_resize(struct ip_set *set, bool retried)
goto retry; goto retry;
return ret; return ret;
} }
d = ahash_data(m, m->pos++, h->dsize);
memcpy(d, data, h->dsize);
#ifdef IP_SET_HASH_WITH_NETS
mtype_data_reset_flags(d, &flags);
#endif
} }
} }
@@ -445,21 +582,25 @@ type_pf_resize(struct ip_set *set, bool retried)
return 0; return 0;
} }
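/* Worth noting in the rewritten mtype_resize() above: when the set has
 * the timeout extension, one expire pass is attempted before growing
 * the table, and the resize is skipped entirely if that pass freed
 * space.  A compact standalone sketch of that policy (toy_* names are
 * illustrative only): */
#include <stdbool.h>

struct toy_hash { bool has_timeout; unsigned int elements; };

static void toy_expire(struct toy_hash *s) { /* drop expired entries */ }
static int toy_grow(struct toy_hash *s) { /* double the hash table */ return 0; }

static int toy_resize(struct toy_hash *s, bool retried)
{
	if (s->has_timeout && !retried) {
		unsigned int before = s->elements;

		toy_expire(s);
		if (s->elements < before)
			return 0;	/* enough room reclaimed */
	}
	return toy_grow(s);
}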
static inline void
type_pf_data_next(struct ip_set_hash *h, const struct type_pf_elem *d);
/* Add an element to a hash and update the internal counters when succeeded, /* Add an element to a hash and update the internal counters when succeeded,
* otherwise report the proper error code. */ * otherwise report the proper error code. */
static int static int
type_pf_add(struct ip_set *set, void *value, u32 timeout, u32 flags) mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
{ {
struct ip_set_hash *h = set->data; struct htype *h = set->data;
struct htable *t; struct htable *t;
const struct type_pf_elem *d = value; const struct mtype_elem *d = value;
struct mtype_elem *data;
struct hbucket *n; struct hbucket *n;
int i, ret = 0; int i, ret = 0;
int j = AHASH_MAX(h) + 1;
bool flag_exist = flags & IPSET_FLAG_EXIST;
u32 key, multi = 0; u32 key, multi = 0;
u32 cadt_flags = flags >> 16;
if (SET_WITH_TIMEOUT(set) && h->elements >= h->maxelem)
/* FIXME: when set is full, we slow down here */
mtype_expire(h, NETS_LENGTH(set->family), h->dsize);
if (h->elements >= h->maxelem) { if (h->elements >= h->maxelem) {
if (net_ratelimit()) if (net_ratelimit())
@@ -472,29 +613,58 @@ type_pf_add(struct ip_set *set, void *value, u32 timeout, u32 flags)
t = rcu_dereference_bh(h->table); t = rcu_dereference_bh(h->table);
key = HKEY(value, h->initval, t->htable_bits); key = HKEY(value, h->initval, t->htable_bits);
n = hbucket(t, key); n = hbucket(t, key);
for (i = 0; i < n->pos; i++) for (i = 0; i < n->pos; i++) {
if (type_pf_data_equal(ahash_data(n, i), d, &multi)) { data = ahash_data(n, i, h->dsize);
if (mtype_data_equal(data, d, &multi)) {
if (flag_exist ||
(SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(data, h)))) {
/* Just the extensions could be overwritten */
j = i;
goto reuse_slot;
} else {
ret = -IPSET_ERR_EXIST;
goto out;
}
}
/* Reuse first timed out entry */
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(data, h)) &&
j != AHASH_MAX(h) + 1)
j = i;
}
reuse_slot:
if (j != AHASH_MAX(h) + 1) {
/* Fill out reused slot */
data = ahash_data(n, j, h->dsize);
#ifdef IP_SET_HASH_WITH_NETS #ifdef IP_SET_HASH_WITH_NETS
if (flags & IPSET_FLAG_EXIST) mtype_del_cidr(h, CIDR(data->cidr), NETS_LENGTH(set->family));
/* Support overwriting just the flags */ mtype_add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
type_pf_data_flags(ahash_data(n, i),
cadt_flags);
#endif #endif
ret = -IPSET_ERR_EXIST; } else {
/* Use/create a new slot */
TUNE_AHASH_MAX(h, multi);
ret = hbucket_elem_add(n, AHASH_MAX(h), h->dsize);
if (ret != 0) {
if (ret == -EAGAIN)
mtype_data_next(&h->next, d);
goto out; goto out;
} }
TUNE_AHASH_MAX(h, multi); data = ahash_data(n, n->pos++, h->dsize);
ret = type_pf_elem_add(n, value, AHASH_MAX(h), cadt_flags); #ifdef IP_SET_HASH_WITH_NETS
if (ret != 0) { mtype_add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
if (ret == -EAGAIN) #endif
type_pf_data_next(h, d); h->elements++;
goto out;
} }
memcpy(data, d, sizeof(struct mtype_elem));
#ifdef IP_SET_HASH_WITH_NETS #ifdef IP_SET_HASH_WITH_NETS
add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family)); mtype_data_set_flags(data, flags);
#endif #endif
h->elements++; if (SET_WITH_TIMEOUT(set))
ip_set_timeout_set(ext_timeout(data, h), ext->timeout);
if (SET_WITH_COUNTER(set))
ip_set_init_counter(ext_counter(data, h), ext);
out: out:
rcu_read_unlock_bh(); rcu_read_unlock_bh();
return ret; return ret;
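/* The add path above now has two outcomes for an occupied bucket slot:
 * an entry with the same key may be overwritten (when IPSET_FLAG_EXIST
 * was given or the entry already timed out), and otherwise the first
 * timed-out entry found is remembered and recycled instead of growing
 * the bucket.  A standalone sketch of that scan (illustrative names;
 * -2 stands in for -IPSET_ERR_EXIST): */
#include <stdbool.h>

struct toy_entry { bool same_key; bool expired; };

static int toy_pick_slot(const struct toy_entry *e, int n, bool flag_exist)
{
	int reuse = -1;	/* -1: append a new slot */
	int i;

	for (i = 0; i < n; i++) {
		if (e[i].same_key)
			return (flag_exist || e[i].expired) ? i : -2;
		if (e[i].expired && reuse < 0)
			reuse = i;	/* first expired entry */
	}
	return reuse;
}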
@@ -504,40 +674,44 @@ type_pf_add(struct ip_set *set, void *value, u32 timeout, u32 flags)
* and free up space if possible. * and free up space if possible.
*/ */
static int static int
type_pf_del(struct ip_set *set, void *value, u32 timeout, u32 flags) mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
{ {
struct ip_set_hash *h = set->data; struct htype *h = set->data;
struct htable *t = h->table; struct htable *t = h->table;
const struct type_pf_elem *d = value; const struct mtype_elem *d = value;
struct mtype_elem *data;
struct hbucket *n; struct hbucket *n;
int i; int i;
struct type_pf_elem *data;
u32 key, multi = 0; u32 key, multi = 0;
key = HKEY(value, h->initval, t->htable_bits); key = HKEY(value, h->initval, t->htable_bits);
n = hbucket(t, key); n = hbucket(t, key);
for (i = 0; i < n->pos; i++) { for (i = 0; i < n->pos; i++) {
data = ahash_data(n, i); data = ahash_data(n, i, h->dsize);
if (!type_pf_data_equal(data, d, &multi)) if (!mtype_data_equal(data, d, &multi))
continue; continue;
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(data, h)))
return -IPSET_ERR_EXIST;
if (i != n->pos - 1) if (i != n->pos - 1)
/* Not last one */ /* Not last one */
type_pf_data_copy(data, ahash_data(n, n->pos - 1)); memcpy(data, ahash_data(n, n->pos - 1, h->dsize),
h->dsize);
n->pos--; n->pos--;
h->elements--; h->elements--;
#ifdef IP_SET_HASH_WITH_NETS #ifdef IP_SET_HASH_WITH_NETS
del_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family)); mtype_del_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
#endif #endif
if (n->pos + AHASH_INIT_SIZE < n->size) { if (n->pos + AHASH_INIT_SIZE < n->size) {
void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
* sizeof(struct type_pf_elem), * h->dsize,
GFP_ATOMIC); GFP_ATOMIC);
if (!tmp) if (!tmp)
return 0; return 0;
n->size -= AHASH_INIT_SIZE; n->size -= AHASH_INIT_SIZE;
memcpy(tmp, n->value, memcpy(tmp, n->value, n->size * h->dsize);
n->size * sizeof(struct type_pf_elem));
kfree(n->value); kfree(n->value);
n->value = tmp; n->value = tmp;
} }
@@ -547,30 +721,54 @@ type_pf_del(struct ip_set *set, void *value, u32 timeout, u32 flags)
return -IPSET_ERR_EXIST; return -IPSET_ERR_EXIST;
} }
#ifdef IP_SET_HASH_WITH_NETS static inline int
mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext,
struct ip_set_ext *mext, struct ip_set *set, u32 flags)
{
if (SET_WITH_COUNTER(set))
ip_set_update_counter(ext_counter(data,
(struct htype *)(set->data)),
ext, mext, flags);
return mtype_do_data_match(data);
}
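/* mtype_data_match() is where the counter extension hooks into
 * matching: on every hit the stored counter is updated and, depending
 * on the flags, handed back so the iptables set match can compare it.
 * The real helpers are ip_set_update_counter()/ip_set_init_counter();
 * the snippet below is only a rough standalone sketch of that
 * behaviour, with illustrative names and without the flag details. */
#include <stdint.h>
#include <stdbool.h>

struct toy_counter { uint64_t bytes; uint64_t packets; };

static void toy_update_counter(struct toy_counter *c, uint64_t pkt_len,
			       bool want_counters_back,
			       struct toy_counter *match_result)
{
	c->bytes += pkt_len;	/* account the matching packet */
	c->packets++;
	if (want_counters_back)
		*match_result = *c;	/* let the caller compare them */
}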
#ifdef IP_SET_HASH_WITH_NETS
/* Special test function which takes into account the different network /* Special test function which takes into account the different network
* sizes added to the set */ * sizes added to the set */
static int static int
type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout) mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
{ {
struct ip_set_hash *h = set->data; struct htype *h = set->data;
struct htable *t = h->table; struct htable *t = h->table;
struct hbucket *n; struct hbucket *n;
const struct type_pf_elem *data; struct mtype_elem *data;
int i, j = 0; int i, j = 0;
u32 key, multi = 0; u32 key, multi = 0;
u8 nets_length = NETS_LENGTH(set->family); u8 nets_length = NETS_LENGTH(set->family);
pr_debug("test by nets\n"); pr_debug("test by nets\n");
for (; j < nets_length && h->nets[j].nets && !multi; j++) { for (; j < nets_length && h->nets[j].nets && !multi; j++) {
type_pf_data_netmask(d, h->nets[j].cidr); mtype_data_netmask(d, h->nets[j].cidr);
key = HKEY(d, h->initval, t->htable_bits); key = HKEY(d, h->initval, t->htable_bits);
n = hbucket(t, key); n = hbucket(t, key);
for (i = 0; i < n->pos; i++) { for (i = 0; i < n->pos; i++) {
data = ahash_data(n, i); data = ahash_data(n, i, h->dsize);
if (type_pf_data_equal(data, d, &multi)) if (!mtype_data_equal(data, d, &multi))
return type_pf_data_match(data); continue;
if (SET_WITH_TIMEOUT(set)) {
if (!ip_set_timeout_expired(
ext_timeout(data, h)))
return mtype_data_match(data, ext,
mext, set,
flags);
#ifdef IP_SET_HASH_WITH_MULTI
multi = 0;
#endif
} else
return mtype_data_match(data, ext,
mext, set, flags);
} }
} }
return 0; return 0;
@@ -579,13 +777,14 @@ type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
/* Test whether the element is added to the set */ /* Test whether the element is added to the set */
static int static int
type_pf_test(struct ip_set *set, void *value, u32 timeout, u32 flags) mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
{ {
struct ip_set_hash *h = set->data; struct htype *h = set->data;
struct htable *t = h->table; struct htable *t = h->table;
struct type_pf_elem *d = value; struct mtype_elem *d = value;
struct hbucket *n; struct hbucket *n;
const struct type_pf_elem *data; struct mtype_elem *data;
int i; int i;
u32 key, multi = 0; u32 key, multi = 0;
@@ -593,32 +792,31 @@ type_pf_test(struct ip_set *set, void *value, u32 timeout, u32 flags)
/* If we test an IP address and not a network address, /* If we test an IP address and not a network address,
* try all possible network sizes */ * try all possible network sizes */
if (CIDR(d->cidr) == SET_HOST_MASK(set->family)) if (CIDR(d->cidr) == SET_HOST_MASK(set->family))
return type_pf_test_cidrs(set, d, timeout); return mtype_test_cidrs(set, d, ext, mext, flags);
#endif #endif
key = HKEY(d, h->initval, t->htable_bits); key = HKEY(d, h->initval, t->htable_bits);
n = hbucket(t, key); n = hbucket(t, key);
for (i = 0; i < n->pos; i++) { for (i = 0; i < n->pos; i++) {
data = ahash_data(n, i); data = ahash_data(n, i, h->dsize);
if (type_pf_data_equal(data, d, &multi)) if (mtype_data_equal(data, d, &multi) &&
return type_pf_data_match(data); !(SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(data, h))))
return mtype_data_match(data, ext, mext, set, flags);
} }
return 0; return 0;
} }
/* Reply a HEADER request: fill out the header part of the set */ /* Reply a HEADER request: fill out the header part of the set */
static int static int
type_pf_head(struct ip_set *set, struct sk_buff *skb) mtype_head(struct ip_set *set, struct sk_buff *skb)
{ {
const struct ip_set_hash *h = set->data; const struct htype *h = set->data;
struct nlattr *nested; struct nlattr *nested;
size_t memsize; size_t memsize;
read_lock_bh(&set->lock); read_lock_bh(&set->lock);
memsize = ahash_memsize(h, with_timeout(h->timeout) memsize = mtype_ahash_memsize(h, NETS_LENGTH(set->family));
? sizeof(struct type_pf_telem)
: sizeof(struct type_pf_elem),
NETS_LENGTH(set->family));
read_unlock_bh(&set->lock); read_unlock_bh(&set->lock);
nested = ipset_nest_start(skb, IPSET_ATTR_DATA); nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
@@ -635,8 +833,11 @@ type_pf_head(struct ip_set *set, struct sk_buff *skb)
#endif #endif
if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) || nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
(with_timeout(h->timeout) && ((set->extensions & IPSET_EXT_TIMEOUT) &&
nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout)))) nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout))) ||
((set->extensions & IPSET_EXT_COUNTER) &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS,
htonl(IPSET_FLAG_WITH_COUNTERS))))
goto nla_put_failure; goto nla_put_failure;
ipset_nest_end(skb, nested); ipset_nest_end(skb, nested);
@@ -647,14 +848,14 @@ type_pf_head(struct ip_set *set, struct sk_buff *skb)
/* Reply a LIST/SAVE request: dump the elements of the specified set */ /* Reply a LIST/SAVE request: dump the elements of the specified set */
static int static int
type_pf_list(const struct ip_set *set, mtype_list(const struct ip_set *set,
struct sk_buff *skb, struct netlink_callback *cb) struct sk_buff *skb, struct netlink_callback *cb)
{ {
const struct ip_set_hash *h = set->data; const struct htype *h = set->data;
const struct htable *t = h->table; const struct htable *t = h->table;
struct nlattr *atd, *nested; struct nlattr *atd, *nested;
const struct hbucket *n; const struct hbucket *n;
const struct type_pf_elem *data; const struct mtype_elem *e;
u32 first = cb->args[2]; u32 first = cb->args[2];
/* We assume that one hash bucket fills into one page */ /* We assume that one hash bucket fills into one page */
void *incomplete; void *incomplete;
@@ -669,9 +870,12 @@ type_pf_list(const struct ip_set *set,
n = hbucket(t, cb->args[2]); n = hbucket(t, cb->args[2]);
pr_debug("cb->args[2]: %lu, t %p n %p\n", cb->args[2], t, n); pr_debug("cb->args[2]: %lu, t %p n %p\n", cb->args[2], t, n);
for (i = 0; i < n->pos; i++) { for (i = 0; i < n->pos; i++) {
data = ahash_data(n, i); e = ahash_data(n, i, h->dsize);
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, h)))
continue;
pr_debug("list hash %lu hbucket %p i %u, data %p\n", pr_debug("list hash %lu hbucket %p i %u, data %p\n",
cb->args[2], n, i, data); cb->args[2], n, i, e);
nested = ipset_nest_start(skb, IPSET_ATTR_DATA); nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested) { if (!nested) {
if (cb->args[2] == first) { if (cb->args[2] == first) {
@@ -680,7 +884,15 @@ type_pf_list(const struct ip_set *set,
} else } else
goto nla_put_failure; goto nla_put_failure;
} }
if (type_pf_data_list(skb, data)) if (mtype_data_list(skb, e))
goto nla_put_failure;
if (SET_WITH_TIMEOUT(set) &&
nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(
ext_timeout(e, h)))))
goto nla_put_failure;
if (SET_WITH_COUNTER(set) &&
ip_set_put_counter(skb, ext_counter(e, h)))
goto nla_put_failure; goto nla_put_failure;
ipset_nest_end(skb, nested); ipset_nest_end(skb, nested);
} }
@@ -704,538 +916,185 @@ type_pf_list(const struct ip_set *set,
} }
static int static int
type_pf_kadt(struct ip_set *set, const struct sk_buff *skb, TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt); enum ipset_adt adt, struct ip_set_adt_opt *opt);
static int static int
type_pf_uadt(struct ip_set *set, struct nlattr *tb[], TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried); enum ipset_adt adt, u32 *lineno, u32 flags, bool retried);
static const struct ip_set_type_variant type_pf_variant = { static const struct ip_set_type_variant mtype_variant = {
.kadt = type_pf_kadt, .kadt = mtype_kadt,
.uadt = type_pf_uadt, .uadt = mtype_uadt,
.adt = { .adt = {
[IPSET_ADD] = type_pf_add, [IPSET_ADD] = mtype_add,
[IPSET_DEL] = type_pf_del, [IPSET_DEL] = mtype_del,
[IPSET_TEST] = type_pf_test, [IPSET_TEST] = mtype_test,
}, },
.destroy = type_pf_destroy, .destroy = mtype_destroy,
.flush = type_pf_flush, .flush = mtype_flush,
.head = type_pf_head, .head = mtype_head,
.list = type_pf_list, .list = mtype_list,
.resize = type_pf_resize, .resize = mtype_resize,
.same_set = type_pf_same_set, .same_set = mtype_same_set,
}; };
/* Flavour with timeout support */ #ifdef IP_SET_EMIT_CREATE
static int
#define ahash_tdata(n, i) \ TOKEN(HTYPE, _create)(struct ip_set *set, struct nlattr *tb[], u32 flags)
(struct type_pf_elem *)((struct type_pf_telem *)((n)->value) + (i))
static inline u32
type_pf_data_timeout(const struct type_pf_elem *data)
{ {
const struct type_pf_telem *tdata = u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
(const struct type_pf_telem *) data; u32 cadt_flags = 0;
u8 hbits;
return tdata->timeout; #ifdef IP_SET_HASH_WITH_NETMASK
} u8 netmask;
#endif
size_t hsize;
struct HTYPE *h;
static inline bool if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
type_pf_data_expired(const struct type_pf_elem *data) return -IPSET_ERR_INVALID_FAMILY;
{ #ifdef IP_SET_HASH_WITH_NETMASK
const struct type_pf_telem *tdata = netmask = set->family == NFPROTO_IPV4 ? 32 : 128;
(const struct type_pf_telem *) data; pr_debug("Create set %s with family %s\n",
set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");
#endif
return ip_set_timeout_expired(tdata->timeout); if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
} !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
static inline void if (tb[IPSET_ATTR_HASHSIZE]) {
type_pf_data_timeout_set(struct type_pf_elem *data, u32 timeout) hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
{ if (hashsize < IPSET_MIMINAL_HASHSIZE)
struct type_pf_telem *tdata = (struct type_pf_telem *) data; hashsize = IPSET_MIMINAL_HASHSIZE;
}
tdata->timeout = ip_set_timeout_set(timeout); if (tb[IPSET_ATTR_MAXELEM])
} maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
static int #ifdef IP_SET_HASH_WITH_NETMASK
type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value, if (tb[IPSET_ATTR_NETMASK]) {
u8 ahash_max, u32 cadt_flags, u32 timeout) netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
{
struct type_pf_elem *data;
if (n->pos >= n->size) { if ((set->family == NFPROTO_IPV4 && netmask > 32) ||
void *tmp; (set->family == NFPROTO_IPV6 && netmask > 128) ||
netmask == 0)
return -IPSET_ERR_INVALID_NETMASK;
}
#endif
if (n->size >= ahash_max) hsize = sizeof(*h);
/* Trigger rehashing */
return -EAGAIN;
tmp = kzalloc((n->size + AHASH_INIT_SIZE)
* sizeof(struct type_pf_telem),
GFP_ATOMIC);
if (!tmp)
return -ENOMEM;
if (n->size) {
memcpy(tmp, n->value,
sizeof(struct type_pf_telem) * n->size);
kfree(n->value);
}
n->value = tmp;
n->size += AHASH_INIT_SIZE;
}
data = ahash_tdata(n, n->pos++);
type_pf_data_copy(data, value);
type_pf_data_timeout_set(data, timeout);
#ifdef IP_SET_HASH_WITH_NETS #ifdef IP_SET_HASH_WITH_NETS
/* Resizing won't overwrite stored flags */ hsize += sizeof(struct net_prefixes) *
if (cadt_flags) (set->family == NFPROTO_IPV4 ? 32 : 128);
type_pf_data_flags(data, cadt_flags);
#endif #endif
return 0; h = kzalloc(hsize, GFP_KERNEL);
} if (!h)
return -ENOMEM;
/* Delete expired elements from the hashtable */
static void
type_pf_expire(struct ip_set_hash *h, u8 nets_length)
{
struct htable *t = h->table;
struct hbucket *n;
struct type_pf_elem *data;
u32 i;
int j;
for (i = 0; i < jhash_size(t->htable_bits); i++) { h->maxelem = maxelem;
n = hbucket(t, i); #ifdef IP_SET_HASH_WITH_NETMASK
for (j = 0; j < n->pos; j++) { h->netmask = netmask;
data = ahash_tdata(n, j);
if (type_pf_data_expired(data)) {
pr_debug("expired %u/%u\n", i, j);
#ifdef IP_SET_HASH_WITH_NETS
del_cidr(h, CIDR(data->cidr), nets_length);
#endif #endif
if (j != n->pos - 1) get_random_bytes(&h->initval, sizeof(h->initval));
/* Not last one */ h->timeout = IPSET_NO_TIMEOUT;
type_pf_data_copy(data,
ahash_tdata(n, n->pos - 1));
n->pos--;
h->elements--;
}
}
if (n->pos + AHASH_INIT_SIZE < n->size) {
void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
* sizeof(struct type_pf_telem),
GFP_ATOMIC);
if (!tmp)
/* Still try to delete expired elements */
continue;
n->size -= AHASH_INIT_SIZE;
memcpy(tmp, n->value,
n->size * sizeof(struct type_pf_telem));
kfree(n->value);
n->value = tmp;
}
}
}
static int
type_pf_tresize(struct ip_set *set, bool retried)
{
struct ip_set_hash *h = set->data;
struct htable *t, *orig = h->table;
u8 htable_bits = orig->htable_bits;
struct type_pf_elem *data;
struct hbucket *n, *m;
u32 i, j, flags = 0;
int ret;
/* Try to cleanup once */ hbits = htable_bits(hashsize);
if (!retried) { hsize = htable_size(hbits);
i = h->elements; if (hsize == 0) {
write_lock_bh(&set->lock); kfree(h);
type_pf_expire(set->data, NETS_LENGTH(set->family));
write_unlock_bh(&set->lock);
if (h->elements < i)
return 0;
}
retry:
ret = 0;
htable_bits++;
pr_debug("attempt to resize set %s from %u to %u, t %p\n",
set->name, orig->htable_bits, htable_bits, orig);
if (!htable_bits) {
/* In case we have plenty of memory :-) */
pr_warning("Cannot increase the hashsize of set %s further\n",
set->name);
return -IPSET_ERR_HASH_FULL;
}
t = ip_set_alloc(sizeof(*t)
+ jhash_size(htable_bits) * sizeof(struct hbucket));
if (!t)
return -ENOMEM; return -ENOMEM;
t->htable_bits = htable_bits;
read_lock_bh(&set->lock);
for (i = 0; i < jhash_size(orig->htable_bits); i++) {
n = hbucket(orig, i);
for (j = 0; j < n->pos; j++) {
data = ahash_tdata(n, j);
#ifdef IP_SET_HASH_WITH_NETS
flags = 0;
type_pf_data_reset_flags(data, &flags);
#endif
m = hbucket(t, HKEY(data, h->initval, htable_bits));
ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), flags,
ip_set_timeout_get(type_pf_data_timeout(data)));
if (ret < 0) {
#ifdef IP_SET_HASH_WITH_NETS
type_pf_data_flags(data, flags);
#endif
read_unlock_bh(&set->lock);
ahash_destroy(t);
if (ret == -EAGAIN)
goto retry;
return ret;
}
}
} }
h->table = ip_set_alloc(hsize);
rcu_assign_pointer(h->table, t); if (!h->table) {
read_unlock_bh(&set->lock); kfree(h);
return -ENOMEM;
/* Give time to other readers of the set */
synchronize_rcu_bh();
ahash_destroy(orig);
return 0;
}
static int
type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
{
struct ip_set_hash *h = set->data;
struct htable *t = h->table;
const struct type_pf_elem *d = value;
struct hbucket *n;
struct type_pf_elem *data;
int ret = 0, i, j = AHASH_MAX(h) + 1;
bool flag_exist = flags & IPSET_FLAG_EXIST;
u32 key, multi = 0;
u32 cadt_flags = flags >> 16;
if (h->elements >= h->maxelem)
/* FIXME: when set is full, we slow down here */
type_pf_expire(h, NETS_LENGTH(set->family));
if (h->elements >= h->maxelem) {
if (net_ratelimit())
pr_warning("Set %s is full, maxelem %u reached\n",
set->name, h->maxelem);
return -IPSET_ERR_HASH_FULL;
} }
h->table->htable_bits = hbits;
rcu_read_lock_bh();
t = rcu_dereference_bh(h->table); set->data = h;
key = HKEY(d, h->initval, t->htable_bits); if (set->family == NFPROTO_IPV4)
n = hbucket(t, key); set->variant = &TOKEN(HTYPE, 4_variant);
for (i = 0; i < n->pos; i++) { else
data = ahash_tdata(n, i); set->variant = &TOKEN(HTYPE, 6_variant);
if (type_pf_data_equal(data, d, &multi)) {
if (type_pf_data_expired(data) || flag_exist) if (tb[IPSET_ATTR_CADT_FLAGS])
/* Just timeout value may be updated */ cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
j = i; if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
else { set->extensions |= IPSET_EXT_COUNTER;
ret = -IPSET_ERR_EXIST; if (tb[IPSET_ATTR_TIMEOUT]) {
goto out; h->timeout =
ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->extensions |= IPSET_EXT_TIMEOUT;
if (set->family == NFPROTO_IPV4) {
h->dsize =
sizeof(struct TOKEN(HTYPE, 4ct_elem));
h->offset[IPSET_OFFSET_TIMEOUT] =
offsetof(struct TOKEN(HTYPE, 4ct_elem),
timeout);
h->offset[IPSET_OFFSET_COUNTER] =
offsetof(struct TOKEN(HTYPE, 4ct_elem),
counter);
TOKEN(HTYPE, 4_gc_init)(set,
TOKEN(HTYPE, 4_gc));
} else {
h->dsize =
sizeof(struct TOKEN(HTYPE, 6ct_elem));
h->offset[IPSET_OFFSET_TIMEOUT] =
offsetof(struct TOKEN(HTYPE, 6ct_elem),
timeout);
h->offset[IPSET_OFFSET_COUNTER] =
offsetof(struct TOKEN(HTYPE, 6ct_elem),
counter);
TOKEN(HTYPE, 6_gc_init)(set,
TOKEN(HTYPE, 6_gc));
} }
} else if (j == AHASH_MAX(h) + 1 && } else {
type_pf_data_expired(data)) if (set->family == NFPROTO_IPV4) {
j = i; h->dsize =
} sizeof(struct TOKEN(HTYPE, 4c_elem));
if (j != AHASH_MAX(h) + 1) { h->offset[IPSET_OFFSET_COUNTER] =
data = ahash_tdata(n, j); offsetof(struct TOKEN(HTYPE, 4c_elem),
#ifdef IP_SET_HASH_WITH_NETS counter);
del_cidr(h, CIDR(data->cidr), NETS_LENGTH(set->family)); } else {
add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family)); h->dsize =
#endif sizeof(struct TOKEN(HTYPE, 6c_elem));
type_pf_data_copy(data, d); h->offset[IPSET_OFFSET_COUNTER] =
type_pf_data_timeout_set(data, timeout); offsetof(struct TOKEN(HTYPE, 6c_elem),
#ifdef IP_SET_HASH_WITH_NETS counter);
type_pf_data_flags(data, cadt_flags);
#endif
goto out;
}
TUNE_AHASH_MAX(h, multi);
ret = type_pf_elem_tadd(n, d, AHASH_MAX(h), cadt_flags, timeout);
if (ret != 0) {
if (ret == -EAGAIN)
type_pf_data_next(h, d);
goto out;
}
#ifdef IP_SET_HASH_WITH_NETS
add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
#endif
h->elements++;
out:
rcu_read_unlock_bh();
return ret;
}
static int
type_pf_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags)
{
struct ip_set_hash *h = set->data;
struct htable *t = h->table;
const struct type_pf_elem *d = value;
struct hbucket *n;
int i;
struct type_pf_elem *data;
u32 key, multi = 0;
key = HKEY(value, h->initval, t->htable_bits);
n = hbucket(t, key);
for (i = 0; i < n->pos; i++) {
data = ahash_tdata(n, i);
if (!type_pf_data_equal(data, d, &multi))
continue;
if (type_pf_data_expired(data))
return -IPSET_ERR_EXIST;
if (i != n->pos - 1)
/* Not last one */
type_pf_data_copy(data, ahash_tdata(n, n->pos - 1));
n->pos--;
h->elements--;
#ifdef IP_SET_HASH_WITH_NETS
del_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
#endif
if (n->pos + AHASH_INIT_SIZE < n->size) {
void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
* sizeof(struct type_pf_telem),
GFP_ATOMIC);
if (!tmp)
return 0;
n->size -= AHASH_INIT_SIZE;
memcpy(tmp, n->value,
n->size * sizeof(struct type_pf_telem));
kfree(n->value);
n->value = tmp;
}
return 0;
}
return -IPSET_ERR_EXIST;
}
#ifdef IP_SET_HASH_WITH_NETS
static int
type_pf_ttest_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
{
struct ip_set_hash *h = set->data;
struct htable *t = h->table;
struct type_pf_elem *data;
struct hbucket *n;
int i, j = 0;
u32 key, multi = 0;
u8 nets_length = NETS_LENGTH(set->family);
for (; j < nets_length && h->nets[j].nets && !multi; j++) {
type_pf_data_netmask(d, h->nets[j].cidr);
key = HKEY(d, h->initval, t->htable_bits);
n = hbucket(t, key);
for (i = 0; i < n->pos; i++) {
data = ahash_tdata(n, i);
#ifdef IP_SET_HASH_WITH_MULTI
if (type_pf_data_equal(data, d, &multi)) {
if (!type_pf_data_expired(data))
return type_pf_data_match(data);
multi = 0;
} }
#else
if (type_pf_data_equal(data, d, &multi) &&
!type_pf_data_expired(data))
return type_pf_data_match(data);
#endif
} }
} } else if (tb[IPSET_ATTR_TIMEOUT]) {
return 0; h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
} set->extensions |= IPSET_EXT_TIMEOUT;
#endif if (set->family == NFPROTO_IPV4) {
h->dsize = sizeof(struct TOKEN(HTYPE, 4t_elem));
static int h->offset[IPSET_OFFSET_TIMEOUT] =
type_pf_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags) offsetof(struct TOKEN(HTYPE, 4t_elem),
{ timeout);
struct ip_set_hash *h = set->data; TOKEN(HTYPE, 4_gc_init)(set, TOKEN(HTYPE, 4_gc));
struct htable *t = h->table; } else {
struct type_pf_elem *data, *d = value; h->dsize = sizeof(struct TOKEN(HTYPE, 6t_elem));
struct hbucket *n; h->offset[IPSET_OFFSET_TIMEOUT] =
int i; offsetof(struct TOKEN(HTYPE, 6t_elem),
u32 key, multi = 0; timeout);
TOKEN(HTYPE, 6_gc_init)(set, TOKEN(HTYPE, 6_gc));
#ifdef IP_SET_HASH_WITH_NETS
if (CIDR(d->cidr) == SET_HOST_MASK(set->family))
return type_pf_ttest_cidrs(set, d, timeout);
#endif
key = HKEY(d, h->initval, t->htable_bits);
n = hbucket(t, key);
for (i = 0; i < n->pos; i++) {
data = ahash_tdata(n, i);
if (type_pf_data_equal(data, d, &multi) &&
!type_pf_data_expired(data))
return type_pf_data_match(data);
}
return 0;
}
static int
type_pf_tlist(const struct ip_set *set,
struct sk_buff *skb, struct netlink_callback *cb)
{
const struct ip_set_hash *h = set->data;
const struct htable *t = h->table;
struct nlattr *atd, *nested;
const struct hbucket *n;
const struct type_pf_elem *data;
u32 first = cb->args[2];
/* We assume that one hash bucket fills into one page */
void *incomplete;
int i;
atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
if (!atd)
return -EMSGSIZE;
for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) {
incomplete = skb_tail_pointer(skb);
n = hbucket(t, cb->args[2]);
for (i = 0; i < n->pos; i++) {
data = ahash_tdata(n, i);
pr_debug("list %p %u\n", n, i);
if (type_pf_data_expired(data))
continue;
pr_debug("do list %p %u\n", n, i);
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested) {
if (cb->args[2] == first) {
nla_nest_cancel(skb, atd);
return -EMSGSIZE;
} else
goto nla_put_failure;
}
if (type_pf_data_tlist(skb, data))
goto nla_put_failure;
ipset_nest_end(skb, nested);
} }
} else {
if (set->family == NFPROTO_IPV4)
h->dsize = sizeof(struct TOKEN(HTYPE, 4_elem));
else
h->dsize = sizeof(struct TOKEN(HTYPE, 6_elem));
} }
ipset_nest_end(skb, atd);
/* Set listing finished */
cb->args[2] = 0;
return 0; pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
set->name, jhash_size(h->table->htable_bits),
h->table->htable_bits, h->maxelem, set->data, h->table);
nla_put_failure:
nlmsg_trim(skb, incomplete);
ipset_nest_end(skb, atd);
if (unlikely(first == cb->args[2])) {
pr_warning("Can't list set %s: one bucket does not fit into "
"a message. Please report it!\n", set->name);
cb->args[2] = 0;
return -EMSGSIZE;
}
return 0; return 0;
} }
#endif /* IP_SET_EMIT_CREATE */
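/* The create path above is also where the per-set element layout is
 * chosen: depending on which of IPSET_ATTR_TIMEOUT and the
 * IPSET_FLAG_WITH_COUNTERS flag were supplied, h->dsize and the
 * h->offset[] table are filled from the plain, "t", "c" or "ct"
 * element structure, and the gc timer is only started when a timeout
 * is present.  From userspace this is expected to look roughly like
 * the following once a matching ipset release is installed (exact
 * option spelling may differ between versions):
 *
 *	ipset create foo hash:ip timeout 600 counters
 *	ipset add foo 192.0.2.1
 *	ipset add foo 192.0.2.2 packets 42 bytes 1024
 *	ipset list foo
 */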
static const struct ip_set_type_variant type_pf_tvariant = {
.kadt = type_pf_kadt,
.uadt = type_pf_uadt,
.adt = {
[IPSET_ADD] = type_pf_tadd,
[IPSET_DEL] = type_pf_tdel,
[IPSET_TEST] = type_pf_ttest,
},
.destroy = type_pf_destroy,
.flush = type_pf_flush,
.head = type_pf_head,
.list = type_pf_tlist,
.resize = type_pf_tresize,
.same_set = type_pf_same_set,
};
static void
type_pf_gc(unsigned long ul_set)
{
struct ip_set *set = (struct ip_set *) ul_set;
struct ip_set_hash *h = set->data;
pr_debug("called\n");
write_lock_bh(&set->lock);
type_pf_expire(h, NETS_LENGTH(set->family));
write_unlock_bh(&set->lock);
h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
add_timer(&h->gc);
}
static void
type_pf_gc_init(struct ip_set *set)
{
struct ip_set_hash *h = set->data;
init_timer(&h->gc);
h->gc.data = (unsigned long) set;
h->gc.function = type_pf_gc;
h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
add_timer(&h->gc);
pr_debug("gc initialized, run in every %u\n",
IPSET_GC_PERIOD(h->timeout));
}
#undef HKEY_DATALEN
#undef HKEY
#undef type_pf_data_equal
#undef type_pf_data_isnull
#undef type_pf_data_copy
#undef type_pf_data_zero_out
#undef type_pf_data_netmask
#undef type_pf_data_list
#undef type_pf_data_tlist
#undef type_pf_data_next
#undef type_pf_data_flags
#undef type_pf_data_reset_flags
#undef type_pf_data_match
#undef type_pf_elem
#undef type_pf_telem
#undef type_pf_data_timeout
#undef type_pf_data_expired
#undef type_pf_data_timeout_set
#undef type_pf_elem_add
#undef type_pf_add
#undef type_pf_del
#undef type_pf_test_cidrs
#undef type_pf_test
#undef type_pf_elem_tadd
#undef type_pf_del_telem
#undef type_pf_expire
#undef type_pf_tadd
#undef type_pf_tdel
#undef type_pf_ttest_cidrs
#undef type_pf_ttest
#undef type_pf_resize
#undef type_pf_tresize
#undef type_pf_flush
#undef type_pf_destroy
#undef type_pf_head
#undef type_pf_list
#undef type_pf_tlist
#undef type_pf_same_set
#undef type_pf_kadt
#undef type_pf_uadt
#undef type_pf_gc
#undef type_pf_gc_init
#undef type_pf_variant
#undef type_pf_tvariant
/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> /* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
@@ -21,11 +21,10 @@
#include <linux/netfilter.h> #include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_hash.h> #include <linux/netfilter/ipset/ip_set_hash.h>
#define REVISION_MIN 0 #define REVISION_MIN 0
#define REVISION_MAX 0 #define REVISION_MAX 1 /* Counters support */
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -33,58 +32,47 @@ IP_SET_MODULE_DESC("hash:ip", REVISION_MIN, REVISION_MAX);
MODULE_ALIAS("ip_set_hash:ip"); MODULE_ALIAS("ip_set_hash:ip");
/* Type specific function prefix */ /* Type specific function prefix */
#define TYPE hash_ip #define HTYPE hash_ip
#define IP_SET_HASH_WITH_NETMASK
static bool
hash_ip_same_set(const struct ip_set *a, const struct ip_set *b);
#define hash_ip4_same_set hash_ip_same_set
#define hash_ip6_same_set hash_ip_same_set
/* The type variant functions: IPv4 */ /* IPv4 variants */
/* Member elements without timeout */ /* Member elements */
struct hash_ip4_elem { struct hash_ip4_elem {
/* Zero valued IP addresses cannot be stored */
__be32 ip; __be32 ip;
}; };
/* Member elements with timeout support */ struct hash_ip4t_elem {
struct hash_ip4_telem {
__be32 ip; __be32 ip;
unsigned long timeout; unsigned long timeout;
}; };
static inline bool struct hash_ip4c_elem {
hash_ip4_data_equal(const struct hash_ip4_elem *ip1, __be32 ip;
const struct hash_ip4_elem *ip2, struct ip_set_counter counter;
u32 *multi) };
{
return ip1->ip == ip2->ip;
}
static inline bool struct hash_ip4ct_elem {
hash_ip4_data_isnull(const struct hash_ip4_elem *elem) __be32 ip;
{ struct ip_set_counter counter;
return elem->ip == 0; unsigned long timeout;
} };
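/* Four element layouts replace the old elem/telem pair: plain, "t"
 * (timeout), "c" (counter) and "ct" (both).  For a set created with
 * both "timeout" and "counters", the generic create code shown earlier
 * in ip_set_hash_gen.h ends up with, for IPv4:
 *
 *	h->dsize                        = sizeof(struct hash_ip4ct_elem);
 *	h->offset[IPSET_OFFSET_TIMEOUT] = offsetof(struct hash_ip4ct_elem, timeout);
 *	h->offset[IPSET_OFFSET_COUNTER] = offsetof(struct hash_ip4ct_elem, counter);
 *
 * so the same add/del/test code can address whichever extensions the
 * set actually carries. */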
static inline void /* Common functions */
hash_ip4_data_copy(struct hash_ip4_elem *dst, const struct hash_ip4_elem *src)
{
dst->ip = src->ip;
}
/* Zero valued IP addresses cannot be stored */ static inline bool
static inline void hash_ip4_data_equal(const struct hash_ip4_elem *e1,
hash_ip4_data_zero_out(struct hash_ip4_elem *elem) const struct hash_ip4_elem *e2,
u32 *multi)
{ {
elem->ip = 0; return e1->ip == e2->ip;
} }
static inline bool static inline bool
hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data) hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *e)
{ {
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip)) if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, e->ip))
goto nla_put_failure; goto nla_put_failure;
return 0; return 0;
@@ -92,41 +80,26 @@ hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data)
return 1; return 1;
} }
static bool static inline void
hash_ip4_data_tlist(struct sk_buff *skb, const struct hash_ip4_elem *data) hash_ip4_data_next(struct hash_ip4_elem *next, const struct hash_ip4_elem *e)
{ {
const struct hash_ip4_telem *tdata = next->ip = e->ip;
(const struct hash_ip4_telem *)data;
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout))))
goto nla_put_failure;
return 0;
nla_put_failure:
return 1;
} }
#define IP_SET_HASH_WITH_NETMASK #define MTYPE hash_ip4
#define PF 4 #define PF 4
#define HOST_MASK 32 #define HOST_MASK 32
#include <linux/netfilter/ipset/ip_set_ahash.h> #include "ip_set_hash_gen.h"
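/* This is the new pattern for every hash type: define the element
 * structures plus the small _data_equal/_data_list/_data_next helpers,
 * set MTYPE, PF and HOST_MASK, then include ip_set_hash_gen.h, which
 * instantiates the add/del/test/list/resize functions and the variant
 * operations table for that element type.  Sketch with a made-up
 * "hash_foo" type (not in the tree):
 *
 *	struct hash_foo4_elem { __be32 ip; };
 *	... hash_foo4_data_equal(), hash_foo4_data_list(),
 *	    hash_foo4_data_next() ...
 *
 *	#define MTYPE     hash_foo4
 *	#define PF        4
 *	#define HOST_MASK 32
 *	#include "ip_set_hash_gen.h"
 */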
static inline void
hash_ip4_data_next(struct ip_set_hash *h, const struct hash_ip4_elem *d)
{
h->next.ip = d->ip;
}
static int static int
hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb, hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt) enum ipset_adt adt, struct ip_set_adt_opt *opt)
{ {
const struct ip_set_hash *h = set->data; const struct hash_ip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ip4_elem e = {};
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
__be32 ip; __be32 ip;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip); ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip);
@@ -134,43 +107,42 @@ hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb,
if (ip == 0) if (ip == 0)
return -EINVAL; return -EINVAL;
return adtfn(set, &ip, opt_timeout(opt, h), opt->cmdflags); e.ip = ip;
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
} }
static int static int
hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{ {
const struct ip_set_hash *h = set->data; const struct hash_ip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
u32 ip, ip_to, hosts, timeout = h->timeout; struct hash_ip4_elem e = {};
__be32 nip; struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
u32 ip, ip_to, hosts;
int ret = 0; int ret = 0;
if (unlikely(!tb[IPSET_ATTR_IP] || if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
ip_set_get_extensions(set, tb, &ext);
if (ret) if (ret)
return ret; return ret;
ip &= ip_set_hostmask(h->netmask); ip &= ip_set_hostmask(h->netmask);
if (tb[IPSET_ATTR_TIMEOUT]) {
if (!with_timeout(h->timeout))
return -IPSET_ERR_TIMEOUT;
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (adt == IPSET_TEST) { if (adt == IPSET_TEST) {
nip = htonl(ip); e.ip = htonl(ip);
if (nip == 0) if (e.ip == 0)
return -IPSET_ERR_HASH_ELEM; return -IPSET_ERR_HASH_ELEM;
return adtfn(set, &nip, timeout, flags); return adtfn(set, &e, &ext, &ext, flags);
} }
ip_to = ip; ip_to = ip;
@@ -193,10 +165,10 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried) if (retried)
ip = ntohl(h->next.ip); ip = ntohl(h->next.ip);
for (; !before(ip_to, ip); ip += hosts) { for (; !before(ip_to, ip); ip += hosts) {
nip = htonl(ip); e.ip = htonl(ip);
if (nip == 0) if (e.ip == 0)
return -IPSET_ERR_HASH_ELEM; return -IPSET_ERR_HASH_ELEM;
ret = adtfn(set, &nip, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
@@ -206,29 +178,31 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret; return ret;
} }
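/* The IPv4 userspace add path above walks a range in steps of the
 * network implied by the set's netmask parameter: for example, with a
 * set created with "netmask 24", adding the range 192.0.2.0-192.0.3.255
 * stores two elements (one per /24), and h->next.ip lets the loop
 * resume at the right address after a resize-triggered retry.
 * (Illustrative addresses; the stepping is what hosts/ip_to implement
 * above.) */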
static bool /* IPv6 variants */
hash_ip_same_set(const struct ip_set *a, const struct ip_set *b)
{
const struct ip_set_hash *x = a->data;
const struct ip_set_hash *y = b->data;
/* Resizing changes htable_bits, so we ignore it */ /* Member elements */
return x->maxelem == y->maxelem && struct hash_ip6_elem {
x->timeout == y->timeout && union nf_inet_addr ip;
x->netmask == y->netmask; };
}
/* The type variant functions: IPv6 */ struct hash_ip6t_elem {
union nf_inet_addr ip;
unsigned long timeout;
};
struct hash_ip6_elem { struct hash_ip6c_elem {
union nf_inet_addr ip; union nf_inet_addr ip;
struct ip_set_counter counter;
}; };
struct hash_ip6_telem { struct hash_ip6ct_elem {
union nf_inet_addr ip; union nf_inet_addr ip;
struct ip_set_counter counter;
unsigned long timeout; unsigned long timeout;
}; };
/* Common functions */
static inline bool static inline bool
hash_ip6_data_equal(const struct hash_ip6_elem *ip1, hash_ip6_data_equal(const struct hash_ip6_elem *ip1,
const struct hash_ip6_elem *ip2, const struct hash_ip6_elem *ip2,
@@ -237,37 +211,16 @@ hash_ip6_data_equal(const struct hash_ip6_elem *ip1,
return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6); return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6);
} }
static inline bool
hash_ip6_data_isnull(const struct hash_ip6_elem *elem)
{
return ipv6_addr_any(&elem->ip.in6);
}
static inline void static inline void
hash_ip6_data_copy(struct hash_ip6_elem *dst, const struct hash_ip6_elem *src) hash_ip6_netmask(union nf_inet_addr *ip, u8 prefix)
{ {
dst->ip.in6 = src->ip.in6; ip6_netmask(ip, prefix);
}
static inline void
hash_ip6_data_zero_out(struct hash_ip6_elem *elem)
{
ipv6_addr_set(&elem->ip.in6, 0, 0, 0, 0);
}
static inline void
ip6_netmask(union nf_inet_addr *ip, u8 prefix)
{
ip->ip6[0] &= ip_set_netmask6(prefix)[0];
ip->ip6[1] &= ip_set_netmask6(prefix)[1];
ip->ip6[2] &= ip_set_netmask6(prefix)[2];
ip->ip6[3] &= ip_set_netmask6(prefix)[3];
} }
static bool static bool
hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data) hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *e)
{ {
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6)) if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6))
goto nla_put_failure; goto nla_put_failure;
return 0; return 0;
@@ -275,69 +228,55 @@ hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data)
return 1; return 1;
} }
static bool static inline void
hash_ip6_data_tlist(struct sk_buff *skb, const struct hash_ip6_elem *data) hash_ip6_data_next(struct hash_ip4_elem *next, const struct hash_ip6_elem *e)
{ {
const struct hash_ip6_telem *e =
(const struct hash_ip6_telem *)data;
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout))))
goto nla_put_failure;
return 0;
nla_put_failure:
return 1;
} }
#undef MTYPE
#undef PF #undef PF
#undef HOST_MASK #undef HOST_MASK
#undef HKEY_DATALEN
#define MTYPE hash_ip6
#define PF 6 #define PF 6
#define HOST_MASK 128 #define HOST_MASK 128
#include <linux/netfilter/ipset/ip_set_ahash.h>
static inline void #define IP_SET_EMIT_CREATE
hash_ip6_data_next(struct ip_set_hash *h, const struct hash_ip6_elem *d) #include "ip_set_hash_gen.h"
{
}
static int static int
hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb, hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt) enum ipset_adt adt, struct ip_set_adt_opt *opt)
{ {
const struct ip_set_hash *h = set->data; const struct hash_ip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
union nf_inet_addr ip; struct hash_ip6_elem e = {};
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip.in6); ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
ip6_netmask(&ip, h->netmask); hash_ip6_netmask(&e.ip, h->netmask);
if (ipv6_addr_any(&ip.in6)) if (ipv6_addr_any(&e.ip.in6))
return -EINVAL; return -EINVAL;
return adtfn(set, &ip, opt_timeout(opt, h), opt->cmdflags); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
} }
static const struct nla_policy hash_ip6_adt_policy[IPSET_ATTR_ADT_MAX + 1] = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
};
static int static int
hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[], hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{ {
const struct ip_set_hash *h = set->data; const struct hash_ip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
union nf_inet_addr ip; struct hash_ip6_elem e = {};
u32 timeout = h->timeout; struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
int ret; int ret;
if (unlikely(!tb[IPSET_ATTR_IP] || if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_CIDR])) tb[IPSET_ATTR_CIDR]))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
@@ -345,110 +284,20 @@ hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &ip); ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
ip_set_get_extensions(set, tb, &ext);
if (ret) if (ret)
return ret; return ret;
ip6_netmask(&ip, h->netmask); hash_ip6_netmask(&e.ip, h->netmask);
if (ipv6_addr_any(&ip.in6)) if (ipv6_addr_any(&e.ip.in6))
return -IPSET_ERR_HASH_ELEM; return -IPSET_ERR_HASH_ELEM;
if (tb[IPSET_ATTR_TIMEOUT]) { ret = adtfn(set, &e, &ext, &ext, flags);
if (!with_timeout(h->timeout))
return -IPSET_ERR_TIMEOUT;
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
ret = adtfn(set, &ip, timeout, flags);
return ip_set_eexist(ret, flags) ? 0 : ret; return ip_set_eexist(ret, flags) ? 0 : ret;
} }
/* Create hash:ip type of sets */
static int
hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
{
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 netmask, hbits;
size_t hsize;
struct ip_set_hash *h;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
netmask = set->family == NFPROTO_IPV4 ? 32 : 128;
pr_debug("Create set %s with family %s\n",
set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_HASHSIZE]) {
hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
if (hashsize < IPSET_MIMINAL_HASHSIZE)
hashsize = IPSET_MIMINAL_HASHSIZE;
}
if (tb[IPSET_ATTR_MAXELEM])
maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
if (tb[IPSET_ATTR_NETMASK]) {
netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
if ((set->family == NFPROTO_IPV4 && netmask > 32) ||
(set->family == NFPROTO_IPV6 && netmask > 128) ||
netmask == 0)
return -IPSET_ERR_INVALID_NETMASK;
}
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
return -ENOMEM;
h->maxelem = maxelem;
h->netmask = netmask;
get_random_bytes(&h->initval, sizeof(h->initval));
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
hsize = htable_size(hbits);
if (hsize == 0) {
kfree(h);
return -ENOMEM;
}
h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
}
h->table->htable_bits = hbits;
set->data = h;
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = set->family == NFPROTO_IPV4
? &hash_ip4_tvariant : &hash_ip6_tvariant;
if (set->family == NFPROTO_IPV4)
hash_ip4_gc_init(set);
else
hash_ip6_gc_init(set);
} else {
set->variant = set->family == NFPROTO_IPV4
? &hash_ip4_variant : &hash_ip6_variant;
}
pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
set->name, jhash_size(h->table->htable_bits),
h->table->htable_bits, h->maxelem, set->data, h->table);
return 0;
}
static struct ip_set_type hash_ip_type __read_mostly = { static struct ip_set_type hash_ip_type __read_mostly = {
.name = "hash:ip", .name = "hash:ip",
.protocol = IPSET_PROTOCOL, .protocol = IPSET_PROTOCOL,
@@ -465,6 +314,7 @@ static struct ip_set_type hash_ip_type __read_mostly = {
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_NETMASK] = { .type = NLA_U8 }, [IPSET_ATTR_NETMASK] = { .type = NLA_U8 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
}, },
.adt_policy = { .adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP] = { .type = NLA_NESTED },
@@ -472,6 +322,8 @@ static struct ip_set_type hash_ip_type __read_mostly = {
[IPSET_ATTR_CIDR] = { .type = NLA_U8 }, [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
}, },
.me = THIS_MODULE, .me = THIS_MODULE,
}; };
/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> /* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
@@ -21,12 +21,12 @@
#include <linux/netfilter.h> #include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_getport.h> #include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/netfilter/ipset/ip_set_hash.h> #include <linux/netfilter/ipset/ip_set_hash.h>
#define REVISION_MIN 0 #define REVISION_MIN 0
#define REVISION_MAX 1 /* SCTP and UDPLITE support added */ /* 1 SCTP and UDPLITE support added */
#define REVISION_MAX 2 /* Counters support added */
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -34,33 +34,45 @@ IP_SET_MODULE_DESC("hash:ip,port", REVISION_MIN, REVISION_MAX);
MODULE_ALIAS("ip_set_hash:ip,port"); MODULE_ALIAS("ip_set_hash:ip,port");
/* Type specific function prefix */ /* Type specific function prefix */
#define TYPE hash_ipport #define HTYPE hash_ipport
static bool /* IPv4 variants */
hash_ipport_same_set(const struct ip_set *a, const struct ip_set *b);
#define hash_ipport4_same_set hash_ipport_same_set /* Member elements */
#define hash_ipport6_same_set hash_ipport_same_set struct hash_ipport4_elem {
__be32 ip;
__be16 port;
u8 proto;
u8 padding;
};
/* The type variant functions: IPv4 */ struct hash_ipport4t_elem {
__be32 ip;
__be16 port;
u8 proto;
u8 padding;
unsigned long timeout;
};
/* Member elements without timeout */ struct hash_ipport4c_elem {
struct hash_ipport4_elem {
__be32 ip; __be32 ip;
__be16 port; __be16 port;
u8 proto; u8 proto;
u8 padding; u8 padding;
struct ip_set_counter counter;
}; };
/* Member elements with timeout support */ struct hash_ipport4ct_elem {
struct hash_ipport4_telem {
__be32 ip; __be32 ip;
__be16 port; __be16 port;
u8 proto; u8 proto;
u8 padding; u8 padding;
struct ip_set_counter counter;
unsigned long timeout; unsigned long timeout;
}; };
/* Common functions */
static inline bool static inline bool
hash_ipport4_data_equal(const struct hash_ipport4_elem *ip1, hash_ipport4_data_equal(const struct hash_ipport4_elem *ip1,
const struct hash_ipport4_elem *ip2, const struct hash_ipport4_elem *ip2,
@@ -71,27 +83,6 @@ hash_ipport4_data_equal(const struct hash_ipport4_elem *ip1,
ip1->proto == ip2->proto; ip1->proto == ip2->proto;
} }
static inline bool
hash_ipport4_data_isnull(const struct hash_ipport4_elem *elem)
{
return elem->proto == 0;
}
static inline void
hash_ipport4_data_copy(struct hash_ipport4_elem *dst,
const struct hash_ipport4_elem *src)
{
dst->ip = src->ip;
dst->port = src->port;
dst->proto = src->proto;
}
static inline void
hash_ipport4_data_zero_out(struct hash_ipport4_elem *elem)
{
elem->proto = 0;
}
static bool static bool
hash_ipport4_data_list(struct sk_buff *skb, hash_ipport4_data_list(struct sk_buff *skb,
const struct hash_ipport4_elem *data) const struct hash_ipport4_elem *data)
@@ -106,111 +97,91 @@ hash_ipport4_data_list(struct sk_buff *skb,
return 1; return 1;
} }
static bool
hash_ipport4_data_tlist(struct sk_buff *skb,
const struct hash_ipport4_elem *data)
{
const struct hash_ipport4_telem *tdata =
(const struct hash_ipport4_telem *)data;
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout))))
goto nla_put_failure;
return 0;
nla_put_failure:
return 1;
}
#define PF 4
#define HOST_MASK 32
#include <linux/netfilter/ipset/ip_set_ahash.h>
static inline void static inline void
hash_ipport4_data_next(struct ip_set_hash *h, hash_ipport4_data_next(struct hash_ipport4_elem *next,
const struct hash_ipport4_elem *d) const struct hash_ipport4_elem *d)
{ {
h->next.ip = d->ip; next->ip = d->ip;
h->next.port = d->port; next->port = d->port;
} }
#define MTYPE hash_ipport4
#define PF 4
#define HOST_MASK 32
#define HKEY_DATALEN sizeof(struct hash_ipport4_elem)
#include "ip_set_hash_gen.h"
static int static int
hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb, hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt) enum ipset_adt adt, struct ip_set_adt_opt *opt)
{ {
const struct ip_set_hash *h = set->data; const struct hash_ipport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport4_elem data = { }; struct hash_ipport4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto)) &e.port, &e.proto))
return -EINVAL; return -EINVAL;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
} }
static int static int
hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{ {
const struct ip_set_hash *h = set->data; const struct hash_ipport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport4_elem data = { }; struct hash_ipport4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
u32 ip, ip_to, p = 0, port, port_to; u32 ip, ip_to, p = 0, port, port_to;
u32 timeout = h->timeout;
bool with_ports = false; bool with_ports = false;
int ret; int ret;
if (unlikely(!tb[IPSET_ATTR_IP] || if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip); ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) ||
ip_set_get_extensions(set, tb, &ext);
if (ret) if (ret)
return ret; return ret;
if (tb[IPSET_ATTR_PORT]) if (tb[IPSET_ATTR_PORT])
data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
else else
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_PROTO]) { if (tb[IPSET_ATTR_PROTO]) {
data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(data.proto); with_ports = ip_set_proto_with_ports(e.proto);
if (data.proto == 0) if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO; return -IPSET_ERR_INVALID_PROTO;
} else } else
return -IPSET_ERR_MISSING_PROTO; return -IPSET_ERR_MISSING_PROTO;
if (!(with_ports || data.proto == IPPROTO_ICMP)) if (!(with_ports || e.proto == IPPROTO_ICMP))
data.port = 0; e.port = 0;
if (tb[IPSET_ATTR_TIMEOUT]) {
if (!with_timeout(h->timeout))
return -IPSET_ERR_TIMEOUT;
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (adt == IPSET_TEST || if (adt == IPSET_TEST ||
!(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] || !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
tb[IPSET_ATTR_PORT_TO])) { tb[IPSET_ATTR_PORT_TO])) {
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret; return ip_set_eexist(ret, flags) ? 0 : ret;
} }
ip_to = ip = ntohl(data.ip); ip_to = ip = ntohl(e.ip);
if (tb[IPSET_ATTR_IP_TO]) { if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret) if (ret)
@@ -225,7 +196,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
ip_set_mask_from_to(ip, ip_to, cidr); ip_set_mask_from_to(ip, ip_to, cidr);
} }
port_to = port = ntohs(data.port); port_to = port = ntohs(e.port);
if (with_ports && tb[IPSET_ATTR_PORT_TO]) { if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to) if (port > port_to)
@@ -238,9 +209,9 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
: port; : port;
for (; p <= port_to; p++) { for (; p <= port_to; p++) {
data.ip = htonl(ip); e.ip = htonl(ip);
data.port = htons(p); e.port = htons(p);
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
@@ -251,34 +222,42 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret; return ret;
} }
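/* Illustrative aside, not part of the patch: when a range of elements is
 * added and the operation has to be retried (typically because the hash
 * table was resized half-way through), the loop above does not start over;
 * it resumes from the position recorded in h->next, which is what the
 * "retried && ip == ntohl(h->next.ip)" checks implement.  Minimal sketch of
 * that resume pattern with plain integers and made-up names:
 */
static int example_add_range(unsigned int from, unsigned int to,
			     unsigned int *next, int retried)
{
	unsigned int i = retried ? *next : from;

	for (; i <= to; i++) {
		/* insert element i here; if the backing store must grow,
		 * remember i in *next and let the caller retry with
		 * retried == 1 so the work done so far is not repeated */
		*next = i;
	}
	return 0;
}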
static bool /* IPv6 variants */
hash_ipport_same_set(const struct ip_set *a, const struct ip_set *b)
{
const struct ip_set_hash *x = a->data;
const struct ip_set_hash *y = b->data;
/* Resizing changes htable_bits, so we ignore it */ struct hash_ipport6_elem {
return x->maxelem == y->maxelem && union nf_inet_addr ip;
x->timeout == y->timeout; __be16 port;
} u8 proto;
u8 padding;
};
/* The type variant functions: IPv6 */ struct hash_ipport6t_elem {
union nf_inet_addr ip;
__be16 port;
u8 proto;
u8 padding;
unsigned long timeout;
};
struct hash_ipport6_elem { struct hash_ipport6c_elem {
union nf_inet_addr ip; union nf_inet_addr ip;
__be16 port; __be16 port;
u8 proto; u8 proto;
u8 padding; u8 padding;
struct ip_set_counter counter;
}; };
struct hash_ipport6_telem { struct hash_ipport6ct_elem {
union nf_inet_addr ip; union nf_inet_addr ip;
__be16 port; __be16 port;
u8 proto; u8 proto;
u8 padding; u8 padding;
struct ip_set_counter counter;
unsigned long timeout; unsigned long timeout;
}; };
/* Common functions */
static inline bool static inline bool
hash_ipport6_data_equal(const struct hash_ipport6_elem *ip1, hash_ipport6_data_equal(const struct hash_ipport6_elem *ip1,
const struct hash_ipport6_elem *ip2, const struct hash_ipport6_elem *ip2,
@@ -289,25 +268,6 @@ hash_ipport6_data_equal(const struct hash_ipport6_elem *ip1,
ip1->proto == ip2->proto; ip1->proto == ip2->proto;
} }
static inline bool
hash_ipport6_data_isnull(const struct hash_ipport6_elem *elem)
{
return elem->proto == 0;
}
static inline void
hash_ipport6_data_copy(struct hash_ipport6_elem *dst,
const struct hash_ipport6_elem *src)
{
memcpy(dst, src, sizeof(*dst));
}
static inline void
hash_ipport6_data_zero_out(struct hash_ipport6_elem *elem)
{
elem->proto = 0;
}
static bool static bool
hash_ipport6_data_list(struct sk_buff *skb, hash_ipport6_data_list(struct sk_buff *skb,
const struct hash_ipport6_elem *data) const struct hash_ipport6_elem *data)
@@ -322,66 +282,52 @@ hash_ipport6_data_list(struct sk_buff *skb,
return 1; return 1;
} }
static bool static inline void
hash_ipport6_data_tlist(struct sk_buff *skb, hash_ipport6_data_next(struct hash_ipport4_elem *next,
const struct hash_ipport6_elem *data) const struct hash_ipport6_elem *d)
{ {
const struct hash_ipport6_telem *e = next->port = d->port;
(const struct hash_ipport6_telem *)data;
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout))))
goto nla_put_failure;
return 0;
nla_put_failure:
return 1;
} }
#undef MTYPE
#undef PF #undef PF
#undef HOST_MASK #undef HOST_MASK
#undef HKEY_DATALEN
#define MTYPE hash_ipport6
#define PF 6 #define PF 6
#define HOST_MASK 128 #define HOST_MASK 128
#include <linux/netfilter/ipset/ip_set_ahash.h> #define HKEY_DATALEN sizeof(struct hash_ipport6_elem)
#define IP_SET_EMIT_CREATE
static inline void #include "ip_set_hash_gen.h"
hash_ipport6_data_next(struct ip_set_hash *h,
const struct hash_ipport6_elem *d)
{
h->next.port = d->port;
}
static int static int
hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb, hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt) enum ipset_adt adt, struct ip_set_adt_opt *opt)
{ {
const struct ip_set_hash *h = set->data; const struct hash_ipport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport6_elem data = { }; struct hash_ipport6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto)) &e.port, &e.proto))
return -EINVAL; return -EINVAL;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
} }
static int static int
hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{ {
const struct ip_set_hash *h = set->data; const struct hash_ipport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport6_elem data = { }; struct hash_ipport6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
u32 port, port_to; u32 port, port_to;
u32 timeout = h->timeout;
bool with_ports = false; bool with_ports = false;
int ret; int ret;
@@ -389,6 +335,8 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_CIDR])) tb[IPSET_ATTR_CIDR]))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
@@ -396,39 +344,34 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
ip_set_get_extensions(set, tb, &ext);
if (ret) if (ret)
return ret; return ret;
if (tb[IPSET_ATTR_PORT]) if (tb[IPSET_ATTR_PORT])
data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
else else
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_PROTO]) { if (tb[IPSET_ATTR_PROTO]) {
data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(data.proto); with_ports = ip_set_proto_with_ports(e.proto);
if (data.proto == 0) if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO; return -IPSET_ERR_INVALID_PROTO;
} else } else
return -IPSET_ERR_MISSING_PROTO; return -IPSET_ERR_MISSING_PROTO;
if (!(with_ports || data.proto == IPPROTO_ICMPV6)) if (!(with_ports || e.proto == IPPROTO_ICMPV6))
data.port = 0; e.port = 0;
if (tb[IPSET_ATTR_TIMEOUT]) {
if (!with_timeout(h->timeout))
return -IPSET_ERR_TIMEOUT;
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret; return ip_set_eexist(ret, flags) ? 0 : ret;
} }
port = ntohs(data.port); port = ntohs(e.port);
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to) if (port > port_to)
swap(port, port_to); swap(port, port_to);
@@ -436,8 +379,8 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried) if (retried)
port = ntohs(h->next.port); port = ntohs(h->next.port);
for (; port <= port_to; port++) { for (; port <= port_to; port++) {
data.port = htons(port); e.port = htons(port);
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
@@ -447,78 +390,6 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
return ret; return ret;
} }
/* Create hash:ip type of sets */
static int
hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
{
struct ip_set_hash *h;
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
size_t hsize;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_HASHSIZE]) {
hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
if (hashsize < IPSET_MIMINAL_HASHSIZE)
hashsize = IPSET_MIMINAL_HASHSIZE;
}
if (tb[IPSET_ATTR_MAXELEM])
maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
return -ENOMEM;
h->maxelem = maxelem;
get_random_bytes(&h->initval, sizeof(h->initval));
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
hsize = htable_size(hbits);
if (hsize == 0) {
kfree(h);
return -ENOMEM;
}
h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
}
h->table->htable_bits = hbits;
set->data = h;
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = set->family == NFPROTO_IPV4
? &hash_ipport4_tvariant : &hash_ipport6_tvariant;
if (set->family == NFPROTO_IPV4)
hash_ipport4_gc_init(set);
else
hash_ipport6_gc_init(set);
} else {
set->variant = set->family == NFPROTO_IPV4
? &hash_ipport4_variant : &hash_ipport6_variant;
}
pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
set->name, jhash_size(h->table->htable_bits),
h->table->htable_bits, h->maxelem, set->data, h->table);
return 0;
}
static struct ip_set_type hash_ipport_type __read_mostly = { static struct ip_set_type hash_ipport_type __read_mostly = {
.name = "hash:ip,port", .name = "hash:ip,port",
.protocol = IPSET_PROTOCOL, .protocol = IPSET_PROTOCOL,
@@ -535,6 +406,7 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 }, [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
}, },
.adt_policy = { .adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP] = { .type = NLA_NESTED },
@@ -545,6 +417,8 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
[IPSET_ATTR_PROTO] = { .type = NLA_U8 }, [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
}, },
.me = THIS_MODULE, .me = THIS_MODULE,
}; };
...
/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> /* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
@@ -21,12 +21,12 @@
#include <linux/netfilter.h> #include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_getport.h> #include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/netfilter/ipset/ip_set_hash.h> #include <linux/netfilter/ipset/ip_set_hash.h>
#define REVISION_MIN 0 #define REVISION_MIN 0
#define REVISION_MAX 1 /* SCTP and UDPLITE support added */ /* 1 SCTP and UDPLITE support added */
#define REVISION_MAX 2 /* Counters support added */
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -34,32 +34,44 @@ IP_SET_MODULE_DESC("hash:ip,port,ip", REVISION_MIN, REVISION_MAX);
MODULE_ALIAS("ip_set_hash:ip,port,ip"); MODULE_ALIAS("ip_set_hash:ip,port,ip");
/* Type specific function prefix */ /* Type specific function prefix */
#define TYPE hash_ipportip #define HTYPE hash_ipportip
static bool /* IPv4 variants */
hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b);
#define hash_ipportip4_same_set hash_ipportip_same_set /* Member elements */
#define hash_ipportip6_same_set hash_ipportip_same_set struct hash_ipportip4_elem {
__be32 ip;
__be32 ip2;
__be16 port;
u8 proto;
u8 padding;
};
/* The type variant functions: IPv4 */ struct hash_ipportip4t_elem {
__be32 ip;
__be32 ip2;
__be16 port;
u8 proto;
u8 padding;
unsigned long timeout;
};
/* Member elements without timeout */ struct hash_ipportip4c_elem {
struct hash_ipportip4_elem {
__be32 ip; __be32 ip;
__be32 ip2; __be32 ip2;
__be16 port; __be16 port;
u8 proto; u8 proto;
u8 padding; u8 padding;
struct ip_set_counter counter;
}; };
/* Member elements with timeout support */ struct hash_ipportip4ct_elem {
struct hash_ipportip4_telem {
__be32 ip; __be32 ip;
__be32 ip2; __be32 ip2;
__be16 port; __be16 port;
u8 proto; u8 proto;
u8 padding; u8 padding;
struct ip_set_counter counter;
unsigned long timeout; unsigned long timeout;
}; };
@@ -74,25 +86,6 @@ hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1,
ip1->proto == ip2->proto; ip1->proto == ip2->proto;
} }
static inline bool
hash_ipportip4_data_isnull(const struct hash_ipportip4_elem *elem)
{
return elem->proto == 0;
}
static inline void
hash_ipportip4_data_copy(struct hash_ipportip4_elem *dst,
const struct hash_ipportip4_elem *src)
{
memcpy(dst, src, sizeof(*dst));
}
static inline void
hash_ipportip4_data_zero_out(struct hash_ipportip4_elem *elem)
{
elem->proto = 0;
}
static bool static bool
hash_ipportip4_data_list(struct sk_buff *skb, hash_ipportip4_data_list(struct sk_buff *skb,
const struct hash_ipportip4_elem *data) const struct hash_ipportip4_elem *data)
@@ -108,117 +101,96 @@ hash_ipportip4_data_list(struct sk_buff *skb,
return 1; return 1;
} }
static bool static inline void
hash_ipportip4_data_tlist(struct sk_buff *skb, hash_ipportip4_data_next(struct hash_ipportip4_elem *next,
const struct hash_ipportip4_elem *data) const struct hash_ipportip4_elem *d)
{ {
const struct hash_ipportip4_telem *tdata = next->ip = d->ip;
(const struct hash_ipportip4_telem *)data; next->port = d->port;
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) ||
nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout))))
goto nla_put_failure;
return 0;
nla_put_failure:
return 1;
} }
/* Common functions */
#define MTYPE hash_ipportip4
#define PF 4 #define PF 4
#define HOST_MASK 32 #define HOST_MASK 32
#include <linux/netfilter/ipset/ip_set_ahash.h> #include "ip_set_hash_gen.h"
static inline void
hash_ipportip4_data_next(struct ip_set_hash *h,
const struct hash_ipportip4_elem *d)
{
h->next.ip = d->ip;
h->next.port = d->port;
}
static int static int
hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb, hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt) enum ipset_adt adt, struct ip_set_adt_opt *opt)
{ {
const struct ip_set_hash *h = set->data; const struct hash_ipportip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip4_elem data = { }; struct hash_ipportip4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto)) &e.port, &e.proto))
return -EINVAL; return -EINVAL;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2); ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
} }
static int static int
hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{ {
const struct ip_set_hash *h = set->data; const struct hash_ipportip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip4_elem data = { }; struct hash_ipportip4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
u32 ip, ip_to, p = 0, port, port_to; u32 ip, ip_to, p = 0, port, port_to;
u32 timeout = h->timeout;
bool with_ports = false; bool with_ports = false;
int ret; int ret;
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip); ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) ||
ip_set_get_extensions(set, tb, &ext);
if (ret) if (ret)
return ret; return ret;
ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &data.ip2); ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &e.ip2);
if (ret) if (ret)
return ret; return ret;
if (tb[IPSET_ATTR_PORT]) if (tb[IPSET_ATTR_PORT])
data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
else else
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_PROTO]) { if (tb[IPSET_ATTR_PROTO]) {
data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(data.proto); with_ports = ip_set_proto_with_ports(e.proto);
if (data.proto == 0) if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO; return -IPSET_ERR_INVALID_PROTO;
} else } else
return -IPSET_ERR_MISSING_PROTO; return -IPSET_ERR_MISSING_PROTO;
if (!(with_ports || data.proto == IPPROTO_ICMP)) if (!(with_ports || e.proto == IPPROTO_ICMP))
data.port = 0; e.port = 0;
if (tb[IPSET_ATTR_TIMEOUT]) {
if (!with_timeout(h->timeout))
return -IPSET_ERR_TIMEOUT;
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (adt == IPSET_TEST || if (adt == IPSET_TEST ||
!(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] || !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
tb[IPSET_ATTR_PORT_TO])) { tb[IPSET_ATTR_PORT_TO])) {
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret; return ip_set_eexist(ret, flags) ? 0 : ret;
} }
ip_to = ip = ntohl(data.ip); ip_to = ip = ntohl(e.ip);
if (tb[IPSET_ATTR_IP_TO]) { if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret) if (ret)
@@ -233,7 +205,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
ip_set_mask_from_to(ip, ip_to, cidr); ip_set_mask_from_to(ip, ip_to, cidr);
} }
port_to = port = ntohs(data.port); port_to = port = ntohs(e.port);
if (with_ports && tb[IPSET_ATTR_PORT_TO]) { if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to) if (port > port_to)
@@ -246,9 +218,9 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
: port; : port;
for (; p <= port_to; p++) { for (; p <= port_to; p++) {
data.ip = htonl(ip); e.ip = htonl(ip);
data.port = htons(p); e.port = htons(p);
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
@@ -259,36 +231,46 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret; return ret;
} }
static bool /* IPv6 variants */
hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b)
{
const struct ip_set_hash *x = a->data;
const struct ip_set_hash *y = b->data;
/* Resizing changes htable_bits, so we ignore it */ struct hash_ipportip6_elem {
return x->maxelem == y->maxelem && union nf_inet_addr ip;
x->timeout == y->timeout; union nf_inet_addr ip2;
} __be16 port;
u8 proto;
u8 padding;
};
/* The type variant functions: IPv6 */ struct hash_ipportip6t_elem {
union nf_inet_addr ip;
union nf_inet_addr ip2;
__be16 port;
u8 proto;
u8 padding;
unsigned long timeout;
};
struct hash_ipportip6_elem { struct hash_ipportip6c_elem {
union nf_inet_addr ip; union nf_inet_addr ip;
union nf_inet_addr ip2; union nf_inet_addr ip2;
__be16 port; __be16 port;
u8 proto; u8 proto;
u8 padding; u8 padding;
struct ip_set_counter counter;
}; };
struct hash_ipportip6_telem { struct hash_ipportip6ct_elem {
union nf_inet_addr ip; union nf_inet_addr ip;
union nf_inet_addr ip2; union nf_inet_addr ip2;
__be16 port; __be16 port;
u8 proto; u8 proto;
u8 padding; u8 padding;
struct ip_set_counter counter;
unsigned long timeout; unsigned long timeout;
}; };
/* Common functions */
static inline bool static inline bool
hash_ipportip6_data_equal(const struct hash_ipportip6_elem *ip1, hash_ipportip6_data_equal(const struct hash_ipportip6_elem *ip1,
const struct hash_ipportip6_elem *ip2, const struct hash_ipportip6_elem *ip2,
@@ -300,25 +282,6 @@ hash_ipportip6_data_equal(const struct hash_ipportip6_elem *ip1,
ip1->proto == ip2->proto; ip1->proto == ip2->proto;
} }
static inline bool
hash_ipportip6_data_isnull(const struct hash_ipportip6_elem *elem)
{
return elem->proto == 0;
}
static inline void
hash_ipportip6_data_copy(struct hash_ipportip6_elem *dst,
const struct hash_ipportip6_elem *src)
{
memcpy(dst, src, sizeof(*dst));
}
static inline void
hash_ipportip6_data_zero_out(struct hash_ipportip6_elem *elem)
{
elem->proto = 0;
}
static bool static bool
hash_ipportip6_data_list(struct sk_buff *skb, hash_ipportip6_data_list(struct sk_buff *skb,
const struct hash_ipportip6_elem *data) const struct hash_ipportip6_elem *data)
@@ -334,68 +297,51 @@ hash_ipportip6_data_list(struct sk_buff *skb,
return 1; return 1;
} }
static bool static inline void
hash_ipportip6_data_tlist(struct sk_buff *skb, hash_ipportip6_data_next(struct hash_ipportip4_elem *next,
const struct hash_ipportip6_elem *data) const struct hash_ipportip6_elem *d)
{ {
const struct hash_ipportip6_telem *e = next->port = d->port;
(const struct hash_ipportip6_telem *)data;
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout))))
goto nla_put_failure;
return 0;
nla_put_failure:
return 1;
} }
#undef MTYPE
#undef PF #undef PF
#undef HOST_MASK #undef HOST_MASK
#define MTYPE hash_ipportip6
#define PF 6 #define PF 6
#define HOST_MASK 128 #define HOST_MASK 128
#include <linux/netfilter/ipset/ip_set_ahash.h> #define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static inline void
hash_ipportip6_data_next(struct ip_set_hash *h,
const struct hash_ipportip6_elem *d)
{
h->next.port = d->port;
}
static int static int
hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb, hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt) enum ipset_adt adt, struct ip_set_adt_opt *opt)
{ {
const struct ip_set_hash *h = set->data; const struct hash_ipportip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip6_elem data = { }; struct hash_ipportip6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto)) &e.port, &e.proto))
return -EINVAL; return -EINVAL;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2.in6); ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2.in6);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
} }
static int static int
hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{ {
const struct ip_set_hash *h = set->data; const struct hash_ipportip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip6_elem data = { }; struct hash_ipportip6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
u32 port, port_to; u32 port, port_to;
u32 timeout = h->timeout;
bool with_ports = false; bool with_ports = false;
int ret; int ret;
@@ -403,6 +349,8 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_CIDR])) tb[IPSET_ATTR_CIDR]))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
@@ -410,43 +358,38 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
ip_set_get_extensions(set, tb, &ext);
if (ret) if (ret)
return ret; return ret;
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2); ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip2);
if (ret) if (ret)
return ret; return ret;
if (tb[IPSET_ATTR_PORT]) if (tb[IPSET_ATTR_PORT])
data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
else else
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_PROTO]) { if (tb[IPSET_ATTR_PROTO]) {
data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(data.proto); with_ports = ip_set_proto_with_ports(e.proto);
if (data.proto == 0) if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO; return -IPSET_ERR_INVALID_PROTO;
} else } else
return -IPSET_ERR_MISSING_PROTO; return -IPSET_ERR_MISSING_PROTO;
if (!(with_ports || data.proto == IPPROTO_ICMPV6)) if (!(with_ports || e.proto == IPPROTO_ICMPV6))
data.port = 0; e.port = 0;
if (tb[IPSET_ATTR_TIMEOUT]) {
if (!with_timeout(h->timeout))
return -IPSET_ERR_TIMEOUT;
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret; return ip_set_eexist(ret, flags) ? 0 : ret;
} }
port = ntohs(data.port); port = ntohs(e.port);
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to) if (port > port_to)
swap(port, port_to); swap(port, port_to);
@@ -454,8 +397,8 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried) if (retried)
port = ntohs(h->next.port); port = ntohs(h->next.port);
for (; port <= port_to; port++) { for (; port <= port_to; port++) {
data.port = htons(port); e.port = htons(port);
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
@@ -465,78 +408,6 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
return ret; return ret;
} }
/* Create hash:ip type of sets */
static int
hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
{
struct ip_set_hash *h;
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
size_t hsize;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_HASHSIZE]) {
hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
if (hashsize < IPSET_MIMINAL_HASHSIZE)
hashsize = IPSET_MIMINAL_HASHSIZE;
}
if (tb[IPSET_ATTR_MAXELEM])
maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
return -ENOMEM;
h->maxelem = maxelem;
get_random_bytes(&h->initval, sizeof(h->initval));
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
hsize = htable_size(hbits);
if (hsize == 0) {
kfree(h);
return -ENOMEM;
}
h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
}
h->table->htable_bits = hbits;
set->data = h;
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = set->family == NFPROTO_IPV4
? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant;
if (set->family == NFPROTO_IPV4)
hash_ipportip4_gc_init(set);
else
hash_ipportip6_gc_init(set);
} else {
set->variant = set->family == NFPROTO_IPV4
? &hash_ipportip4_variant : &hash_ipportip6_variant;
}
pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
set->name, jhash_size(h->table->htable_bits),
h->table->htable_bits, h->maxelem, set->data, h->table);
return 0;
}
static struct ip_set_type hash_ipportip_type __read_mostly = { static struct ip_set_type hash_ipportip_type __read_mostly = {
.name = "hash:ip,port,ip", .name = "hash:ip,port,ip",
.protocol = IPSET_PROTOCOL, .protocol = IPSET_PROTOCOL,
@@ -552,6 +423,7 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
[IPSET_ATTR_PROBES] = { .type = NLA_U8 }, [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
}, },
.adt_policy = { .adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP] = { .type = NLA_NESTED },
@@ -563,6 +435,8 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
[IPSET_ATTR_PROTO] = { .type = NLA_U8 }, [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
}, },
.me = THIS_MODULE, .me = THIS_MODULE,
}; };
...
/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> /* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
@@ -21,14 +21,14 @@
#include <linux/netfilter.h> #include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_getport.h> #include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/netfilter/ipset/ip_set_hash.h> #include <linux/netfilter/ipset/ip_set_hash.h>
#define REVISION_MIN 0 #define REVISION_MIN 0
/* 1 SCTP and UDPLITE support added */ /* 1 SCTP and UDPLITE support added */
/* 2 Range as input support for IPv4 added */ /* 2 Range as input support for IPv4 added */
#define REVISION_MAX 3 /* nomatch flag support added */ /* 3 nomatch flag support added */
#define REVISION_MAX 4 /* Counters support added */
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -36,23 +36,19 @@ IP_SET_MODULE_DESC("hash:ip,port,net", REVISION_MIN, REVISION_MAX);
MODULE_ALIAS("ip_set_hash:ip,port,net"); MODULE_ALIAS("ip_set_hash:ip,port,net");
/* Type specific function prefix */ /* Type specific function prefix */
#define TYPE hash_ipportnet #define HTYPE hash_ipportnet
static bool
hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b);
#define hash_ipportnet4_same_set hash_ipportnet_same_set
#define hash_ipportnet6_same_set hash_ipportnet_same_set
/* The type variant functions: IPv4 */
/* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0 /* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0
* However this way we have to store internally cidr - 1, * However this way we have to store internally cidr - 1,
* dancing back and forth. * dancing back and forth.
*/ */
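/* Illustrative aside, not part of the patch: because "cidr" shares a byte
 * with the 1-bit "nomatch" flag (u8 cidr:7 below), the element stores
 * cidr - 1, so the 7-bit field 0..127 covers prefix lengths 1..128 and
 * cidr == 0 cannot be represented.  Sketch of the packing, independent of
 * the kernel structures and using made-up names:
 */
struct example_net {
	unsigned char cidr:7;		/* stores prefix_len - 1 */
	unsigned char nomatch:1;	/* "do not match" marker  */
};

static inline void
example_set_prefix(struct example_net *n, unsigned char prefix_len)
{
	n->cidr = prefix_len - 1;	/* 1..128 mapped to 0..127 */
}

static inline unsigned char
example_get_prefix(const struct example_net *n)
{
	return n->cidr + 1;		/* back to 1..128 */
}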
#define IP_SET_HASH_WITH_NETS_PACKED #define IP_SET_HASH_WITH_NETS_PACKED
#define IP_SET_HASH_WITH_PROTO
#define IP_SET_HASH_WITH_NETS
/* IPv4 variants */
/* Member elements without timeout */ /* Member elements */
struct hash_ipportnet4_elem { struct hash_ipportnet4_elem {
__be32 ip; __be32 ip;
__be32 ip2; __be32 ip2;
@@ -62,8 +58,7 @@ struct hash_ipportnet4_elem {
u8 proto; u8 proto;
}; };
/* Member elements with timeout support */ struct hash_ipportnet4t_elem {
struct hash_ipportnet4_telem {
__be32 ip; __be32 ip;
__be32 ip2; __be32 ip2;
__be16 port; __be16 port;
@@ -73,6 +68,29 @@ struct hash_ipportnet4_telem {
unsigned long timeout; unsigned long timeout;
}; };
struct hash_ipportnet4c_elem {
__be32 ip;
__be32 ip2;
__be16 port;
u8 cidr:7;
u8 nomatch:1;
u8 proto;
struct ip_set_counter counter;
};
struct hash_ipportnet4ct_elem {
__be32 ip;
__be32 ip2;
__be16 port;
u8 cidr:7;
u8 nomatch:1;
u8 proto;
struct ip_set_counter counter;
unsigned long timeout;
};
/* Common functions */
static inline bool static inline bool
hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1, hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1,
const struct hash_ipportnet4_elem *ip2, const struct hash_ipportnet4_elem *ip2,
@@ -85,38 +103,22 @@ hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1,
ip1->proto == ip2->proto; ip1->proto == ip2->proto;
} }
static inline bool static inline int
hash_ipportnet4_data_isnull(const struct hash_ipportnet4_elem *elem) hash_ipportnet4_do_data_match(const struct hash_ipportnet4_elem *elem)
{
return elem->proto == 0;
}
static inline void
hash_ipportnet4_data_copy(struct hash_ipportnet4_elem *dst,
const struct hash_ipportnet4_elem *src)
{ {
memcpy(dst, src, sizeof(*dst)); return elem->nomatch ? -ENOTEMPTY : 1;
} }
static inline void static inline void
hash_ipportnet4_data_flags(struct hash_ipportnet4_elem *dst, u32 flags) hash_ipportnet4_data_set_flags(struct hash_ipportnet4_elem *elem, u32 flags)
{ {
dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
} }
static inline void static inline void
hash_ipportnet4_data_reset_flags(struct hash_ipportnet4_elem *dst, u32 *flags) hash_ipportnet4_data_reset_flags(struct hash_ipportnet4_elem *elem, u8 *flags)
{ {
if (dst->nomatch) { swap(*flags, elem->nomatch);
*flags = IPSET_FLAG_NOMATCH;
dst->nomatch = 0;
}
}
static inline int
hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
} }
static inline void static inline void
@@ -126,12 +128,6 @@ hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr)
elem->cidr = cidr - 1; elem->cidr = cidr - 1;
} }
static inline void
hash_ipportnet4_data_zero_out(struct hash_ipportnet4_elem *elem)
{
elem->proto = 0;
}
static bool static bool
hash_ipportnet4_data_list(struct sk_buff *skb, hash_ipportnet4_data_list(struct sk_buff *skb,
const struct hash_ipportnet4_elem *data) const struct hash_ipportnet4_elem *data)
@@ -152,81 +148,56 @@ hash_ipportnet4_data_list(struct sk_buff *skb,
return 1; return 1;
} }
static bool static inline void
hash_ipportnet4_data_tlist(struct sk_buff *skb, hash_ipportnet4_data_next(struct hash_ipportnet4_elem *next,
const struct hash_ipportnet4_elem *data) const struct hash_ipportnet4_elem *d)
{ {
const struct hash_ipportnet4_telem *tdata = next->ip = d->ip;
(const struct hash_ipportnet4_telem *)data; next->port = d->port;
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; next->ip2 = d->ip2;
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) ||
nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout))) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return 0;
nla_put_failure:
return 1;
} }
#define IP_SET_HASH_WITH_PROTO #define MTYPE hash_ipportnet4
#define IP_SET_HASH_WITH_NETS
#define PF 4 #define PF 4
#define HOST_MASK 32 #define HOST_MASK 32
#include <linux/netfilter/ipset/ip_set_ahash.h> #include "ip_set_hash_gen.h"
static inline void
hash_ipportnet4_data_next(struct ip_set_hash *h,
const struct hash_ipportnet4_elem *d)
{
h->next.ip = d->ip;
h->next.port = d->port;
h->next.ip2 = d->ip2;
}
static int static int
hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb, hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt) enum ipset_adt adt, struct ip_set_adt_opt *opt)
{ {
const struct ip_set_hash *h = set->data; const struct hash_ipportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem data = { struct hash_ipportnet4_elem e = {
.cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1 .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
}; };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
if (adt == IPSET_TEST) if (adt == IPSET_TEST)
data.cidr = HOST_MASK - 1; e.cidr = HOST_MASK - 1;
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto)) &e.port, &e.proto))
return -EINVAL; return -EINVAL;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2); ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2);
data.ip2 &= ip_set_netmask(data.cidr + 1); e.ip2 &= ip_set_netmask(e.cidr + 1);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
} }
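/* Illustrative aside, not part of the patch: before lookup or insertion the
 * second address is reduced to its network part, i.e. masked with a prefix
 * of e.cidr + 1 bits (the ip_set_netmask() call above, backed by the pfxlen
 * tables).  A host-byte-order sketch of such a mask helper, purely for
 * illustration:
 */
static inline unsigned int example_netmask(unsigned char prefix_len)
{
	/* guard prefix_len == 0: shifting a 32-bit value by 32 is undefined */
	return prefix_len ? ~0U << (32 - prefix_len) : 0;
}
/* e.g. example_netmask(24) == 0xffffff00 */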
static int static int
hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{ {
const struct ip_set_hash *h = set->data; const struct hash_ipportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem data = { .cidr = HOST_MASK - 1 }; struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
u32 ip, ip_to, p = 0, port, port_to; u32 ip, ip_to, p = 0, port, port_to;
u32 ip2_from, ip2_to, ip2_last, ip2; u32 ip2_from, ip2_to, ip2_last, ip2;
u32 timeout = h->timeout;
bool with_ports = false; bool with_ports = false;
u8 cidr; u8 cidr;
int ret; int ret;
@@ -235,13 +206,16 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
ip_set_get_extensions(set, tb, &ext);
if (ret) if (ret)
return ret; return ret;
@@ -253,46 +227,41 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
if (!cidr || cidr > HOST_MASK) if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR; return -IPSET_ERR_INVALID_CIDR;
data.cidr = cidr - 1; e.cidr = cidr - 1;
} }
if (tb[IPSET_ATTR_PORT]) if (tb[IPSET_ATTR_PORT])
data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
else else
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_PROTO]) { if (tb[IPSET_ATTR_PROTO]) {
data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(data.proto); with_ports = ip_set_proto_with_ports(e.proto);
if (data.proto == 0) if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO; return -IPSET_ERR_INVALID_PROTO;
} else } else
return -IPSET_ERR_MISSING_PROTO; return -IPSET_ERR_MISSING_PROTO;
if (!(with_ports || data.proto == IPPROTO_ICMP)) if (!(with_ports || e.proto == IPPROTO_ICMP))
data.port = 0; e.port = 0;
if (tb[IPSET_ATTR_TIMEOUT]) {
if (!with_timeout(h->timeout))
return -IPSET_ERR_TIMEOUT;
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) { if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH) if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (cadt_flags << 16); flags |= (IPSET_FLAG_NOMATCH << 16);
} }
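/* Illustrative aside, not part of the patch: the add/del/test path now
 * carries two flag name spaces in a single u32: command flags stay in the
 * low 16 bits, while element (CADT) properties such as IPSET_FLAG_NOMATCH
 * are shifted into the high 16 bits, which the *_data_set_flags() helpers
 * above read back with (flags >> 16).  Minimal sketch of that split, with
 * made-up names and values:
 */
#define EX_CMD_FLAG_EXIST	0x0001U	/* low half: command behaviour */
#define EX_ELEM_FLAG_NOMATCH	0x0001U	/* high half: element property */

static inline unsigned int example_pack_flags(unsigned int cmd, unsigned int elem)
{
	return (elem << 16) | cmd;
}

static inline unsigned int example_elem_flags(unsigned int packed)
{
	return packed >> 16;
}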
with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
if (adt == IPSET_TEST || if (adt == IPSET_TEST ||
!(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports || !(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports ||
tb[IPSET_ATTR_IP2_TO])) { tb[IPSET_ATTR_IP2_TO])) {
data.ip = htonl(ip); e.ip = htonl(ip);
data.ip2 = htonl(ip2_from & ip_set_hostmask(data.cidr + 1)); e.ip2 = htonl(ip2_from & ip_set_hostmask(e.cidr + 1));
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret; return ip_set_enomatch(ret, flags, adt) ? 1 :
ip_set_eexist(ret, flags) ? 0 : ret;
} }
ip_to = ip; ip_to = ip;
@@ -310,7 +279,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
ip_set_mask_from_to(ip, ip_to, cidr); ip_set_mask_from_to(ip, ip_to, cidr);
} }
port_to = port = ntohs(data.port); port_to = port = ntohs(e.port);
if (tb[IPSET_ATTR_PORT_TO]) { if (tb[IPSET_ATTR_PORT_TO]) {
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to) if (port > port_to)
@@ -326,28 +295,27 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
swap(ip2_from, ip2_to); swap(ip2_from, ip2_to);
if (ip2_from + UINT_MAX == ip2_to) if (ip2_from + UINT_MAX == ip2_to)
return -IPSET_ERR_HASH_RANGE; return -IPSET_ERR_HASH_RANGE;
} else { } else
ip_set_mask_from_to(ip2_from, ip2_to, data.cidr + 1); ip_set_mask_from_to(ip2_from, ip2_to, e.cidr + 1);
}
if (retried) if (retried)
ip = ntohl(h->next.ip); ip = ntohl(h->next.ip);
for (; !before(ip_to, ip); ip++) { for (; !before(ip_to, ip); ip++) {
data.ip = htonl(ip); e.ip = htonl(ip);
p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
: port; : port;
for (; p <= port_to; p++) { for (; p <= port_to; p++) {
data.port = htons(p); e.port = htons(p);
ip2 = retried ip2 = retried
&& ip == ntohl(h->next.ip) && ip == ntohl(h->next.ip)
&& p == ntohs(h->next.port) && p == ntohs(h->next.port)
? ntohl(h->next.ip2) : ip2_from; ? ntohl(h->next.ip2) : ip2_from;
while (!after(ip2, ip2_to)) { while (!after(ip2, ip2_to)) {
data.ip2 = htonl(ip2); e.ip2 = htonl(ip2);
ip2_last = ip_set_range_to_cidr(ip2, ip2_to, ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
&cidr); &cidr);
data.cidr = cidr - 1; e.cidr = cidr - 1;
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
@@ -360,38 +328,50 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret; return ret;
} }
static bool /* IPv6 variants */
hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b)
{
const struct ip_set_hash *x = a->data;
const struct ip_set_hash *y = b->data;
/* Resizing changes htable_bits, so we ignore it */ struct hash_ipportnet6_elem {
return x->maxelem == y->maxelem && union nf_inet_addr ip;
x->timeout == y->timeout; union nf_inet_addr ip2;
} __be16 port;
u8 cidr:7;
u8 nomatch:1;
u8 proto;
};
/* The type variant functions: IPv6 */ struct hash_ipportnet6t_elem {
union nf_inet_addr ip;
union nf_inet_addr ip2;
__be16 port;
u8 cidr:7;
u8 nomatch:1;
u8 proto;
unsigned long timeout;
};
struct hash_ipportnet6_elem { struct hash_ipportnet6c_elem {
union nf_inet_addr ip; union nf_inet_addr ip;
union nf_inet_addr ip2; union nf_inet_addr ip2;
__be16 port; __be16 port;
u8 cidr:7; u8 cidr:7;
u8 nomatch:1; u8 nomatch:1;
u8 proto; u8 proto;
struct ip_set_counter counter;
}; };
struct hash_ipportnet6_telem { struct hash_ipportnet6ct_elem {
union nf_inet_addr ip; union nf_inet_addr ip;
union nf_inet_addr ip2; union nf_inet_addr ip2;
__be16 port; __be16 port;
u8 cidr:7; u8 cidr:7;
u8 nomatch:1; u8 nomatch:1;
u8 proto; u8 proto;
struct ip_set_counter counter;
unsigned long timeout; unsigned long timeout;
}; };
/* Common functions */
static inline bool static inline bool
hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1, hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1,
const struct hash_ipportnet6_elem *ip2, const struct hash_ipportnet6_elem *ip2,
@@ -404,53 +384,22 @@ hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1,
ip1->proto == ip2->proto; ip1->proto == ip2->proto;
} }
static inline bool
hash_ipportnet6_data_isnull(const struct hash_ipportnet6_elem *elem)
{
return elem->proto == 0;
}
static inline void
hash_ipportnet6_data_copy(struct hash_ipportnet6_elem *dst,
const struct hash_ipportnet6_elem *src)
{
memcpy(dst, src, sizeof(*dst));
}
static inline void
hash_ipportnet6_data_flags(struct hash_ipportnet6_elem *dst, u32 flags)
{
dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
}
static inline void
hash_ipportnet6_data_reset_flags(struct hash_ipportnet6_elem *dst, u32 *flags)
{
if (dst->nomatch) {
*flags = IPSET_FLAG_NOMATCH;
dst->nomatch = 0;
}
}
static inline int static inline int
hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem) hash_ipportnet6_do_data_match(const struct hash_ipportnet6_elem *elem)
{ {
return elem->nomatch ? -ENOTEMPTY : 1; return elem->nomatch ? -ENOTEMPTY : 1;
} }
static inline void static inline void
hash_ipportnet6_data_zero_out(struct hash_ipportnet6_elem *elem) hash_ipportnet6_data_set_flags(struct hash_ipportnet6_elem *elem, u32 flags)
{ {
elem->proto = 0; elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
} }
static inline void static inline void
ip6_netmask(union nf_inet_addr *ip, u8 prefix) hash_ipportnet6_data_reset_flags(struct hash_ipportnet6_elem *elem, u8 *flags)
{ {
ip->ip6[0] &= ip_set_netmask6(prefix)[0]; swap(*flags, elem->nomatch);
ip->ip6[1] &= ip_set_netmask6(prefix)[1];
ip->ip6[2] &= ip_set_netmask6(prefix)[2];
ip->ip6[3] &= ip_set_netmask6(prefix)[3];
} }
static inline void static inline void
@@ -480,78 +429,58 @@ hash_ipportnet6_data_list(struct sk_buff *skb,
return 1; return 1;
} }
static bool static inline void
hash_ipportnet6_data_tlist(struct sk_buff *skb, hash_ipportnet6_data_next(struct hash_ipportnet4_elem *next,
const struct hash_ipportnet6_elem *data) const struct hash_ipportnet6_elem *d)
{ {
const struct hash_ipportnet6_telem *e = next->port = d->port;
(const struct hash_ipportnet6_telem *)data;
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout))) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return 0;
nla_put_failure:
return 1;
} }
#undef MTYPE
#undef PF #undef PF
#undef HOST_MASK #undef HOST_MASK
#define MTYPE hash_ipportnet6
#define PF 6 #define PF 6
#define HOST_MASK 128 #define HOST_MASK 128
#include <linux/netfilter/ipset/ip_set_ahash.h> #define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static inline void
hash_ipportnet6_data_next(struct ip_set_hash *h,
const struct hash_ipportnet6_elem *d)
{
h->next.port = d->port;
}
static int static int
hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb, hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt) enum ipset_adt adt, struct ip_set_adt_opt *opt)
{ {
const struct ip_set_hash *h = set->data; const struct hash_ipportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet6_elem data = { struct hash_ipportnet6_elem e = {
.cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1 .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
}; };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
if (adt == IPSET_TEST) if (adt == IPSET_TEST)
data.cidr = HOST_MASK - 1; e.cidr = HOST_MASK - 1;
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto)) &e.port, &e.proto))
return -EINVAL; return -EINVAL;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2.in6); ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2.in6);
ip6_netmask(&data.ip2, data.cidr + 1); ip6_netmask(&e.ip2, e.cidr + 1);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
} }
static int static int
hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{ {
const struct ip_set_hash *h = set->data; const struct hash_ipportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet6_elem data = { .cidr = HOST_MASK - 1 }; struct hash_ipportnet6_elem e = { .cidr = HOST_MASK - 1 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
u32 port, port_to; u32 port, port_to;
u32 timeout = h->timeout;
bool with_ports = false; bool with_ports = false;
u8 cidr; u8 cidr;
int ret; int ret;
@@ -561,6 +490,8 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_CIDR])) tb[IPSET_ATTR_CIDR]))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
@@ -570,11 +501,12 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
ip_set_get_extensions(set, tb, &ext);
if (ret) if (ret)
return ret; return ret;
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2); ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip2);
if (ret) if (ret)
return ret; return ret;
@@ -582,46 +514,41 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
if (!cidr || cidr > HOST_MASK) if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR; return -IPSET_ERR_INVALID_CIDR;
data.cidr = cidr - 1; e.cidr = cidr - 1;
} }
ip6_netmask(&data.ip2, data.cidr + 1); ip6_netmask(&e.ip2, e.cidr + 1);
if (tb[IPSET_ATTR_PORT]) if (tb[IPSET_ATTR_PORT])
data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
else else
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_PROTO]) { if (tb[IPSET_ATTR_PROTO]) {
data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(data.proto); with_ports = ip_set_proto_with_ports(e.proto);
if (data.proto == 0) if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO; return -IPSET_ERR_INVALID_PROTO;
} else } else
return -IPSET_ERR_MISSING_PROTO; return -IPSET_ERR_MISSING_PROTO;
if (!(with_ports || data.proto == IPPROTO_ICMPV6)) if (!(with_ports || e.proto == IPPROTO_ICMPV6))
data.port = 0; e.port = 0;
if (tb[IPSET_ATTR_TIMEOUT]) {
if (!with_timeout(h->timeout))
return -IPSET_ERR_TIMEOUT;
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) { if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH) if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (cadt_flags << 16); flags |= (IPSET_FLAG_NOMATCH << 16);
} }
if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret; return ip_set_enomatch(ret, flags, adt) ? 1 :
ip_set_eexist(ret, flags) ? 0 : ret;
} }
port = ntohs(data.port); port = ntohs(e.port);
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to) if (port > port_to)
swap(port, port_to); swap(port, port_to);
@@ -629,8 +556,8 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried) if (retried)
port = ntohs(h->next.port); port = ntohs(h->next.port);
for (; port <= port_to; port++) { for (; port <= port_to; port++) {
data.port = htons(port); e.port = htons(port);
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
@@ -640,81 +567,6 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
return ret; return ret;
} }
/* Create hash:ip type of sets */
static int
hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
{
struct ip_set_hash *h;
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
size_t hsize;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_HASHSIZE]) {
hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
if (hashsize < IPSET_MIMINAL_HASHSIZE)
hashsize = IPSET_MIMINAL_HASHSIZE;
}
if (tb[IPSET_ATTR_MAXELEM])
maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
h = kzalloc(sizeof(*h)
+ sizeof(struct ip_set_hash_nets)
* (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
if (!h)
return -ENOMEM;
h->maxelem = maxelem;
get_random_bytes(&h->initval, sizeof(h->initval));
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
hsize = htable_size(hbits);
if (hsize == 0) {
kfree(h);
return -ENOMEM;
}
h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
}
h->table->htable_bits = hbits;
set->data = h;
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = set->family == NFPROTO_IPV4
? &hash_ipportnet4_tvariant
: &hash_ipportnet6_tvariant;
if (set->family == NFPROTO_IPV4)
hash_ipportnet4_gc_init(set);
else
hash_ipportnet6_gc_init(set);
} else {
set->variant = set->family == NFPROTO_IPV4
? &hash_ipportnet4_variant : &hash_ipportnet6_variant;
}
pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
set->name, jhash_size(h->table->htable_bits),
h->table->htable_bits, h->maxelem, set->data, h->table);
return 0;
}
static struct ip_set_type hash_ipportnet_type __read_mostly = { static struct ip_set_type hash_ipportnet_type __read_mostly = {
.name = "hash:ip,port,net", .name = "hash:ip,port,net",
.protocol = IPSET_PROTOCOL, .protocol = IPSET_PROTOCOL,
@@ -731,6 +583,7 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
[IPSET_ATTR_PROBES] = { .type = NLA_U8 }, [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
}, },
.adt_policy = { .adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP] = { .type = NLA_NESTED },
@@ -745,6 +598,8 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
}, },
.me = THIS_MODULE, .me = THIS_MODULE,
}; };
...
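The element structures introduced above now come in four flavours per family -- plain, with timeout ("t"), with counter ("c"), and with both ("ct") -- so the generic code pulled in from ip_set_hash_gen.h can find the optional fields behind the common key. A rough stand-alone illustration of that layout idea follows; the field and type names are assumed for the sketch and are not the kernel structures.

/* Layout sketch only: optional extensions appended after a common key,
 * located by offset.  Field and type names are assumed for illustration. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct key {			/* common part used for hashing/comparison */
	uint32_t ip;
	uint8_t cidr;
};

struct key_t {			/* key + timeout ("t" variant) */
	struct key k;
	unsigned long timeout;
};

struct key_ct {			/* key + counter + timeout ("ct" variant) */
	struct key k;
	uint64_t packets;
	uint64_t bytes;
	unsigned long timeout;
};

int main(void)
{
	/* Generic code only needs per-variant offsets to reach the
	 * extensions; the key handling stays identical. */
	printf("timeout offset: t=%zu ct=%zu\n",
	       offsetof(struct key_t, timeout),
	       offsetof(struct key_ct, timeout));
	printf("counter offset in ct variant: %zu\n",
	       offsetof(struct key_ct, packets));
	return 0;
}

Keeping the key fields first means the equality and hashing helpers can be shared by all four variants.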
/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> /* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
@@ -20,12 +20,12 @@
#include <linux/netfilter.h> #include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_hash.h> #include <linux/netfilter/ipset/ip_set_hash.h>
#define REVISION_MIN 0 #define REVISION_MIN 0
/* 1 Range as input support for IPv4 added */ /* 1 Range as input support for IPv4 added */
#define REVISION_MAX 2 /* nomatch flag support added */ /* 2 nomatch flag support added */
#define REVISION_MAX 3 /* Counters support added */
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -33,33 +33,46 @@ IP_SET_MODULE_DESC("hash:net", REVISION_MIN, REVISION_MAX);
MODULE_ALIAS("ip_set_hash:net"); MODULE_ALIAS("ip_set_hash:net");
/* Type specific function prefix */ /* Type specific function prefix */
#define TYPE hash_net #define HTYPE hash_net
#define IP_SET_HASH_WITH_NETS
static bool /* IPv4 variants */
hash_net_same_set(const struct ip_set *a, const struct ip_set *b);
#define hash_net4_same_set hash_net_same_set /* Member elements */
#define hash_net6_same_set hash_net_same_set struct hash_net4_elem {
__be32 ip;
u16 padding0;
u8 nomatch;
u8 cidr;
};
/* The type variant functions: IPv4 */ struct hash_net4t_elem {
__be32 ip;
u16 padding0;
u8 nomatch;
u8 cidr;
unsigned long timeout;
};
/* Member elements without timeout */ struct hash_net4c_elem {
struct hash_net4_elem {
__be32 ip; __be32 ip;
u16 padding0; u16 padding0;
u8 nomatch; u8 nomatch;
u8 cidr; u8 cidr;
struct ip_set_counter counter;
}; };
/* Member elements with timeout support */ struct hash_net4ct_elem {
struct hash_net4_telem {
__be32 ip; __be32 ip;
u16 padding0; u16 padding0;
u8 nomatch; u8 nomatch;
u8 cidr; u8 cidr;
struct ip_set_counter counter;
unsigned long timeout; unsigned long timeout;
}; };
/* Common functions */
static inline bool static inline bool
hash_net4_data_equal(const struct hash_net4_elem *ip1, hash_net4_data_equal(const struct hash_net4_elem *ip1,
const struct hash_net4_elem *ip2, const struct hash_net4_elem *ip2,
@@ -69,40 +82,22 @@ hash_net4_data_equal(const struct hash_net4_elem *ip1,
ip1->cidr == ip2->cidr; ip1->cidr == ip2->cidr;
} }
static inline bool static inline int
hash_net4_data_isnull(const struct hash_net4_elem *elem) hash_net4_do_data_match(const struct hash_net4_elem *elem)
{
return elem->cidr == 0;
}
static inline void
hash_net4_data_copy(struct hash_net4_elem *dst,
const struct hash_net4_elem *src)
{ {
dst->ip = src->ip; return elem->nomatch ? -ENOTEMPTY : 1;
dst->cidr = src->cidr;
dst->nomatch = src->nomatch;
} }
static inline void static inline void
hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags) hash_net4_data_set_flags(struct hash_net4_elem *elem, u32 flags)
{ {
dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
} }
static inline void static inline void
hash_net4_data_reset_flags(struct hash_net4_elem *dst, u32 *flags) hash_net4_data_reset_flags(struct hash_net4_elem *elem, u8 *flags)
{ {
if (dst->nomatch) { swap(*flags, elem->nomatch);
*flags = IPSET_FLAG_NOMATCH;
dst->nomatch = 0;
}
}
static inline int
hash_net4_data_match(const struct hash_net4_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
} }
static inline void static inline void
@@ -112,13 +107,6 @@ hash_net4_data_netmask(struct hash_net4_elem *elem, u8 cidr)
elem->cidr = cidr; elem->cidr = cidr;
} }
/* Zero CIDR values cannot be stored */
static inline void
hash_net4_data_zero_out(struct hash_net4_elem *elem)
{
elem->cidr = 0;
}
static bool static bool
hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data) hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
{ {
@@ -135,106 +123,84 @@ hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
return 1; return 1;
} }
static bool static inline void
hash_net4_data_tlist(struct sk_buff *skb, const struct hash_net4_elem *data) hash_net4_data_next(struct hash_net4_elem *next,
const struct hash_net4_elem *d)
{ {
const struct hash_net4_telem *tdata = next->ip = d->ip;
(const struct hash_net4_telem *)data;
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
nla_put_u8(skb, IPSET_ATTR_CIDR, tdata->cidr) ||
nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout))) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return 0;
nla_put_failure:
return 1;
} }
#define IP_SET_HASH_WITH_NETS #define MTYPE hash_net4
#define PF 4 #define PF 4
#define HOST_MASK 32 #define HOST_MASK 32
#include <linux/netfilter/ipset/ip_set_ahash.h> #include "ip_set_hash_gen.h"
static inline void
hash_net4_data_next(struct ip_set_hash *h,
const struct hash_net4_elem *d)
{
h->next.ip = d->ip;
}
static int static int
hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb, hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt) enum ipset_adt adt, struct ip_set_adt_opt *opt)
{ {
const struct ip_set_hash *h = set->data; const struct hash_net *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net4_elem data = { struct hash_net4_elem e = {
.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
}; };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
if (data.cidr == 0) if (e.cidr == 0)
return -EINVAL; return -EINVAL;
if (adt == IPSET_TEST) if (adt == IPSET_TEST)
data.cidr = HOST_MASK; e.cidr = HOST_MASK;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
data.ip &= ip_set_netmask(data.cidr); e.ip &= ip_set_netmask(e.cidr);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
} }
static int static int
hash_net4_uadt(struct ip_set *set, struct nlattr *tb[], hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{ {
const struct ip_set_hash *h = set->data; const struct hash_net *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net4_elem data = { .cidr = HOST_MASK }; struct hash_net4_elem e = { .cidr = HOST_MASK };
u32 timeout = h->timeout; struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
u32 ip = 0, ip_to, last; u32 ip = 0, ip_to, last;
int ret; int ret;
if (unlikely(!tb[IPSET_ATTR_IP] || if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
ip_set_get_extensions(set, tb, &ext);
if (ret) if (ret)
return ret; return ret;
if (tb[IPSET_ATTR_CIDR]) { if (tb[IPSET_ATTR_CIDR]) {
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!data.cidr || data.cidr > HOST_MASK) if (!e.cidr || e.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR; return -IPSET_ERR_INVALID_CIDR;
} }
if (tb[IPSET_ATTR_TIMEOUT]) { if (tb[IPSET_ATTR_CADT_FLAGS]) {
if (!with_timeout(h->timeout))
return -IPSET_ERR_TIMEOUT;
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH) if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (cadt_flags << 16); flags |= (IPSET_FLAG_NOMATCH << 16);
} }
if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
data.ip = htonl(ip & ip_set_hostmask(data.cidr)); e.ip = htonl(ip & ip_set_hostmask(e.cidr));
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret; return ip_set_enomatch(ret, flags, adt) ? 1 :
ip_set_eexist(ret, flags) ? 0 : ret;
} }
ip_to = ip; ip_to = ip;
@@ -250,9 +216,9 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried) if (retried)
ip = ntohl(h->next.ip); ip = ntohl(h->next.ip);
while (!after(ip, ip_to)) { while (!after(ip, ip_to)) {
data.ip = htonl(ip); e.ip = htonl(ip);
last = ip_set_range_to_cidr(ip, ip_to, &data.cidr); last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
else else
@@ -262,34 +228,42 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret; return ret;
} }
static bool /* IPv6 variants */
hash_net_same_set(const struct ip_set *a, const struct ip_set *b)
{
const struct ip_set_hash *x = a->data;
const struct ip_set_hash *y = b->data;
/* Resizing changes htable_bits, so we ignore it */ struct hash_net6_elem {
return x->maxelem == y->maxelem && union nf_inet_addr ip;
x->timeout == y->timeout; u16 padding0;
} u8 nomatch;
u8 cidr;
};
/* The type variant functions: IPv6 */ struct hash_net6t_elem {
union nf_inet_addr ip;
u16 padding0;
u8 nomatch;
u8 cidr;
unsigned long timeout;
};
struct hash_net6_elem { struct hash_net6c_elem {
union nf_inet_addr ip; union nf_inet_addr ip;
u16 padding0; u16 padding0;
u8 nomatch; u8 nomatch;
u8 cidr; u8 cidr;
struct ip_set_counter counter;
}; };
struct hash_net6_telem { struct hash_net6ct_elem {
union nf_inet_addr ip; union nf_inet_addr ip;
u16 padding0; u16 padding0;
u8 nomatch; u8 nomatch;
u8 cidr; u8 cidr;
struct ip_set_counter counter;
unsigned long timeout; unsigned long timeout;
}; };
/* Common functions */
static inline bool static inline bool
hash_net6_data_equal(const struct hash_net6_elem *ip1, hash_net6_data_equal(const struct hash_net6_elem *ip1,
const struct hash_net6_elem *ip2, const struct hash_net6_elem *ip2,
@@ -299,55 +273,22 @@ hash_net6_data_equal(const struct hash_net6_elem *ip1,
ip1->cidr == ip2->cidr; ip1->cidr == ip2->cidr;
} }
static inline bool
hash_net6_data_isnull(const struct hash_net6_elem *elem)
{
return elem->cidr == 0;
}
static inline void
hash_net6_data_copy(struct hash_net6_elem *dst,
const struct hash_net6_elem *src)
{
dst->ip.in6 = src->ip.in6;
dst->cidr = src->cidr;
dst->nomatch = src->nomatch;
}
static inline void
hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags)
{
dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
}
static inline void
hash_net6_data_reset_flags(struct hash_net6_elem *dst, u32 *flags)
{
if (dst->nomatch) {
*flags = IPSET_FLAG_NOMATCH;
dst->nomatch = 0;
}
}
static inline int static inline int
hash_net6_data_match(const struct hash_net6_elem *elem) hash_net6_do_data_match(const struct hash_net6_elem *elem)
{ {
return elem->nomatch ? -ENOTEMPTY : 1; return elem->nomatch ? -ENOTEMPTY : 1;
} }
static inline void static inline void
hash_net6_data_zero_out(struct hash_net6_elem *elem) hash_net6_data_set_flags(struct hash_net6_elem *elem, u32 flags)
{ {
elem->cidr = 0; elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
} }
static inline void static inline void
ip6_netmask(union nf_inet_addr *ip, u8 prefix) hash_net6_data_reset_flags(struct hash_net6_elem *elem, u8 *flags)
{ {
ip->ip6[0] &= ip_set_netmask6(prefix)[0]; swap(*flags, elem->nomatch);
ip->ip6[1] &= ip_set_netmask6(prefix)[1];
ip->ip6[2] &= ip_set_netmask6(prefix)[2];
ip->ip6[3] &= ip_set_netmask6(prefix)[3];
} }
static inline void static inline void
@@ -373,74 +314,60 @@ hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
return 1; return 1;
} }
static bool static inline void
hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data) hash_net6_data_next(struct hash_net4_elem *next,
const struct hash_net6_elem *d)
{ {
const struct hash_net6_telem *e =
(const struct hash_net6_telem *)data;
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
nla_put_u8(skb, IPSET_ATTR_CIDR, e->cidr) ||
nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout))) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return 0;
nla_put_failure:
return 1;
} }
#undef MTYPE
#undef PF #undef PF
#undef HOST_MASK #undef HOST_MASK
#define MTYPE hash_net6
#define PF 6 #define PF 6
#define HOST_MASK 128 #define HOST_MASK 128
#include <linux/netfilter/ipset/ip_set_ahash.h> #define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static inline void
hash_net6_data_next(struct ip_set_hash *h,
const struct hash_net6_elem *d)
{
}
static int static int
hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb, hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt) enum ipset_adt adt, struct ip_set_adt_opt *opt)
{ {
const struct ip_set_hash *h = set->data; const struct hash_net *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net6_elem data = { struct hash_net6_elem e = {
.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
}; };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
if (data.cidr == 0) if (e.cidr == 0)
return -EINVAL; return -EINVAL;
if (adt == IPSET_TEST) if (adt == IPSET_TEST)
data.cidr = HOST_MASK; e.cidr = HOST_MASK;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
ip6_netmask(&data.ip, data.cidr); ip6_netmask(&e.ip, e.cidr);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
} }
static int static int
hash_net6_uadt(struct ip_set *set, struct nlattr *tb[], hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{ {
const struct ip_set_hash *h = set->data; const struct hash_net *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net6_elem data = { .cidr = HOST_MASK }; struct hash_net6_elem e = { .cidr = HOST_MASK };
u32 timeout = h->timeout; struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
int ret; int ret;
if (unlikely(!tb[IPSET_ATTR_IP] || if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO])) if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
@@ -448,107 +375,29 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
ip_set_get_extensions(set, tb, &ext);
if (ret) if (ret)
return ret; return ret;
if (tb[IPSET_ATTR_CIDR]) if (tb[IPSET_ATTR_CIDR])
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!data.cidr || data.cidr > HOST_MASK) if (!e.cidr || e.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR; return -IPSET_ERR_INVALID_CIDR;
ip6_netmask(&data.ip, data.cidr); ip6_netmask(&e.ip, e.cidr);
if (tb[IPSET_ATTR_TIMEOUT]) { if (tb[IPSET_ATTR_CADT_FLAGS]) {
if (!with_timeout(h->timeout))
return -IPSET_ERR_TIMEOUT;
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH) if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (cadt_flags << 16); flags |= (IPSET_FLAG_NOMATCH << 16);
} }
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret; return ip_set_enomatch(ret, flags, adt) ? 1 :
} ip_set_eexist(ret, flags) ? 0 : ret;
/* Create hash:ip type of sets */
static int
hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
{
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
struct ip_set_hash *h;
u8 hbits;
size_t hsize;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_HASHSIZE]) {
hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
if (hashsize < IPSET_MIMINAL_HASHSIZE)
hashsize = IPSET_MIMINAL_HASHSIZE;
}
if (tb[IPSET_ATTR_MAXELEM])
maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
h = kzalloc(sizeof(*h)
+ sizeof(struct ip_set_hash_nets)
* (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
if (!h)
return -ENOMEM;
h->maxelem = maxelem;
get_random_bytes(&h->initval, sizeof(h->initval));
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
hsize = htable_size(hbits);
if (hsize == 0) {
kfree(h);
return -ENOMEM;
}
h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
}
h->table->htable_bits = hbits;
set->data = h;
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = set->family == NFPROTO_IPV4
? &hash_net4_tvariant : &hash_net6_tvariant;
if (set->family == NFPROTO_IPV4)
hash_net4_gc_init(set);
else
hash_net6_gc_init(set);
} else {
set->variant = set->family == NFPROTO_IPV4
? &hash_net4_variant : &hash_net6_variant;
}
pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
set->name, jhash_size(h->table->htable_bits),
h->table->htable_bits, h->maxelem, set->data, h->table);
return 0;
} }
static struct ip_set_type hash_net_type __read_mostly = { static struct ip_set_type hash_net_type __read_mostly = {
@@ -566,6 +415,7 @@ static struct ip_set_type hash_net_type __read_mostly = {
[IPSET_ATTR_PROBES] = { .type = NLA_U8 }, [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
}, },
.adt_policy = { .adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP] = { .type = NLA_NESTED },
@@ -573,6 +423,8 @@ static struct ip_set_type hash_net_type __read_mostly = {
[IPSET_ATTR_CIDR] = { .type = NLA_U8 }, [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
}, },
.me = THIS_MODULE, .me = THIS_MODULE,
}; };
...
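Throughout the reworked uadt handlers the NOMATCH request is pushed into the upper 16 bits of the command flags word (flags |= IPSET_FLAG_NOMATCH << 16) and the per-element flag is later read back with (flags >> 16) & IPSET_FLAG_NOMATCH. A small stand-alone sketch of that packing follows; the numeric flag value used here is assumed for illustration, not taken from the uapi header.

/* Sketch of carrying a per-element flag in the upper half of the
 * command flags word.  The numeric value of the flag is assumed. */
#include <stdio.h>
#include <stdint.h>

#define FLAG_NOMATCH 0x08	/* assumed value, for illustration only */

struct elem {
	uint8_t nomatch;
};

static void data_set_flags(struct elem *e, uint32_t flags)
{
	/* element flags travel in bits 16..31 of the command word */
	e->nomatch = (flags >> 16) & FLAG_NOMATCH;
}

int main(void)
{
	struct elem e = { 0 };
	uint32_t flags = 0;

	flags |= FLAG_NOMATCH << 16;	/* request "nomatch" on add */
	data_set_flags(&e, flags);
	printf("nomatch stored as %#x\n", e.nomatch);
	return 0;
}

Keeping the element flags out of the low 16 bits leaves that half of the word free for the ordinary command flags.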
/* Copyright (C) 2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> /* Copyright (C) 2011-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
@@ -21,12 +21,12 @@
#include <linux/netfilter.h> #include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_hash.h> #include <linux/netfilter/ipset/ip_set_hash.h>
#define REVISION_MIN 0 #define REVISION_MIN 0
/* 1 nomatch flag support added */ /* 1 nomatch flag support added */
#define REVISION_MAX 2 /* /0 support added */ /* 2 /0 support added */
#define REVISION_MAX 3 /* Counters support added */
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -127,17 +127,14 @@ iface_add(struct rb_root *root, const char **iface)
} }
/* Type specific function prefix */ /* Type specific function prefix */
#define TYPE hash_netiface #define HTYPE hash_netiface
#define IP_SET_HASH_WITH_NETS
static bool #define IP_SET_HASH_WITH_RBTREE
hash_netiface_same_set(const struct ip_set *a, const struct ip_set *b); #define IP_SET_HASH_WITH_MULTI
#define hash_netiface4_same_set hash_netiface_same_set
#define hash_netiface6_same_set hash_netiface_same_set
#define STREQ(a, b) (strcmp(a, b) == 0) #define STREQ(a, b) (strcmp(a, b) == 0)
/* The type variant functions: IPv4 */ /* IPv4 variants */
struct hash_netiface4_elem_hashed { struct hash_netiface4_elem_hashed {
__be32 ip; __be32 ip;
@@ -147,8 +144,6 @@ struct hash_netiface4_elem_hashed {
u8 elem; u8 elem;
}; };
#define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed)
/* Member elements without timeout */ /* Member elements without timeout */
struct hash_netiface4_elem { struct hash_netiface4_elem {
__be32 ip; __be32 ip;
@@ -159,17 +154,39 @@ struct hash_netiface4_elem {
const char *iface; const char *iface;
}; };
/* Member elements with timeout support */ struct hash_netiface4t_elem {
struct hash_netiface4_telem { __be32 ip;
u8 physdev;
u8 cidr;
u8 nomatch;
u8 elem;
const char *iface;
unsigned long timeout;
};
struct hash_netiface4c_elem {
__be32 ip;
u8 physdev;
u8 cidr;
u8 nomatch;
u8 elem;
const char *iface;
struct ip_set_counter counter;
};
struct hash_netiface4ct_elem {
__be32 ip; __be32 ip;
u8 physdev; u8 physdev;
u8 cidr; u8 cidr;
u8 nomatch; u8 nomatch;
u8 elem; u8 elem;
const char *iface; const char *iface;
struct ip_set_counter counter;
unsigned long timeout; unsigned long timeout;
}; };
/* Common functions */
static inline bool static inline bool
hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1, hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1,
const struct hash_netiface4_elem *ip2, const struct hash_netiface4_elem *ip2,
@@ -182,38 +199,22 @@ hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1,
ip1->iface == ip2->iface; ip1->iface == ip2->iface;
} }
static inline bool static inline int
hash_netiface4_data_isnull(const struct hash_netiface4_elem *elem) hash_netiface4_do_data_match(const struct hash_netiface4_elem *elem)
{
return elem->elem == 0;
}
static inline void
hash_netiface4_data_copy(struct hash_netiface4_elem *dst,
const struct hash_netiface4_elem *src)
{ {
memcpy(dst, src, sizeof(*dst)); return elem->nomatch ? -ENOTEMPTY : 1;
} }
static inline void static inline void
hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags) hash_netiface4_data_set_flags(struct hash_netiface4_elem *elem, u32 flags)
{ {
dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
} }
static inline void static inline void
hash_netiface4_data_reset_flags(struct hash_netiface4_elem *dst, u32 *flags) hash_netiface4_data_reset_flags(struct hash_netiface4_elem *elem, u8 *flags)
{
if (dst->nomatch) {
*flags = IPSET_FLAG_NOMATCH;
dst->nomatch = 0;
}
}
static inline int
hash_netiface4_data_match(const struct hash_netiface4_elem *elem)
{ {
return elem->nomatch ? -ENOTEMPTY : 1; swap(*flags, elem->nomatch);
} }
static inline void static inline void
@@ -223,12 +224,6 @@ hash_netiface4_data_netmask(struct hash_netiface4_elem *elem, u8 cidr)
elem->cidr = cidr; elem->cidr = cidr;
} }
static inline void
hash_netiface4_data_zero_out(struct hash_netiface4_elem *elem)
{
elem->elem = 0;
}
static bool static bool
hash_netiface4_data_list(struct sk_buff *skb, hash_netiface4_data_list(struct sk_buff *skb,
const struct hash_netiface4_elem *data) const struct hash_netiface4_elem *data)
@@ -249,66 +244,40 @@ hash_netiface4_data_list(struct sk_buff *skb,
return 1; return 1;
} }
static bool static inline void
hash_netiface4_data_tlist(struct sk_buff *skb, hash_netiface4_data_next(struct hash_netiface4_elem *next,
const struct hash_netiface4_elem *data) const struct hash_netiface4_elem *d)
{ {
const struct hash_netiface4_telem *tdata = next->ip = d->ip;
(const struct hash_netiface4_telem *)data;
u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
if (data->nomatch)
flags |= IPSET_FLAG_NOMATCH;
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) ||
nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout))))
goto nla_put_failure;
return 0;
nla_put_failure:
return 1;
} }
#define IP_SET_HASH_WITH_NETS #define MTYPE hash_netiface4
#define IP_SET_HASH_WITH_RBTREE
#define IP_SET_HASH_WITH_MULTI
#define PF 4 #define PF 4
#define HOST_MASK 32 #define HOST_MASK 32
#include <linux/netfilter/ipset/ip_set_ahash.h> #define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed)
#include "ip_set_hash_gen.h"
static inline void
hash_netiface4_data_next(struct ip_set_hash *h,
const struct hash_netiface4_elem *d)
{
h->next.ip = d->ip;
}
static int static int
hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb, hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt) enum ipset_adt adt, struct ip_set_adt_opt *opt)
{ {
struct ip_set_hash *h = set->data; struct hash_netiface *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netiface4_elem data = { struct hash_netiface4_elem e = {
.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK, .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK,
.elem = 1, .elem = 1,
}; };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
int ret; int ret;
if (data.cidr == 0) if (e.cidr == 0)
return -EINVAL; return -EINVAL;
if (adt == IPSET_TEST) if (adt == IPSET_TEST)
data.cidr = HOST_MASK; e.cidr = HOST_MASK;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
data.ip &= ip_set_netmask(data.cidr); e.ip &= ip_set_netmask(e.cidr);
#define IFACE(dir) (par->dir ? par->dir->name : NULL) #define IFACE(dir) (par->dir ? par->dir->name : NULL)
#define PHYSDEV(dir) (nf_bridge->dir ? nf_bridge->dir->name : NULL) #define PHYSDEV(dir) (nf_bridge->dir ? nf_bridge->dir->name : NULL)
@@ -320,72 +289,69 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
if (!nf_bridge) if (!nf_bridge)
return -EINVAL; return -EINVAL;
data.iface = SRCDIR ? PHYSDEV(physindev) : PHYSDEV(physoutdev); e.iface = SRCDIR ? PHYSDEV(physindev) : PHYSDEV(physoutdev);
data.physdev = 1; e.physdev = 1;
#else #else
data.iface = NULL; e.iface = NULL;
#endif #endif
} else } else
data.iface = SRCDIR ? IFACE(in) : IFACE(out); e.iface = SRCDIR ? IFACE(in) : IFACE(out);
if (!data.iface) if (!e.iface)
return -EINVAL; return -EINVAL;
ret = iface_test(&h->rbtree, &data.iface); ret = iface_test(&h->rbtree, &e.iface);
if (adt == IPSET_ADD) { if (adt == IPSET_ADD) {
if (!ret) { if (!ret) {
ret = iface_add(&h->rbtree, &data.iface); ret = iface_add(&h->rbtree, &e.iface);
if (ret) if (ret)
return ret; return ret;
} }
} else if (!ret) } else if (!ret)
return ret; return ret;
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
} }
static int static int
hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{ {
struct ip_set_hash *h = set->data; struct hash_netiface *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netiface4_elem data = { .cidr = HOST_MASK, .elem = 1 }; struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
u32 ip = 0, ip_to, last; u32 ip = 0, ip_to, last;
u32 timeout = h->timeout;
char iface[IFNAMSIZ]; char iface[IFNAMSIZ];
int ret; int ret;
if (unlikely(!tb[IPSET_ATTR_IP] || if (unlikely(!tb[IPSET_ATTR_IP] ||
!tb[IPSET_ATTR_IFACE] || !tb[IPSET_ATTR_IFACE] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
ip_set_get_extensions(set, tb, &ext);
if (ret) if (ret)
return ret; return ret;
if (tb[IPSET_ATTR_CIDR]) { if (tb[IPSET_ATTR_CIDR]) {
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (data.cidr > HOST_MASK) if (e.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR; return -IPSET_ERR_INVALID_CIDR;
} }
if (tb[IPSET_ATTR_TIMEOUT]) {
if (!with_timeout(h->timeout))
return -IPSET_ERR_TIMEOUT;
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE])); strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE]));
data.iface = iface; e.iface = iface;
ret = iface_test(&h->rbtree, &data.iface); ret = iface_test(&h->rbtree, &e.iface);
if (adt == IPSET_ADD) { if (adt == IPSET_ADD) {
if (!ret) { if (!ret) {
ret = iface_add(&h->rbtree, &data.iface); ret = iface_add(&h->rbtree, &e.iface);
if (ret) if (ret)
return ret; return ret;
} }
@@ -395,14 +361,15 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CADT_FLAGS]) { if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_PHYSDEV) if (cadt_flags & IPSET_FLAG_PHYSDEV)
data.physdev = 1; e.physdev = 1;
if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH)) if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (cadt_flags << 16); flags |= (IPSET_FLAG_NOMATCH << 16);
} }
if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
data.ip = htonl(ip & ip_set_hostmask(data.cidr)); e.ip = htonl(ip & ip_set_hostmask(e.cidr));
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret; return ip_set_enomatch(ret, flags, adt) ? 1 :
ip_set_eexist(ret, flags) ? 0 : ret;
} }
if (tb[IPSET_ATTR_IP_TO]) { if (tb[IPSET_ATTR_IP_TO]) {
@@ -413,16 +380,15 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
swap(ip, ip_to); swap(ip, ip_to);
if (ip + UINT_MAX == ip_to) if (ip + UINT_MAX == ip_to)
return -IPSET_ERR_HASH_RANGE; return -IPSET_ERR_HASH_RANGE;
} else { } else
ip_set_mask_from_to(ip, ip_to, data.cidr); ip_set_mask_from_to(ip, ip_to, e.cidr);
}
if (retried) if (retried)
ip = ntohl(h->next.ip); ip = ntohl(h->next.ip);
while (!after(ip, ip_to)) { while (!after(ip, ip_to)) {
data.ip = htonl(ip); e.ip = htonl(ip);
last = ip_set_range_to_cidr(ip, ip_to, &data.cidr); last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
@@ -433,18 +399,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret; return ret;
} }
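When IPSET_ATTR_IP_TO is given, the uadt handlers above split the inclusive address range into aligned CIDR blocks and add them one by one (the "while (!after(ip, ip_to))" loops). The splitting itself can be sketched in plain C as follows; this is a stand-alone illustration of the same iteration pattern, not the kernel's ip_set_range_to_cidr().

/* Stand-alone illustration of range-to-CIDR splitting; this is not the
 * kernel implementation, just the same iteration pattern. */
#include <stdio.h>
#include <stdint.h>

/* Grow the block starting at "from" as long as it stays aligned and
 * does not pass "to"; return its last address and prefix via *cidr. */
static uint32_t range_to_cidr(uint32_t from, uint32_t to, uint8_t *cidr)
{
	uint32_t hostmask = 0;	/* start with a /32 block */
	uint8_t prefix = 32;

	while (prefix > 0) {
		uint32_t bigger = (hostmask << 1) | 1;

		if ((from & bigger) != 0 ||	/* block not aligned */
		    from + bigger < from ||	/* address space wrap */
		    from + bigger > to)		/* block overshoots "to" */
			break;
		hostmask = bigger;
		prefix--;
	}
	*cidr = prefix;
	return from + hostmask;
}

int main(void)
{
	uint32_t ip = 0x0a000001, ip_to = 0x0a00000a;	/* 10.0.0.1 - 10.0.0.10 */
	uint8_t cidr;

	while (ip <= ip_to) {
		uint32_t last = range_to_cidr(ip, ip_to, &cidr);

		printf("%u.%u.%u.%u/%u\n", ip >> 24, (ip >> 16) & 0xff,
		       (ip >> 8) & 0xff, ip & 0xff, cidr);
		if (last == UINT32_MAX)	/* avoid wrapping past the end */
			break;
		ip = last + 1;
	}
	return 0;
}

Run against 10.0.0.1 - 10.0.0.10 this prints a /32, /31, /30, /31 and /32, i.e. the minimal CIDR cover of the range, which is exactly what ends up stored in the set.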
static bool /* IPv6 variants */
hash_netiface_same_set(const struct ip_set *a, const struct ip_set *b)
{
const struct ip_set_hash *x = a->data;
const struct ip_set_hash *y = b->data;
/* Resizing changes htable_bits, so we ignore it */
return x->maxelem == y->maxelem &&
x->timeout == y->timeout;
}
/* The type variant functions: IPv6 */
struct hash_netiface6_elem_hashed { struct hash_netiface6_elem_hashed {
union nf_inet_addr ip; union nf_inet_addr ip;
@@ -454,8 +409,6 @@ struct hash_netiface6_elem_hashed {
u8 elem; u8 elem;
}; };
#define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed)
struct hash_netiface6_elem { struct hash_netiface6_elem {
union nf_inet_addr ip; union nf_inet_addr ip;
u8 physdev; u8 physdev;
@@ -465,16 +418,39 @@ struct hash_netiface6_elem {
const char *iface; const char *iface;
}; };
struct hash_netiface6_telem { struct hash_netiface6t_elem {
union nf_inet_addr ip;
u8 physdev;
u8 cidr;
u8 nomatch;
u8 elem;
const char *iface;
unsigned long timeout;
};
struct hash_netiface6c_elem {
union nf_inet_addr ip; union nf_inet_addr ip;
u8 physdev; u8 physdev;
u8 cidr; u8 cidr;
u8 nomatch; u8 nomatch;
u8 elem; u8 elem;
const char *iface; const char *iface;
struct ip_set_counter counter;
};
struct hash_netiface6ct_elem {
union nf_inet_addr ip;
u8 physdev;
u8 cidr;
u8 nomatch;
u8 elem;
const char *iface;
struct ip_set_counter counter;
unsigned long timeout; unsigned long timeout;
}; };
/* Common functions */
static inline bool static inline bool
hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1, hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1,
const struct hash_netiface6_elem *ip2, const struct hash_netiface6_elem *ip2,
@@ -487,53 +463,22 @@ hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1,
ip1->iface == ip2->iface; ip1->iface == ip2->iface;
} }
static inline bool
hash_netiface6_data_isnull(const struct hash_netiface6_elem *elem)
{
return elem->elem == 0;
}
static inline void
hash_netiface6_data_copy(struct hash_netiface6_elem *dst,
const struct hash_netiface6_elem *src)
{
memcpy(dst, src, sizeof(*dst));
}
static inline void
hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags)
{
dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
}
static inline int static inline int
hash_netiface6_data_match(const struct hash_netiface6_elem *elem) hash_netiface6_do_data_match(const struct hash_netiface6_elem *elem)
{ {
return elem->nomatch ? -ENOTEMPTY : 1; return elem->nomatch ? -ENOTEMPTY : 1;
} }
static inline void static inline void
hash_netiface6_data_reset_flags(struct hash_netiface6_elem *dst, u32 *flags) hash_netiface6_data_set_flags(struct hash_netiface6_elem *elem, u32 flags)
{
if (dst->nomatch) {
*flags = IPSET_FLAG_NOMATCH;
dst->nomatch = 0;
}
}
static inline void
hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem)
{ {
elem->elem = 0; elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
} }
static inline void static inline void
ip6_netmask(union nf_inet_addr *ip, u8 prefix) hash_netiface6_data_reset_flags(struct hash_netiface6_elem *elem, u8 *flags)
{ {
ip->ip6[0] &= ip_set_netmask6(prefix)[0]; swap(*flags, elem->nomatch);
ip->ip6[1] &= ip_set_netmask6(prefix)[1];
ip->ip6[2] &= ip_set_netmask6(prefix)[2];
ip->ip6[3] &= ip_set_netmask6(prefix)[3];
} }
static inline void static inline void
@@ -563,63 +508,45 @@ hash_netiface6_data_list(struct sk_buff *skb,
return 1; return 1;
} }
static bool static inline void
hash_netiface6_data_tlist(struct sk_buff *skb, hash_netiface6_data_next(struct hash_netiface4_elem *next,
const struct hash_netiface6_elem *data) const struct hash_netiface6_elem *d)
{ {
const struct hash_netiface6_telem *e =
(const struct hash_netiface6_telem *)data;
u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
if (data->nomatch)
flags |= IPSET_FLAG_NOMATCH;
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) ||
nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout))))
goto nla_put_failure;
return 0;
nla_put_failure:
return 1;
} }
#undef MTYPE
#undef PF #undef PF
#undef HOST_MASK #undef HOST_MASK
#undef HKEY_DATALEN
#define MTYPE hash_netiface6
#define PF 6 #define PF 6
#define HOST_MASK 128 #define HOST_MASK 128
#include <linux/netfilter/ipset/ip_set_ahash.h> #define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed)
#define IP_SET_EMIT_CREATE
static inline void #include "ip_set_hash_gen.h"
hash_netiface6_data_next(struct ip_set_hash *h,
const struct hash_netiface6_elem *d)
{
}
static int static int
hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb, hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt) enum ipset_adt adt, struct ip_set_adt_opt *opt)
{ {
struct ip_set_hash *h = set->data; struct hash_netiface *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netiface6_elem data = { struct hash_netiface6_elem e = {
.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK, .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK,
.elem = 1, .elem = 1,
}; };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
int ret; int ret;
if (data.cidr == 0) if (e.cidr == 0)
return -EINVAL; return -EINVAL;
if (adt == IPSET_TEST) if (adt == IPSET_TEST)
data.cidr = HOST_MASK; e.cidr = HOST_MASK;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
ip6_netmask(&data.ip, data.cidr); ip6_netmask(&e.ip, e.cidr);
if (opt->cmdflags & IPSET_FLAG_PHYSDEV) { if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
#ifdef CONFIG_BRIDGE_NETFILTER #ifdef CONFIG_BRIDGE_NETFILTER
@@ -627,44 +554,46 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
if (!nf_bridge) if (!nf_bridge)
return -EINVAL; return -EINVAL;
data.iface = SRCDIR ? PHYSDEV(physindev) : PHYSDEV(physoutdev); e.iface = SRCDIR ? PHYSDEV(physindev) : PHYSDEV(physoutdev);
data.physdev = 1; e.physdev = 1;
#else #else
data.iface = NULL; e.iface = NULL;
#endif #endif
} else } else
data.iface = SRCDIR ? IFACE(in) : IFACE(out); e.iface = SRCDIR ? IFACE(in) : IFACE(out);
if (!data.iface) if (!e.iface)
return -EINVAL; return -EINVAL;
ret = iface_test(&h->rbtree, &data.iface); ret = iface_test(&h->rbtree, &e.iface);
if (adt == IPSET_ADD) { if (adt == IPSET_ADD) {
if (!ret) { if (!ret) {
ret = iface_add(&h->rbtree, &data.iface); ret = iface_add(&h->rbtree, &e.iface);
if (ret) if (ret)
return ret; return ret;
} }
} else if (!ret) } else if (!ret)
return ret; return ret;
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
} }
static int static int
hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[], hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{ {
struct ip_set_hash *h = set->data; struct hash_netiface *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netiface6_elem data = { .cidr = HOST_MASK, .elem = 1 }; struct hash_netiface6_elem e = { .cidr = HOST_MASK, .elem = 1 };
u32 timeout = h->timeout; struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
char iface[IFNAMSIZ]; char iface[IFNAMSIZ];
int ret; int ret;
if (unlikely(!tb[IPSET_ATTR_IP] || if (unlikely(!tb[IPSET_ATTR_IP] ||
!tb[IPSET_ATTR_IFACE] || !tb[IPSET_ATTR_IFACE] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO])) if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
@@ -672,28 +601,23 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
ip_set_get_extensions(set, tb, &ext);
if (ret) if (ret)
return ret; return ret;
if (tb[IPSET_ATTR_CIDR]) if (tb[IPSET_ATTR_CIDR])
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (data.cidr > HOST_MASK) if (e.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR; return -IPSET_ERR_INVALID_CIDR;
ip6_netmask(&data.ip, data.cidr); ip6_netmask(&e.ip, e.cidr);
if (tb[IPSET_ATTR_TIMEOUT]) {
if (!with_timeout(h->timeout))
return -IPSET_ERR_TIMEOUT;
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE])); strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE]));
data.iface = iface; e.iface = iface;
ret = iface_test(&h->rbtree, &data.iface); ret = iface_test(&h->rbtree, &e.iface);
if (adt == IPSET_ADD) { if (adt == IPSET_ADD) {
if (!ret) { if (!ret) {
ret = iface_add(&h->rbtree, &data.iface); ret = iface_add(&h->rbtree, &e.iface);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -703,90 +627,15 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CADT_FLAGS]) { if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_PHYSDEV) if (cadt_flags & IPSET_FLAG_PHYSDEV)
data.physdev = 1; e.physdev = 1;
if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH)) if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (cadt_flags << 16); flags |= (IPSET_FLAG_NOMATCH << 16);
}
ret = adtfn(set, &data, timeout, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
}
/* Create hash:ip type of sets */
static int
hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
{
struct ip_set_hash *h;
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
size_t hsize;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_HASHSIZE]) {
hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
if (hashsize < IPSET_MIMINAL_HASHSIZE)
hashsize = IPSET_MIMINAL_HASHSIZE;
} }
if (tb[IPSET_ATTR_MAXELEM]) ret = adtfn(set, &e, &ext, &ext, flags);
maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
h = kzalloc(sizeof(*h)
+ sizeof(struct ip_set_hash_nets)
* (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
if (!h)
return -ENOMEM;
h->maxelem = maxelem; return ip_set_enomatch(ret, flags, adt) ? 1 :
get_random_bytes(&h->initval, sizeof(h->initval)); ip_set_eexist(ret, flags) ? 0 : ret;
h->timeout = IPSET_NO_TIMEOUT;
h->ahash_max = AHASH_MAX_SIZE;
hbits = htable_bits(hashsize);
hsize = htable_size(hbits);
if (hsize == 0) {
kfree(h);
return -ENOMEM;
}
h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
}
h->table->htable_bits = hbits;
h->rbtree = RB_ROOT;
set->data = h;
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = set->family == NFPROTO_IPV4
? &hash_netiface4_tvariant : &hash_netiface6_tvariant;
if (set->family == NFPROTO_IPV4)
hash_netiface4_gc_init(set);
else
hash_netiface6_gc_init(set);
} else {
set->variant = set->family == NFPROTO_IPV4
? &hash_netiface4_variant : &hash_netiface6_variant;
}
pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
set->name, jhash_size(h->table->htable_bits),
h->table->htable_bits, h->maxelem, set->data, h->table);
return 0;
} }
static struct ip_set_type hash_netiface_type __read_mostly = { static struct ip_set_type hash_netiface_type __read_mostly = {
...@@ -806,6 +655,7 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 }, [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
}, },
.adt_policy = { .adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP] = { .type = NLA_NESTED },
...@@ -816,6 +666,8 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
[IPSET_ATTR_CIDR] = { .type = NLA_U8 }, [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
}, },
.me = THIS_MODULE, .me = THIS_MODULE,
}; };
...
/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> /* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
...@@ -20,14 +20,14 @@
#include <linux/netfilter.h> #include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_getport.h> #include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/netfilter/ipset/ip_set_hash.h> #include <linux/netfilter/ipset/ip_set_hash.h>
#define REVISION_MIN 0 #define REVISION_MIN 0
/* 1 SCTP and UDPLITE support added */ /* 1 SCTP and UDPLITE support added */
/* 2 Range as input support for IPv4 added */ /* 2 Range as input support for IPv4 added */
#define REVISION_MAX 3 /* nomatch flag support added */ /* 3 nomatch flag support added */
#define REVISION_MAX 4 /* Counters support added */
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
...@@ -35,15 +35,9 @@ IP_SET_MODULE_DESC("hash:net,port", REVISION_MIN, REVISION_MAX);
MODULE_ALIAS("ip_set_hash:net,port"); MODULE_ALIAS("ip_set_hash:net,port");
/* Type specific function prefix */ /* Type specific function prefix */
#define TYPE hash_netport #define HTYPE hash_netport
#define IP_SET_HASH_WITH_PROTO
static bool #define IP_SET_HASH_WITH_NETS
hash_netport_same_set(const struct ip_set *a, const struct ip_set *b);
#define hash_netport4_same_set hash_netport_same_set
#define hash_netport6_same_set hash_netport_same_set
/* The type variant functions: IPv4 */
/* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0 /* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0
* However this way we have to store internally cidr - 1, * However this way we have to store internally cidr - 1,
...@@ -51,7 +45,9 @@ hash_netport_same_set(const struct ip_set *a, const struct ip_set *b);
*/ */
#define IP_SET_HASH_WITH_NETS_PACKED #define IP_SET_HASH_WITH_NETS_PACKED
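/*
 * Editor's note, illustrative only (not part of this patch): with
 * IP_SET_HASH_WITH_NETS_PACKED the element keeps cidr - 1 in a 7-bit
 * field so the spare bit can carry the "nomatch" flag. A minimal,
 * hypothetical sketch of that convention (struct and helper names below
 * are invented for illustration, never compiled):
 */
#if 0
struct packed_cidr_example {
	u8 cidr:7;	/* stores real_cidr - 1, so prefixes up to /128 fit */
	u8 nomatch:1;	/* reverse-match flag squeezed into the spare bit */
};

static inline void example_store_cidr(struct packed_cidr_example *e, u8 cidr)
{
	e->cidr = cidr - 1;	/* mirrors e.cidr = cidr - 1 in the adt code */
}

static inline u8 example_real_cidr(const struct packed_cidr_example *e)
{
	return e->cidr + 1;	/* mirrors the ip_set_netmask(e.cidr + 1) users */
}
#endif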
/* Member elements without timeout */ /* IPv4 variants */
/* Member elements */
struct hash_netport4_elem { struct hash_netport4_elem {
__be32 ip; __be32 ip;
__be16 port; __be16 port;
...@@ -60,16 +56,36 @@ struct hash_netport4_elem {
u8 nomatch:1; u8 nomatch:1;
}; };
/* Member elements with timeout support */ struct hash_netport4t_elem {
struct hash_netport4_telem { __be32 ip;
__be16 port;
u8 proto;
u8 cidr:7;
u8 nomatch:1;
unsigned long timeout;
};
struct hash_netport4c_elem {
__be32 ip;
__be16 port;
u8 proto;
u8 cidr:7;
u8 nomatch:1;
struct ip_set_counter counter;
};
struct hash_netport4ct_elem {
__be32 ip; __be32 ip;
__be16 port; __be16 port;
u8 proto; u8 proto;
u8 cidr:7; u8 cidr:7;
u8 nomatch:1; u8 nomatch:1;
struct ip_set_counter counter;
unsigned long timeout; unsigned long timeout;
}; };
/* Common functions */
static inline bool static inline bool
hash_netport4_data_equal(const struct hash_netport4_elem *ip1, hash_netport4_data_equal(const struct hash_netport4_elem *ip1,
const struct hash_netport4_elem *ip2, const struct hash_netport4_elem *ip2,
...@@ -81,42 +97,22 @@ hash_netport4_data_equal(const struct hash_netport4_elem *ip1,
ip1->cidr == ip2->cidr; ip1->cidr == ip2->cidr;
} }
static inline bool static inline int
hash_netport4_data_isnull(const struct hash_netport4_elem *elem) hash_netport4_do_data_match(const struct hash_netport4_elem *elem)
{
return elem->proto == 0;
}
static inline void
hash_netport4_data_copy(struct hash_netport4_elem *dst,
const struct hash_netport4_elem *src)
{ {
dst->ip = src->ip; return elem->nomatch ? -ENOTEMPTY : 1;
dst->port = src->port;
dst->proto = src->proto;
dst->cidr = src->cidr;
dst->nomatch = src->nomatch;
} }
static inline void static inline void
hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags) hash_netport4_data_set_flags(struct hash_netport4_elem *elem, u32 flags)
{ {
dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
} }
static inline void static inline void
hash_netport4_data_reset_flags(struct hash_netport4_elem *dst, u32 *flags) hash_netport4_data_reset_flags(struct hash_netport4_elem *elem, u8 *flags)
{ {
if (dst->nomatch) { swap(*flags, elem->nomatch);
*flags = IPSET_FLAG_NOMATCH;
dst->nomatch = 0;
}
}
static inline int
hash_netport4_data_match(const struct hash_netport4_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
} }
static inline void static inline void
...@@ -126,12 +122,6 @@ hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr)
elem->cidr = cidr - 1; elem->cidr = cidr - 1;
} }
static inline void
hash_netport4_data_zero_out(struct hash_netport4_elem *elem)
{
elem->proto = 0;
}
static bool static bool
hash_netport4_data_list(struct sk_buff *skb, hash_netport4_data_list(struct sk_buff *skb,
const struct hash_netport4_elem *data) const struct hash_netport4_elem *data)
...@@ -151,77 +141,53 @@ hash_netport4_data_list(struct sk_buff *skb,
return 1; return 1;
} }
static bool static inline void
hash_netport4_data_tlist(struct sk_buff *skb, hash_netport4_data_next(struct hash_netport4_elem *next,
const struct hash_netport4_elem *data) const struct hash_netport4_elem *d)
{ {
const struct hash_netport4_telem *tdata = next->ip = d->ip;
(const struct hash_netport4_telem *)data; next->port = d->port;
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout))) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return 0;
nla_put_failure:
return 1;
} }
#define IP_SET_HASH_WITH_PROTO #define MTYPE hash_netport4
#define IP_SET_HASH_WITH_NETS
#define PF 4 #define PF 4
#define HOST_MASK 32 #define HOST_MASK 32
#include <linux/netfilter/ipset/ip_set_ahash.h> #include "ip_set_hash_gen.h"
static inline void
hash_netport4_data_next(struct ip_set_hash *h,
const struct hash_netport4_elem *d)
{
h->next.ip = d->ip;
h->next.port = d->port;
}
static int static int
hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb, hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt) enum ipset_adt adt, struct ip_set_adt_opt *opt)
{ {
const struct ip_set_hash *h = set->data; const struct hash_netport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport4_elem data = { struct hash_netport4_elem e = {
.cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1 .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
}; };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
if (adt == IPSET_TEST) if (adt == IPSET_TEST)
data.cidr = HOST_MASK - 1; e.cidr = HOST_MASK - 1;
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto)) &e.port, &e.proto))
return -EINVAL; return -EINVAL;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip); ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
data.ip &= ip_set_netmask(data.cidr + 1); e.ip &= ip_set_netmask(e.cidr + 1);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
} }
static int static int
hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{ {
const struct ip_set_hash *h = set->data; const struct hash_netport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport4_elem data = { .cidr = HOST_MASK - 1 }; struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
u32 port, port_to, p = 0, ip = 0, ip_to, last; u32 port, port_to, p = 0, ip = 0, ip_to, last;
u32 timeout = h->timeout;
bool with_ports = false; bool with_ports = false;
u8 cidr; u8 cidr;
int ret; int ret;
...@@ -230,13 +196,16 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
ip_set_get_extensions(set, tb, &ext);
if (ret) if (ret)
return ret; return ret;
...@@ -244,47 +213,42 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!cidr || cidr > HOST_MASK) if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR; return -IPSET_ERR_INVALID_CIDR;
data.cidr = cidr - 1; e.cidr = cidr - 1;
} }
if (tb[IPSET_ATTR_PORT]) if (tb[IPSET_ATTR_PORT])
data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
else else
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_PROTO]) { if (tb[IPSET_ATTR_PROTO]) {
data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(data.proto); with_ports = ip_set_proto_with_ports(e.proto);
if (data.proto == 0) if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO; return -IPSET_ERR_INVALID_PROTO;
} else } else
return -IPSET_ERR_MISSING_PROTO; return -IPSET_ERR_MISSING_PROTO;
if (!(with_ports || data.proto == IPPROTO_ICMP)) if (!(with_ports || e.proto == IPPROTO_ICMP))
data.port = 0; e.port = 0;
if (tb[IPSET_ATTR_TIMEOUT]) {
if (!with_timeout(h->timeout))
return -IPSET_ERR_TIMEOUT;
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) { if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH) if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (cadt_flags << 16); flags |= (IPSET_FLAG_NOMATCH << 16);
} }
if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) { if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) {
data.ip = htonl(ip & ip_set_hostmask(data.cidr + 1)); e.ip = htonl(ip & ip_set_hostmask(e.cidr + 1));
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret; return ip_set_enomatch(ret, flags, adt) ? 1 :
ip_set_eexist(ret, flags) ? 0 : ret;
} }
port = port_to = ntohs(data.port); port = port_to = ntohs(e.port);
if (tb[IPSET_ATTR_PORT_TO]) { if (tb[IPSET_ATTR_PORT_TO]) {
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port_to < port) if (port_to < port)
...@@ -298,21 +262,20 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
swap(ip, ip_to); swap(ip, ip_to);
if (ip + UINT_MAX == ip_to) if (ip + UINT_MAX == ip_to)
return -IPSET_ERR_HASH_RANGE; return -IPSET_ERR_HASH_RANGE;
} else { } else
ip_set_mask_from_to(ip, ip_to, data.cidr + 1); ip_set_mask_from_to(ip, ip_to, e.cidr + 1);
}
if (retried) if (retried)
ip = ntohl(h->next.ip); ip = ntohl(h->next.ip);
while (!after(ip, ip_to)) { while (!after(ip, ip_to)) {
data.ip = htonl(ip); e.ip = htonl(ip);
last = ip_set_range_to_cidr(ip, ip_to, &cidr); last = ip_set_range_to_cidr(ip, ip_to, &cidr);
data.cidr = cidr - 1; e.cidr = cidr - 1;
p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
: port; : port;
for (; p <= port_to; p++) { for (; p <= port_to; p++) {
data.port = htons(p); e.port = htons(p);
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
...@@ -324,36 +287,46 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret; return ret;
} }
static bool /* IPv6 variants */
hash_netport_same_set(const struct ip_set *a, const struct ip_set *b)
{
const struct ip_set_hash *x = a->data;
const struct ip_set_hash *y = b->data;
/* Resizing changes htable_bits, so we ignore it */ struct hash_netport6_elem {
return x->maxelem == y->maxelem && union nf_inet_addr ip;
x->timeout == y->timeout; __be16 port;
} u8 proto;
u8 cidr:7;
u8 nomatch:1;
};
/* The type variant functions: IPv6 */ struct hash_netport6t_elem {
union nf_inet_addr ip;
__be16 port;
u8 proto;
u8 cidr:7;
u8 nomatch:1;
unsigned long timeout;
};
struct hash_netport6_elem { struct hash_netport6c_elem {
union nf_inet_addr ip; union nf_inet_addr ip;
__be16 port; __be16 port;
u8 proto; u8 proto;
u8 cidr:7; u8 cidr:7;
u8 nomatch:1; u8 nomatch:1;
struct ip_set_counter counter;
}; };
struct hash_netport6_telem { struct hash_netport6ct_elem {
union nf_inet_addr ip; union nf_inet_addr ip;
__be16 port; __be16 port;
u8 proto; u8 proto;
u8 cidr:7; u8 cidr:7;
u8 nomatch:1; u8 nomatch:1;
struct ip_set_counter counter;
unsigned long timeout; unsigned long timeout;
}; };
/* Common functions */
static inline bool static inline bool
hash_netport6_data_equal(const struct hash_netport6_elem *ip1, hash_netport6_data_equal(const struct hash_netport6_elem *ip1,
const struct hash_netport6_elem *ip2, const struct hash_netport6_elem *ip2,
...@@ -365,53 +338,22 @@ hash_netport6_data_equal(const struct hash_netport6_elem *ip1,
ip1->cidr == ip2->cidr; ip1->cidr == ip2->cidr;
} }
static inline bool
hash_netport6_data_isnull(const struct hash_netport6_elem *elem)
{
return elem->proto == 0;
}
static inline void
hash_netport6_data_copy(struct hash_netport6_elem *dst,
const struct hash_netport6_elem *src)
{
memcpy(dst, src, sizeof(*dst));
}
static inline void
hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags)
{
dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
}
static inline void
hash_netport6_data_reset_flags(struct hash_netport6_elem *dst, u32 *flags)
{
if (dst->nomatch) {
*flags = IPSET_FLAG_NOMATCH;
dst->nomatch = 0;
}
}
static inline int static inline int
hash_netport6_data_match(const struct hash_netport6_elem *elem) hash_netport6_do_data_match(const struct hash_netport6_elem *elem)
{ {
return elem->nomatch ? -ENOTEMPTY : 1; return elem->nomatch ? -ENOTEMPTY : 1;
} }
static inline void static inline void
hash_netport6_data_zero_out(struct hash_netport6_elem *elem) hash_netport6_data_set_flags(struct hash_netport6_elem *elem, u32 flags)
{ {
elem->proto = 0; elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
} }
static inline void static inline void
ip6_netmask(union nf_inet_addr *ip, u8 prefix) hash_netport6_data_reset_flags(struct hash_netport6_elem *elem, u8 *flags)
{ {
ip->ip6[0] &= ip_set_netmask6(prefix)[0]; swap(*flags, elem->nomatch);
ip->ip6[1] &= ip_set_netmask6(prefix)[1];
ip->ip6[2] &= ip_set_netmask6(prefix)[2];
ip->ip6[3] &= ip_set_netmask6(prefix)[3];
} }
static inline void static inline void
...@@ -440,76 +382,57 @@ hash_netport6_data_list(struct sk_buff *skb,
return 1; return 1;
} }
static bool static inline void
hash_netport6_data_tlist(struct sk_buff *skb, hash_netport6_data_next(struct hash_netport4_elem *next,
const struct hash_netport6_elem *data) const struct hash_netport6_elem *d)
{ {
const struct hash_netport6_telem *e = next->port = d->port;
(const struct hash_netport6_telem *)data;
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout))) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return 0;
nla_put_failure:
return 1;
} }
#undef MTYPE
#undef PF #undef PF
#undef HOST_MASK #undef HOST_MASK
#define MTYPE hash_netport6
#define PF 6 #define PF 6
#define HOST_MASK 128 #define HOST_MASK 128
#include <linux/netfilter/ipset/ip_set_ahash.h> #define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static inline void
hash_netport6_data_next(struct ip_set_hash *h,
const struct hash_netport6_elem *d)
{
h->next.port = d->port;
}
static int static int
hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb, hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt) enum ipset_adt adt, struct ip_set_adt_opt *opt)
{ {
const struct ip_set_hash *h = set->data; const struct hash_netport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport6_elem data = { struct hash_netport6_elem e = {
.cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1, .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1,
}; };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
if (adt == IPSET_TEST) if (adt == IPSET_TEST)
data.cidr = HOST_MASK - 1; e.cidr = HOST_MASK - 1;
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto)) &e.port, &e.proto))
return -EINVAL; return -EINVAL;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6); ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
ip6_netmask(&data.ip, data.cidr + 1); ip6_netmask(&e.ip, e.cidr + 1);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
} }
static int static int
hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[], hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{ {
const struct ip_set_hash *h = set->data; const struct hash_netport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt]; ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport6_elem data = { .cidr = HOST_MASK - 1 }; struct hash_netport6_elem e = { .cidr = HOST_MASK - 1 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
u32 port, port_to; u32 port, port_to;
u32 timeout = h->timeout;
bool with_ports = false; bool with_ports = false;
u8 cidr; u8 cidr;
int ret; int ret;
...@@ -518,7 +441,9 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO])) if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
...@@ -526,7 +451,8 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip); ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
ip_set_get_extensions(set, tb, &ext);
if (ret) if (ret)
return ret; return ret;
...@@ -534,45 +460,40 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!cidr || cidr > HOST_MASK) if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR; return -IPSET_ERR_INVALID_CIDR;
data.cidr = cidr - 1; e.cidr = cidr - 1;
} }
ip6_netmask(&data.ip, data.cidr + 1); ip6_netmask(&e.ip, e.cidr + 1);
if (tb[IPSET_ATTR_PORT]) if (tb[IPSET_ATTR_PORT])
data.port = nla_get_be16(tb[IPSET_ATTR_PORT]); e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
else else
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_PROTO]) { if (tb[IPSET_ATTR_PROTO]) {
data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(data.proto); with_ports = ip_set_proto_with_ports(e.proto);
if (data.proto == 0) if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO; return -IPSET_ERR_INVALID_PROTO;
} else } else
return -IPSET_ERR_MISSING_PROTO; return -IPSET_ERR_MISSING_PROTO;
if (!(with_ports || data.proto == IPPROTO_ICMPV6)) if (!(with_ports || e.proto == IPPROTO_ICMPV6))
data.port = 0; e.port = 0;
if (tb[IPSET_ATTR_TIMEOUT]) { if (tb[IPSET_ATTR_CADT_FLAGS]) {
if (!with_timeout(h->timeout))
return -IPSET_ERR_TIMEOUT;
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH) if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (cadt_flags << 16); flags |= (IPSET_FLAG_NOMATCH << 16);
} }
if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_eexist(ret, flags) ? 0 : ret; return ip_set_enomatch(ret, flags, adt) ? 1 :
ip_set_eexist(ret, flags) ? 0 : ret;
} }
port = ntohs(data.port); port = ntohs(e.port);
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to) if (port > port_to)
swap(port, port_to); swap(port, port_to);
...@@ -580,8 +501,8 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried) if (retried)
port = ntohs(h->next.port); port = ntohs(h->next.port);
for (; port <= port_to; port++) { for (; port <= port_to; port++) {
data.port = htons(port); e.port = htons(port);
ret = adtfn(set, &data, timeout, flags); ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags)) if (ret && !ip_set_eexist(ret, flags))
return ret; return ret;
...@@ -591,80 +512,6 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
return ret; return ret;
} }
/* Create hash:ip type of sets */
static int
hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
{
struct ip_set_hash *h;
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
size_t hsize;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_HASHSIZE]) {
hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
if (hashsize < IPSET_MIMINAL_HASHSIZE)
hashsize = IPSET_MIMINAL_HASHSIZE;
}
if (tb[IPSET_ATTR_MAXELEM])
maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
h = kzalloc(sizeof(*h)
+ sizeof(struct ip_set_hash_nets)
* (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
if (!h)
return -ENOMEM;
h->maxelem = maxelem;
get_random_bytes(&h->initval, sizeof(h->initval));
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
hsize = htable_size(hbits);
if (hsize == 0) {
kfree(h);
return -ENOMEM;
}
h->table = ip_set_alloc(hsize);
if (!h->table) {
kfree(h);
return -ENOMEM;
}
h->table->htable_bits = hbits;
set->data = h;
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = set->family == NFPROTO_IPV4
? &hash_netport4_tvariant : &hash_netport6_tvariant;
if (set->family == NFPROTO_IPV4)
hash_netport4_gc_init(set);
else
hash_netport6_gc_init(set);
} else {
set->variant = set->family == NFPROTO_IPV4
? &hash_netport4_variant : &hash_netport6_variant;
}
pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
set->name, jhash_size(h->table->htable_bits),
h->table->htable_bits, h->maxelem, set->data, h->table);
return 0;
}
static struct ip_set_type hash_netport_type __read_mostly = { static struct ip_set_type hash_netport_type __read_mostly = {
.name = "hash:net,port", .name = "hash:net,port",
.protocol = IPSET_PROTOCOL, .protocol = IPSET_PROTOCOL,
...@@ -681,6 +528,7 @@ static struct ip_set_type hash_netport_type __read_mostly = {
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 }, [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
}, },
.adt_policy = { .adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP] = { .type = NLA_NESTED },
...@@ -692,6 +540,8 @@ static struct ip_set_type hash_netport_type __read_mostly = {
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
}, },
.me = THIS_MODULE, .me = THIS_MODULE,
}; };
...
/* Copyright (C) 2008-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> /* Copyright (C) 2008-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
...@@ -13,30 +13,53 @@
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_list.h> #include <linux/netfilter/ipset/ip_set_list.h>
#define REVISION_MIN 0 #define REVISION_MIN 0
#define REVISION_MAX 0 #define REVISION_MAX 1 /* Counters support added */
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
IP_SET_MODULE_DESC("list:set", REVISION_MIN, REVISION_MAX); IP_SET_MODULE_DESC("list:set", REVISION_MIN, REVISION_MAX);
MODULE_ALIAS("ip_set_list:set"); MODULE_ALIAS("ip_set_list:set");
/* Member elements without and with timeout */ /* Member elements */
struct set_elem { struct set_elem {
ip_set_id_t id; ip_set_id_t id;
}; };
struct set_telem { struct sett_elem {
ip_set_id_t id; struct {
ip_set_id_t id;
} __attribute__ ((aligned));
unsigned long timeout;
};
struct setc_elem {
struct {
ip_set_id_t id;
} __attribute__ ((aligned));
struct ip_set_counter counter;
};
struct setct_elem {
struct {
ip_set_id_t id;
} __attribute__ ((aligned));
struct ip_set_counter counter;
unsigned long timeout; unsigned long timeout;
}; };
struct set_adt_elem {
ip_set_id_t id;
ip_set_id_t refid;
int before;
};
/* Type structure */ /* Type structure */
struct list_set { struct list_set {
size_t dsize; /* element size */ size_t dsize; /* element size */
size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
u32 size; /* size of set list array */ u32 size; /* size of set list array */
u32 timeout; /* timeout value */ u32 timeout; /* timeout value */
struct timer_list gc; /* garbage collection */ struct timer_list gc; /* garbage collection */
...@@ -49,179 +72,311 @@ list_set_elem(const struct list_set *map, u32 id)
return (struct set_elem *)((void *)map->members + id * map->dsize); return (struct set_elem *)((void *)map->members + id * map->dsize);
} }
static inline struct set_telem * #define ext_timeout(e, m) \
list_set_telem(const struct list_set *map, u32 id) (unsigned long *)((void *)(e) + (m)->offset[IPSET_OFFSET_TIMEOUT])
{ #define ext_counter(e, m) \
return (struct set_telem *)((void *)map->members + id * map->dsize); (struct ip_set_counter *)((void *)(e) + (m)->offset[IPSET_OFFSET_COUNTER])
}
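/*
 * Editor's note, illustrative only (not part of this patch): the per-set
 * offset[] array filled in list_set_create() is what lets the generic
 * ext_timeout()/ext_counter() macros above locate an extension inside
 * whichever element layout the set was created with. For a set created
 * with both counters and timeout, roughly:
 *
 *	map->offset[IPSET_OFFSET_COUNTER] = offsetof(struct setct_elem, counter);
 *	map->offset[IPSET_OFFSET_TIMEOUT] = offsetof(struct setct_elem, timeout);
 *
 * so ext_timeout(e, map) evaluates to &((struct setct_elem *)e)->timeout
 * without the list-walking code knowing the concrete element type.
 */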
static inline bool static int
list_set_timeout(const struct list_set *map, u32 id) list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
struct ip_set_adt_opt *opt, const struct ip_set_ext *ext)
{ {
const struct set_telem *elem = list_set_telem(map, id); struct list_set *map = set->data;
struct set_elem *e;
u32 i, cmdflags = opt->cmdflags;
int ret;
return ip_set_timeout_test(elem->timeout); /* Don't lookup sub-counters at all */
opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS;
if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE;
for (i = 0; i < map->size; i++) {
e = list_set_elem(map, i);
if (e->id == IPSET_INVALID_ID)
return 0;
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, map)))
continue;
ret = ip_set_test(e->id, skb, par, opt);
if (ret > 0) {
if (SET_WITH_COUNTER(set))
ip_set_update_counter(ext_counter(e, map),
ext, &opt->ext,
cmdflags);
return ret;
}
}
return 0;
} }
static inline bool static int
list_set_expired(const struct list_set *map, u32 id) list_set_kadd(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
struct ip_set_adt_opt *opt, const struct ip_set_ext *ext)
{ {
const struct set_telem *elem = list_set_telem(map, id); struct list_set *map = set->data;
struct set_elem *e;
u32 i;
int ret;
return ip_set_timeout_expired(elem->timeout); for (i = 0; i < map->size; i++) {
e = list_set_elem(map, i);
if (e->id == IPSET_INVALID_ID)
return 0;
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, map)))
continue;
ret = ip_set_add(e->id, skb, par, opt);
if (ret == 0)
return ret;
}
return 0;
} }
/* Set list without and with timeout */
static int static int
list_set_kadt(struct ip_set *set, const struct sk_buff *skb, list_set_kdel(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
enum ipset_adt adt, const struct ip_set_adt_opt *opt) struct ip_set_adt_opt *opt, const struct ip_set_ext *ext)
{ {
struct list_set *map = set->data; struct list_set *map = set->data;
struct set_elem *elem; struct set_elem *e;
u32 i; u32 i;
int ret; int ret;
for (i = 0; i < map->size; i++) { for (i = 0; i < map->size; i++) {
elem = list_set_elem(map, i); e = list_set_elem(map, i);
if (elem->id == IPSET_INVALID_ID) if (e->id == IPSET_INVALID_ID)
return 0; return 0;
if (with_timeout(map->timeout) && list_set_expired(map, i)) if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, map)))
continue; continue;
switch (adt) { ret = ip_set_del(e->id, skb, par, opt);
case IPSET_TEST: if (ret == 0)
ret = ip_set_test(elem->id, skb, par, opt); return ret;
if (ret > 0) }
return ret; return 0;
break; }
case IPSET_ADD:
ret = ip_set_add(elem->id, skb, par, opt); static int
if (ret == 0) list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
return ret; const struct xt_action_param *par,
break; enum ipset_adt adt, struct ip_set_adt_opt *opt)
case IPSET_DEL: {
ret = ip_set_del(elem->id, skb, par, opt); struct list_set *map = set->data;
if (ret == 0) struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
return ret;
break; switch (adt) {
default: case IPSET_TEST:
break; return list_set_ktest(set, skb, par, opt, &ext);
} case IPSET_ADD:
return list_set_kadd(set, skb, par, opt, &ext);
case IPSET_DEL:
return list_set_kdel(set, skb, par, opt, &ext);
default:
break;
} }
return -EINVAL; return -EINVAL;
} }
static bool static bool
id_eq(const struct list_set *map, u32 i, ip_set_id_t id) id_eq(const struct ip_set *set, u32 i, ip_set_id_t id)
{ {
const struct set_elem *elem; const struct list_set *map = set->data;
const struct set_elem *e;
if (i < map->size) { if (i >= map->size)
elem = list_set_elem(map, i); return 0;
return elem->id == id;
e = list_set_elem(map, i);
return !!(e->id == id &&
!(SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, map))));
}
static int
list_set_add(struct ip_set *set, u32 i, struct set_adt_elem *d,
const struct ip_set_ext *ext)
{
struct list_set *map = set->data;
struct set_elem *e = list_set_elem(map, i);
if (e->id != IPSET_INVALID_ID) {
if (i == map->size - 1)
/* Last element replaced: e.g. add new,before,last */
ip_set_put_byindex(e->id);
else {
struct set_elem *x = list_set_elem(map, map->size - 1);
/* Last element pushed off */
if (x->id != IPSET_INVALID_ID)
ip_set_put_byindex(x->id);
memmove(list_set_elem(map, i + 1), e,
map->dsize * (map->size - (i + 1)));
}
} }
e->id = d->id;
if (SET_WITH_TIMEOUT(set))
ip_set_timeout_set(ext_timeout(e, map), ext->timeout);
if (SET_WITH_COUNTER(set))
ip_set_init_counter(ext_counter(e, map), ext);
return 0; return 0;
} }
static bool static int
id_eq_timeout(const struct list_set *map, u32 i, ip_set_id_t id) list_set_del(struct ip_set *set, u32 i)
{ {
const struct set_elem *elem; struct list_set *map = set->data;
struct set_elem *e = list_set_elem(map, i);
if (i < map->size) { ip_set_put_byindex(e->id);
elem = list_set_elem(map, i);
return !!(elem->id == id &&
!(with_timeout(map->timeout) &&
list_set_expired(map, i)));
}
if (i < map->size - 1)
memmove(e, list_set_elem(map, i + 1),
map->dsize * (map->size - (i + 1)));
/* Last element */
e = list_set_elem(map, map->size - 1);
e->id = IPSET_INVALID_ID;
return 0; return 0;
} }
static void static void
list_elem_add(struct list_set *map, u32 i, ip_set_id_t id) set_cleanup_entries(struct ip_set *set)
{ {
struct list_set *map = set->data;
struct set_elem *e; struct set_elem *e;
u32 i;
for (; i < map->size; i++) { for (i = 0; i < map->size; i++) {
e = list_set_elem(map, i); e = list_set_elem(map, i);
swap(e->id, id); if (e->id != IPSET_INVALID_ID &&
if (e->id == IPSET_INVALID_ID) ip_set_timeout_expired(ext_timeout(e, map)))
break; list_set_del(set, i);
} }
} }
static void static int
list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id, list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
unsigned long timeout) struct ip_set_ext *mext, u32 flags)
{ {
struct set_telem *e; struct list_set *map = set->data;
struct set_adt_elem *d = value;
struct set_elem *e;
u32 i;
int ret;
for (; i < map->size; i++) { for (i = 0; i < map->size; i++) {
e = list_set_telem(map, i); e = list_set_elem(map, i);
swap(e->id, id);
swap(e->timeout, timeout);
if (e->id == IPSET_INVALID_ID) if (e->id == IPSET_INVALID_ID)
break; return 0;
else if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, map)))
continue;
else if (e->id != d->id)
continue;
if (d->before == 0)
return 1;
else if (d->before > 0)
ret = id_eq(set, i + 1, d->refid);
else
ret = i > 0 && id_eq(set, i - 1, d->refid);
return ret;
} }
return 0;
} }
static int static int
list_set_add(struct list_set *map, u32 i, ip_set_id_t id, list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
unsigned long timeout) struct ip_set_ext *mext, u32 flags)
{ {
const struct set_elem *e = list_set_elem(map, i); struct list_set *map = set->data;
struct set_adt_elem *d = value;
struct set_elem *e;
bool flag_exist = flags & IPSET_FLAG_EXIST;
u32 i, ret = 0;
if (e->id != IPSET_INVALID_ID) { /* Check already added element */
const struct set_elem *x = list_set_elem(map, map->size - 1); for (i = 0; i < map->size; i++) {
e = list_set_elem(map, i);
if (e->id == IPSET_INVALID_ID)
goto insert;
else if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, map)))
continue;
else if (e->id != d->id)
continue;
/* Last element replaced or pushed off */ if ((d->before > 1 && !id_eq(set, i + 1, d->refid)) ||
if (x->id != IPSET_INVALID_ID) (d->before < 0 &&
ip_set_put_byindex(x->id); (i == 0 || !id_eq(set, i - 1, d->refid))))
/* Before/after doesn't match */
return -IPSET_ERR_REF_EXIST;
if (!flag_exist)
/* Can't re-add */
return -IPSET_ERR_EXIST;
/* Update extensions */
if (SET_WITH_TIMEOUT(set))
ip_set_timeout_set(ext_timeout(e, map), ext->timeout);
if (SET_WITH_COUNTER(set))
ip_set_init_counter(ext_counter(e, map), ext);
/* Set is already added to the list */
ip_set_put_byindex(d->id);
return 0;
}
insert:
ret = -IPSET_ERR_LIST_FULL;
for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) {
e = list_set_elem(map, i);
if (e->id == IPSET_INVALID_ID)
ret = d->before != 0 ? -IPSET_ERR_REF_EXIST
: list_set_add(set, i, d, ext);
else if (e->id != d->refid)
continue;
else if (d->before > 0)
ret = list_set_add(set, i, d, ext);
else if (i + 1 < map->size)
ret = list_set_add(set, i + 1, d, ext);
} }
if (with_timeout(map->timeout))
list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));
else
list_elem_add(map, i, id);
return 0; return ret;
} }
static int static int
list_set_del(struct list_set *map, u32 i) list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
{ {
struct set_elem *a = list_set_elem(map, i), *b; struct list_set *map = set->data;
struct set_adt_elem *d = value;
ip_set_put_byindex(a->id); struct set_elem *e;
for (; i < map->size - 1; i++) {
b = list_set_elem(map, i + 1);
a->id = b->id;
if (with_timeout(map->timeout))
((struct set_telem *)a)->timeout =
((struct set_telem *)b)->timeout;
a = b;
if (a->id == IPSET_INVALID_ID)
break;
}
/* Last element */
a->id = IPSET_INVALID_ID;
return 0;
}
static void
cleanup_entries(struct list_set *map)
{
struct set_telem *e;
u32 i; u32 i;
for (i = 0; i < map->size; i++) { for (i = 0; i < map->size; i++) {
e = list_set_telem(map, i); e = list_set_elem(map, i);
if (e->id != IPSET_INVALID_ID && list_set_expired(map, i)) if (e->id == IPSET_INVALID_ID)
list_set_del(map, i); return d->before != 0 ? -IPSET_ERR_REF_EXIST
: -IPSET_ERR_EXIST;
else if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, map)))
continue;
else if (e->id != d->id)
continue;
if (d->before == 0)
return list_set_del(set, i);
else if (d->before > 0) {
if (!id_eq(set, i + 1, d->refid))
return -IPSET_ERR_REF_EXIST;
return list_set_del(set, i);
} else if (i == 0 || !id_eq(set, i - 1, d->refid))
return -IPSET_ERR_REF_EXIST;
else
return list_set_del(set, i);
} }
return -IPSET_ERR_EXIST;
} }
static int static int
...@@ -229,26 +384,27 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{ {
struct list_set *map = set->data; struct list_set *map = set->data;
bool with_timeout = with_timeout(map->timeout); ipset_adtfn adtfn = set->variant->adt[adt];
bool flag_exist = flags & IPSET_FLAG_EXIST; struct set_adt_elem e = { .refid = IPSET_INVALID_ID };
int before = 0; struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
u32 timeout = map->timeout;
ip_set_id_t id, refid = IPSET_INVALID_ID;
const struct set_elem *elem;
struct ip_set *s; struct ip_set *s;
u32 i;
int ret = 0; int ret = 0;
if (unlikely(!tb[IPSET_ATTR_NAME] || if (unlikely(!tb[IPSET_ATTR_NAME] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO]) if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
id = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAME]), &s); ret = ip_set_get_extensions(set, tb, &ext);
if (id == IPSET_INVALID_ID) if (ret)
return ret;
e.id = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAME]), &s);
if (e.id == IPSET_INVALID_ID)
return -IPSET_ERR_NAME; return -IPSET_ERR_NAME;
/* "Loop detection" */ /* "Loop detection" */
if (s->type->features & IPSET_TYPE_NAME) { if (s->type->features & IPSET_TYPE_NAME) {
...@@ -258,115 +414,34 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CADT_FLAGS]) { if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
before = f & IPSET_FLAG_BEFORE; e.before = f & IPSET_FLAG_BEFORE;
} }
if (before && !tb[IPSET_ATTR_NAMEREF]) { if (e.before && !tb[IPSET_ATTR_NAMEREF]) {
ret = -IPSET_ERR_BEFORE; ret = -IPSET_ERR_BEFORE;
goto finish; goto finish;
} }
if (tb[IPSET_ATTR_NAMEREF]) { if (tb[IPSET_ATTR_NAMEREF]) {
refid = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAMEREF]), e.refid = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAMEREF]),
&s); &s);
if (refid == IPSET_INVALID_ID) { if (e.refid == IPSET_INVALID_ID) {
ret = -IPSET_ERR_NAMEREF; ret = -IPSET_ERR_NAMEREF;
goto finish; goto finish;
} }
if (!before) if (!e.before)
before = -1; e.before = -1;
}
if (tb[IPSET_ATTR_TIMEOUT]) {
if (!with_timeout) {
ret = -IPSET_ERR_TIMEOUT;
goto finish;
}
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
} }
if (with_timeout && adt != IPSET_TEST) if (adt != IPSET_TEST && SET_WITH_TIMEOUT(set))
cleanup_entries(map); set_cleanup_entries(set);
switch (adt) { ret = adtfn(set, &e, &ext, &ext, flags);
case IPSET_TEST:
for (i = 0; i < map->size && !ret; i++) {
elem = list_set_elem(map, i);
if (elem->id == IPSET_INVALID_ID ||
(before != 0 && i + 1 >= map->size))
break;
else if (with_timeout && list_set_expired(map, i))
continue;
else if (before > 0 && elem->id == id)
ret = id_eq_timeout(map, i + 1, refid);
else if (before < 0 && elem->id == refid)
ret = id_eq_timeout(map, i + 1, id);
else if (before == 0 && elem->id == id)
ret = 1;
}
break;
case IPSET_ADD:
for (i = 0; i < map->size; i++) {
elem = list_set_elem(map, i);
if (elem->id != id)
continue;
if (!(with_timeout && flag_exist)) {
ret = -IPSET_ERR_EXIST;
goto finish;
} else {
struct set_telem *e = list_set_telem(map, i);
if ((before > 1 &&
!id_eq(map, i + 1, refid)) ||
(before < 0 &&
(i == 0 || !id_eq(map, i - 1, refid)))) {
ret = -IPSET_ERR_EXIST;
goto finish;
}
e->timeout = ip_set_timeout_set(timeout);
ip_set_put_byindex(id);
ret = 0;
goto finish;
}
}
ret = -IPSET_ERR_LIST_FULL;
for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) {
elem = list_set_elem(map, i);
if (elem->id == IPSET_INVALID_ID)
ret = before != 0 ? -IPSET_ERR_REF_EXIST
: list_set_add(map, i, id, timeout);
else if (elem->id != refid)
continue;
else if (before > 0)
ret = list_set_add(map, i, id, timeout);
else if (i + 1 < map->size)
ret = list_set_add(map, i + 1, id, timeout);
}
break;
case IPSET_DEL:
ret = -IPSET_ERR_EXIST;
for (i = 0; i < map->size && ret == -IPSET_ERR_EXIST; i++) {
elem = list_set_elem(map, i);
if (elem->id == IPSET_INVALID_ID) {
ret = before != 0 ? -IPSET_ERR_REF_EXIST
: -IPSET_ERR_EXIST;
break;
} else if (elem->id == id &&
(before == 0 ||
(before > 0 && id_eq(map, i + 1, refid))))
ret = list_set_del(map, i);
else if (elem->id == refid &&
before < 0 && id_eq(map, i + 1, id))
ret = list_set_del(map, i + 1);
}
break;
default:
break;
}
finish: finish:
if (refid != IPSET_INVALID_ID) if (e.refid != IPSET_INVALID_ID)
ip_set_put_byindex(refid); ip_set_put_byindex(e.refid);
if (adt != IPSET_ADD || ret) if (adt != IPSET_ADD || ret)
ip_set_put_byindex(id); ip_set_put_byindex(e.id);
return ip_set_eexist(ret, flags) ? 0 : ret; return ip_set_eexist(ret, flags) ? 0 : ret;
} }
...@@ -375,14 +450,14 @@ static void
list_set_flush(struct ip_set *set) list_set_flush(struct ip_set *set)
{ {
struct list_set *map = set->data; struct list_set *map = set->data;
struct set_elem *elem; struct set_elem *e;
u32 i; u32 i;
for (i = 0; i < map->size; i++) { for (i = 0; i < map->size; i++) {
elem = list_set_elem(map, i); e = list_set_elem(map, i);
if (elem->id != IPSET_INVALID_ID) { if (e->id != IPSET_INVALID_ID) {
ip_set_put_byindex(elem->id); ip_set_put_byindex(e->id);
elem->id = IPSET_INVALID_ID; e->id = IPSET_INVALID_ID;
} }
} }
} }
...@@ -392,7 +467,7 @@ list_set_destroy(struct ip_set *set)
{ {
struct list_set *map = set->data; struct list_set *map = set->data;
if (with_timeout(map->timeout)) if (SET_WITH_TIMEOUT(set))
del_timer_sync(&map->gc); del_timer_sync(&map->gc);
list_set_flush(set); list_set_flush(set);
kfree(map); kfree(map);
...@@ -410,8 +485,11 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
if (!nested) if (!nested)
goto nla_put_failure; goto nla_put_failure;
if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) || if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
(with_timeout(map->timeout) && (SET_WITH_TIMEOUT(set) &&
nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) || nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) ||
(SET_WITH_COUNTER(set) &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS,
htonl(IPSET_FLAG_WITH_COUNTERS))) ||
nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE, nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) + map->size * map->dsize))) htonl(sizeof(*map) + map->size * map->dsize)))
...@@ -440,7 +518,8 @@ list_set_list(const struct ip_set *set,
e = list_set_elem(map, i); e = list_set_elem(map, i);
if (e->id == IPSET_INVALID_ID) if (e->id == IPSET_INVALID_ID)
goto finish; goto finish;
if (with_timeout(map->timeout) && list_set_expired(map, i)) if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, map)))
continue; continue;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA); nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested) { if (!nested) {
...@@ -453,13 +532,14 @@ list_set_list(const struct ip_set *set,
if (nla_put_string(skb, IPSET_ATTR_NAME, if (nla_put_string(skb, IPSET_ATTR_NAME,
ip_set_name_byindex(e->id))) ip_set_name_byindex(e->id)))
goto nla_put_failure; goto nla_put_failure;
if (with_timeout(map->timeout)) { if (SET_WITH_TIMEOUT(set) &&
const struct set_telem *te = nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
(const struct set_telem *) e; htonl(ip_set_timeout_get(
__be32 to = htonl(ip_set_timeout_get(te->timeout)); ext_timeout(e, map)))))
if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, to)) goto nla_put_failure;
goto nla_put_failure; if (SET_WITH_COUNTER(set) &&
} ip_set_put_counter(skb, ext_counter(e, map)))
goto nla_put_failure;
ipset_nest_end(skb, nested); ipset_nest_end(skb, nested);
} }
finish: finish:
...@@ -485,12 +565,18 @@ list_set_same_set(const struct ip_set *a, const struct ip_set *b)
const struct list_set *y = b->data; const struct list_set *y = b->data;
return x->size == y->size && return x->size == y->size &&
x->timeout == y->timeout; x->timeout == y->timeout &&
a->extensions == b->extensions;
} }
static const struct ip_set_type_variant list_set = { static const struct ip_set_type_variant set_variant = {
.kadt = list_set_kadt, .kadt = list_set_kadt,
.uadt = list_set_uadt, .uadt = list_set_uadt,
.adt = {
[IPSET_ADD] = list_set_uadd,
[IPSET_DEL] = list_set_udel,
[IPSET_TEST] = list_set_utest,
},
.destroy = list_set_destroy, .destroy = list_set_destroy,
.flush = list_set_flush, .flush = list_set_flush,
.head = list_set_head, .head = list_set_head,
...@@ -505,7 +591,7 @@ list_set_gc(unsigned long ul_set)
struct list_set *map = set->data; struct list_set *map = set->data;
write_lock_bh(&set->lock); write_lock_bh(&set->lock);
cleanup_entries(map); set_cleanup_entries(set);
write_unlock_bh(&set->lock); write_unlock_bh(&set->lock);
map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ; map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
...@@ -513,20 +599,20 @@ list_set_gc(unsigned long ul_set)
} }
static void static void
list_set_gc_init(struct ip_set *set) list_set_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
{ {
struct list_set *map = set->data; struct list_set *map = set->data;
init_timer(&map->gc); init_timer(&map->gc);
map->gc.data = (unsigned long) set; map->gc.data = (unsigned long) set;
map->gc.function = list_set_gc; map->gc.function = gc;
map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ; map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
add_timer(&map->gc); add_timer(&map->gc);
} }
/* Create list:set type of sets */ /* Create list:set type of sets */
static bool static struct list_set *
init_list_set(struct ip_set *set, u32 size, size_t dsize, init_list_set(struct ip_set *set, u32 size, size_t dsize,
unsigned long timeout) unsigned long timeout)
{ {
...@@ -536,7 +622,7 @@ init_list_set(struct ip_set *set, u32 size, size_t dsize, ...@@ -536,7 +622,7 @@ init_list_set(struct ip_set *set, u32 size, size_t dsize,
map = kzalloc(sizeof(*map) + size * dsize, GFP_KERNEL); map = kzalloc(sizeof(*map) + size * dsize, GFP_KERNEL);
if (!map) if (!map)
return false; return NULL;
map->size = size; map->size = size;
map->dsize = dsize; map->dsize = dsize;
...@@ -548,16 +634,19 @@ init_list_set(struct ip_set *set, u32 size, size_t dsize, ...@@ -548,16 +634,19 @@ init_list_set(struct ip_set *set, u32 size, size_t dsize,
e->id = IPSET_INVALID_ID; e->id = IPSET_INVALID_ID;
} }
return true; return map;
} }
static int static int
list_set_create(struct ip_set *set, struct nlattr *tb[], u32 flags) list_set_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
{ {
-	u32 size = IP_SET_LIST_DEFAULT_SIZE;
+	struct list_set *map;
+	u32 size = IP_SET_LIST_DEFAULT_SIZE, cadt_flags = 0;
+	unsigned long timeout = 0;
-	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) ||
-		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL; return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_SIZE]) if (tb[IPSET_ATTR_SIZE])
...@@ -565,18 +654,46 @@ list_set_create(struct ip_set *set, struct nlattr *tb[], u32 flags) ...@@ -565,18 +654,46 @@ list_set_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (size < IP_SET_LIST_MIN_SIZE) if (size < IP_SET_LIST_MIN_SIZE)
size = IP_SET_LIST_MIN_SIZE; size = IP_SET_LIST_MIN_SIZE;
-	if (tb[IPSET_ATTR_TIMEOUT]) {
-		if (!init_list_set(set, size, sizeof(struct set_telem),
-				   ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT])))
-			return -ENOMEM;
-		list_set_gc_init(set);
-	} else {
-		if (!init_list_set(set, size, sizeof(struct set_elem),
-				   IPSET_NO_TIMEOUT))
-			return -ENOMEM;
-	}
-	set->variant = &list_set;
+	if (tb[IPSET_ATTR_CADT_FLAGS])
+		cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+	if (tb[IPSET_ATTR_TIMEOUT])
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	set->variant = &set_variant;
+	if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
+		set->extensions |= IPSET_EXT_COUNTER;
+		if (tb[IPSET_ATTR_TIMEOUT]) {
+			map = init_list_set(set, size,
+					    sizeof(struct setct_elem), timeout);
+			if (!map)
+				return -ENOMEM;
+			set->extensions |= IPSET_EXT_TIMEOUT;
+			map->offset[IPSET_OFFSET_TIMEOUT] =
+				offsetof(struct setct_elem, timeout);
+			map->offset[IPSET_OFFSET_COUNTER] =
+				offsetof(struct setct_elem, counter);
+			list_set_gc_init(set, list_set_gc);
+		} else {
+			map = init_list_set(set, size,
+					    sizeof(struct setc_elem), 0);
+			if (!map)
+				return -ENOMEM;
+			map->offset[IPSET_OFFSET_COUNTER] =
+				offsetof(struct setc_elem, counter);
+		}
+	} else if (tb[IPSET_ATTR_TIMEOUT]) {
+		map = init_list_set(set, size,
+				    sizeof(struct sett_elem), timeout);
+		if (!map)
+			return -ENOMEM;
+		set->extensions |= IPSET_EXT_TIMEOUT;
+		map->offset[IPSET_OFFSET_TIMEOUT] =
+			offsetof(struct sett_elem, timeout);
+		list_set_gc_init(set, list_set_gc);
+	} else {
+		map = init_list_set(set, size, sizeof(struct set_elem), 0);
+		if (!map)
+			return -ENOMEM;
+	}
return 0; return 0;
} }
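Note on the offset bookkeeping above: each element layout (setct_elem, setc_elem, sett_elem, plain set_elem) keeps its timeout/counter at a different position, and list_set_create() records that position in map->offset[]. A minimal sketch of how the ext_timeout()/ext_counter() accessors can resolve those offsets; the definitions below are illustrative, the real macros live in the ipset headers:

	/* Illustrative only: locate an extension field inside an element via
	 * the per-set offset table filled in at create time. */
	#define ext_timeout_sketch(e, map)	\
		((unsigned long *)((char *)(e) + (map)->offset[IPSET_OFFSET_TIMEOUT]))
	#define ext_counter_sketch(e, map)	\
		((struct ip_set_counter *)((char *)(e) + (map)->offset[IPSET_OFFSET_COUNTER]))

With that, the same add/del/test code can be shared by every variant; only the element size and the offset table differ.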
...@@ -592,6 +709,7 @@ static struct ip_set_type list_set_type __read_mostly = { ...@@ -592,6 +709,7 @@ static struct ip_set_type list_set_type __read_mostly = {
.create_policy = { .create_policy = {
[IPSET_ATTR_SIZE] = { .type = NLA_U32 }, [IPSET_ATTR_SIZE] = { .type = NLA_U32 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
}, },
.adt_policy = { .adt_policy = {
[IPSET_ATTR_NAME] = { .type = NLA_STRING, [IPSET_ATTR_NAME] = { .type = NLA_STRING,
...@@ -601,6 +719,8 @@ static struct ip_set_type list_set_type __read_mostly = { ...@@ -601,6 +719,8 @@ static struct ip_set_type list_set_type __read_mostly = {
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
}, },
.me = THIS_MODULE, .me = THIS_MODULE,
}; };
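The two NLA_U64 policy entries added above carry the byte/packet counters back to user space when a set is listed. A hedged sketch of what ip_set_put_counter() plausibly does with them (the counter field names and atomic types are assumptions, not copied from the tree):

	/* Sketch: emit the counter extension as big-endian 64-bit attributes. */
	static inline int ip_set_put_counter_sketch(struct sk_buff *skb,
						    const struct ip_set_counter *c)
	{
		return nla_put_net64(skb, IPSET_ATTR_BYTES,
				     cpu_to_be64((u64)atomic64_read(&c->bytes))) ||
		       nla_put_net64(skb, IPSET_ATTR_PACKETS,
				     cpu_to_be64((u64)atomic64_read(&c->packets)));
	}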
......
...@@ -36,7 +36,7 @@ sctp_manip_pkt(struct sk_buff *skb, ...@@ -36,7 +36,7 @@ sctp_manip_pkt(struct sk_buff *skb,
{ {
struct sk_buff *frag; struct sk_buff *frag;
sctp_sctphdr_t *hdr; sctp_sctphdr_t *hdr;
__be32 crc32; __u32 crc32;
if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
return false; return false;
...@@ -55,8 +55,7 @@ sctp_manip_pkt(struct sk_buff *skb, ...@@ -55,8 +55,7 @@ sctp_manip_pkt(struct sk_buff *skb,
skb_walk_frags(skb, frag) skb_walk_frags(skb, frag)
crc32 = sctp_update_cksum((u8 *)frag->data, skb_headlen(frag), crc32 = sctp_update_cksum((u8 *)frag->data, skb_headlen(frag),
crc32); crc32);
crc32 = sctp_end_cksum(crc32); hdr->checksum = sctp_end_cksum(crc32);
hdr->checksum = crc32;
return true; return true;
} }
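For reference, this is the sctp_end_cksum() interface change mentioned in the merge description: the running CRC32c stays a plain __u32 and only the final fold yields the on-wire little-endian value. A sketch of what the adjusted helper plausibly looks like (the authoritative definition lives in include/net/sctp/checksum.h):

	/* Sketch: fold the accumulated CRC32c and return the __le32 wire format,
	 * so the caller can assign it to hdr->checksum without a cast. */
	static inline __le32 sctp_end_cksum_sketch(__u32 crc32)
	{
		return cpu_to_le32(~crc32);
	}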
......
...@@ -45,7 +45,7 @@ void nf_unregister_queue_handler(void) ...@@ -45,7 +45,7 @@ void nf_unregister_queue_handler(void)
} }
EXPORT_SYMBOL(nf_unregister_queue_handler); EXPORT_SYMBOL(nf_unregister_queue_handler);
static void nf_queue_entry_release_refs(struct nf_queue_entry *entry) void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{ {
/* Release those devices we held, or Alexey will kill me. */ /* Release those devices we held, or Alexey will kill me. */
if (entry->indev) if (entry->indev)
...@@ -65,12 +65,41 @@ static void nf_queue_entry_release_refs(struct nf_queue_entry *entry) ...@@ -65,12 +65,41 @@ static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
/* Drop reference to owner of hook which queued us. */ /* Drop reference to owner of hook which queued us. */
module_put(entry->elem->owner); module_put(entry->elem->owner);
} }
EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
/* Bump dev refs so they don't vanish while packet is out */
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
if (!try_module_get(entry->elem->owner))
return false;
if (entry->indev)
dev_hold(entry->indev);
if (entry->outdev)
dev_hold(entry->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
if (entry->skb->nf_bridge) {
struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;
struct net_device *physdev;
physdev = nf_bridge->physindev;
if (physdev)
dev_hold(physdev);
physdev = nf_bridge->physoutdev;
if (physdev)
dev_hold(physdev);
}
#endif
return true;
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
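nf_queue_entry_get_refs()/nf_queue_entry_release_refs() are exported as a pair so that nfnetlink_queue can duplicate a queue entry per GSO segment; the invariant is simply that every successful get is balanced by a release (or by the verdict path in nf_reinject()). A hedged usage sketch, mirroring the nf_queue_entry_dup()/free_entry() helpers further down in this series:

	/* Illustrative pairing for a duplicated entry; error handling trimmed,
	 * 'entry' is assumed to be fully initialised by __nf_queue(). */
	struct nf_queue_entry *dup = kmemdup(entry, entry->size, GFP_ATOMIC);

	if (dup && !nf_queue_entry_get_refs(dup)) {
		kfree(dup);
		dup = NULL;
	}
	/* ... if the duplicate is later dropped without being reinjected ... */
	if (dup) {
		nf_queue_entry_release_refs(dup);
		kfree(dup);
	}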
/* /*
* Any packet that leaves via this function must come back * Any packet that leaves via this function must come back
* through nf_reinject(). * through nf_reinject().
*/ */
static int __nf_queue(struct sk_buff *skb, int nf_queue(struct sk_buff *skb,
struct nf_hook_ops *elem, struct nf_hook_ops *elem,
u_int8_t pf, unsigned int hook, u_int8_t pf, unsigned int hook,
struct net_device *indev, struct net_device *indev,
...@@ -80,10 +109,6 @@ static int __nf_queue(struct sk_buff *skb, ...@@ -80,10 +109,6 @@ static int __nf_queue(struct sk_buff *skb,
{ {
int status = -ENOENT; int status = -ENOENT;
struct nf_queue_entry *entry = NULL; struct nf_queue_entry *entry = NULL;
#ifdef CONFIG_BRIDGE_NETFILTER
struct net_device *physindev;
struct net_device *physoutdev;
#endif
const struct nf_afinfo *afinfo; const struct nf_afinfo *afinfo;
const struct nf_queue_handler *qh; const struct nf_queue_handler *qh;
...@@ -114,28 +139,13 @@ static int __nf_queue(struct sk_buff *skb, ...@@ -114,28 +139,13 @@ static int __nf_queue(struct sk_buff *skb,
.indev = indev, .indev = indev,
.outdev = outdev, .outdev = outdev,
.okfn = okfn, .okfn = okfn,
+		.size	= sizeof(*entry) + afinfo->route_key_size,
	};
-	/* If it's going away, ignore hook. */
-	if (!try_module_get(entry->elem->owner)) {
+	if (!nf_queue_entry_get_refs(entry)) {
		status = -ECANCELED;
		goto err_unlock;
	}
/* Bump dev refs so they don't vanish while packet is out */
if (indev)
dev_hold(indev);
if (outdev)
dev_hold(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
if (skb->nf_bridge) {
physindev = skb->nf_bridge->physindev;
if (physindev)
dev_hold(physindev);
physoutdev = skb->nf_bridge->physoutdev;
if (physoutdev)
dev_hold(physoutdev);
}
#endif
skb_dst_force(skb); skb_dst_force(skb);
afinfo->saveroute(skb, entry); afinfo->saveroute(skb, entry);
status = qh->outfn(entry, queuenum); status = qh->outfn(entry, queuenum);
...@@ -156,87 +166,6 @@ static int __nf_queue(struct sk_buff *skb, ...@@ -156,87 +166,6 @@ static int __nf_queue(struct sk_buff *skb,
return status; return status;
} }
#ifdef CONFIG_BRIDGE_NETFILTER
/* When called from bridge netfilter, skb->data must point to MAC header
* before calling skb_gso_segment(). Else, original MAC header is lost
* and segmented skbs will be sent to wrong destination.
*/
static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
{
if (skb->nf_bridge)
__skb_push(skb, skb->network_header - skb->mac_header);
}
static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
{
if (skb->nf_bridge)
__skb_pull(skb, skb->network_header - skb->mac_header);
}
#else
#define nf_bridge_adjust_skb_data(s) do {} while (0)
#define nf_bridge_adjust_segmented_data(s) do {} while (0)
#endif
int nf_queue(struct sk_buff *skb,
struct nf_hook_ops *elem,
u_int8_t pf, unsigned int hook,
struct net_device *indev,
struct net_device *outdev,
int (*okfn)(struct sk_buff *),
unsigned int queuenum)
{
struct sk_buff *segs;
int err = -EINVAL;
unsigned int queued;
if (!skb_is_gso(skb))
return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
queuenum);
switch (pf) {
case NFPROTO_IPV4:
skb->protocol = htons(ETH_P_IP);
break;
case NFPROTO_IPV6:
skb->protocol = htons(ETH_P_IPV6);
break;
}
nf_bridge_adjust_skb_data(skb);
segs = skb_gso_segment(skb, 0);
/* Does not use PTR_ERR to limit the number of error codes that can be
* returned by nf_queue. For instance, callers rely on -ECANCELED to mean
* 'ignore this hook'.
*/
if (IS_ERR(segs))
goto out_err;
queued = 0;
err = 0;
do {
struct sk_buff *nskb = segs->next;
segs->next = NULL;
if (err == 0) {
nf_bridge_adjust_segmented_data(segs);
err = __nf_queue(segs, elem, pf, hook, indev,
outdev, okfn, queuenum);
}
if (err == 0)
queued++;
else
kfree_skb(segs);
segs = nskb;
} while (segs);
if (queued) {
kfree_skb(skb);
return 0;
}
out_err:
nf_bridge_adjust_segmented_data(skb);
return err;
}
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{ {
struct sk_buff *skb = entry->skb; struct sk_buff *skb = entry->skb;
...@@ -276,9 +205,9 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) ...@@ -276,9 +205,9 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
local_bh_enable(); local_bh_enable();
break; break;
case NF_QUEUE: case NF_QUEUE:
err = __nf_queue(skb, elem, entry->pf, entry->hook, err = nf_queue(skb, elem, entry->pf, entry->hook,
entry->indev, entry->outdev, entry->okfn, entry->indev, entry->outdev, entry->okfn,
verdict >> NF_VERDICT_QBITS); verdict >> NF_VERDICT_QBITS);
if (err < 0) { if (err < 0) {
if (err == -ECANCELED) if (err == -ECANCELED)
goto next_hook; goto next_hook;
......
...@@ -272,6 +272,18 @@ nfqnl_zcopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen) ...@@ -272,6 +272,18 @@ nfqnl_zcopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
skb_shinfo(to)->nr_frags = j; skb_shinfo(to)->nr_frags = j;
} }
static int nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet)
{
__u32 flags = 0;
if (packet->ip_summed == CHECKSUM_PARTIAL)
flags = NFQA_SKB_CSUMNOTREADY;
if (skb_is_gso(packet))
flags |= NFQA_SKB_GSO;
return flags ? nla_put_be32(nlskb, NFQA_SKB_INFO, htonl(flags)) : 0;
}
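On the receiving side, the new NFQA_SKB_INFO attribute tells user space whether the payload is a GSO super-packet and whether its checksum is still pending. A hedged sketch of how a libmnl-based consumer could read it (attr[] is assumed to have been filled by the caller's mnl_attr_parse() callback):

	/* Sketch: interpret the skbinfo flags put by nfqnl_put_packet_info(). */
	uint32_t skbinfo = attr[NFQA_SKB_INFO] ?
			   ntohl(mnl_attr_get_u32(attr[NFQA_SKB_INFO])) : 0;

	if (skbinfo & NFQA_SKB_CSUMNOTREADY)
		; /* checksum not yet computed; fill it in before trusting/forwarding */
	if (skbinfo & NFQA_SKB_GSO)
		; /* aggregated packet: may exceed the MTU of the real device */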
static struct sk_buff * static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue, nfqnl_build_packet_message(struct nfqnl_instance *queue,
struct nf_queue_entry *entry, struct nf_queue_entry *entry,
...@@ -301,6 +313,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, ...@@ -301,6 +313,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
#endif #endif
+ nla_total_size(sizeof(u_int32_t)) /* mark */ + nla_total_size(sizeof(u_int32_t)) /* mark */
+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw)) + nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
+ nla_total_size(sizeof(u_int32_t)) /* skbinfo */
+ nla_total_size(sizeof(u_int32_t)); /* cap_len */ + nla_total_size(sizeof(u_int32_t)); /* cap_len */
if (entskb->tstamp.tv64) if (entskb->tstamp.tv64)
...@@ -314,7 +327,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, ...@@ -314,7 +327,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
break; break;
	case NFQNL_COPY_PACKET:
-		if (entskb->ip_summed == CHECKSUM_PARTIAL &&
+		if (!(queue->flags & NFQA_CFG_F_GSO) &&
+		    entskb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(entskb))
			return NULL;
...@@ -454,6 +468,9 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, ...@@ -454,6 +468,9 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
if (cap_len > 0 && nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len))) if (cap_len > 0 && nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
goto nla_put_failure; goto nla_put_failure;
if (nfqnl_put_packet_info(skb, entskb))
goto nla_put_failure;
if (data_len) { if (data_len) {
struct nlattr *nla; struct nlattr *nla;
...@@ -477,28 +494,13 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, ...@@ -477,28 +494,13 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
} }
static int static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
struct nf_queue_entry *entry)
{ {
struct sk_buff *nskb; struct sk_buff *nskb;
struct nfqnl_instance *queue;
int err = -ENOBUFS; int err = -ENOBUFS;
__be32 *packet_id_ptr; __be32 *packet_id_ptr;
int failopen = 0; int failopen = 0;
struct net *net = dev_net(entry->indev ?
entry->indev : entry->outdev);
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
/* rcu_read_lock()ed by nf_hook_slow() */
queue = instance_lookup(q, queuenum);
if (!queue) {
err = -ESRCH;
goto err_out;
}
if (queue->copy_mode == NFQNL_COPY_NONE) {
err = -EINVAL;
goto err_out;
}
nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr); nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr);
if (nskb == NULL) { if (nskb == NULL) {
...@@ -547,6 +549,141 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) ...@@ -547,6 +549,141 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
return err; return err;
} }
static struct nf_queue_entry *
nf_queue_entry_dup(struct nf_queue_entry *e)
{
struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
if (entry) {
if (nf_queue_entry_get_refs(entry))
return entry;
kfree(entry);
}
return NULL;
}
#ifdef CONFIG_BRIDGE_NETFILTER
/* When called from bridge netfilter, skb->data must point to MAC header
* before calling skb_gso_segment(). Else, original MAC header is lost
* and segmented skbs will be sent to wrong destination.
*/
static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
{
if (skb->nf_bridge)
__skb_push(skb, skb->network_header - skb->mac_header);
}
static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
{
if (skb->nf_bridge)
__skb_pull(skb, skb->network_header - skb->mac_header);
}
#else
#define nf_bridge_adjust_skb_data(s) do {} while (0)
#define nf_bridge_adjust_segmented_data(s) do {} while (0)
#endif
static void free_entry(struct nf_queue_entry *entry)
{
nf_queue_entry_release_refs(entry);
kfree(entry);
}
static int
__nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
struct sk_buff *skb, struct nf_queue_entry *entry)
{
int ret = -ENOMEM;
struct nf_queue_entry *entry_seg;
nf_bridge_adjust_segmented_data(skb);
if (skb->next == NULL) { /* last packet, no need to copy entry */
struct sk_buff *gso_skb = entry->skb;
entry->skb = skb;
ret = __nfqnl_enqueue_packet(net, queue, entry);
if (ret)
entry->skb = gso_skb;
return ret;
}
skb->next = NULL;
entry_seg = nf_queue_entry_dup(entry);
if (entry_seg) {
entry_seg->skb = skb;
ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
if (ret)
free_entry(entry_seg);
}
return ret;
}
static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
unsigned int queued;
struct nfqnl_instance *queue;
struct sk_buff *skb, *segs;
int err = -ENOBUFS;
struct net *net = dev_net(entry->indev ?
entry->indev : entry->outdev);
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
/* rcu_read_lock()ed by nf_hook_slow() */
queue = instance_lookup(q, queuenum);
if (!queue)
return -ESRCH;
if (queue->copy_mode == NFQNL_COPY_NONE)
return -EINVAL;
if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(entry->skb))
return __nfqnl_enqueue_packet(net, queue, entry);
skb = entry->skb;
switch (entry->pf) {
case NFPROTO_IPV4:
skb->protocol = htons(ETH_P_IP);
break;
case NFPROTO_IPV6:
skb->protocol = htons(ETH_P_IPV6);
break;
}
nf_bridge_adjust_skb_data(skb);
segs = skb_gso_segment(skb, 0);
/* Does not use PTR_ERR to limit the number of error codes that can be
* returned by nf_queue. For instance, callers rely on -ECANCELED to
* mean 'ignore this hook'.
*/
if (IS_ERR(segs))
goto out_err;
queued = 0;
err = 0;
do {
struct sk_buff *nskb = segs->next;
if (err == 0)
err = __nfqnl_enqueue_packet_gso(net, queue,
segs, entry);
if (err == 0)
queued++;
else
kfree_skb(segs);
segs = nskb;
} while (segs);
if (queued) {
if (err) /* some segments are already queued */
free_entry(entry);
kfree_skb(skb);
return 0;
}
out_err:
nf_bridge_adjust_segmented_data(skb);
return err;
}
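None of the GSO handling above changes behaviour unless user space opts in by setting NFQA_CFG_F_GSO on the queue; without the flag, packets are still software-segmented (and checksummed) before delivery, as before. A hedged sketch of enabling it, assuming a libnetfilter_queue version that exposes nfq_set_queue_flags(); otherwise the same can be done with a raw NFQA_CFG_FLAGS/NFQA_CFG_MASK config message:

	/* Sketch: ask the kernel to deliver GRO/GSO super-packets as-is. */
	#include <libnetfilter_queue/libnetfilter_queue.h>
	#include <linux/netfilter/nfnetlink_queue.h>

	static int enable_gso_delivery(struct nfq_q_handle *qh)
	{
		return nfq_set_queue_flags(qh, NFQA_CFG_F_GSO, NFQA_CFG_F_GSO);
	}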
static int static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff) nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
{ {
......
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu> /* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
* Patrick Schaaf <bof@bof.de> * Patrick Schaaf <bof@bof.de>
* Martin Josefsson <gandalf@wlug.westbo.se> * Martin Josefsson <gandalf@wlug.westbo.se>
* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
...@@ -30,7 +30,7 @@ MODULE_ALIAS("ip6t_SET"); ...@@ -30,7 +30,7 @@ MODULE_ALIAS("ip6t_SET");
static inline int static inline int
match_set(ip_set_id_t index, const struct sk_buff *skb, match_set(ip_set_id_t index, const struct sk_buff *skb,
const struct xt_action_param *par, const struct xt_action_param *par,
const struct ip_set_adt_opt *opt, int inv) struct ip_set_adt_opt *opt, int inv)
{ {
if (ip_set_test(index, skb, par, opt)) if (ip_set_test(index, skb, par, opt))
inv = !inv; inv = !inv;
...@@ -38,20 +38,12 @@ match_set(ip_set_id_t index, const struct sk_buff *skb, ...@@ -38,20 +38,12 @@ match_set(ip_set_id_t index, const struct sk_buff *skb,
} }
#define ADT_OPT(n, f, d, fs, cfs, t) \ #define ADT_OPT(n, f, d, fs, cfs, t) \
const struct ip_set_adt_opt n = { \
.family = f, \
.dim = d, \
.flags = fs, \
.cmdflags = cfs, \
.timeout = t, \
}
#define ADT_MOPT(n, f, d, fs, cfs, t) \
struct ip_set_adt_opt n = { \ struct ip_set_adt_opt n = { \
.family = f, \ .family = f, \
.dim = d, \ .dim = d, \
.flags = fs, \ .flags = fs, \
.cmdflags = cfs, \ .cmdflags = cfs, \
.timeout = t, \ .ext.timeout = t, \
} }
/* Revision 0 interface: backward compatible with netfilter/iptables */ /* Revision 0 interface: backward compatible with netfilter/iptables */
...@@ -197,6 +189,9 @@ set_match_v1(const struct sk_buff *skb, struct xt_action_param *par) ...@@ -197,6 +189,9 @@ set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
ADT_OPT(opt, par->family, info->match_set.dim, ADT_OPT(opt, par->family, info->match_set.dim,
info->match_set.flags, 0, UINT_MAX); info->match_set.flags, 0, UINT_MAX);
if (opt.flags & IPSET_RETURN_NOMATCH)
opt.cmdflags |= IPSET_FLAG_RETURN_NOMATCH;
return match_set(info->match_set.index, skb, par, &opt, return match_set(info->match_set.index, skb, par, &opt,
info->match_set.flags & IPSET_INV_MATCH); info->match_set.flags & IPSET_INV_MATCH);
} }
...@@ -305,15 +300,15 @@ static unsigned int ...@@ -305,15 +300,15 @@ static unsigned int
set_target_v2(struct sk_buff *skb, const struct xt_action_param *par) set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
{ {
const struct xt_set_info_target_v2 *info = par->targinfo; const struct xt_set_info_target_v2 *info = par->targinfo;
ADT_MOPT(add_opt, par->family, info->add_set.dim, ADT_OPT(add_opt, par->family, info->add_set.dim,
info->add_set.flags, info->flags, info->timeout); info->add_set.flags, info->flags, info->timeout);
ADT_OPT(del_opt, par->family, info->del_set.dim, ADT_OPT(del_opt, par->family, info->del_set.dim,
info->del_set.flags, 0, UINT_MAX); info->del_set.flags, 0, UINT_MAX);
/* Normalize to fit into jiffies */ /* Normalize to fit into jiffies */
if (add_opt.timeout != IPSET_NO_TIMEOUT && if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
add_opt.timeout > UINT_MAX/MSEC_PER_SEC) add_opt.ext.timeout > UINT_MAX/MSEC_PER_SEC)
add_opt.timeout = UINT_MAX/MSEC_PER_SEC; add_opt.ext.timeout = UINT_MAX/MSEC_PER_SEC;
if (info->add_set.index != IPSET_INVALID_ID) if (info->add_set.index != IPSET_INVALID_ID)
ip_set_add(info->add_set.index, skb, par, &add_opt); ip_set_add(info->add_set.index, skb, par, &add_opt);
if (info->del_set.index != IPSET_INVALID_ID) if (info->del_set.index != IPSET_INVALID_ID)
...@@ -325,6 +320,52 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par) ...@@ -325,6 +320,52 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
#define set_target_v2_checkentry set_target_v1_checkentry #define set_target_v2_checkentry set_target_v1_checkentry
#define set_target_v2_destroy set_target_v1_destroy #define set_target_v2_destroy set_target_v1_destroy
/* Revision 3 match */
static bool
match_counter(u64 counter, const struct ip_set_counter_match *info)
{
switch (info->op) {
case IPSET_COUNTER_NONE:
return true;
case IPSET_COUNTER_EQ:
return counter == info->value;
case IPSET_COUNTER_NE:
return counter != info->value;
case IPSET_COUNTER_LT:
return counter < info->value;
case IPSET_COUNTER_GT:
return counter > info->value;
}
return false;
}
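The switch above is the whole of the new counter matching: ip_set_test() copies the matched element's packet/byte counters into opt.ext, and revision 3 of the match then filters on them. A standalone model of the operator semantics (the enum values and two-field struct mirror the diff; the harness itself is illustrative):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	enum { COUNTER_NONE, COUNTER_EQ, COUNTER_NE, COUNTER_LT, COUNTER_GT };

	struct counter_match { uint8_t op; uint64_t value; };

	static bool counter_matches(uint64_t counter, const struct counter_match *m)
	{
		switch (m->op) {
		case COUNTER_NONE: return true;		/* no constraint configured */
		case COUNTER_EQ:   return counter == m->value;
		case COUNTER_NE:   return counter != m->value;
		case COUNTER_LT:   return counter <  m->value;
		case COUNTER_GT:   return counter >  m->value;
		}
		return false;
	}

	int main(void)
	{
		struct counter_match gt10 = { COUNTER_GT, 10 };

		/* an element seen 25 times matches "packets > 10"; one seen 7 times does not */
		printf("%d %d\n", counter_matches(25, &gt10), counter_matches(7, &gt10));
		return 0;
	}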
static bool
set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_set_info_match_v3 *info = par->matchinfo;
ADT_OPT(opt, par->family, info->match_set.dim,
info->match_set.flags, info->flags, UINT_MAX);
int ret;
if (info->packets.op != IPSET_COUNTER_NONE ||
info->bytes.op != IPSET_COUNTER_NONE)
opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
ret = match_set(info->match_set.index, skb, par, &opt,
info->match_set.flags & IPSET_INV_MATCH);
if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
return ret;
if (!match_counter(opt.ext.packets, &info->packets))
return 0;
return match_counter(opt.ext.bytes, &info->bytes);
}
#define set_match_v3_checkentry set_match_v1_checkentry
#define set_match_v3_destroy set_match_v1_destroy
static struct xt_match set_matches[] __read_mostly = { static struct xt_match set_matches[] __read_mostly = {
{ {
.name = "set", .name = "set",
...@@ -377,6 +418,27 @@ static struct xt_match set_matches[] __read_mostly = { ...@@ -377,6 +418,27 @@ static struct xt_match set_matches[] __read_mostly = {
.destroy = set_match_v1_destroy, .destroy = set_match_v1_destroy,
.me = THIS_MODULE .me = THIS_MODULE
}, },
/* counters support: update, match */
{
.name = "set",
.family = NFPROTO_IPV4,
.revision = 3,
.match = set_match_v3,
.matchsize = sizeof(struct xt_set_info_match_v3),
.checkentry = set_match_v3_checkentry,
.destroy = set_match_v3_destroy,
.me = THIS_MODULE
},
{
.name = "set",
.family = NFPROTO_IPV6,
.revision = 3,
.match = set_match_v3,
.matchsize = sizeof(struct xt_set_info_match_v3),
.checkentry = set_match_v3_checkentry,
.destroy = set_match_v3_destroy,
.me = THIS_MODULE
},
}; };
static struct xt_target set_targets[] __read_mostly = { static struct xt_target set_targets[] __read_mostly = {
......
...@@ -83,7 +83,7 @@ static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em, ...@@ -83,7 +83,7 @@ static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em,
opt.dim = set->dim; opt.dim = set->dim;
opt.flags = set->flags; opt.flags = set->flags;
opt.cmdflags = 0; opt.cmdflags = 0;
opt.timeout = ~0u; opt.ext.timeout = ~0u;
network_offset = skb_network_offset(skb); network_offset = skb_network_offset(skb);
skb_pull(skb, network_offset); skb_pull(skb, network_offset);
......