Commit 9a255a06 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following batch contains Netfilter updates for net-next:

1) Split flowtable workqueues per event type, from Oz Shlomo.

2) Fix fall-through warnings for clang, from Gustavo A. R. Silva.

3) Remove unused declaration in conntrack, from YueHaibing.

4) Consolidate skb_try_make_writable() in the flowtable datapath,
   simplifying some of the existing codebase.

5) Call dst_check() to fall back to static classic forwarding path.

6) Update table flags from commit phase.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents add2d736 0ce7cf41
@@ -4,7 +4,4 @@
 
 extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
 
-#include <linux/sysctl.h>
-extern struct ctl_table nf_ct_ipv6_sysctl_table[];
-
 #endif /* _NF_CONNTRACK_IPV6_H*/
@@ -86,8 +86,8 @@ static inline bool nf_flowtable_hw_offload(struct nf_flowtable *flowtable)
 enum flow_offload_tuple_dir {
 	FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL,
 	FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY,
-	FLOW_OFFLOAD_DIR_MAX = IP_CT_DIR_MAX
 };
+#define FLOW_OFFLOAD_DIR_MAX	IP_CT_DIR_MAX
 
 struct flow_offload_tuple {
 	union {
@@ -229,12 +229,12 @@ void nf_flow_table_free(struct nf_flowtable *flow_table);
 
 void flow_offload_teardown(struct flow_offload *flow);
 
-int nf_flow_snat_port(const struct flow_offload *flow,
-		      struct sk_buff *skb, unsigned int thoff,
-		      u8 protocol, enum flow_offload_tuple_dir dir);
-int nf_flow_dnat_port(const struct flow_offload *flow,
-		      struct sk_buff *skb, unsigned int thoff,
-		      u8 protocol, enum flow_offload_tuple_dir dir);
+void nf_flow_snat_port(const struct flow_offload *flow,
+		       struct sk_buff *skb, unsigned int thoff,
+		       u8 protocol, enum flow_offload_tuple_dir dir);
+void nf_flow_dnat_port(const struct flow_offload *flow,
+		       struct sk_buff *skb, unsigned int thoff,
+		       u8 protocol, enum flow_offload_tuple_dir dir);
 
 struct flow_ports {
 	__be16 source, dest;
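With FLOW_OFFLOAD_DIR_MAX demoted from an enumerator to a macro, a switch over enum flow_offload_tuple_dir now covers every value of the type, which is what lets the snat/dnat helpers further down drop their "default: return -1;" arms and become void. A minimal standalone illustration of the effect (the demo_* names are hypothetical, not from the patch):

/* Illustrative only: a sentinel kept inside the enum forces either a
 * default arm or a -Wswitch warning; kept outside as a macro it does not.
 */
enum demo_dir {
	DEMO_DIR_ORIGINAL,
	DEMO_DIR_REPLY,
};
#define DEMO_DIR_MAX	2

static const char *demo_dir_name(enum demo_dir dir)
{
	switch (dir) {
	case DEMO_DIR_ORIGINAL:
		return "original";
	case DEMO_DIR_REPLY:
		return "reply";
	}
	return "unreachable";	/* every enumerator is handled above */
}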
@@ -1498,13 +1498,16 @@ struct nft_trans_chain {
 
 struct nft_trans_table {
 	bool				update;
-	bool				enable;
+	u8				state;
+	u32				flags;
 };
 
 #define nft_trans_table_update(trans)	\
 	(((struct nft_trans_table *)trans->data)->update)
-#define nft_trans_table_enable(trans)	\
-	(((struct nft_trans_table *)trans->data)->enable)
+#define nft_trans_table_state(trans)	\
+	(((struct nft_trans_table *)trans->data)->state)
+#define nft_trans_table_flags(trans)	\
+	(((struct nft_trans_table *)trans->data)->flags)
 
 struct nft_trans_elem {
 	struct nft_set			*set;
@@ -397,6 +397,7 @@ dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
 			msg = "not picking up existing connection ";
 			goto out_invalid;
 		}
+		break;
 	case CT_DCCP_REQUEST:
 		break;
 	case CT_DCCP_INVALID:
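The added break terminates the preceding case arm explicitly instead of letting it fall into CT_DCCP_REQUEST, whose only statement is a break anyway, so behaviour is unchanged and clang's -Wimplicit-fallthrough is satisfied. A minimal reproduction of the diagnostic outside the kernel (hypothetical function, not from the patch):

/* Compile with: clang -Wall -Wimplicit-fallthrough -c demo.c */
int demo_classify(int state, int loose)
{
	int ret = 0;

	switch (state) {
	default:
		if (!loose)
			return -1;
		break;	/* without this, clang warns about falling into case 1 */
	case 1:
		break;
	case 2:
		ret = 2;
		break;
	}
	return ret;
}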
@@ -389,29 +389,20 @@ static void nf_flow_offload_work_gc(struct work_struct *work)
 	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
 }
 
-static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
-				__be16 port, __be16 new_port)
+static void nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
+				 __be16 port, __be16 new_port)
 {
 	struct tcphdr *tcph;
 
-	if (skb_try_make_writable(skb, thoff + sizeof(*tcph)))
-		return -1;
-
 	tcph = (void *)(skb_network_header(skb) + thoff);
 	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
-
-	return 0;
 }
 
-static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
-				__be16 port, __be16 new_port)
+static void nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
+				 __be16 port, __be16 new_port)
 {
 	struct udphdr *udph;
 
-	if (skb_try_make_writable(skb, thoff + sizeof(*udph)))
-		return -1;
-
 	udph = (void *)(skb_network_header(skb) + thoff);
 	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 		inet_proto_csum_replace2(&udph->check, skb, port,
@@ -419,37 +410,28 @@ static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
 		if (!udph->check)
 			udph->check = CSUM_MANGLED_0;
 	}
-
-	return 0;
 }
 
-static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
-			    u8 protocol, __be16 port, __be16 new_port)
+static void nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
+			     u8 protocol, __be16 port, __be16 new_port)
 {
 	switch (protocol) {
 	case IPPROTO_TCP:
-		if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0)
-			return NF_DROP;
+		nf_flow_nat_port_tcp(skb, thoff, port, new_port);
 		break;
 	case IPPROTO_UDP:
-		if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0)
-			return NF_DROP;
+		nf_flow_nat_port_udp(skb, thoff, port, new_port);
 		break;
 	}
-
-	return 0;
 }
 
-int nf_flow_snat_port(const struct flow_offload *flow,
-		      struct sk_buff *skb, unsigned int thoff,
-		      u8 protocol, enum flow_offload_tuple_dir dir)
+void nf_flow_snat_port(const struct flow_offload *flow,
+		       struct sk_buff *skb, unsigned int thoff,
+		       u8 protocol, enum flow_offload_tuple_dir dir)
 {
 	struct flow_ports *hdr;
 	__be16 port, new_port;
 
-	if (skb_try_make_writable(skb, thoff + sizeof(*hdr)))
-		return -1;
-
 	hdr = (void *)(skb_network_header(skb) + thoff);
 
 	switch (dir) {
@@ -463,24 +445,19 @@ int nf_flow_snat_port(const struct flow_offload *flow,
 		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
 		hdr->dest = new_port;
 		break;
-	default:
-		return -1;
 	}
 
-	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
+	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
 }
 EXPORT_SYMBOL_GPL(nf_flow_snat_port);
 
-int nf_flow_dnat_port(const struct flow_offload *flow,
-		      struct sk_buff *skb, unsigned int thoff,
-		      u8 protocol, enum flow_offload_tuple_dir dir)
+void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb,
+		       unsigned int thoff, u8 protocol,
+		       enum flow_offload_tuple_dir dir)
 {
 	struct flow_ports *hdr;
 	__be16 port, new_port;
 
-	if (skb_try_make_writable(skb, thoff + sizeof(*hdr)))
-		return -1;
-
 	hdr = (void *)(skb_network_header(skb) + thoff);
 
 	switch (dir) {
@@ -494,11 +471,9 @@ int nf_flow_dnat_port(const struct flow_offload *flow,
 		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
 		hdr->source = new_port;
 		break;
-	default:
-		return -1;
 	}
 
-	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
+	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
 }
 EXPORT_SYMBOL_GPL(nf_flow_dnat_port);
This diff is collapsed.
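The collapsed diff is where the datapath side of points 4) and 5) lands: the per-helper skb_try_make_writable() calls removed above are replaced by a single writability check before any mangling, and dst_check() is used to detect a stale cached route and hand the packet back to the classic forwarding path. A rough sketch of that shape, not taken from the patch (the function name, the thoff/mangle_len parameters and the zero cookie are illustrative assumptions):

#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <net/dst.h>

/* Hypothetical hook fragment, for illustration only. */
static unsigned int nf_flow_hook_sketch(struct sk_buff *skb,
					struct dst_entry *cached_dst,
					unsigned int thoff,
					unsigned int mangle_len)
{
	/* 5) cached route no longer valid: let the packet take the
	 *    classic forwarding path instead of the fast path
	 */
	if (!dst_check(cached_dst, 0))
		return NF_ACCEPT;

	/* 4) make the headers writable once here, rather than in every
	 *    nf_flow_*nat_port() helper as before this series
	 */
	if (skb_try_make_writable(skb, thoff + mangle_len))
		return NF_DROP;

	/* ... port/address rewriting helpers run unconditionally ... */
	return NF_ACCEPT;
}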
@@ -13,7 +13,9 @@
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_tuple.h>
 
-static struct workqueue_struct *nf_flow_offload_wq;
+static struct workqueue_struct *nf_flow_offload_add_wq;
+static struct workqueue_struct *nf_flow_offload_del_wq;
+static struct workqueue_struct *nf_flow_offload_stats_wq;
 
 struct flow_offload_work {
 	struct list_head list;
@@ -826,7 +828,12 @@ static void flow_offload_work_handler(struct work_struct *work)
 
 static void flow_offload_queue_work(struct flow_offload_work *offload)
 {
-	queue_work(nf_flow_offload_wq, &offload->work);
+	if (offload->cmd == FLOW_CLS_REPLACE)
+		queue_work(nf_flow_offload_add_wq, &offload->work);
+	else if (offload->cmd == FLOW_CLS_DESTROY)
+		queue_work(nf_flow_offload_del_wq, &offload->work);
+	else
+		queue_work(nf_flow_offload_stats_wq, &offload->work);
 }
 
 static struct flow_offload_work *
@@ -898,8 +905,11 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
 
 void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
 {
-	if (nf_flowtable_hw_offload(flowtable))
-		flush_workqueue(nf_flow_offload_wq);
+	if (nf_flowtable_hw_offload(flowtable)) {
+		flush_workqueue(nf_flow_offload_add_wq);
+		flush_workqueue(nf_flow_offload_del_wq);
+		flush_workqueue(nf_flow_offload_stats_wq);
+	}
 }
 
 static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
@@ -1011,15 +1021,33 @@ EXPORT_SYMBOL_GPL(nf_flow_table_offload_setup);
 
 int nf_flow_table_offload_init(void)
 {
-	nf_flow_offload_wq = alloc_workqueue("nf_flow_table_offload",
-					     WQ_UNBOUND, 0);
-	if (!nf_flow_offload_wq)
+	nf_flow_offload_add_wq = alloc_workqueue("nf_ft_offload_add",
+						 WQ_UNBOUND | WQ_SYSFS, 0);
+	if (!nf_flow_offload_add_wq)
 		return -ENOMEM;
 
+	nf_flow_offload_del_wq = alloc_workqueue("nf_ft_offload_del",
+						 WQ_UNBOUND | WQ_SYSFS, 0);
+	if (!nf_flow_offload_del_wq)
+		goto err_del_wq;
+
+	nf_flow_offload_stats_wq = alloc_workqueue("nf_ft_offload_stats",
+						   WQ_UNBOUND | WQ_SYSFS, 0);
+	if (!nf_flow_offload_stats_wq)
+		goto err_stats_wq;
+
 	return 0;
+
+err_stats_wq:
+	destroy_workqueue(nf_flow_offload_del_wq);
+err_del_wq:
+	destroy_workqueue(nf_flow_offload_add_wq);
+	return -ENOMEM;
 }
 
 void nf_flow_table_offload_exit(void)
 {
-	destroy_workqueue(nf_flow_offload_wq);
+	destroy_workqueue(nf_flow_offload_add_wq);
+	destroy_workqueue(nf_flow_offload_del_wq);
+	destroy_workqueue(nf_flow_offload_stats_wq);
 }
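Splitting the single offload workqueue three ways means add, delete and stats work no longer serialise behind one another, and WQ_SYSFS additionally exposes each queue under /sys/devices/virtual/workqueue/ for tuning. A condensed, self-contained sketch of the same allocate/dispatch/unwind pattern (the demo_* names are hypothetical; the workqueue API calls are the ones used above):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_add_wq, *demo_del_wq;

static int demo_wq_init(void)
{
	demo_add_wq = alloc_workqueue("demo_add", WQ_UNBOUND | WQ_SYSFS, 0);
	if (!demo_add_wq)
		return -ENOMEM;

	demo_del_wq = alloc_workqueue("demo_del", WQ_UNBOUND | WQ_SYSFS, 0);
	if (!demo_del_wq) {
		destroy_workqueue(demo_add_wq);	/* unwind in reverse order */
		return -ENOMEM;
	}
	return 0;
}

/* route each event class to its own queue so one type of work cannot
 * stall the other, mirroring flow_offload_queue_work() above
 */
static void demo_queue(struct work_struct *work, bool is_delete)
{
	queue_work(is_delete ? demo_del_wq : demo_add_wq, work);
}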
@@ -900,6 +900,12 @@ static void nf_tables_table_disable(struct net *net, struct nft_table *table)
 	nft_table_disable(net, table, 0);
 }
 
+enum {
+	NFT_TABLE_STATE_UNCHANGED	= 0,
+	NFT_TABLE_STATE_DORMANT,
+	NFT_TABLE_STATE_WAKEUP
+};
+
 static int nf_tables_updtable(struct nft_ctx *ctx)
 {
 	struct nft_trans *trans;
@@ -929,19 +935,17 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
 
 	if ((flags & NFT_TABLE_F_DORMANT) &&
 	    !(ctx->table->flags & NFT_TABLE_F_DORMANT)) {
-		nft_trans_table_enable(trans) = false;
+		nft_trans_table_state(trans) = NFT_TABLE_STATE_DORMANT;
 	} else if (!(flags & NFT_TABLE_F_DORMANT) &&
 		   ctx->table->flags & NFT_TABLE_F_DORMANT) {
-		ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
 		ret = nf_tables_table_enable(ctx->net, ctx->table);
 		if (ret >= 0)
-			nft_trans_table_enable(trans) = true;
-		else
-			ctx->table->flags |= NFT_TABLE_F_DORMANT;
+			nft_trans_table_state(trans) = NFT_TABLE_STATE_WAKEUP;
 	}
 	if (ret < 0)
 		goto err;
 
+	nft_trans_table_flags(trans) = flags;
 	nft_trans_table_update(trans) = true;
 	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
 	return 0;
@@ -8068,11 +8072,10 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 		switch (trans->msg_type) {
 		case NFT_MSG_NEWTABLE:
 			if (nft_trans_table_update(trans)) {
-				if (!nft_trans_table_enable(trans)) {
-					nf_tables_table_disable(net,
-								trans->ctx.table);
-					trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
-				}
+				if (nft_trans_table_state(trans) == NFT_TABLE_STATE_DORMANT)
+					nf_tables_table_disable(net, trans->ctx.table);
+
+				trans->ctx.table->flags = nft_trans_table_flags(trans);
 			} else {
 				nft_clear(net, trans->ctx.table);
 			}
@@ -8283,11 +8286,9 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
 		switch (trans->msg_type) {
 		case NFT_MSG_NEWTABLE:
 			if (nft_trans_table_update(trans)) {
-				if (nft_trans_table_enable(trans)) {
-					nf_tables_table_disable(net,
-								trans->ctx.table);
-					trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
-				}
+				if (nft_trans_table_state(trans) == NFT_TABLE_STATE_WAKEUP)
+					nf_tables_table_disable(net, trans->ctx.table);
+
 				nft_trans_destroy(trans);
 			} else {
 				list_del_rcu(&trans->ctx.table->list);
@@ -8557,6 +8558,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
 						data->verdict.chain);
 				if (err < 0)
 					return err;
+				break;
 			default:
 				break;
 			}
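The nf_tables hunks above stop editing ctx->table->flags in the preparation step: the requested flags are parked in the transaction, written to the table only when the commit phase runs, and abort merely undoes the hook registration that prepare may have done. A generic sketch of that prepare/commit/abort split, outside the kernel (all demo_* names are hypothetical):

/* Hypothetical model of the transaction flow for table flag updates. */
enum { DEMO_STATE_UNCHANGED, DEMO_STATE_DORMANT, DEMO_STATE_WAKEUP };
#define DEMO_F_DORMANT	0x1u

struct demo_table { unsigned int flags; int hooks_active; };
struct demo_trans { unsigned int new_flags; int state; };

/* prepare: stage the flags; only the fallible step (hook registration)
 * touches the table here
 */
static int demo_prepare(struct demo_table *tbl, struct demo_trans *t,
			unsigned int flags)
{
	t->new_flags = flags;
	t->state = DEMO_STATE_UNCHANGED;

	if ((flags & DEMO_F_DORMANT) && !(tbl->flags & DEMO_F_DORMANT)) {
		t->state = DEMO_STATE_DORMANT;
	} else if (!(flags & DEMO_F_DORMANT) && (tbl->flags & DEMO_F_DORMANT)) {
		tbl->hooks_active = 1;		/* may fail in the real code */
		t->state = DEMO_STATE_WAKEUP;
	}
	return 0;
}

/* commit: the only place tbl->flags is published */
static void demo_commit(struct demo_table *tbl, const struct demo_trans *t)
{
	if (t->state == DEMO_STATE_DORMANT)
		tbl->hooks_active = 0;
	tbl->flags = t->new_flags;
}

/* abort: undo only what prepare did; the flags were never published */
static void demo_abort(struct demo_table *tbl, const struct demo_trans *t)
{
	if (t->state == DEMO_STATE_WAKEUP)
		tbl->hooks_active = 0;
}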
@@ -527,6 +527,7 @@ static void __nft_ct_set_destroy(const struct nft_ctx *ctx, struct nft_ct *priv)
 	case NFT_CT_ZONE:
 		if (--nft_ct_pcpu_template_refcnt == 0)
 			nft_ct_tmpl_put_pcpu();
+		break;
 #endif
 	default:
 		break;