Commit e7af85db authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
nf pull request for net

This series contains netfilter fixes for net; they are:

1) Fix lockdep splat in nft_hash when releasing sets from the
   rcu_callback context. We don't hold the mutex there anymore; see
   the sketch below the commit metadata.

2) Remove unnecessary spinlock_bh in the destroy path of the nf_tables
   rbtree set type from rcu_callback context.

3) Fix another lockdep splat in rhashtable. None of the callers hold
   a mutex when calling rhashtable_destroy.

4) Fix duplicated error reporting from nfnetlink when aborting and
   replaying a batch.

5) Fix a Kconfig issue reported by kbuild robot.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 445f7f4d 679ab4dd
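
For context on fix 1: an RCU callback runs in softirq context, so any mutex_lock() reached from it triggers a lockdep splat about sleeping in atomic context. A minimal sketch of the problem and the shape of the fix, using hypothetical names (example_set, example_mutex) rather than the actual nft_hash code:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static DEFINE_MUTEX(example_mutex);	/* stand-in for the nfnl mutex */

struct example_set {
	struct rcu_head rcu;
	/* ... set elements ... */
};

static void example_set_free(struct rcu_head *head)
{
	struct example_set *set = container_of(head, struct example_set, rcu);

	/* mutex_lock(&example_mutex);  <-- would splat: softirq context */
	kfree(set);			/* teardown must be lock-free here */
}

static void example_set_release(struct example_set *set)
{
	/* unpublish the set first, then free it after the grace period */
	call_rcu(&set->rcu, example_set_free);
}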
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -588,13 +588,13 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
  * rhashtable_destroy - destroy hash table
  * @ht: the hash table to destroy
  *
- * Frees the bucket array.
+ * Frees the bucket array. This function is not rcu safe, therefore the caller
+ * has to make sure that no resizing may happen by unpublishing the hashtable
+ * and waiting for the quiescent cycle before releasing the bucket array.
  */
 void rhashtable_destroy(const struct rhashtable *ht)
 {
-	const struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
-
-	bucket_table_free(tbl);
+	bucket_table_free(ht->tbl);
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
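
The new kernel-doc comment above spells out a caller contract; a hedged sketch of what that sequence looks like, with a hypothetical publish pointer and wrapper that are not from the patch:

#include <linux/rhashtable.h>
#include <linux/rcupdate.h>

static struct rhashtable __rcu *published_ht;	/* readers use rcu_dereference() */

static void example_teardown(struct rhashtable *ht)
{
	/* 1) unpublish, so no new lookups or resizes can start */
	rcu_assign_pointer(published_ht, NULL);

	/* 2) wait out the quiescent cycle: in-flight readers drain */
	synchronize_rcu();

	/* 3) only now is the non-RCU-safe destroy safe to call */
	rhashtable_destroy(ht);
}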
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -847,6 +847,7 @@ config NETFILTER_XT_TARGET_TPROXY
 	tristate '"TPROXY" target transparent proxying support'
 	depends on NETFILTER_XTABLES
 	depends on NETFILTER_ADVANCED
+	depends on (IPV6 || IPV6=n)
 	depends on IP_NF_MANGLE
 	select NF_DEFRAG_IPV4
 	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
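
The added (IPV6 || IPV6=n) line is the standard Kconfig idiom for "don't let this be built-in while IPV6 is a module": the expression evaluates to y when IPV6 is y or n, but only to m when IPV6=m, which caps NETFILTER_XT_TARGET_TPROXY at m in that case. Without it, a built-in TPROXY could reference IPv6 symbols that only exist in ipv6.ko and fail to link, which is the kind of Kconfig breakage the kbuild robot reported in fix 5.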
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -222,6 +222,51 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	}
 }
 
+struct nfnl_err {
+	struct list_head	head;
+	struct nlmsghdr		*nlh;
+	int			err;
+};
+
+static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err)
+{
+	struct nfnl_err *nfnl_err;
+
+	nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL);
+	if (nfnl_err == NULL)
+		return -ENOMEM;
+
+	nfnl_err->nlh = nlh;
+	nfnl_err->err = err;
+	list_add_tail(&nfnl_err->head, list);
+
+	return 0;
+}
+
+static void nfnl_err_del(struct nfnl_err *nfnl_err)
+{
+	list_del(&nfnl_err->head);
+	kfree(nfnl_err);
+}
+
+static void nfnl_err_reset(struct list_head *err_list)
+{
+	struct nfnl_err *nfnl_err, *next;
+
+	list_for_each_entry_safe(nfnl_err, next, err_list, head)
+		nfnl_err_del(nfnl_err);
+}
+
+static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
+{
+	struct nfnl_err *nfnl_err, *next;
+
+	list_for_each_entry_safe(nfnl_err, next, err_list, head) {
+		netlink_ack(skb, nfnl_err->nlh, nfnl_err->err);
+		nfnl_err_del(nfnl_err);
+	}
+}
+
 static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 				u_int16_t subsys_id)
 {
@@ -230,6 +275,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 	const struct nfnetlink_subsystem *ss;
 	const struct nfnl_callback *nc;
 	bool success = true, done = false;
+	static LIST_HEAD(err_list);
 	int err;
 
 	if (subsys_id >= NFNL_SUBSYS_COUNT)
@@ -287,6 +333,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 		type = nlh->nlmsg_type;
 		if (type == NFNL_MSG_BATCH_BEGIN) {
 			/* Malformed: Batch begin twice */
+			nfnl_err_reset(&err_list);
 			success = false;
 			goto done;
 		} else if (type == NFNL_MSG_BATCH_END) {
@@ -333,6 +380,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 			 * original skb.
 			 */
 			if (err == -EAGAIN) {
+				nfnl_err_reset(&err_list);
 				ss->abort(skb);
 				nfnl_unlock(subsys_id);
 				kfree_skb(nskb);
@@ -341,11 +389,24 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 		}
 ack:
 		if (nlh->nlmsg_flags & NLM_F_ACK || err) {
+			/* Errors are delivered once the full batch has been
+			 * processed, this avoids that the same error is
+			 * reported several times when replaying the batch.
+			 */
+			if (nfnl_err_add(&err_list, nlh, err) < 0) {
+				/* We failed to enqueue an error, reset the
+				 * list of errors and send OOM to userspace
+				 * pointing to the batch header.
+				 */
+				nfnl_err_reset(&err_list);
+				netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM);
+				success = false;
+				goto done;
+			}
 			/* We don't stop processing the batch on errors, thus,
 			 * userspace gets all the errors that the batch
 			 * triggers.
 			 */
-			netlink_ack(skb, nlh, err);
 			if (err)
 				success = false;
 		}
@@ -361,6 +422,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 	else
 		ss->abort(skb);
 
+	nfnl_err_deliver(&err_list, oskb);
 	nfnl_unlock(subsys_id);
 	kfree_skb(nskb);
 }
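
The helpers above queue ACKs and errors instead of emitting them immediately, so a replayed batch cannot report the same error twice. A hedged, userspace-runnable sketch of that pattern with hypothetical names (pending_ack, queue_ack and friends, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct pending_ack {
	struct pending_ack *next;
	int seq;	/* which message in the batch */
	int err;	/* 0 = plain ACK, negative = errno */
};

/* Append at the tail so delivery preserves batch order, like list_add_tail(). */
static int queue_ack(struct pending_ack **list, int seq, int err)
{
	struct pending_ack *a = malloc(sizeof(*a));
	struct pending_ack **tail = list;

	if (!a)
		return -1;	/* caller resets the list and reports OOM */
	a->seq = seq;
	a->err = err;
	a->next = NULL;
	while (*tail)
		tail = &(*tail)->next;
	*tail = a;
	return 0;
}

/* Drop everything queued so far, as on -EAGAIN replay or a double BEGIN. */
static void reset_acks(struct pending_ack **list)
{
	while (*list) {
		struct pending_ack *a = *list;

		*list = a->next;
		free(a);
	}
}

/* Flush exactly once, after the batch has been committed or aborted. */
static void deliver_acks(struct pending_ack **list)
{
	while (*list) {
		struct pending_ack *a = *list;

		printf("seq=%d err=%d\n", a->seq, a->err);
		*list = a->next;
		free(a);
	}
}

int main(void)
{
	struct pending_ack *list = NULL;

	/* first pass hits a transient error: queued results are dropped */
	queue_ack(&list, 1, -11);
	reset_acks(&list);

	/* replay succeeds: each message is reported exactly once */
	queue_ack(&list, 1, 0);
	queue_ack(&list, 2, -22);
	deliver_acks(&list);
	return 0;
}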
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -180,15 +180,17 @@ static int nft_hash_init(const struct nft_set *set,
 static void nft_hash_destroy(const struct nft_set *set)
 {
 	const struct rhashtable *priv = nft_set_priv(set);
-	const struct bucket_table *tbl;
+	const struct bucket_table *tbl = priv->tbl;
 	struct nft_hash_elem *he, *next;
 	unsigned int i;
 
-	tbl = rht_dereference(priv->tbl, priv);
-	for (i = 0; i < tbl->size; i++)
-		rht_for_each_entry_safe(he, next, tbl->buckets[i], priv, node)
+	for (i = 0; i < tbl->size; i++) {
+		for (he = rht_entry(tbl->buckets[i], struct nft_hash_elem, node);
+		     he != NULL; he = next) {
+			next = rht_entry(he->node.next, struct nft_hash_elem, node);
 			nft_hash_elem_destroy(set, he);
+		}
+	}
 
 	rhashtable_destroy(priv);
 }
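
Why the destroy path stopped using rht_dereference() and rht_for_each_entry_safe(): both expand to lockdep-checked accessors. As I recall include/linux/rhashtable.h of this era, the relevant macro is roughly:

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

Called from an RCU callback, where the hash table mutex is not held, that condition fails and lockdep splats even though the access is safe. Since the set is already unreachable and the grace period has elapsed by the time nft_hash_destroy() runs, plain pointer walks over tbl->buckets[] suffice.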
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -234,13 +234,11 @@ static void nft_rbtree_destroy(const struct nft_set *set)
 	struct nft_rbtree_elem *rbe;
 	struct rb_node *node;
 
-	spin_lock_bh(&nft_rbtree_lock);
 	while ((node = priv->root.rb_node) != NULL) {
 		rb_erase(node, &priv->root);
 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
 		nft_rbtree_elem_destroy(set, rbe);
 	}
-	spin_unlock_bh(&nft_rbtree_lock);
 }
 
 static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
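
The rationale for dropping the lock mirrors the hash case: nft_rbtree_destroy() also runs from rcu_callback context, and by that point the set is no longer reachable from any other context, so serializing against readers with nft_rbtree_lock protects nothing. Unlike the nft_hash change this is not a lockdep splat, just a pointless spin_lock_bh()/spin_unlock_bh() pair, which is why the pull request calls it merely unnecessary.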