Commit db6140e5 authored by Paul Blakey, committed by Pablo Neira Ayuso

net/sched: act_ct: Fix flow table lookup failure with no originating ifindex

After the cited commit optimized HW insertion, flow table entries are
populated with ifindex information which was intended to be used only
for HW offload. This tuple ifindex is hashed into the flow table key,
so it must be filled for a lookup to succeed. But the tuple ifindex is
only relevant for the netfilter flowtables (nft), so it is not filled
in the act_ct flow table lookup, resulting in lookup failures, and
therefore no SW offload and no offload teardown for TCP connection
FIN/RST packets.
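
For reference, the reason an unfilled hashed field breaks the lookup:
the flow table rhashtable hashes every byte of the tuple up to the
__hash marker. Below is a minimal sketch, modeled on the upstream
flow_offload_hash() helper in nf_flow_table_core.c (simplified, not
the exact code):

    /* Sketch: the hash covers the key region of the tuple, i.e.
     * everything laid out before the __hash marker. A field such as
     * iifidx that is filled at insertion time but left zero at lookup
     * time hashes the two tuples into different buckets, so the
     * lookup fails.
     */
    static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
    {
            const struct flow_offload_tuple *tuple = data;

            return jhash(tuple, offsetof(struct flow_offload_tuple, __hash),
                         seed);
    }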

To fix this, add a new tc ifindex field to the tuple, which will only
be used for offloading, not for lookup, as it will not be part of the
tuple hash.
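
A layout sketch (abridged; only the fields relevant here are shown) of
struct flow_offload_tuple after this change, showing that the new tc
field sits past the __hash marker and therefore outside the hashed key
region:

    struct flow_offload_tuple {
            /* ... address, port, iifidx and other key fields; all of
             * these are part of the lookup hash ...
             */

            /* All members above are keys for lookups. */
            struct { }                      __hash;

            u8                              dir:2,
                                            xmit_type:3,
                                            encap_num:2,
                                            in_vlan_ingress:2;
            u16                             mtu;
            union {
                    /* ... XMIT_NEIGH / XMIT_DIRECT output info ... */
                    struct {
                            u32             iifidx; /* used only when
                                                     * building the HW
                                                     * offload rule,
                                                     * never hashed
                                                     */
                    } tc;
            };
    };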

Fixes: 9795ded7 ("net/sched: act_ct: Fill offloading tuple iifidx")
Signed-off-by: Paul Blakey <paulb@nvidia.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent 3b836da4
@@ -96,6 +96,7 @@ enum flow_offload_xmit_type {
 	FLOW_OFFLOAD_XMIT_NEIGH,
 	FLOW_OFFLOAD_XMIT_XFRM,
 	FLOW_OFFLOAD_XMIT_DIRECT,
+	FLOW_OFFLOAD_XMIT_TC,
 };
 
 #define NF_FLOW_TABLE_ENCAP_MAX		2
@@ -127,7 +128,7 @@ struct flow_offload_tuple {
 	struct { }			__hash;
 
 	u8				dir:2,
-					xmit_type:2,
+					xmit_type:3,
 					encap_num:2,
 					in_vlan_ingress:2;
 	u16				mtu;
@@ -142,6 +143,9 @@ struct flow_offload_tuple {
 			u8		h_source[ETH_ALEN];
 			u8		h_dest[ETH_ALEN];
 		} out;
+		struct {
+			u32		iifidx;
+		} tc;
 	};
 };
......
@@ -110,7 +110,11 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
 		nf_flow_rule_lwt_match(match, tun_info);
 	}
 
-	key->meta.ingress_ifindex = tuple->iifidx;
+	if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_TC)
+		key->meta.ingress_ifindex = tuple->tc.iifidx;
+	else
+		key->meta.ingress_ifindex = tuple->iifidx;
+
 	mask->meta.ingress_ifindex = 0xffffffff;
 
 	if (tuple->encap_num > 0 && !(tuple->in_vlan_ingress & BIT(0)) &&
......
@@ -361,6 +361,13 @@ static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
 	}
 }
 
+static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
+				 struct nf_conn_act_ct_ext *act_ct_ext, u8 dir)
+{
+	entry->tuplehash[dir].tuple.xmit_type = FLOW_OFFLOAD_XMIT_TC;
+	entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
+}
+
 static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
 				  struct nf_conn *ct,
 				  bool tcp)
@@ -385,10 +392,8 @@ static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
 
 	act_ct_ext = nf_conn_act_ct_ext_find(ct);
 	if (act_ct_ext) {
-		entry->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.iifidx =
-			act_ct_ext->ifindex[IP_CT_DIR_ORIGINAL];
-		entry->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.iifidx =
-			act_ct_ext->ifindex[IP_CT_DIR_REPLY];
+		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
+		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
 	}
 
 	err = flow_offload_add(&ct_ft->nf_ft, entry);
......