Commit c63a7cc4 authored by Pablo Neira Ayuso, committed by David S. Miller

netfilter: flowtable: use dev_fill_forward_path() to obtain ingress device

Until now, the ingress device in the tuple was obtained from the route in
the reply direction. Use dev_fill_forward_path() instead to get the real
ingress device for this flow.

Fall back to using the ingress device that the IP forwarding route
provides if:

- dev_fill_forward_path() finds no real ingress device.
- the ingress device that is obtained is not part of the flowtable devices.
- this route has an xfrm policy.
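
For orientation, a condensed sketch of the lookup this change introduces follows. It is editorial, not code from the patch: resolve_ingress_ifindex() is an illustrative name that collapses the helpers added to nft_flow_offload.c (see the diff below) into a single function, and the flowtable-membership and xfrm fallbacks listed above are omitted.

/*
 * Condensed sketch (editorial, not from the patch).  The device used to
 * transmit in one direction is the ingress device for packets flowing in
 * the opposite direction: resolve the neighbour hardware address for the
 * dst, ask the stacked drivers for the real forward path, and pick the
 * innermost ethernet device.  Falls back to the route's own device, as
 * before this change.  The nft_flowtable_find_dev() check is left out.
 */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/neighbour.h>

static int resolve_ingress_ifindex(const struct dst_entry *dst,
                                   const void *daddr)
{
        const struct net_device *indev = NULL;
        struct net_device_path_stack stack;
        unsigned char ha[ETH_ALEN];
        struct neighbour *n;
        u8 nud_state;
        int i;

        n = dst_neigh_lookup(dst, daddr);
        if (!n)
                goto fallback;

        /* Snapshot neighbour state and hardware address under the lock. */
        read_lock_bh(&n->lock);
        nud_state = n->nud_state;
        ether_addr_copy(ha, n->ha);
        read_unlock_bh(&n->lock);
        neigh_release(n);

        /* Only a resolved neighbour provides a usable destination address. */
        if (!(nud_state & NUD_VALID))
                goto fallback;

        if (dev_fill_forward_path(dst->dev, ha, &stack) < 0)
                goto fallback;

        /* Walk the path stack; keep the innermost ethernet device. */
        for (i = 0; i < stack.num_paths; i++) {
                if (stack.path[i].type == DEV_PATH_ETHERNET)
                        indev = stack.path[i].dev;
                else
                        indev = NULL;   /* vlan/bridge paths: force fallback */
        }

        if (indev)
                return indev->ifindex;
fallback:
        /* Same behaviour as before the patch: use the route's device. */
        return dst->dev->ifindex;
}

The neighbour lookup matters because dev_fill_forward_path() resolves the path from the destination hardware address; without a NUD_VALID entry there is nothing to resolve, so the route's device keeps being used, exactly as before this change.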
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5139c0c0
@@ -165,6 +165,9 @@ static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
 struct nf_flow_route {
         struct {
                 struct dst_entry                *dst;
+                struct {
+                        u32                     ifindex;
+                } in;
                 enum flow_offload_xmit_type     xmit_type;
         } tuple[FLOW_OFFLOAD_DIR_MAX];
 };
@@ -79,7 +79,6 @@ static int flow_offload_fill_route(struct flow_offload *flow,
                                    enum flow_offload_tuple_dir dir)
 {
         struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
-        struct dst_entry *other_dst = route->tuple[!dir].dst;
         struct dst_entry *dst = route->tuple[dir].dst;

         if (!dst_hold_safe(route->tuple[dir].dst))
@@ -94,7 +93,7 @@ static int flow_offload_fill_route(struct flow_offload *flow,
                 break;
         }

-        flow_tuple->iifidx = other_dst->dev->ifindex;
+        flow_tuple->iifidx = route->tuple[dir].in.ifindex;
         flow_tuple->xmit_type = route->tuple[dir].xmit_type;
         flow_tuple->dst_cache = dst;
@@ -31,14 +31,104 @@ static void nft_default_forward_path(struct nf_flow_route *route,
                                      struct dst_entry *dst_cache,
                                      enum ip_conntrack_dir dir)
 {
+        route->tuple[!dir].in.ifindex   = dst_cache->dev->ifindex;
         route->tuple[dir].dst           = dst_cache;
         route->tuple[dir].xmit_type     = nft_xmit_type(dst_cache);
 }

+static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
+                                     const struct dst_entry *dst_cache,
+                                     const struct nf_conn *ct,
+                                     enum ip_conntrack_dir dir,
+                                     struct net_device_path_stack *stack)
+{
+        const void *daddr = &ct->tuplehash[!dir].tuple.src.u3;
+        struct net_device *dev = dst_cache->dev;
+        unsigned char ha[ETH_ALEN];
+        struct neighbour *n;
+        u8 nud_state;
+
+        n = dst_neigh_lookup(dst_cache, daddr);
+        if (!n)
+                return -1;
+
+        read_lock_bh(&n->lock);
+        nud_state = n->nud_state;
+        ether_addr_copy(ha, n->ha);
+        read_unlock_bh(&n->lock);
+        neigh_release(n);
+
+        if (!(nud_state & NUD_VALID))
+                return -1;
+
+        return dev_fill_forward_path(dev, ha, stack);
+}
+
+struct nft_forward_info {
+        const struct net_device *indev;
+};
+
+static void nft_dev_path_info(const struct net_device_path_stack *stack,
+                              struct nft_forward_info *info)
+{
+        const struct net_device_path *path;
+        int i;
+
+        for (i = 0; i < stack->num_paths; i++) {
+                path = &stack->path[i];
+                switch (path->type) {
+                case DEV_PATH_ETHERNET:
+                        info->indev = path->dev;
+                        break;
+                case DEV_PATH_VLAN:
+                case DEV_PATH_BRIDGE:
+                default:
+                        info->indev = NULL;
+                        break;
+                }
+        }
+}
+
+static bool nft_flowtable_find_dev(const struct net_device *dev,
+                                   struct nft_flowtable *ft)
+{
+        struct nft_hook *hook;
+        bool found = false;
+
+        list_for_each_entry_rcu(hook, &ft->hook_list, list) {
+                if (hook->ops.dev != dev)
+                        continue;
+
+                found = true;
+                break;
+        }
+
+        return found;
+}
+
+static void nft_dev_forward_path(struct nf_flow_route *route,
+                                 const struct nf_conn *ct,
+                                 enum ip_conntrack_dir dir,
+                                 struct nft_flowtable *ft)
+{
+        const struct dst_entry *dst = route->tuple[dir].dst;
+        struct net_device_path_stack stack;
+        struct nft_forward_info info = {};
+
+        if (nft_dev_fill_forward_path(route, dst, ct, dir, &stack) >= 0)
+                nft_dev_path_info(&stack, &info);
+
+        if (!info.indev || !nft_flowtable_find_dev(info.indev, ft))
+                return;
+
+        route->tuple[!dir].in.ifindex = info.indev->ifindex;
+}
+
 static int nft_flow_route(const struct nft_pktinfo *pkt,
                           const struct nf_conn *ct,
                           struct nf_flow_route *route,
-                          enum ip_conntrack_dir dir)
+                          enum ip_conntrack_dir dir,
+                          struct nft_flowtable *ft)
 {
         struct dst_entry *this_dst = skb_dst(pkt->skb);
         struct dst_entry *other_dst = NULL;
@@ -63,6 +153,12 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
         nft_default_forward_path(route, this_dst, dir);
         nft_default_forward_path(route, other_dst, !dir);

+        if (route->tuple[dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH &&
+            route->tuple[!dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH) {
+                nft_dev_forward_path(route, ct, dir, ft);
+                nft_dev_forward_path(route, ct, !dir, ft);
+        }
+
         return 0;
 }
@@ -90,8 +186,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
         struct nft_flow_offload *priv = nft_expr_priv(expr);
         struct nf_flowtable *flowtable = &priv->flowtable->data;
         struct tcphdr _tcph, *tcph = NULL;
+        struct nf_flow_route route = {};
         enum ip_conntrack_info ctinfo;
-        struct nf_flow_route route;
         struct flow_offload *flow;
         enum ip_conntrack_dir dir;
         struct nf_conn *ct;
@@ -128,7 +224,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
                 goto out;

         dir = CTINFO2DIR(ctinfo);
-        if (nft_flow_route(pkt, ct, &route, dir) < 0)
+        if (nft_flow_route(pkt, ct, &route, dir, priv->flowtable) < 0)
                 goto err_flow_route;

         flow = flow_offload_alloc(ct);