Commit 73f97025 authored by Pablo Neira Ayuso, committed by David S. Miller

netfilter: nft_flow_offload: use direct xmit if hardware offload is enabled

If there is a forward path to reach an ethernet device and hardware
offload is enabled, then use the direct xmit path.

Moreover, store the real device in the direct xmit path info: the software
datapath uses dev_hard_header() to push the link-layer encapsulation headers,
while hardware offload refers to the real device.
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent eeff3000
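
In short, after this change nft_dev_path_info() selects the direct transmit path only when the flowtable has hardware offload enabled and the resolved ingress device is a plain ethernet device, and it records that real device separately for the offload case. The following stand-alone sketch models just that decision; the types and helpers are simplified stand-ins rather than the kernel structures, and only the hw_outdev/xmit_type naming and the two conditions are taken from the diff below.

/*
 * Toy model of the xmit-type decision added by this commit.
 * Stand-in types only; not the kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

enum xmit_type { XMIT_NEIGH, XMIT_DIRECT };

struct netdev_model {
	int ifindex;
	bool valid_ether;	/* models nft_is_valid_ether_device() */
};

struct forward_info_model {
	const struct netdev_model *indev;	/* real ingress device */
	const struct netdev_model *outdev;	/* device used by dev_hard_header() */
	const struct netdev_model *hw_outdev;	/* real device used for hw offload */
	enum xmit_type xmit_type;		/* zero-initialized to XMIT_NEIGH */
};

/* Mirrors the tail of nft_dev_path_info() after this patch. */
static void finish_path_info(struct forward_info_model *info, bool hw_offload)
{
	if (!info->outdev)
		info->outdev = info->indev;

	/* hardware rules are installed on the real device */
	info->hw_outdev = info->indev;

	if (hw_offload && info->indev && info->indev->valid_ether)
		info->xmit_type = XMIT_DIRECT;
}

int main(void)
{
	struct netdev_model eth0 = { .ifindex = 2, .valid_ether = true };
	struct forward_info_model info = { .indev = &eth0 };

	finish_path_info(&info, true);
	printf("xmit=%s out=%d hw_out=%d\n",
	       info.xmit_type == XMIT_DIRECT ? "direct" : "neigh",
	       info.outdev->ifindex, info.hw_outdev->ifindex);
	return 0;
}

Flows that do not meet both conditions keep the neighbour-based (or xfrm) transmit types they already had.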
include/net/netfilter/nf_flow_table.h

@@ -131,6 +131,7 @@ struct flow_offload_tuple {
 		struct dst_entry	*dst_cache;
 		struct {
 			u32		ifidx;
+			u32		hw_ifidx;
 			u8		h_source[ETH_ALEN];
 			u8		h_dest[ETH_ALEN];
 		} out;
@@ -188,6 +189,7 @@ struct nf_flow_route {
 		} in;
 		struct {
 			u32		ifindex;
+			u32		hw_ifindex;
 			u8		h_source[ETH_ALEN];
 			u8		h_dest[ETH_ALEN];
 		} out;
net/netfilter/nf_flow_table_core.c

@@ -106,6 +106,7 @@ static int flow_offload_fill_route(struct flow_offload *flow,
 		memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
 		       ETH_ALEN);
 		flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
+		flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
 		break;
 	case FLOW_OFFLOAD_XMIT_XFRM:
 	case FLOW_OFFLOAD_XMIT_NEIGH:
net/netfilter/nf_flow_table_offload.c

@@ -508,7 +508,7 @@ static void flow_offload_redirect(struct net *net,
 	switch (this_tuple->xmit_type) {
 	case FLOW_OFFLOAD_XMIT_DIRECT:
 		this_tuple = &flow->tuplehash[dir].tuple;
-		ifindex = this_tuple->out.ifidx;
+		ifindex = this_tuple->out.hw_ifidx;
 		break;
 	case FLOW_OFFLOAD_XMIT_NEIGH:
 		other_tuple = &flow->tuplehash[!dir].tuple;
net/netfilter/nft_flow_offload.c

@@ -66,6 +66,7 @@ static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
 struct nft_forward_info {
 	const struct net_device *indev;
 	const struct net_device *outdev;
+	const struct net_device *hw_outdev;
 	struct id {
 		__u16	id;
 		__be16	proto;
@@ -76,9 +77,18 @@ struct nft_forward_info {
 	enum flow_offload_xmit_type xmit_type;
 };
 
+static bool nft_is_valid_ether_device(const struct net_device *dev)
+{
+	if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
+	    dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
+		return false;
+
+	return true;
+}
+
 static void nft_dev_path_info(const struct net_device_path_stack *stack,
 			      struct nft_forward_info *info,
-			      unsigned char *ha)
+			      unsigned char *ha, struct nf_flowtable *flowtable)
 {
 	const struct net_device_path *path;
 	int i;
@@ -140,6 +150,12 @@ static void nft_dev_path_info(const struct net_device_path_stack *stack,
 	}
 	if (!info->outdev)
 		info->outdev = info->indev;
+
+	info->hw_outdev = info->indev;
+
+	if (nf_flowtable_hw_offload(flowtable) &&
+	    nft_is_valid_ether_device(info->indev))
+		info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
 }
 
 static bool nft_flowtable_find_dev(const struct net_device *dev,
@@ -171,7 +187,7 @@ static void nft_dev_forward_path(struct nf_flow_route *route,
 	int i;
 
 	if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
-		nft_dev_path_info(&stack, &info, ha);
+		nft_dev_path_info(&stack, &info, ha, &ft->data);
 
 	if (!info.indev || !nft_flowtable_find_dev(info.indev, ft))
 		return;
@@ -187,6 +203,7 @@ static void nft_dev_forward_path(struct nf_flow_route *route,
 		memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
 		memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN);
 		route->tuple[dir].out.ifindex = info.outdev->ifindex;
+		route->tuple[dir].out.hw_ifindex = info.hw_outdev->ifindex;
 		route->tuple[dir].xmit_type = info.xmit_type;
 	}
 }
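
For context when reading the nft_dev_path_info() hunk above: nf_flowtable_hw_offload() is not part of this patch. It is a small inline helper in nf_flow_table.h that tests the flowtable's NF_FLOWTABLE_HW_OFFLOAD flag, which nftables sets when the flowtable is declared with the offload flag. Reproduced from memory of this tree for reference, so treat the exact form as approximate:

/* Pre-existing helper and flag in include/net/netfilter/nf_flow_table.h,
 * shown for reference only; not part of this patch. */
enum nf_flowtable_flags {
	NF_FLOWTABLE_HW_OFFLOAD	= 0x1,	/* hardware offload requested */
	NF_FLOWTABLE_COUNTER	= 0x2,	/* per-flow counters */
};

static inline bool nf_flowtable_hw_offload(struct nf_flowtable *flowtable)
{
	return flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD;
}

With this in place, the direct transmit path introduced above is only chosen for flowtables explicitly configured for hardware offload; software-only flowtables are unaffected.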