Commit 9d947690 authored by Jakub Kicinski

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
bpf 2023-04-19

We've added 3 non-merge commits during the last 6 day(s) which contain
a total of 3 files changed, 34 insertions(+), 9 deletions(-).

The main changes are:

1) Fix a crash on s390's bpf_arch_text_poke() under a NULL new_addr,
   from Ilya Leoshkevich.

2) Fix a bug in BPF verifier's precision tracker, from Daniel Borkmann
   and Andrii Nakryiko.

3) Fix a regression in veth's xdp_features which led to a broken BPF CI
   selftest, from Lorenzo Bianconi.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf: Fix incorrect verifier pruning due to missing register precision taints
  veth: take into account peer device for NETDEV_XDP_ACT_NDO_XMIT xdp_features flag
  s390/bpf: Fix bpf_arch_text_poke() with new_addr == NULL
====================

Link: https://lore.kernel.org/r/20230419195847.27060-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 52b37ae8 71b547f5
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -539,7 +539,7 @@ static void bpf_jit_plt(void *plt, void *ret, void *target)
 {
 	memcpy(plt, bpf_plt, BPF_PLT_SIZE);
 	*(void **)((char *)plt + (bpf_plt_ret - bpf_plt)) = ret;
-	*(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target;
+	*(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target ?: ret;
 }
 
 /*
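The one-line fix above relies on the GNU `?:` extension: `a ?: b` is shorthand for `a ? a : b` with `a` evaluated only once, so a NULL target now falls back to the PLT's return address instead of being written out as a NULL branch target. A minimal user-space sketch of that fallback (plain C with invented names, not the s390 JIT code):

#include <assert.h>
#include <stddef.h>

/* GNU extension: a ?: b  ==  a ? a : b, with a evaluated once. */
static void *plt_target_or_ret(void *target, void *ret)
{
	return target ?: ret;
}

int main(void)
{
	int ret_insn, real_target;	/* stand-ins for code addresses */

	assert(plt_target_or_ret(&real_target, &ret_insn) == &real_target);
	/* A NULL target (new_addr == NULL) falls back to the return address. */
	assert(plt_target_or_ret(NULL, &ret_insn) == &ret_insn);
	return 0;
}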
@@ -2010,7 +2010,9 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 	} __packed insn;
 	char expected_plt[BPF_PLT_SIZE];
 	char current_plt[BPF_PLT_SIZE];
+	char new_plt[BPF_PLT_SIZE];
 	char *plt;
+	char *ret;
 	int err;
 
 	/* Verify the branch to be patched. */
@@ -2032,12 +2034,15 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 		err = copy_from_kernel_nofault(current_plt, plt, BPF_PLT_SIZE);
 		if (err < 0)
 			return err;
-		bpf_jit_plt(expected_plt, (char *)ip + 6, old_addr);
+		ret = (char *)ip + 6;
+		bpf_jit_plt(expected_plt, ret, old_addr);
 		if (memcmp(current_plt, expected_plt, BPF_PLT_SIZE))
 			return -EINVAL;
 		/* Adjust the call address. */
+		bpf_jit_plt(new_plt, ret, new_addr);
 		s390_kernel_write(plt + (bpf_plt_target - bpf_plt),
-				  &new_addr, sizeof(void *));
+				  new_plt + (bpf_plt_target - bpf_plt),
+				  sizeof(void *));
 	}
 
 	/* Adjust the mask of the branch. */
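The second hunk applies the same fallback to the patch path: instead of writing `&new_addr` (which may hold NULL) into the live PLT, it builds a complete new PLT image with bpf_jit_plt() and copies only the target word out of it. A hedged user-space sketch of that "patch one slot from a freshly built template" pattern, with memcmp()/memcpy() standing in for copy_from_kernel_nofault()/s390_kernel_write() and an invented two-slot layout:

#include <assert.h>
#include <stddef.h>
#include <string.h>

#define PLT_SIZE	(2 * sizeof(void *))
#define RET_OFF		0			/* invented slot offsets */
#define TARGET_OFF	sizeof(void *)

/* Analogue of bpf_jit_plt(): a NULL target falls back to ret. */
static void fill_plt(char *plt, void *ret, void *target)
{
	void *t = target ?: ret;

	memcpy(plt + RET_OFF, &ret, sizeof(void *));
	memcpy(plt + TARGET_OFF, &t, sizeof(void *));
}

/* Analogue of the patched poke flow: verify the live PLT against an image
 * built for old_addr, then copy only the target word from an image built
 * for new_addr.
 */
static int poke_plt(char *live, void *ret, void *old_addr, void *new_addr)
{
	char expected[PLT_SIZE], fresh[PLT_SIZE];

	fill_plt(expected, ret, old_addr);
	if (memcmp(live, expected, PLT_SIZE))
		return -1;				/* ~ -EINVAL */
	fill_plt(fresh, ret, new_addr);
	memcpy(live + TARGET_OFF, fresh + TARGET_OFF, sizeof(void *));
	return 0;
}

int main(void)
{
	char live[PLT_SIZE];
	int old_func, insn_after_call;			/* fake code addresses */
	void *slot;

	fill_plt(live, &insn_after_call, &old_func);
	assert(poke_plt(live, &insn_after_call, &old_func, NULL) == 0);
	memcpy(&slot, live + TARGET_OFF, sizeof(void *));
	assert(slot == (void *)&insn_after_call);	/* never NULL */
	return 0;
}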
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1262,11 +1262,12 @@ static void veth_set_xdp_features(struct net_device *dev)
 
 	peer = rtnl_dereference(priv->peer);
 	if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) {
+		struct veth_priv *priv_peer = netdev_priv(peer);
 		xdp_features_t val = NETDEV_XDP_ACT_BASIC |
 				     NETDEV_XDP_ACT_REDIRECT |
 				     NETDEV_XDP_ACT_RX_SG;
 
-		if (priv->_xdp_prog || veth_gro_requested(dev))
+		if (priv_peer->_xdp_prog || veth_gro_requested(peer))
 			val |= NETDEV_XDP_ACT_NDO_XMIT |
 			       NETDEV_XDP_ACT_NDO_XMIT_SG;
 		xdp_set_features_flag(dev, val);
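The veth change keys dev's NETDEV_XDP_ACT_NDO_XMIT advertisement off the peer's state: frames redirected to dev are pushed through dev's ndo_xdp_xmit into the peer's ring, so they can only be consumed when the peer has an XDP program attached or GRO (and thus NAPI) enabled. A hedged toy model of that decision (invented types and flag values, only loosely mirroring the NETDEV_XDP_ACT_* bits):

#include <assert.h>
#include <stdbool.h>

/* Toy feature bits; names mirror NETDEV_XDP_ACT_*, values are invented. */
enum {
	TOY_XDP_BASIC		= 1 << 0,
	TOY_XDP_REDIRECT	= 1 << 1,
	TOY_XDP_RX_SG		= 1 << 2,
	TOY_XDP_NDO_XMIT	= 1 << 3,
	TOY_XDP_NDO_XMIT_SG	= 1 << 4,
};

struct toy_veth {
	bool has_xdp_prog;	/* ~ priv->_xdp_prog */
	bool gro_requested;	/* ~ veth_gro_requested() */
};

/* After the fix: dev advertises NDO_XMIT only if the *peer* can consume
 * the frames dev would transmit into it.
 */
static unsigned int toy_xdp_features(const struct toy_veth *peer)
{
	unsigned int val = TOY_XDP_BASIC | TOY_XDP_REDIRECT | TOY_XDP_RX_SG;

	if (peer->has_xdp_prog || peer->gro_requested)
		val |= TOY_XDP_NDO_XMIT | TOY_XDP_NDO_XMIT_SG;
	return val;
}

int main(void)
{
	struct toy_veth idle_peer = { .has_xdp_prog = false, .gro_requested = false };
	struct toy_veth napi_peer = { .has_xdp_prog = false, .gro_requested = true };

	assert(!(toy_xdp_features(&idle_peer) & TOY_XDP_NDO_XMIT));
	assert(toy_xdp_features(&napi_peer) & TOY_XDP_NDO_XMIT);
	return 0;
}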
@@ -1504,19 +1505,23 @@ static int veth_set_features(struct net_device *dev,
 {
 	netdev_features_t changed = features ^ dev->features;
 	struct veth_priv *priv = netdev_priv(dev);
+	struct net_device *peer;
 	int err;
 
 	if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
 		return 0;
 
+	peer = rtnl_dereference(priv->peer);
 	if (features & NETIF_F_GRO) {
 		err = veth_napi_enable(dev);
 		if (err)
 			return err;
 
-		xdp_features_set_redirect_target(dev, true);
+		if (peer)
+			xdp_features_set_redirect_target(peer, true);
 	} else {
-		xdp_features_clear_redirect_target(dev);
+		if (peer)
+			xdp_features_clear_redirect_target(peer);
 		veth_napi_del(dev);
 	}
 	return 0;
@@ -1598,13 +1603,13 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 			peer->max_mtu = max_mtu;
 		}
 
-		xdp_features_set_redirect_target(dev, true);
+		xdp_features_set_redirect_target(peer, true);
 	}
 
 	if (old_prog) {
 		if (!prog) {
-			if (!veth_gro_requested(dev))
-				xdp_features_clear_redirect_target(dev);
+			if (peer && !veth_gro_requested(dev))
+				xdp_features_clear_redirect_target(peer);
 
 			if (dev->flags & IFF_UP)
 				veth_disable_xdp(dev);
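The remaining two hunks make the redirect-target updates symmetric to that: when dev gains or loses an XDP program or GRO/NAPI, it is the peer whose usefulness as an XDP_REDIRECT target changes (frames redirected to the peer end up in dev), so the flag is set or cleared on the peer, guarded against a peer that is already gone. A hedged toy model of that update (invented types and flag, not the kernel xdp_features helpers):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define TOY_NDO_XMIT	(1u << 0)	/* stand-in for the NDO_XMIT feature bit */

struct toy_veth {
	unsigned int xdp_features;
	struct toy_veth *peer;		/* may be NULL while the pair is torn down */
	bool has_xdp_prog;		/* ~ priv->_xdp_prog */
	bool gro_requested;		/* ~ veth_gro_requested() */
};

/* Simplified combination of the two hunks: dev's own XDP/GRO state decides
 * whether frames redirected to the peer (and delivered into dev) can be
 * consumed, so the flag is flipped on the peer, if it still exists.
 */
static void toy_update_peer_redirect_target(struct toy_veth *dev)
{
	struct toy_veth *peer = dev->peer;

	if (!peer)
		return;
	if (dev->has_xdp_prog || dev->gro_requested)
		peer->xdp_features |= TOY_NDO_XMIT;
	else
		peer->xdp_features &= ~TOY_NDO_XMIT;
}

int main(void)
{
	struct toy_veth a = { 0 }, b = { 0 };

	a.peer = &b;
	b.peer = &a;

	a.gro_requested = true;			/* e.g. GRO turned on via set_features */
	toy_update_peer_redirect_target(&a);
	assert(b.xdp_features & TOY_NDO_XMIT);	/* b may now redirect into a */

	a.gro_requested = false;
	toy_update_peer_redirect_target(&a);
	assert(!(b.xdp_features & TOY_NDO_XMIT));

	a.peer = NULL;				/* peer gone: update is a no-op */
	toy_update_peer_redirect_target(&a);
	return 0;
}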
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2967,6 +2967,21 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
 			}
 		} else if (opcode == BPF_EXIT) {
 			return -ENOTSUPP;
+		} else if (BPF_SRC(insn->code) == BPF_X) {
+			if (!(*reg_mask & (dreg | sreg)))
+				return 0;
+			/* dreg <cond> sreg
+			 * Both dreg and sreg need precision before
+			 * this insn. If only sreg was marked precise
+			 * before it would be equally necessary to
+			 * propagate it to dreg.
+			 */
+			*reg_mask |= (sreg | dreg);
+			/* else dreg <cond> K
+			 * Only dreg still needs precision before
+			 * this insn, so for the K-based conditional
+			 * there is nothing new to be marked.
+			 */
 		}
 	} else if (class == BPF_LD) {
 		if (!(*reg_mask & dreg))
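The verifier hunk closes the pruning hole by tainting both registers of a register-register conditional jump during precision backtracking: if either dreg or sreg is in the precision mask when walking back over `dreg <cond> sreg`, both must be marked, while `dreg <cond> K` needs nothing new. A hedged toy model of that mask propagation (a plain bitmask, not the verifier's backtracking state):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Toy precision mask: bit i set means r_i still needs to be marked precise
 * while walking instructions backwards.
 */
typedef uint32_t reg_mask_t;

struct toy_cond_jmp {
	int dreg;		/* destination register of "if dreg <cond> ..." */
	int sreg;		/* source register, used only when src_is_reg */
	bool src_is_reg;	/* BPF_X (register source) vs BPF_K (immediate) */
};

/* Mirrors the idea of the fix: for dreg <cond> sreg, precision on either
 * side taints both; for dreg <cond> K there is nothing new to mark.
 */
static void toy_backtrack_cond_jmp(const struct toy_cond_jmp *insn,
				   reg_mask_t *mask)
{
	reg_mask_t dbit = 1u << insn->dreg;

	if (insn->src_is_reg) {
		reg_mask_t sbit = 1u << insn->sreg;

		if (*mask & (dbit | sbit))
			*mask |= dbit | sbit;
	}
}

int main(void)
{
	struct toy_cond_jmp jmp = { .dreg = 1, .sreg = 3, .src_is_reg = true };
	reg_mask_t mask = 1u << 3;	/* so far only r3 needs precision */

	toy_backtrack_cond_jmp(&jmp, &mask);
	assert(mask & (1u << 1));	/* r1 is now tainted as well */
	return 0;
}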