Commit 0cea651d authored by Alexei Starovoitov's avatar Alexei Starovoitov

Merge branch 'Enable cpumasks to be used as kptrs'

David Vernet says:

====================

This is part 3 of https://lore.kernel.org/all/20230119235833.2948341-1-void@manifault.com/

Part 2: https://lore.kernel.org/bpf/20230120192523.3650503-1-void@manifault.com/

This series is based off of commit b613d335 ("bpf: Allow trusted
args to walk struct when checking BTF IDs").

Changelog:
----------
v2 -> v3:
- Rebase onto master (commit described above). Only conflict that
  required resolution was updating the task_kfunc selftest suite error
  message location.
- Put copyright onto one line in kernel/bpf/cpumask.c.
- Remove now-unneeded pid-checking logic from
  progs/nested_trust_success.c.
- Fix a couple of small grammatical typos in documentation.

v1 -> v2:
- Put back 'static' keyword in bpf_find_btf_id()
  (kernel test robot <lkp@intel.com>)
- Surround cpumask kfuncs in __diag() blocks to avoid no-prototype build
  warnings (kernel test robot <lkp@intel.com>)
- Enable ___init suffixes to a type definition to signal that a type is
  a nocast alias of another type. That is, that when passed to a kfunc
  that expects one of the two types, the verifier will reject the other
  even if they're equivalent according to the C standard (Kumar and
  Alexei)
- Reject NULL for all trusted args, not just PTR_TO_MEM (Kumar)
- Reject both NULL and PTR_MAYBE_NULL for all trusted args (Kumar and
  Alexei)
- Improve examples given in cpumask documentation (Alexei)
- Use __success macro for nested_trust test (Alexei)
- Fix comment typo in struct bpf_cpumask comment header.
- Fix another example in the bpf_cpumask doc examples.
- Add documentation for ___init suffix change mentioned above.
====================
Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
parents b613d335 027bdec8
.. SPDX-License-Identifier: GPL-2.0
.. _cpumasks-header-label:
==================
BPF cpumask kfuncs
==================
1. Introduction
===============
``struct cpumask`` is a bitmap data structure in the kernel whose indices
reflect the CPUs on the system. Commonly, cpumasks are used to track which CPUs
a task is affinitized to, but they can also be used to e.g. track which cores
are associated with a scheduling domain, which cores on a machine are idle,
etc.
BPF provides programs with a set of :ref:`kfuncs-header-label` that can be
used to allocate, mutate, query, and free cpumasks.
2. BPF cpumask objects
======================
There are two different types of cpumasks that can be used by BPF programs.
2.1 ``struct bpf_cpumask *``
----------------------------
``struct bpf_cpumask *`` is a cpumask that is allocated by BPF, on behalf of a
BPF program, and whose lifecycle is entirely controlled by BPF. These cpumasks
are RCU-protected, can be mutated, can be used as kptrs, and can be safely cast
to a ``struct cpumask *``.
2.1.1 ``struct bpf_cpumask *`` lifecycle
----------------------------------------
A ``struct bpf_cpumask *`` is allocated, acquired, and released, using the
following functions:
.. kernel-doc:: kernel/bpf/cpumask.c
:identifiers: bpf_cpumask_create
.. kernel-doc:: kernel/bpf/cpumask.c
:identifiers: bpf_cpumask_acquire
.. kernel-doc:: kernel/bpf/cpumask.c
:identifiers: bpf_cpumask_release
For example:
.. code-block:: c
struct cpumask_map_value {
struct bpf_cpumask __kptr_ref * cpumask;
};
struct array_map {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, struct cpumask_map_value);
__uint(max_entries, 65536);
} cpumask_map SEC(".maps");
static int cpumask_map_insert(struct bpf_cpumask *mask, u32 pid)
{
struct cpumask_map_value local, *v;
long status;
struct bpf_cpumask *old;
u32 key = pid;
local.cpumask = NULL;
status = bpf_map_update_elem(&cpumask_map, &key, &local, 0);
if (status) {
bpf_cpumask_release(mask);
return status;
}
v = bpf_map_lookup_elem(&cpumask_map, &key);
if (!v) {
bpf_cpumask_release(mask);
return -ENOENT;
}
old = bpf_kptr_xchg(&v->cpumask, mask);
if (old)
bpf_cpumask_release(old);
return 0;
}
/**
* A sample tracepoint showing how a task's cpumask can be queried and
* recorded as a kptr.
*/
SEC("tp_btf/task_newtask")
int BPF_PROG(record_task_cpumask, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *cpumask;
int ret;
cpumask = bpf_cpumask_create();
if (!cpumask)
return -ENOMEM;
if (!bpf_cpumask_full(task->cpus_ptr))
bpf_printk("task %s has CPU affinity", task->comm);
bpf_cpumask_copy(cpumask, task->cpus_ptr);
return cpumask_map_insert(cpumask, task->pid);
}
----
2.1.1 ``struct bpf_cpumask *`` as kptrs
---------------------------------------
As mentioned and illustrated above, these ``struct bpf_cpumask *`` objects can
also be stored in a map and used as kptrs. If a ``struct bpf_cpumask *`` is in
a map, the reference can be removed from the map with bpf_kptr_xchg(), or
opportunistically acquired with bpf_cpumask_kptr_get():
.. kernel-doc:: kernel/bpf/cpumask.c
:identifiers: bpf_cpumask_kptr_get
Here is an example of a ``struct bpf_cpumask *`` being retrieved from a map:
.. code-block:: c
/* struct containing the struct bpf_cpumask kptr which is stored in the map. */
struct cpumasks_kfunc_map_value {
struct bpf_cpumask __kptr_ref * bpf_cpumask;
};
/* The map containing struct cpumasks_kfunc_map_value entries. */
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, struct cpumasks_kfunc_map_value);
__uint(max_entries, 1);
} cpumasks_kfunc_map SEC(".maps");
/* ... */
/**
* A simple example tracepoint program showing how a
* struct bpf_cpumask * kptr that is stored in a map can
* be acquired using the bpf_cpumask_kptr_get() kfunc.
*/
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(cgrp_ancestor_example, struct cgroup *cgrp, const char *path)
{
struct bpf_cpumask *kptr;
struct cpumasks_kfunc_map_value *v;
u32 key = 0;
/* Assume a bpf_cpumask * kptr was previously stored in the map. */
v = bpf_map_lookup_elem(&cpumasks_kfunc_map, &key);
if (!v)
return -ENOENT;
/* Acquire a reference to the bpf_cpumask * kptr that's already stored in the map. */
kptr = bpf_cpumask_kptr_get(&v->bpf_cpumask);
if (!kptr)
/* If no bpf_cpumask was present in the map, it's because
* we're racing with another CPU that removed it with
* bpf_kptr_xchg() between the bpf_map_lookup_elem()
* above, and our call to bpf_cpumask_kptr_get().
* bpf_cpumask_kptr_get() internally safely handles this
* race, and will return NULL if the cpumask is no longer
* present in the map by the time we invoke the kfunc.
*/
return -EBUSY;
/* Free the reference we just took above. Note that the
* original struct bpf_cpumask * kptr is still in the map. It will
* be freed either at a later time if another context deletes
* it from the map, or automatically by the BPF subsystem if
* it's still present when the map is destroyed.
*/
bpf_cpumask_release(kptr);
return 0;
}
----
2.2 ``struct cpumask``
----------------------
``struct cpumask`` is the object that actually contains the cpumask bitmap
being queried, mutated, etc. A ``struct bpf_cpumask`` wraps a ``struct
cpumask``, which is why it's safe to cast it as such (note however that it is
**not** safe to cast a ``struct cpumask *`` to a ``struct bpf_cpumask *``, and
the verifier will reject any program that tries to do so).
As we'll see below, any kfunc that mutates its cpumask argument will take a
``struct bpf_cpumask *`` as that argument. Any argument that simply queries the
cpumask will instead take a ``struct cpumask *``.
3. cpumask kfuncs
=================
Above, we described the kfuncs that can be used to allocate, acquire, release,
etc a ``struct bpf_cpumask *``. This section of the document will describe the
kfuncs for mutating and querying cpumasks.
3.1 Mutating cpumasks
---------------------
Some cpumask kfuncs are "read-only" in that they don't mutate any of their
arguments, whereas others mutate at least one argument (which means that the
argument must be a ``struct bpf_cpumask *``, as described above).
This section will describe all of the cpumask kfuncs which mutate at least one
argument. :ref:`cpumasks-querying-label` below describes the read-only kfuncs.
3.1.1 Setting and clearing CPUs
-------------------------------
bpf_cpumask_set_cpu() and bpf_cpumask_clear_cpu() can be used to set and clear
a CPU in a ``struct bpf_cpumask`` respectively:
.. kernel-doc:: kernel/bpf/cpumask.c
:identifiers: bpf_cpumask_set_cpu bpf_cpumask_clear_cpu
These kfuncs are pretty straightforward, and can be used, for example, as
follows:
.. code-block:: c
/**
* A sample tracepoint showing how a cpumask can be queried.
*/
SEC("tp_btf/task_newtask")
int BPF_PROG(test_set_clear_cpu, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *cpumask;
cpumask = bpf_cpumask_create();
if (!cpumask)
return -ENOMEM;
bpf_cpumask_set_cpu(0, cpumask);
if (!bpf_cpumask_test_cpu(0, (const struct cpumask *)cpumask))
/* Should never happen. */
goto release_exit;
bpf_cpumask_clear_cpu(0, cpumask);
if (bpf_cpumask_test_cpu(0, (const struct cpumask *)cpumask))
/* Should never happen. */
goto release_exit;
/* struct cpumask * pointers such as task->cpus_ptr can also be queried. */
if (bpf_cpumask_test_cpu(0, task->cpus_ptr))
bpf_printk("task %s can use CPU %d", task->comm, 0);
release_exit:
bpf_cpumask_release(cpumask);
return 0;
}
----
bpf_cpumask_test_and_set_cpu() and bpf_cpumask_test_and_clear_cpu() are
complementary kfuncs that allow callers to atomically test and set (or clear)
CPUs:
.. kernel-doc:: kernel/bpf/cpumask.c
:identifiers: bpf_cpumask_test_and_set_cpu bpf_cpumask_test_and_clear_cpu
----
We can also set and clear entire ``struct bpf_cpumask *`` objects in one
operation using bpf_cpumask_setall() and bpf_cpumask_clear():
.. kernel-doc:: kernel/bpf/cpumask.c
:identifiers: bpf_cpumask_setall bpf_cpumask_clear
3.1.2 Operations between cpumasks
---------------------------------
In addition to setting and clearing individual CPUs in a single cpumask,
callers can also perform bitwise operations between multiple cpumasks using
bpf_cpumask_and(), bpf_cpumask_or(), and bpf_cpumask_xor():
.. kernel-doc:: kernel/bpf/cpumask.c
:identifiers: bpf_cpumask_and bpf_cpumask_or bpf_cpumask_xor
The following is an example of how they may be used. Note that some of the
kfuncs shown in this example will be covered in more detail below.
.. code-block:: c
/**
* A sample tracepoint showing how a cpumask can be mutated using
 * bitwise operators (and queried).
*/
SEC("tp_btf/task_newtask")
int BPF_PROG(test_and_or_xor, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
mask1 = bpf_cpumask_create();
if (!mask1)
return -ENOMEM;
mask2 = bpf_cpumask_create();
if (!mask2) {
bpf_cpumask_release(mask1);
return -ENOMEM;
}
/* ... Safely create the other two masks, dst1 and dst2 ... */
bpf_cpumask_set_cpu(0, mask1);
bpf_cpumask_set_cpu(1, mask2);
bpf_cpumask_and(dst1, (const struct cpumask *)mask1, (const struct cpumask *)mask2);
if (!bpf_cpumask_empty((const struct cpumask *)dst1))
/* Should never happen. */
goto release_exit;
bpf_cpumask_or(dst1, (const struct cpumask *)mask1, (const struct cpumask *)mask2);
if (!bpf_cpumask_test_cpu(0, (const struct cpumask *)dst1))
/* Should never happen. */
goto release_exit;
if (!bpf_cpumask_test_cpu(1, (const struct cpumask *)dst1))
/* Should never happen. */
goto release_exit;
bpf_cpumask_xor(dst2, (const struct cpumask *)mask1, (const struct cpumask *)mask2);
if (!bpf_cpumask_equal((const struct cpumask *)dst1,
(const struct cpumask *)dst2))
/* Should never happen. */
goto release_exit;
release_exit:
bpf_cpumask_release(mask1);
bpf_cpumask_release(mask2);
bpf_cpumask_release(dst1);
bpf_cpumask_release(dst2);
return 0;
}
----
The contents of an entire cpumask may be copied to another using
bpf_cpumask_copy():
.. kernel-doc:: kernel/bpf/cpumask.c
:identifiers: bpf_cpumask_copy
----
.. _cpumasks-querying-label:
3.2 Querying cpumasks
---------------------
In addition to the above kfuncs, there is also a set of read-only kfuncs that
can be used to query the contents of cpumasks.
.. kernel-doc:: kernel/bpf/cpumask.c
:identifiers: bpf_cpumask_first bpf_cpumask_first_zero bpf_cpumask_test_cpu
.. kernel-doc:: kernel/bpf/cpumask.c
:identifiers: bpf_cpumask_equal bpf_cpumask_intersects bpf_cpumask_subset
bpf_cpumask_empty bpf_cpumask_full
.. kernel-doc:: kernel/bpf/cpumask.c
:identifiers: bpf_cpumask_any bpf_cpumask_any_and
----
Some example usages of these querying kfuncs were shown above. We will not
replicate those examples here. Note, however, that all of the aforementioned
kfuncs are tested in `tools/testing/selftests/bpf/progs/cpumask_success.c`_, so
please take a look there if you're looking for more examples of how they can be
used.
.. _tools/testing/selftests/bpf/progs/cpumask_success.c:
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/tools/testing/selftests/bpf/progs/cpumask_success.c
4. Adding BPF cpumask kfuncs
============================
The set of supported BPF cpumask kfuncs are not (yet) a 1-1 match with the
cpumask operations in include/linux/cpumask.h. Any of those cpumask operations
could easily be encapsulated in a new kfunc if and when required. If you'd like
to support a new cpumask operation, please feel free to submit a patch. If you
do add a new cpumask kfunc, please document it here, and add any relevant
selftest testcases to the cpumask selftest suite.
......@@ -20,6 +20,7 @@ that goes into great technical depth about the BPF Architecture.
syscall_api
helpers
kfuncs
cpumasks
programs
maps
bpf_prog_run
......
.. SPDX-License-Identifier: GPL-2.0
.. _kfuncs-header-label:
=============================
BPF Kernel Functions (kfuncs)
=============================
......@@ -163,7 +167,8 @@ KF_ACQUIRE and KF_RET_NULL flags.
The KF_TRUSTED_ARGS flag is used for kfuncs taking pointer arguments. It
indicates that all pointer arguments are valid, and that all pointers to
BTF objects have been passed in their unmodified form (that is, at a zero
offset, and without having been obtained from walking another pointer).
offset, and without having been obtained from walking another pointer, with one
exception described below).
There are two types of pointers to kernel objects which are considered "valid":
......@@ -176,6 +181,25 @@ KF_TRUSTED_ARGS kfuncs, and may have a non-zero offset.
The definition of "valid" pointers is subject to change at any time, and has
absolutely no ABI stability guarantees.
As mentioned above, a nested pointer obtained from walking a trusted pointer is
no longer trusted, with one exception. If a struct type has a field that is
guaranteed to be valid as long as its parent pointer is trusted, the
``BTF_TYPE_SAFE_NESTED`` macro can be used to express that to the verifier as
follows:
.. code-block:: c
BTF_TYPE_SAFE_NESTED(struct task_struct) {
const cpumask_t *cpus_ptr;
};
In other words, you must:
1. Wrap the trusted pointer type in the ``BTF_TYPE_SAFE_NESTED`` macro.
2. Specify the type and name of the trusted nested field. This field must match
the field in the original type definition exactly.
2.4.6 KF_SLEEPABLE flag
-----------------------
......@@ -223,6 +247,49 @@ type. An example is shown below::
}
late_initcall(init_subsystem);
2.6 Specifying no-cast aliases with ___init
--------------------------------------------
The verifier will always enforce that the BTF type of a pointer passed to a
kfunc by a BPF program, matches the type of pointer specified in the kfunc
definition. The verifier, does, however, allow types that are equivalent
according to the C standard to be passed to the same kfunc arg, even if their
BTF_IDs differ.
For example, for the following type definition:
.. code-block:: c
struct bpf_cpumask {
cpumask_t cpumask;
refcount_t usage;
};
The verifier would allow a ``struct bpf_cpumask *`` to be passed to a kfunc
taking a ``cpumask_t *`` (which is a typedef of ``struct cpumask *``). For
instance, both ``struct cpumask *`` and ``struct bpf_cpumask *`` can be passed
to bpf_cpumask_test_cpu().
In some cases, this type-aliasing behavior is not desired. ``struct
nf_conn___init`` is one such example:
.. code-block:: c
struct nf_conn___init {
struct nf_conn ct;
};
The C standard would consider these types to be equivalent, but it would not
always be safe to pass either type to a trusted kfunc. ``struct
nf_conn___init`` represents an allocated ``struct nf_conn`` object that has
*not yet been initialized*, so it would therefore be unsafe to pass a ``struct
nf_conn___init *`` to a kfunc that's expecting a fully initialized ``struct
nf_conn *`` (e.g. ``bpf_ct_change_timeout()``).
In order to accommodate such requirements, the verifier will enforce strict
PTR_TO_BTF_ID type matching if two types have the exact same name, with one
being suffixed with ``___init``.
3. Core kfuncs
==============
......@@ -420,3 +487,10 @@ the verifier. bpf_cgroup_ancestor() can be used as follows:
bpf_cgroup_release(parent);
return 0;
}
3.3 struct cpumask * kfuncs
---------------------------
BPF provides a set of kfuncs that can be used to query, allocate, mutate, and
destroy struct cpumask * objects. Please refer to :ref:`cpumasks-header-label`
for more details.
......@@ -36,6 +36,7 @@ obj-$(CONFIG_DEBUG_INFO_BTF) += sysfs_btf.o
endif
ifeq ($(CONFIG_BPF_JIT),y)
obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o
obj-$(CONFIG_BPF_SYSCALL) += cpumask.o
obj-${CONFIG_BPF_LSM} += bpf_lsm.o
endif
obj-$(CONFIG_BPF_PRELOAD) += preload/
......
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2023 Meta, Inc */
#include <linux/bpf.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/cpumask.h>
/**
 * struct bpf_cpumask - refcounted BPF cpumask wrapper structure
 * @cpumask: The actual cpumask embedded in the struct. It must remain the
 *           first member so that a struct bpf_cpumask * can be cast to a
 *           struct cpumask * (as the kfuncs below do).
 * @usage: Object reference counter. When the refcount goes to 0, the
 * memory is released back to the BPF allocator, which provides
 * RCU safety.
 *
 * Note that we explicitly embed a cpumask_t rather than a cpumask_var_t. This
 * is done to avoid confusing the verifier due to the typedef of cpumask_var_t
 * changing depending on whether CONFIG_CPUMASK_OFFSTACK is defined or not. See
 * the details in <linux/cpumask.h>. The consequence is that this structure is
 * likely a bit larger than it needs to be when CONFIG_CPUMASK_OFFSTACK is
 * defined due to embedding the whole NR_CPUS-size bitmap, but the extra memory
 * overhead is minimal. For the more typical case of CONFIG_CPUMASK_OFFSTACK
 * not being defined, the structure is the same size regardless.
 */
struct bpf_cpumask {
	cpumask_t cpumask;
	refcount_t usage;
};

/* Allocator backing all struct bpf_cpumask objects; set up in
 * cpumask_kfunc_init() below.
 */
static struct bpf_mem_alloc bpf_cpumask_ma;
/* Bounds-check a CPU index against the number of possible CPU ids. */
static bool cpu_valid(u32 cpu)
{
	if (cpu >= nr_cpu_ids)
		return false;

	return true;
}
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
"Global kfuncs as their definitions will be in BTF");
/**
 * bpf_cpumask_create() - Create a mutable BPF cpumask.
 *
 * Allocates a zeroed cpumask that can be queried, mutated, acquired, and
 * released by a BPF program. The returned cpumask must either be embedded in a
 * map as a kptr, or freed with bpf_cpumask_release().
 *
 * Memory comes from the BPF memory allocator, so this function never blocks.
 * It may return NULL if no memory is available.
 */
struct bpf_cpumask *bpf_cpumask_create(void)
{
	struct bpf_cpumask *mask;

	mask = bpf_mem_alloc(&bpf_cpumask_ma, sizeof(*mask));
	if (!mask)
		return NULL;

	memset(mask, 0, sizeof(*mask));
	refcount_set(&mask->usage, 1);

	return mask;
}
/**
 * bpf_cpumask_acquire() - Acquire a reference to a BPF cpumask.
 * @cpumask: The BPF cpumask being acquired. The cpumask must be a trusted
 * pointer.
 *
 * Takes an additional reference on @cpumask. The returned cpumask must either
 * be embedded in a map as a kptr, or freed with bpf_cpumask_release().
 */
struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
{
	/* The caller holds a trusted pointer, so usage is already nonzero. */
	refcount_inc(&cpumask->usage);

	return cpumask;
}
/**
 * bpf_cpumask_kptr_get() - Attempt to acquire a reference to a BPF cpumask
 * stored in a map.
 * @cpumaskp: A pointer to a BPF cpumask map value.
 *
 * Attempts to acquire a reference to a BPF cpumask stored in a map value. The
 * cpumask returned by this function must either be embedded in a map as a
 * kptr, or freed with bpf_cpumask_release(). This function may return NULL if
 * no BPF cpumask was found in the specified map value.
 */
struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
{
	struct bpf_cpumask *cpumask;

	/* The BPF memory allocator frees memory backing its caches in an RCU
	 * callback. Thus, we can safely use RCU to ensure that the cpumask is
	 * safe to read.
	 */
	rcu_read_lock();

	/* READ_ONCE() because another CPU may concurrently swap the kptr out
	 * of the map with bpf_kptr_xchg().
	 */
	cpumask = READ_ONCE(*cpumaskp);

	/* refcount_inc_not_zero() fails if the final reference was already
	 * dropped; in that case the cpumask is being freed and we must not
	 * hand it out.
	 */
	if (cpumask && !refcount_inc_not_zero(&cpumask->usage))
		cpumask = NULL;

	rcu_read_unlock();

	return cpumask;
}
/**
 * bpf_cpumask_release() - Release a previously acquired BPF cpumask.
 * @cpumask: The cpumask being released. May be NULL, in which case this is a
 *           no-op.
 *
 * Releases a previously acquired reference to a BPF cpumask. When the final
 * reference of the BPF cpumask has been released, it is subsequently freed in
 * an RCU callback in the BPF memory allocator.
 */
void bpf_cpumask_release(struct bpf_cpumask *cpumask)
{
	if (!cpumask)
		return;

	if (refcount_dec_and_test(&cpumask->usage)) {
		/* NOTE(review): migration is disabled around bpf_mem_free();
		 * presumably required by the per-CPU BPF allocator — confirm
		 * against bpf_mem_alloc documentation.
		 */
		migrate_disable();
		bpf_mem_free(&bpf_cpumask_ma, cpumask);
		migrate_enable();
	}
}
/**
 * bpf_cpumask_first() - Get the index of the first nonzero bit in the cpumask.
 * @cpumask: The cpumask being queried.
 *
 * Returns the index of the first set bit of @cpumask. A struct bpf_cpumask
 * pointer may be safely passed to this function.
 */
u32 bpf_cpumask_first(const struct cpumask *cpumask)
{
	u32 cpu;

	cpu = cpumask_first(cpumask);

	return cpu;
}
/**
 * bpf_cpumask_first_zero() - Get the index of the first unset bit in the
 * cpumask.
 * @cpumask: The cpumask being queried.
 *
 * Returns the index of the first cleared bit of @cpumask. A struct bpf_cpumask
 * pointer may be safely passed to this function.
 */
u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
{
	u32 cpu;

	cpu = cpumask_first_zero(cpumask);

	return cpu;
}
/**
 * bpf_cpumask_set_cpu() - Set a bit for a CPU in a BPF cpumask.
 * @cpu: The CPU to be set in the cpumask. Out-of-range values are ignored.
 * @cpumask: The BPF cpumask in which a bit is being set.
 */
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	struct cpumask *mask = (struct cpumask *)cpumask;

	if (!cpu_valid(cpu))
		return;

	cpumask_set_cpu(cpu, mask);
}
/**
 * bpf_cpumask_clear_cpu() - Clear a bit for a CPU in a BPF cpumask.
 * @cpu: The CPU to be cleared from the cpumask. Out-of-range values are
 *       ignored.
 * @cpumask: The BPF cpumask in which a bit is being cleared.
 */
void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	struct cpumask *mask = (struct cpumask *)cpumask;

	if (!cpu_valid(cpu))
		return;

	cpumask_clear_cpu(cpu, mask);
}
/**
 * bpf_cpumask_test_cpu() - Test whether a CPU is set in a cpumask.
 * @cpu: The CPU being queried for.
 * @cpumask: The cpumask being queried for containing a CPU.
 *
 * Return:
 * * true - @cpu is set in the cpumask
 * * false - @cpu was not set in the cpumask, or @cpu is an invalid cpu.
 */
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
{
	if (cpu_valid(cpu))
		return cpumask_test_cpu(cpu, (struct cpumask *)cpumask);

	return false;
}
/**
 * bpf_cpumask_test_and_set_cpu() - Atomically test and set a CPU in a BPF cpumask.
 * @cpu: The CPU being set and queried for.
 * @cpumask: The BPF cpumask being set and queried for containing a CPU.
 *
 * Return:
 * * true - @cpu is set in the cpumask
 * * false - @cpu was not set in the cpumask, or @cpu is invalid.
 */
bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	struct cpumask *mask = (struct cpumask *)cpumask;

	if (!cpu_valid(cpu))
		return false;

	return cpumask_test_and_set_cpu(cpu, mask);
}
/**
 * bpf_cpumask_test_and_clear_cpu() - Atomically test and clear a CPU in a BPF
 * cpumask.
 * @cpu: The CPU being cleared and queried for.
 * @cpumask: The BPF cpumask being cleared and queried for containing a CPU.
 *
 * Return:
 * * true - @cpu is set in the cpumask
 * * false - @cpu was not set in the cpumask, or @cpu is invalid.
 */
bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	struct cpumask *mask = (struct cpumask *)cpumask;

	if (!cpu_valid(cpu))
		return false;

	return cpumask_test_and_clear_cpu(cpu, mask);
}
/**
 * bpf_cpumask_setall() - Set all of the bits in a BPF cpumask.
 * @cpumask: The BPF cpumask having all of its bits set.
 */
void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
{
	struct cpumask *mask = (struct cpumask *)cpumask;

	cpumask_setall(mask);
}
/**
 * bpf_cpumask_clear() - Clear all of the bits in a BPF cpumask.
 * @cpumask: The BPF cpumask being cleared.
 */
void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
{
	struct cpumask *mask = (struct cpumask *)cpumask;

	cpumask_clear(mask);
}
/**
 * bpf_cpumask_and() - AND two cpumasks and store the result.
 * @dst: The BPF cpumask where the result is being stored.
 * @src1: The first input.
 * @src2: The second input.
 *
 * Return:
 * * true - @dst has at least one bit set following the operation
 * * false - @dst is empty following the operation
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
bool bpf_cpumask_and(struct bpf_cpumask *dst,
		     const struct cpumask *src1,
		     const struct cpumask *src2)
{
	struct cpumask *out = (struct cpumask *)dst;

	return cpumask_and(out, src1, src2);
}
/**
 * bpf_cpumask_or() - OR two cpumasks and store the result.
 * @dst: The BPF cpumask where the result is being stored.
 * @src1: The first input.
 * @src2: The second input.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
void bpf_cpumask_or(struct bpf_cpumask *dst,
		    const struct cpumask *src1,
		    const struct cpumask *src2)
{
	struct cpumask *out = (struct cpumask *)dst;

	cpumask_or(out, src1, src2);
}
/**
 * bpf_cpumask_xor() - XOR two cpumasks and store the result.
 * @dst: The BPF cpumask where the result is being stored.
 * @src1: The first input.
 * @src2: The second input.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
void bpf_cpumask_xor(struct bpf_cpumask *dst,
		     const struct cpumask *src1,
		     const struct cpumask *src2)
{
	struct cpumask *out = (struct cpumask *)dst;

	cpumask_xor(out, src1, src2);
}
/**
 * bpf_cpumask_equal() - Check two cpumasks for equality.
 * @src1: The first input.
 * @src2: The second input.
 *
 * Return:
 * * true - @src1 and @src2 have the same bits set.
 * * false - @src1 and @src2 differ in at least one bit.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
{
	bool eq;

	eq = cpumask_equal(src1, src2);

	return eq;
}
/**
 * bpf_cpumask_intersects() - Check two cpumasks for overlap.
 * @src1: The first input.
 * @src2: The second input.
 *
 * Return:
 * * true - @src1 and @src2 have at least one of the same bits set.
 * * false - @src1 and @src2 don't have any of the same bits set.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2)
{
	bool overlaps;

	overlaps = cpumask_intersects(src1, src2);

	return overlaps;
}
/**
 * bpf_cpumask_subset() - Check if a cpumask is a subset of another.
 * @src1: The first cpumask being checked as a subset.
 * @src2: The second cpumask being checked as a superset.
 *
 * Return:
 * * true - All of the bits of @src1 are set in @src2.
 * * false - At least one bit in @src1 is not set in @src2.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
{
	bool contained;

	contained = cpumask_subset(src1, src2);

	return contained;
}
/**
 * bpf_cpumask_empty() - Check if a cpumask is empty.
 * @cpumask: The cpumask being checked.
 *
 * Return:
 * * true - None of the bits in @cpumask are set.
 * * false - At least one bit in @cpumask is set.
 *
 * A struct bpf_cpumask pointer may be safely passed to @cpumask.
 */
bool bpf_cpumask_empty(const struct cpumask *cpumask)
{
	bool none_set;

	none_set = cpumask_empty(cpumask);

	return none_set;
}
/**
 * bpf_cpumask_full() - Check if a cpumask has all bits set.
 * @cpumask: The cpumask being checked.
 *
 * Return:
 * * true - All of the bits in @cpumask are set.
 * * false - At least one bit in @cpumask is cleared.
 *
 * A struct bpf_cpumask pointer may be safely passed to @cpumask.
 */
bool bpf_cpumask_full(const struct cpumask *cpumask)
{
	bool all_set;

	all_set = cpumask_full(cpumask);

	return all_set;
}
/**
 * bpf_cpumask_copy() - Copy the contents of a cpumask into a BPF cpumask.
 * @dst: The BPF cpumask being copied into.
 * @src: The cpumask being copied.
 *
 * A struct bpf_cpumask pointer may be safely passed to @src.
 */
void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
{
	struct cpumask *out = (struct cpumask *)dst;

	cpumask_copy(out, src);
}
/**
 * bpf_cpumask_any() - Return a random set CPU from a cpumask.
 * @cpumask: The cpumask being queried.
 *
 * Return:
 * * A random set bit within [0, num_cpus) if at least one bit is set.
 * * >= num_cpus if no bit is set.
 *
 * A struct bpf_cpumask pointer may be safely passed to @cpumask.
 */
u32 bpf_cpumask_any(const struct cpumask *cpumask)
{
	return cpumask_any(cpumask);
}
/**
 * bpf_cpumask_any_and() - Return a random set CPU from the AND of two
 * cpumasks.
 * @src1: The first cpumask.
 * @src2: The second cpumask.
 *
 * Return:
 * * A random set bit within [0, num_cpus) if at least one bit is set.
 * * >= num_cpus if no bit is set.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2)
{
	u32 cpu;

	cpu = cpumask_any_and(src1, src2);

	return cpu;
}
__diag_pop();
/* kfuncs exposed to BPF programs. Mutating kfuncs take a struct bpf_cpumask *
 * argument and are flagged KF_TRUSTED_ARGS; read-only queries take a
 * const struct cpumask *. bpf_cpumask_create() and the kptr accessors may
 * return NULL, hence KF_RET_NULL.
 */
BTF_SET8_START(cpumask_kfunc_btf_ids)
BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cpumask_first, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_first_zero, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_set_cpu, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_clear_cpu, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_test_cpu, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_test_and_set_cpu, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_test_and_clear_cpu, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_setall, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_clear, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_and, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_or, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_xor, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_equal, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_intersects, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_subset, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_empty, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_full, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_any, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_any_and, KF_TRUSTED_ARGS)
BTF_SET8_END(cpumask_kfunc_btf_ids)

static const struct btf_kfunc_id_set cpumask_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &cpumask_kfunc_btf_ids,
};

/* [0] is the BTF ID of struct bpf_cpumask, [1] is the destructor kfunc;
 * both are consumed by cpumask_dtors in cpumask_kfunc_init() below.
 */
BTF_ID_LIST(cpumask_dtor_ids)
BTF_ID(struct, bpf_cpumask)
BTF_ID(func, bpf_cpumask_release)
/* Initialize the cpumask allocator, register the kfunc set for tracing and
 * struct_ops programs, and register the kptr destructor. Stops at the first
 * failing step and returns its error code.
 */
static int __init cpumask_kfunc_init(void)
{
	int ret;
	const struct btf_id_dtor_kfunc cpumask_dtors[] = {
		{
			.btf_id = cpumask_dtor_ids[0],
			.kfunc_btf_id = cpumask_dtor_ids[1]
		},
	};

	ret = bpf_mem_alloc_init(&bpf_cpumask_ma, 0, false);
	if (!ret)
		ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &cpumask_kfunc_set);
	if (!ret)
		ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &cpumask_kfunc_set);
	if (!ret)
		ret = register_btf_id_dtor_kfuncs(cpumask_dtors,
						  ARRAY_SIZE(cpumask_dtors),
						  THIS_MODULE);

	return ret;
}

late_initcall(cpumask_kfunc_init);
......@@ -9194,6 +9194,12 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
return -EINVAL;
}
if (is_kfunc_trusted_args(meta) &&
(register_is_null(reg) || type_may_be_null(reg->type))) {
verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i);
return -EACCES;
}
if (reg->ref_obj_id) {
if (is_kfunc_release(meta) && meta->ref_obj_id) {
verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
......
......@@ -13,6 +13,7 @@ cgroup_hierarchical_stats # JIT does not support calling kernel f
cgrp_kfunc # JIT does not support calling kernel function
cgrp_local_storage # prog_attach unexpected error: -524 (trampoline)
core_read_macros # unknown func bpf_probe_read#4 (overlapping)
cpumask # JIT does not support calling kernel function
d_path # failed to auto-attach program 'prog_stat': -524 (trampoline)
decap_sanity # JIT does not support calling kernel function (kfunc)
deny_namespace # failed to attach: ERROR: strerror_r(-524)=22 (trampoline)
......@@ -44,6 +45,7 @@ map_kptr # failed to open_and_load program: -524
modify_return # modify_return attach failed: -524 (trampoline)
module_attach # skel_attach skeleton attach failed: -524 (trampoline)
mptcp
nested_trust # JIT does not support calling kernel function
netcnt # failed to load BPF skeleton 'netcnt_prog': -7 (?)
probe_user # check_kprobe_res wrong kprobe res from probe read (?)
rcu_read_lock # failed to find kernel BTF type ID of '__x64_sys_getpgid': -3 (?)
......
......@@ -93,11 +93,11 @@ static struct {
const char *prog_name;
const char *expected_err_msg;
} failure_tests[] = {
{"cgrp_kfunc_acquire_untrusted", "R1 must be referenced or trusted"},
{"cgrp_kfunc_acquire_untrusted", "Possibly NULL pointer passed to trusted arg0"},
{"cgrp_kfunc_acquire_fp", "arg#0 pointer type STRUCT cgroup must point"},
{"cgrp_kfunc_acquire_unsafe_kretprobe", "reg type unsupported for arg#0 function"},
{"cgrp_kfunc_acquire_trusted_walked", "R1 must be referenced or trusted"},
{"cgrp_kfunc_acquire_null", "arg#0 pointer type STRUCT cgroup must point"},
{"cgrp_kfunc_acquire_null", "Possibly NULL pointer passed to trusted arg0"},
{"cgrp_kfunc_acquire_unreleased", "Unreleased reference"},
{"cgrp_kfunc_get_non_kptr_param", "arg#0 expected pointer to map value"},
{"cgrp_kfunc_get_non_kptr_acquired", "arg#0 expected pointer to map value"},
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include "cpumask_failure.skel.h"
#include "cpumask_success.skel.h"
/* Names of the BPF programs in cpumask_success.c that are expected to
 * load, attach, and run without setting the err global. Each entry is
 * executed as its own subtest by test_cpumask().
 */
static const char * const cpumask_success_testcases[] = {
	"test_alloc_free_cpumask",
	"test_set_clear_cpu",
	"test_setall_clear_cpu",
	"test_first_firstzero_cpu",
	"test_test_and_set_clear",
	"test_and_or_xor",
	"test_intersects_subset",
	"test_copy_any_anyand",
	"test_insert_leave",
	"test_insert_remove_release",
	"test_insert_kptr_get_release",
};
/* Open and load the cpumask_success skeleton, attach the named program,
 * trigger the tp_btf/task_newtask tracepoint by forking a child, and
 * verify that the BPF program reported no error via its err global.
 */
static void verify_success(const char *prog_name)
{
	struct cpumask_success *skel;
	struct bpf_program *prog;
	struct bpf_link *link = NULL;
	pid_t child_pid;
	int status;

	skel = cpumask_success__open();
	if (!ASSERT_OK_PTR(skel, "cpumask_success__open"))
		return;

	skel->bss->pid = getpid();
	skel->bss->nr_cpus = libbpf_num_possible_cpus();

	/* Check the int return of load directly: the skeleton pointer stays
	 * valid even when loading fails, so re-checking it with
	 * ASSERT_OK_PTR() (as before) silently passed on load failure.
	 */
	if (!ASSERT_OK(cpumask_success__load(skel), "cpumask_success__load"))
		goto cleanup;

	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
		goto cleanup;

	link = bpf_program__attach(prog);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
		goto cleanup;

	/* Fork so task_newtask fires with this process as the parent. */
	child_pid = fork();
	if (!ASSERT_GT(child_pid, -1, "child_pid"))
		goto cleanup;
	if (child_pid == 0)
		_exit(0);
	waitpid(child_pid, &status, 0);
	ASSERT_OK(skel->bss->err, "post_wait_err");

cleanup:
	bpf_link__destroy(link);
	cpumask_success__destroy(skel);
}
void test_cpumask(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(cpumask_success_testcases); i++) {
if (!test__start_subtest(cpumask_success_testcases[i]))
continue;
verify_success(cpumask_success_testcases[i]);
}
RUN_TESTS(cpumask_failure);
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include "nested_trust_failure.skel.h"
#include "nested_trust_success.skel.h"
/* Entry point for the nested-trust tests. Both skeletons encode their
 * own pass/fail expectations via __success / __failure __msg()
 * annotations, so RUN_TESTS() performs all of the verification.
 */
void test_nested_trust(void)
{
	RUN_TESTS(nested_trust_success);
	RUN_TESTS(nested_trust_failure);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#ifndef _CPUMASK_COMMON_H
#define _CPUMASK_COMMON_H
#include "errno.h"
#include <stdbool.h>
/* Set to a non-zero value by the BPF programs to signal a failure that
 * user space checks after the test run.
 */
int err;

/* Map value wrapping a referenced kptr to a struct bpf_cpumask. */
struct __cpumask_map_value {
	struct bpf_cpumask __kptr_ref * cpumask;
};

/* Single-entry array map used to stash bpf_cpumask kptrs. */
struct array_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);
	__type(value, struct __cpumask_map_value);
	__uint(max_entries, 1);
} __cpumask_map SEC(".maps");
struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumask) __ksym;
u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym;
u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_and(struct bpf_cpumask *cpumask,
const struct cpumask *src1,
const struct cpumask *src2) __ksym;
void bpf_cpumask_or(struct bpf_cpumask *cpumask,
const struct cpumask *src1,
const struct cpumask *src2) __ksym;
void bpf_cpumask_xor(struct bpf_cpumask *cpumask,
const struct cpumask *src1,
const struct cpumask *src2) __ksym;
bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym;
u32 bpf_cpumask_any(const struct cpumask *src) __ksym;
u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2) __ksym;
/* Reinterpret a struct bpf_cpumask pointer as a read-only
 * struct cpumask pointer for the const-cpumask kfuncs.
 */
static inline const struct cpumask *cast(struct bpf_cpumask *cpumask)
{
	const struct cpumask *mask = (const struct cpumask *)cpumask;

	return mask;
}
/* Allocate a fresh bpf_cpumask and sanity-check that it starts empty.
 * On failure, records the failure in the global err and returns NULL.
 */
static inline struct bpf_cpumask *create_cpumask(void)
{
	struct bpf_cpumask *mask = bpf_cpumask_create();

	if (!mask) {
		err = 1;
		return NULL;
	}

	if (bpf_cpumask_empty(cast(mask)))
		return mask;

	/* A freshly created cpumask must have no bits set. */
	err = 2;
	bpf_cpumask_release(mask);
	return NULL;
}
/* Look up the single entry (key 0) of __cpumask_map. */
static inline struct __cpumask_map_value *cpumask_map_value_lookup(void)
{
	u32 zero = 0;

	return bpf_map_lookup_elem(&__cpumask_map, &zero);
}
static inline int cpumask_map_insert(struct bpf_cpumask *mask)
{
struct __cpumask_map_value local, *v;
long status;
struct bpf_cpumask *old;
u32 key = 0;
local.cpumask = NULL;
status = bpf_map_update_elem(&__cpumask_map, &key, &local, 0);
if (status) {
bpf_cpumask_release(mask);
return status;
}
v = bpf_map_lookup_elem(&__cpumask_map, &key);
if (!v) {
bpf_cpumask_release(mask);
return -ENOENT;
}
old = bpf_kptr_xchg(&v->cpumask, mask);
if (old) {
bpf_cpumask_release(old);
return -EEXIST;
}
return 0;
}
#endif /* _CPUMASK_COMMON_H */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "cpumask_common.h"
char _license[] SEC("license") = "GPL";
/* Prototype for all of the program trace events below:
*
* TRACE_EVENT(task_newtask,
* TP_PROTO(struct task_struct *p, u64 clone_flags)
*/
/* Every program below is deliberately broken: the __failure __msg()
 * annotations assert that the verifier rejects it with the given
 * message. Do not "fix" these programs.
 */

/* Leaks the acquired cpumask reference. */
SEC("tp_btf/task_newtask")
__failure __msg("Unreleased reference")
int BPF_PROG(test_alloc_no_release, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	cpumask = create_cpumask();

	/* cpumask is never released. */
	return 0;
}

/* Releases the same reference twice; the second release sees a
 * possibly-NULL pointer after the first release invalidated it.
 */
SEC("tp_btf/task_newtask")
__failure __msg("NULL pointer passed to trusted arg0")
int BPF_PROG(test_alloc_double_release, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	cpumask = create_cpumask();

	/* cpumask is released twice. */
	bpf_cpumask_release(cpumask);
	bpf_cpumask_release(cpumask);

	return 0;
}

/* Passes a plain struct cpumask where a struct bpf_cpumask is required;
 * the cast must not fool the verifier's BTF type check.
 */
SEC("tp_btf/task_newtask")
__failure __msg("bpf_cpumask_acquire args#0 expected pointer to STRUCT bpf_cpumask")
int BPF_PROG(test_acquire_wrong_cpumask, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	/* Can't acquire a non-struct bpf_cpumask. */
	cpumask = bpf_cpumask_acquire((struct bpf_cpumask *)task->cpus_ptr);

	return 0;
}

/* Attempts to mutate a read-only kernel cpumask via the same cast. */
SEC("tp_btf/task_newtask")
__failure __msg("bpf_cpumask_set_cpu args#1 expected pointer to STRUCT bpf_cpumask")
int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	/* Can't set the CPU of a non-struct bpf_cpumask. */
	bpf_cpumask_set_cpu(0, (struct bpf_cpumask *)task->cpus_ptr);

	return 0;
}

/* Takes the mask back out of the map with bpf_kptr_xchg() but leaks
 * the returned reference.
 */
SEC("tp_btf/task_newtask")
__failure __msg("Unreleased reference")
int BPF_PROG(test_insert_remove_no_release, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;
	struct __cpumask_map_value *v;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (cpumask_map_insert(cpumask))
		return 0;

	v = cpumask_map_value_lookup();
	if (!v)
		return 0;

	cpumask = bpf_kptr_xchg(&v->cpumask, NULL);

	/* cpumask is never released. */
	return 0;
}

/* Acquires an extra reference via bpf_cpumask_kptr_get() and leaks it. */
SEC("tp_btf/task_newtask")
__failure __msg("Unreleased reference")
int BPF_PROG(test_kptr_get_no_release, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;
	struct __cpumask_map_value *v;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (cpumask_map_insert(cpumask))
		return 0;

	v = cpumask_map_value_lookup();
	if (!v)
		return 0;

	cpumask = bpf_cpumask_kptr_get(&v->cpumask);

	/* cpumask is never released. */
	return 0;
}

/* Passes a literal NULL to a KF_TRUSTED_ARGS kfunc. */
SEC("tp_btf/task_newtask")
__failure __msg("NULL pointer passed to trusted arg0")
int BPF_PROG(test_cpumask_null, struct task_struct *task, u64 clone_flags)
{
	/* NULL passed to KF_TRUSTED_ARGS kfunc. */
	bpf_cpumask_empty(NULL);

	return 0;
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "cpumask_common.h"
char _license[] SEC("license") = "GPL";
int pid, nr_cpus;
/* Return true iff the current task belongs to the user-space test
 * runner (its tgid was written into the pid global before attach).
 */
static bool is_test_task(void)
{
	int current_pid = bpf_get_current_pid_tgid() >> 32;

	return current_pid == pid;
}
/* Allocate four cpumasks for the multi-mask tests. Returns true and
 * fills all four out-pointers on success. On failure, releases every
 * mask allocated so far, records a distinct err code identifying which
 * allocation failed, and returns false — so callers never release
 * anything after a false return.
 */
static bool create_cpumask_set(struct bpf_cpumask **out1,
			       struct bpf_cpumask **out2,
			       struct bpf_cpumask **out3,
			       struct bpf_cpumask **out4)
{
	struct bpf_cpumask *mask1, *mask2, *mask3, *mask4;

	mask1 = create_cpumask();
	if (!mask1)
		return false;

	mask2 = create_cpumask();
	if (!mask2) {
		bpf_cpumask_release(mask1);
		err = 3;
		return false;
	}

	mask3 = create_cpumask();
	if (!mask3) {
		bpf_cpumask_release(mask1);
		bpf_cpumask_release(mask2);
		err = 4;
		return false;
	}

	mask4 = create_cpumask();
	if (!mask4) {
		bpf_cpumask_release(mask1);
		bpf_cpumask_release(mask2);
		bpf_cpumask_release(mask3);
		err = 5;
		return false;
	}

	*out1 = mask1;
	*out2 = mask2;
	*out3 = mask3;
	*out4 = mask4;

	return true;
}
/* Simplest lifecycle test: allocate a cpumask and release it. */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_alloc_free_cpumask, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	bpf_cpumask_release(cpumask);
	return 0;
}

/* Verify that setting then clearing CPU 0 is observable via
 * bpf_cpumask_test_cpu().
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_set_clear_cpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	bpf_cpumask_set_cpu(0, cpumask);
	if (!bpf_cpumask_test_cpu(0, cast(cpumask))) {
		err = 3;
		goto release_exit;
	}

	bpf_cpumask_clear_cpu(0, cpumask);
	if (bpf_cpumask_test_cpu(0, cast(cpumask))) {
		err = 4;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

/* Verify bpf_cpumask_setall() yields a full mask and
 * bpf_cpumask_clear() yields an empty one.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_setall_clear_cpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	bpf_cpumask_setall(cpumask);
	if (!bpf_cpumask_full(cast(cpumask))) {
		err = 3;
		goto release_exit;
	}

	bpf_cpumask_clear(cpumask);
	if (!bpf_cpumask_empty(cast(cpumask))) {
		err = 4;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

/* Verify bpf_cpumask_first() / bpf_cpumask_first_zero() on an empty
 * mask (first returns >= nr_cpus, first_zero returns 0), then after
 * setting CPU 0 (first returns 0, first_zero returns 1).
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_first_firstzero_cpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (bpf_cpumask_first(cast(cpumask)) < nr_cpus) {
		err = 3;
		goto release_exit;
	}

	if (bpf_cpumask_first_zero(cast(cpumask)) != 0) {
		bpf_printk("first zero: %d", bpf_cpumask_first_zero(cast(cpumask)));
		err = 4;
		goto release_exit;
	}

	bpf_cpumask_set_cpu(0, cpumask);
	if (bpf_cpumask_first(cast(cpumask)) != 0) {
		err = 5;
		goto release_exit;
	}

	if (bpf_cpumask_first_zero(cast(cpumask)) != 1) {
		err = 6;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

/* Verify the test-and-set / test-and-clear kfuncs return the bit's
 * previous value: first set returns false, second returns true, and
 * clearing a set bit returns true.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_test_and_set_clear, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (bpf_cpumask_test_and_set_cpu(0, cpumask)) {
		err = 3;
		goto release_exit;
	}

	if (!bpf_cpumask_test_and_set_cpu(0, cpumask)) {
		err = 4;
		goto release_exit;
	}

	if (!bpf_cpumask_test_and_clear_cpu(0, cpumask)) {
		err = 5;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}
/* Verify AND of disjoint masks is empty, OR contains both set bits,
 * and XOR of disjoint masks equals their OR.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_and_or_xor, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;

	if (!is_test_task())
		return 0;

	if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
		return 0;

	bpf_cpumask_set_cpu(0, mask1);
	bpf_cpumask_set_cpu(1, mask2);

	/* bpf_cpumask_and() returns true iff the result is non-empty. */
	if (bpf_cpumask_and(dst1, cast(mask1), cast(mask2))) {
		err = 6;
		goto release_exit;
	}
	if (!bpf_cpumask_empty(cast(dst1))) {
		err = 7;
		goto release_exit;
	}

	bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
	if (!bpf_cpumask_test_cpu(0, cast(dst1))) {
		err = 8;
		goto release_exit;
	}
	if (!bpf_cpumask_test_cpu(1, cast(dst1))) {
		err = 9;
		goto release_exit;
	}

	bpf_cpumask_xor(dst2, cast(mask1), cast(mask2));
	if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) {
		err = 10;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(mask1);
	bpf_cpumask_release(mask2);
	bpf_cpumask_release(dst1);
	bpf_cpumask_release(dst2);
	return 0;
}

/* Verify disjoint masks don't intersect, each operand is a subset of
 * their OR, and the OR is not a subset of a single operand.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_intersects_subset, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;

	if (!is_test_task())
		return 0;

	if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
		return 0;

	bpf_cpumask_set_cpu(0, mask1);
	bpf_cpumask_set_cpu(1, mask2);
	if (bpf_cpumask_intersects(cast(mask1), cast(mask2))) {
		err = 6;
		goto release_exit;
	}

	bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
	if (!bpf_cpumask_subset(cast(mask1), cast(dst1))) {
		err = 7;
		goto release_exit;
	}

	if (!bpf_cpumask_subset(cast(mask2), cast(dst1))) {
		err = 8;
		goto release_exit;
	}

	if (bpf_cpumask_subset(cast(dst1), cast(mask1))) {
		err = 9;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(mask1);
	bpf_cpumask_release(mask2);
	bpf_cpumask_release(dst1);
	bpf_cpumask_release(dst2);
	return 0;
}

/* Verify bpf_cpumask_any() (>= nr_cpus when empty, a set bit
 * otherwise), bpf_cpumask_copy(), and bpf_cpumask_any_and() on
 * disjoint masks (>= nr_cpus).
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
	u32 cpu;

	if (!is_test_task())
		return 0;

	if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
		return 0;

	bpf_cpumask_set_cpu(0, mask1);
	bpf_cpumask_set_cpu(1, mask2);
	bpf_cpumask_or(dst1, cast(mask1), cast(mask2));

	cpu = bpf_cpumask_any(cast(mask1));
	if (cpu != 0) {
		err = 6;
		goto release_exit;
	}

	/* dst2 is still empty, so "any" must be out of range. */
	cpu = bpf_cpumask_any(cast(dst2));
	if (cpu < nr_cpus) {
		err = 7;
		goto release_exit;
	}

	bpf_cpumask_copy(dst2, cast(dst1));
	if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) {
		err = 8;
		goto release_exit;
	}

	cpu = bpf_cpumask_any(cast(dst2));
	if (cpu > 1) {
		err = 9;
		goto release_exit;
	}

	cpu = bpf_cpumask_any_and(cast(mask1), cast(mask2));
	if (cpu < nr_cpus) {
		err = 10;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(mask1);
	bpf_cpumask_release(mask2);
	bpf_cpumask_release(dst1);
	bpf_cpumask_release(dst2);
	return 0;
}
/* Stash a cpumask in the map and deliberately leave it there: the
 * map's kptr destructor is responsible for releasing it. On failure,
 * cpumask_map_insert() has already dropped the reference.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	/* Dropped the unused `struct __cpumask_map_value *v;` local, which
	 * triggered a -Wunused-variable warning.
	 */
	if (cpumask_map_insert(cpumask))
		err = 3;

	return 0;
}
/* Insert a cpumask into the map, swap it back out with
 * bpf_kptr_xchg(), and release the returned reference.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_insert_remove_release, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;
	struct __cpumask_map_value *v;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (cpumask_map_insert(cpumask)) {
		err = 3;
		return 0;
	}

	v = cpumask_map_value_lookup();
	if (!v) {
		err = 4;
		return 0;
	}

	cpumask = bpf_kptr_xchg(&v->cpumask, NULL);
	if (cpumask)
		bpf_cpumask_release(cpumask);
	else
		err = 5;

	return 0;
}

/* Insert a cpumask into the map, take an extra reference on it with
 * bpf_cpumask_kptr_get() (leaving the kptr in place), and release that
 * extra reference.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_insert_kptr_get_release, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;
	struct __cpumask_map_value *v;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (cpumask_map_insert(cpumask)) {
		err = 3;
		return 0;
	}

	v = cpumask_map_value_lookup();
	if (!v) {
		err = 4;
		return 0;
	}

	cpumask = bpf_cpumask_kptr_get(&v->cpumask);
	if (cpumask)
		bpf_cpumask_release(cpumask);
	else
		err = 5;

	return 0;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#ifndef _NESTED_TRUST_COMMON_H
#define _NESTED_TRUST_COMMON_H
#include <stdbool.h>
bool bpf_cpumask_test_cpu(unsigned int cpu, const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
#endif /* _NESTED_TRUST_COMMON_H */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "nested_trust_common.h"
char _license[] SEC("license") = "GPL";
/* Prototype for all of the program trace events below:
*
* TRACE_EVENT(task_newtask,
* TP_PROTO(struct task_struct *p, u64 clone_flags)
*/
/* Both programs below are deliberately invalid: __failure __msg()
 * asserts the verifier rejects them with the given message.
 */

/* task->user_cpus_ptr is a plain pointer field, not a trusted nested
 * pointer, so it must be rejected by a KF_TRUSTED_ARGS kfunc.
 */
SEC("tp_btf/task_newtask")
__failure __msg("R2 must be referenced or trusted")
int BPF_PROG(test_invalid_nested_user_cpus, struct task_struct *task, u64 clone_flags)
{
	bpf_cpumask_test_cpu(0, task->user_cpus_ptr);
	return 0;
}

/* &task->cpus_mask is a pointer at a non-zero offset into the task,
 * which trusted args must reject.
 */
SEC("tp_btf/task_newtask")
__failure __msg("R1 must have zero offset when passed to release func or trusted arg to kfunc")
int BPF_PROG(test_invalid_nested_offset, struct task_struct *task, u64 clone_flags)
{
	bpf_cpumask_first_zero(&task->cpus_mask);
	return 0;
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "nested_trust_common.h"
char _license[] SEC("license") = "GPL";
/* task->cpus_ptr is a trusted nested pointer, so passing it to a
 * KF_TRUSTED_ARGS kfunc must be accepted (__success).
 */
SEC("tp_btf/task_newtask")
__success
int BPF_PROG(test_read_cpumask, struct task_struct *task, u64 clone_flags)
{
	bpf_cpumask_test_cpu(0, task->cpus_ptr);
	return 0;
}
......@@ -28,7 +28,7 @@ static struct __tasks_kfunc_map_value *insert_lookup_task(struct task_struct *ta
}
SEC("tp_btf/task_newtask")
__failure __msg("R1 must be referenced or trusted")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
......@@ -86,7 +86,7 @@ int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 cl
SEC("tp_btf/task_newtask")
__failure __msg("arg#0 pointer type STRUCT task_struct must point")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
int BPF_PROG(task_kfunc_acquire_null, struct task_struct *task, u64 clone_flags)
{
struct task_struct *acquired;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment