Commit d0858cbd authored by Linus Torvalds

Merge tag 'overflow-v5.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull overflow updates from Kees Cook:
 "These changes come in roughly two halves: support of Gustavo A. R.
  Silva's struct_size() work via additional helpers for catching
  overflow allocation size calculations, and conversions of selftests to
  KUnit (which includes some tweaks for UML + Clang):

   - Convert overflow selftest to KUnit

   - Convert stackinit selftest to KUnit

   - Implement size_t saturating arithmetic helpers

   - Allow struct_size() to be used in initializers"

* tag 'overflow-v5.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  lib: stackinit: Convert to KUnit
  um: Allow builds with Clang
  lib: overflow: Convert to Kunit
  overflow: Provide constant expression struct_size
  overflow: Implement size_t saturating arithmetic helpers
  test_overflow: Regularize test reporting output
parents 2142b7f0 02788ebc
......@@ -71,6 +71,9 @@ Instead, the 2-factor form of the allocator should be used::
foo = kmalloc_array(count, size, GFP_KERNEL);
Specifically, kmalloc() can be replaced with kmalloc_array(), and
kzalloc() can be replaced with kcalloc().
If no 2-factor form is available, the saturate-on-overflow helpers should
be used::
......@@ -91,9 +94,20 @@ Instead, use the helper::
array usage and switch to a `flexible array member
<#zero-length-and-one-element-arrays>`_ instead.
See array_size(), array3_size(), and struct_size(),
for more details as well as the related check_add_overflow() and
check_mul_overflow() family of functions.
For other calculations, please compose the use of the size_mul(),
size_add(), and size_sub() helpers. For example, in the case of::
foo = krealloc(current_size + chunk_size * (count - 3), GFP_KERNEL);
Instead, use the helpers::
foo = krealloc(size_add(current_size,
size_mul(chunk_size,
size_sub(count, 3))), GFP_KERNEL);
For more details, also see array3_size() and flex_array_size(),
as well as the related check_mul_overflow(), check_add_overflow(),
check_sub_overflow(), and check_shl_overflow() family of functions.
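As a hedged illustration (not part of this patch), the check_*_overflow()
helpers return true on overflow and write the possibly-wrapped result to
their output argument; the names count, elem_size, and hdr here are
hypothetical::

	size_t bytes;

	if (check_mul_overflow(count, elem_size, &bytes) ||
	    check_add_overflow(bytes, sizeof(*hdr), &bytes))
		return -EOVERFLOW;

	ptr = kmalloc(bytes, GFP_KERNEL);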
simple_strtol(), simple_strtoll(), simple_strtoul(), simple_strtoull()
----------------------------------------------------------------------
......
......@@ -93,6 +93,7 @@ int execvp_noalloc(char *buf, const char *file, char *const argv[])
up finding no executable we can use, we want to diagnose
that we did find one but were denied access. */
got_eacces = 1;
break;
case ENOENT:
case ESTALE:
case ENOTDIR:
......
......@@ -8,12 +8,11 @@
#define __FRAME_OFFSETS
#include <linux/ptrace.h>
#include <asm/types.h>
#include <linux/kbuild.h>
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define DEFINE_LONGS(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long)))
#define DEFINE_LONGS(sym, val) \
COMMENT(#val " / sizeof(unsigned long)"); \
DEFINE(sym, val / sizeof(unsigned long))
void foo(void)
{
......
......@@ -4,6 +4,7 @@
#include <linux/compiler.h>
#include <linux/limits.h>
#include <linux/const.h>
/*
* We need to compute the minimum and maximum values representable in a given
......@@ -118,81 +119,94 @@ static inline bool __must_check __must_check_overflow(bool overflow)
}))
/**
* array_size() - Calculate size of 2-dimensional array.
*
* @a: dimension one
* @b: dimension two
* size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX
*
* Calculates size of 2-dimensional array: @a * @b.
* @factor1: first factor
* @factor2: second factor
*
* Returns: number of bytes needed to represent the array or SIZE_MAX on
* overflow.
* Returns: calculate @factor1 * @factor2, both promoted to size_t,
* with any overflow causing the return value to be SIZE_MAX. The
* lvalue must be size_t to avoid implicit type conversion.
*/
static inline __must_check size_t array_size(size_t a, size_t b)
static inline size_t __must_check size_mul(size_t factor1, size_t factor2)
{
size_t bytes;
if (check_mul_overflow(a, b, &bytes))
if (check_mul_overflow(factor1, factor2, &bytes))
return SIZE_MAX;
return bytes;
}
/**
* array3_size() - Calculate size of 3-dimensional array.
* size_add() - Calculate size_t addition with saturation at SIZE_MAX
*
* @a: dimension one
* @b: dimension two
* @c: dimension three
*
* Calculates size of 3-dimensional array: @a * @b * @c.
* @addend1: first addend
* @addend2: second addend
*
* Returns: number of bytes needed to represent the array or SIZE_MAX on
* overflow.
* Returns: calculate @addend1 + @addend2, both promoted to size_t,
* with any overflow causing the return value to be SIZE_MAX. The
* lvalue must be size_t to avoid implicit type conversion.
*/
static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
static inline size_t __must_check size_add(size_t addend1, size_t addend2)
{
size_t bytes;
if (check_mul_overflow(a, b, &bytes))
return SIZE_MAX;
if (check_mul_overflow(bytes, c, &bytes))
if (check_add_overflow(addend1, addend2, &bytes))
return SIZE_MAX;
return bytes;
}
/*
* Compute a*b+c, returning SIZE_MAX on overflow. Internal helper for
* struct_size() below.
/**
* size_sub() - Calculate size_t subtraction with saturation at SIZE_MAX
*
* @minuend: value to subtract from
* @subtrahend: value to subtract from @minuend
*
* Returns: calculate @minuend - @subtrahend, both promoted to size_t,
* with any overflow causing the return value to be SIZE_MAX. For
* composition with the size_add() and size_mul() helpers, neither
argument may be SIZE_MAX (or the result will be forced to SIZE_MAX).
* The lvalue must be size_t to avoid implicit type conversion.
*/
static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c)
static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
{
size_t bytes;
if (check_mul_overflow(a, b, &bytes))
return SIZE_MAX;
if (check_add_overflow(bytes, c, &bytes))
if (minuend == SIZE_MAX || subtrahend == SIZE_MAX ||
check_sub_overflow(minuend, subtrahend, &bytes))
return SIZE_MAX;
return bytes;
}
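/*
 * Editorial sketch, not part of this header: the saturating helpers are
 * meant to be composed, so an overflow anywhere in a size calculation
 * pins the final result at SIZE_MAX and the allocator sees an impossible
 * request. The variables hdr_len, count, and elem_size are hypothetical:
 *
 *	size_t bytes = size_add(hdr_len, size_mul(count, elem_size));
 *
 *	buf = kvmalloc(bytes, GFP_KERNEL);	// fails cleanly on SIZE_MAX
 */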
/**
* struct_size() - Calculate size of structure with trailing array.
* @p: Pointer to the structure.
* @member: Name of the array member.
* @count: Number of elements in the array.
* array_size() - Calculate size of 2-dimensional array.
*
* Calculates size of memory needed for structure @p followed by an
* array of @count number of @member elements.
* @a: dimension one
* @b: dimension two
*
* Return: number of bytes needed or SIZE_MAX on overflow.
* Calculates size of 2-dimensional array: @a * @b.
*
* Returns: number of bytes needed to represent the array or SIZE_MAX on
* overflow.
*/
#define struct_size(p, member, count) \
__ab_c_size(count, \
sizeof(*(p)->member) + __must_be_array((p)->member),\
sizeof(*(p)))
#define array_size(a, b) size_mul(a, b)
/**
* array3_size() - Calculate size of 3-dimensional array.
*
* @a: dimension one
* @b: dimension two
* @c: dimension three
*
* Calculates size of 3-dimensional array: @a * @b * @c.
*
* Returns: number of bytes needed to represent the array or SIZE_MAX on
* overflow.
*/
#define array3_size(a, b, c) size_mul(size_mul(a, b), c)
/**
* flex_array_size() - Calculate size of a flexible array member
......@@ -208,7 +222,25 @@ static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c)
* Return: number of bytes needed or SIZE_MAX on overflow.
*/
#define flex_array_size(p, member, count) \
array_size(count, \
sizeof(*(p)->member) + __must_be_array((p)->member))
__builtin_choose_expr(__is_constexpr(count), \
(count) * sizeof(*(p)->member) + __must_be_array((p)->member), \
size_mul(count, sizeof(*(p)->member) + __must_be_array((p)->member)))
/**
* struct_size() - Calculate size of structure with trailing flexible array.
*
* @p: Pointer to the structure.
* @member: Name of the array member.
* @count: Number of elements in the array.
*
* Calculates size of memory needed for structure @p followed by an
* array of @count number of @member elements.
*
* Return: number of bytes needed or SIZE_MAX on overflow.
*/
#define struct_size(p, member, count) \
__builtin_choose_expr(__is_constexpr(count), \
sizeof(*(p)) + flex_array_size(p, member, count), \
size_add(sizeof(*(p)), flex_array_size(p, member, count)))
#endif /* __LINUX_OVERFLOW_H */
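A hedged usage sketch of struct_size() and flex_array_size() for a trailing
flexible array, assuming the usual kernel headers; it is not taken from this
patch, and struct sample and sample_alloc() are hypothetical names:
struct sample {
	size_t count;
	u32 data[];
};
static struct sample *sample_alloc(size_t count)
{
	struct sample *p;
	/* Saturates to SIZE_MAX on overflow, so the allocation simply fails. */
	p = kzalloc(struct_size(p, data, count), GFP_KERNEL);
	if (!p)
		return NULL;
	p->count = count;
	/* flex_array_size() covers only the trailing array portion. */
	memset(p->data, 0xff, flex_array_size(p, data, count));
	return p;
}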
......@@ -2214,9 +2214,6 @@ config TEST_UUID
config TEST_XARRAY
tristate "Test the XArray code at runtime"
config TEST_OVERFLOW
tristate "Test check_*_overflow() functions at runtime"
config TEST_RHASHTABLE
tristate "Perform selftest on resizable hash table"
help
......@@ -2501,6 +2498,30 @@ config MEMCPY_KUNIT_TEST
If unsure, say N.
config OVERFLOW_KUNIT_TEST
tristate "Test check_*_overflow() functions at runtime" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
help
Builds unit tests for the check_*_overflow(), size_*(), allocation, and
related functions.
For more information on KUnit and unit tests in general please refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
If unsure, say N.
config STACKINIT_KUNIT_TEST
tristate "Test level of stack variable initialization" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
help
Test if the kernel is zero-initializing stack variables and
padding. Coverage is controlled by compiler flags,
CONFIG_INIT_STACK_ALL_PATTERN, CONFIG_INIT_STACK_ALL_ZERO,
CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF,
or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL.
config TEST_UDELAY
tristate "udelay test driver"
help
......@@ -2592,17 +2613,6 @@ config TEST_OBJAGG
Enable this option to test object aggregation manager on boot
(or module load).
config TEST_STACKINIT
tristate "Test level of stack variable initialization"
help
Test if the kernel is zero-initializing stack variables and
padding. Coverage is controlled by compiler flags,
CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF,
or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL.
If unsure, say N.
config TEST_MEMINIT
tristate "Test heap/page initialization"
help
......
......@@ -77,7 +77,6 @@ obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
obj-$(CONFIG_TEST_MIN_HEAP) += test_min_heap.o
obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o
obj-$(CONFIG_TEST_OVERFLOW) += test_overflow.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
obj-$(CONFIG_TEST_SORT) += test_sort.o
obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
......@@ -94,8 +93,6 @@ obj-$(CONFIG_TEST_KMOD) += test_kmod.o
obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
CFLAGS_test_stackinit.o += $(call cc-disable-warning, switch-unreachable)
obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o
obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o
obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o
......@@ -363,6 +360,9 @@ obj-$(CONFIG_BITS_TEST) += test_bits.o
obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
......
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Test cases for arithmetic overflow checks.
* Test cases for arithmetic overflow checks. See:
* https://www.kernel.org/doc/html/latest/dev-tools/kunit/kunit-tool.html#configuring-building-and-running-tests
* ./tools/testing/kunit/kunit.py run overflow [--raw_output]
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <kunit/test.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
......@@ -19,7 +21,7 @@
t a, b; \
t sum, diff, prod; \
bool s_of, d_of, p_of; \
} t ## _tests[] __initconst
} t ## _tests[]
DEFINE_TEST_ARRAY(u8) = {
{0, 0, 0, 0, 0, false, false, false},
......@@ -220,43 +222,31 @@ DEFINE_TEST_ARRAY(s64) = {
bool _of; \
\
_of = check_ ## op ## _overflow(a, b, &_r); \
if (_of != of) { \
pr_warn("expected "fmt" "sym" "fmt \
" to%s overflow (type %s)\n", \
a, b, of ? "" : " not", #t); \
err = 1; \
} \
if (_r != r) { \
pr_warn("expected "fmt" "sym" "fmt" == " \
fmt", got "fmt" (type %s)\n", \
a, b, r, _r, #t); \
err = 1; \
} \
KUNIT_EXPECT_EQ_MSG(test, _of, of, \
"expected "fmt" "sym" "fmt" to%s overflow (type %s)\n", \
a, b, of ? "" : " not", #t); \
KUNIT_EXPECT_EQ_MSG(test, _r, r, \
"expected "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \
a, b, r, _r, #t); \
} while (0)
#define DEFINE_TEST_FUNC(t, fmt) \
static int __init do_test_ ## t(const struct test_ ## t *p) \
static void do_test_ ## t(struct kunit *test, const struct test_ ## t *p) \
{ \
int err = 0; \
\
check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \
check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \
check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \
check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \
\
return err; \
} \
\
static int __init test_ ## t ## _overflow(void) { \
int err = 0; \
static void t ## _overflow_test(struct kunit *test) { \
unsigned i; \
\
pr_info("%-3s: %zu arithmetic tests\n", #t, \
ARRAY_SIZE(t ## _tests)); \
for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \
err |= do_test_ ## t(&t ## _tests[i]); \
return err; \
do_test_ ## t(test, &t ## _tests[i]); \
kunit_info(test, "%zu %s arithmetic tests finished\n", \
ARRAY_SIZE(t ## _tests), #t); \
}
DEFINE_TEST_FUNC(u8, "%d");
......@@ -270,199 +260,176 @@ DEFINE_TEST_FUNC(u64, "%llu");
DEFINE_TEST_FUNC(s64, "%lld");
#endif
static int __init test_overflow_calculation(void)
{
int err = 0;
err |= test_u8_overflow();
err |= test_s8_overflow();
err |= test_u16_overflow();
err |= test_s16_overflow();
err |= test_u32_overflow();
err |= test_s32_overflow();
#if BITS_PER_LONG == 64
err |= test_u64_overflow();
err |= test_s64_overflow();
#endif
return err;
}
static int __init test_overflow_shift(void)
static void overflow_shift_test(struct kunit *test)
{
int err = 0;
int count = 0;
/* Args are: value, shift, type, expected result, overflow expected */
#define TEST_ONE_SHIFT(a, s, t, expect, of) ({ \
int __failed = 0; \
#define TEST_ONE_SHIFT(a, s, t, expect, of) do { \
typeof(a) __a = (a); \
typeof(s) __s = (s); \
t __e = (expect); \
t __d; \
bool __of = check_shl_overflow(__a, __s, &__d); \
if (__of != of) { \
pr_warn("expected (%s)(%s << %s) to%s overflow\n", \
KUNIT_EXPECT_EQ_MSG(test, __of, of, \
"expected (%s)(%s << %s) to%s overflow\n", \
#t, #a, #s, of ? "" : " not"); \
__failed = 1; \
} else if (!__of && __d != __e) { \
pr_warn("expected (%s)(%s << %s) == %s\n", \
KUNIT_EXPECT_EQ_MSG(test, __d, __e, \
"expected (%s)(%s << %s) == %s\n", \
#t, #a, #s, #expect); \
if ((t)-1 < 0) \
pr_warn("got %lld\n", (s64)__d); \
kunit_info(test, "got %lld\n", (s64)__d); \
else \
pr_warn("got %llu\n", (u64)__d); \
__failed = 1; \
kunit_info(test, "got %llu\n", (u64)__d); \
} \
if (!__failed) \
pr_info("ok: (%s)(%s << %s) == %s\n", #t, #a, #s, \
of ? "overflow" : #expect); \
__failed; \
})
count++; \
} while (0)
/* Sane shifts. */
err |= TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false);
err |= TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false);
err |= TEST_ONE_SHIFT(1, 7, u8, 1 << 7, false);
err |= TEST_ONE_SHIFT(0xF, 4, u8, 0xF << 4, false);
err |= TEST_ONE_SHIFT(1, 0, u16, 1 << 0, false);
err |= TEST_ONE_SHIFT(1, 10, u16, 1 << 10, false);
err |= TEST_ONE_SHIFT(1, 15, u16, 1 << 15, false);
err |= TEST_ONE_SHIFT(0xFF, 8, u16, 0xFF << 8, false);
err |= TEST_ONE_SHIFT(1, 0, int, 1 << 0, false);
err |= TEST_ONE_SHIFT(1, 16, int, 1 << 16, false);
err |= TEST_ONE_SHIFT(1, 30, int, 1 << 30, false);
err |= TEST_ONE_SHIFT(1, 0, s32, 1 << 0, false);
err |= TEST_ONE_SHIFT(1, 16, s32, 1 << 16, false);
err |= TEST_ONE_SHIFT(1, 30, s32, 1 << 30, false);
err |= TEST_ONE_SHIFT(1, 0, unsigned int, 1U << 0, false);
err |= TEST_ONE_SHIFT(1, 20, unsigned int, 1U << 20, false);
err |= TEST_ONE_SHIFT(1, 31, unsigned int, 1U << 31, false);
err |= TEST_ONE_SHIFT(0xFFFFU, 16, unsigned int, 0xFFFFU << 16, false);
err |= TEST_ONE_SHIFT(1, 0, u32, 1U << 0, false);
err |= TEST_ONE_SHIFT(1, 20, u32, 1U << 20, false);
err |= TEST_ONE_SHIFT(1, 31, u32, 1U << 31, false);
err |= TEST_ONE_SHIFT(0xFFFFU, 16, u32, 0xFFFFU << 16, false);
err |= TEST_ONE_SHIFT(1, 0, u64, 1ULL << 0, false);
err |= TEST_ONE_SHIFT(1, 40, u64, 1ULL << 40, false);
err |= TEST_ONE_SHIFT(1, 63, u64, 1ULL << 63, false);
err |= TEST_ONE_SHIFT(0xFFFFFFFFULL, 32, u64,
0xFFFFFFFFULL << 32, false);
TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false);
TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false);
TEST_ONE_SHIFT(1, 7, u8, 1 << 7, false);
TEST_ONE_SHIFT(0xF, 4, u8, 0xF << 4, false);
TEST_ONE_SHIFT(1, 0, u16, 1 << 0, false);
TEST_ONE_SHIFT(1, 10, u16, 1 << 10, false);
TEST_ONE_SHIFT(1, 15, u16, 1 << 15, false);
TEST_ONE_SHIFT(0xFF, 8, u16, 0xFF << 8, false);
TEST_ONE_SHIFT(1, 0, int, 1 << 0, false);
TEST_ONE_SHIFT(1, 16, int, 1 << 16, false);
TEST_ONE_SHIFT(1, 30, int, 1 << 30, false);
TEST_ONE_SHIFT(1, 0, s32, 1 << 0, false);
TEST_ONE_SHIFT(1, 16, s32, 1 << 16, false);
TEST_ONE_SHIFT(1, 30, s32, 1 << 30, false);
TEST_ONE_SHIFT(1, 0, unsigned int, 1U << 0, false);
TEST_ONE_SHIFT(1, 20, unsigned int, 1U << 20, false);
TEST_ONE_SHIFT(1, 31, unsigned int, 1U << 31, false);
TEST_ONE_SHIFT(0xFFFFU, 16, unsigned int, 0xFFFFU << 16, false);
TEST_ONE_SHIFT(1, 0, u32, 1U << 0, false);
TEST_ONE_SHIFT(1, 20, u32, 1U << 20, false);
TEST_ONE_SHIFT(1, 31, u32, 1U << 31, false);
TEST_ONE_SHIFT(0xFFFFU, 16, u32, 0xFFFFU << 16, false);
TEST_ONE_SHIFT(1, 0, u64, 1ULL << 0, false);
TEST_ONE_SHIFT(1, 40, u64, 1ULL << 40, false);
TEST_ONE_SHIFT(1, 63, u64, 1ULL << 63, false);
TEST_ONE_SHIFT(0xFFFFFFFFULL, 32, u64, 0xFFFFFFFFULL << 32, false);
/* Sane shift: start and end with 0, without a too-wide shift. */
err |= TEST_ONE_SHIFT(0, 7, u8, 0, false);
err |= TEST_ONE_SHIFT(0, 15, u16, 0, false);
err |= TEST_ONE_SHIFT(0, 31, unsigned int, 0, false);
err |= TEST_ONE_SHIFT(0, 31, u32, 0, false);
err |= TEST_ONE_SHIFT(0, 63, u64, 0, false);
TEST_ONE_SHIFT(0, 7, u8, 0, false);
TEST_ONE_SHIFT(0, 15, u16, 0, false);
TEST_ONE_SHIFT(0, 31, unsigned int, 0, false);
TEST_ONE_SHIFT(0, 31, u32, 0, false);
TEST_ONE_SHIFT(0, 63, u64, 0, false);
/* Sane shift: start and end with 0, without reaching signed bit. */
err |= TEST_ONE_SHIFT(0, 6, s8, 0, false);
err |= TEST_ONE_SHIFT(0, 14, s16, 0, false);
err |= TEST_ONE_SHIFT(0, 30, int, 0, false);
err |= TEST_ONE_SHIFT(0, 30, s32, 0, false);
err |= TEST_ONE_SHIFT(0, 62, s64, 0, false);
TEST_ONE_SHIFT(0, 6, s8, 0, false);
TEST_ONE_SHIFT(0, 14, s16, 0, false);
TEST_ONE_SHIFT(0, 30, int, 0, false);
TEST_ONE_SHIFT(0, 30, s32, 0, false);
TEST_ONE_SHIFT(0, 62, s64, 0, false);
/* Overflow: shifted the bit off the end. */
err |= TEST_ONE_SHIFT(1, 8, u8, 0, true);
err |= TEST_ONE_SHIFT(1, 16, u16, 0, true);
err |= TEST_ONE_SHIFT(1, 32, unsigned int, 0, true);
err |= TEST_ONE_SHIFT(1, 32, u32, 0, true);
err |= TEST_ONE_SHIFT(1, 64, u64, 0, true);
TEST_ONE_SHIFT(1, 8, u8, 0, true);
TEST_ONE_SHIFT(1, 16, u16, 0, true);
TEST_ONE_SHIFT(1, 32, unsigned int, 0, true);
TEST_ONE_SHIFT(1, 32, u32, 0, true);
TEST_ONE_SHIFT(1, 64, u64, 0, true);
/* Overflow: shifted into the signed bit. */
err |= TEST_ONE_SHIFT(1, 7, s8, 0, true);
err |= TEST_ONE_SHIFT(1, 15, s16, 0, true);
err |= TEST_ONE_SHIFT(1, 31, int, 0, true);
err |= TEST_ONE_SHIFT(1, 31, s32, 0, true);
err |= TEST_ONE_SHIFT(1, 63, s64, 0, true);
TEST_ONE_SHIFT(1, 7, s8, 0, true);
TEST_ONE_SHIFT(1, 15, s16, 0, true);
TEST_ONE_SHIFT(1, 31, int, 0, true);
TEST_ONE_SHIFT(1, 31, s32, 0, true);
TEST_ONE_SHIFT(1, 63, s64, 0, true);
/* Overflow: high bit falls off unsigned types. */
/* 10010110 */
err |= TEST_ONE_SHIFT(150, 1, u8, 0, true);
TEST_ONE_SHIFT(150, 1, u8, 0, true);
/* 1000100010010110 */
err |= TEST_ONE_SHIFT(34966, 1, u16, 0, true);
TEST_ONE_SHIFT(34966, 1, u16, 0, true);
/* 10000100000010001000100010010110 */
err |= TEST_ONE_SHIFT(2215151766U, 1, u32, 0, true);
err |= TEST_ONE_SHIFT(2215151766U, 1, unsigned int, 0, true);
TEST_ONE_SHIFT(2215151766U, 1, u32, 0, true);
TEST_ONE_SHIFT(2215151766U, 1, unsigned int, 0, true);
/* 1000001000010000010000000100000010000100000010001000100010010110 */
err |= TEST_ONE_SHIFT(9372061470395238550ULL, 1, u64, 0, true);
TEST_ONE_SHIFT(9372061470395238550ULL, 1, u64, 0, true);
/* Overflow: bit shifted into signed bit on signed types. */
/* 01001011 */
err |= TEST_ONE_SHIFT(75, 1, s8, 0, true);
TEST_ONE_SHIFT(75, 1, s8, 0, true);
/* 0100010001001011 */
err |= TEST_ONE_SHIFT(17483, 1, s16, 0, true);
TEST_ONE_SHIFT(17483, 1, s16, 0, true);
/* 01000010000001000100010001001011 */
err |= TEST_ONE_SHIFT(1107575883, 1, s32, 0, true);
err |= TEST_ONE_SHIFT(1107575883, 1, int, 0, true);
TEST_ONE_SHIFT(1107575883, 1, s32, 0, true);
TEST_ONE_SHIFT(1107575883, 1, int, 0, true);
/* 0100000100001000001000000010000001000010000001000100010001001011 */
err |= TEST_ONE_SHIFT(4686030735197619275LL, 1, s64, 0, true);
TEST_ONE_SHIFT(4686030735197619275LL, 1, s64, 0, true);
/* Overflow: bit shifted past signed bit on signed types. */
/* 01001011 */
err |= TEST_ONE_SHIFT(75, 2, s8, 0, true);
TEST_ONE_SHIFT(75, 2, s8, 0, true);
/* 0100010001001011 */
err |= TEST_ONE_SHIFT(17483, 2, s16, 0, true);
TEST_ONE_SHIFT(17483, 2, s16, 0, true);
/* 01000010000001000100010001001011 */
err |= TEST_ONE_SHIFT(1107575883, 2, s32, 0, true);
err |= TEST_ONE_SHIFT(1107575883, 2, int, 0, true);
TEST_ONE_SHIFT(1107575883, 2, s32, 0, true);
TEST_ONE_SHIFT(1107575883, 2, int, 0, true);
/* 0100000100001000001000000010000001000010000001000100010001001011 */
err |= TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true);
TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true);
/* Overflow: values larger than destination type. */
err |= TEST_ONE_SHIFT(0x100, 0, u8, 0, true);
err |= TEST_ONE_SHIFT(0xFF, 0, s8, 0, true);
err |= TEST_ONE_SHIFT(0x10000U, 0, u16, 0, true);
err |= TEST_ONE_SHIFT(0xFFFFU, 0, s16, 0, true);
err |= TEST_ONE_SHIFT(0x100000000ULL, 0, u32, 0, true);
err |= TEST_ONE_SHIFT(0x100000000ULL, 0, unsigned int, 0, true);
err |= TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, s32, 0, true);
err |= TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true);
err |= TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true);
TEST_ONE_SHIFT(0x100, 0, u8, 0, true);
TEST_ONE_SHIFT(0xFF, 0, s8, 0, true);
TEST_ONE_SHIFT(0x10000U, 0, u16, 0, true);
TEST_ONE_SHIFT(0xFFFFU, 0, s16, 0, true);
TEST_ONE_SHIFT(0x100000000ULL, 0, u32, 0, true);
TEST_ONE_SHIFT(0x100000000ULL, 0, unsigned int, 0, true);
TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, s32, 0, true);
TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true);
TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true);
/* Nonsense: negative initial value. */
err |= TEST_ONE_SHIFT(-1, 0, s8, 0, true);
err |= TEST_ONE_SHIFT(-1, 0, u8, 0, true);
err |= TEST_ONE_SHIFT(-5, 0, s16, 0, true);
err |= TEST_ONE_SHIFT(-5, 0, u16, 0, true);
err |= TEST_ONE_SHIFT(-10, 0, int, 0, true);
err |= TEST_ONE_SHIFT(-10, 0, unsigned int, 0, true);
err |= TEST_ONE_SHIFT(-100, 0, s32, 0, true);
err |= TEST_ONE_SHIFT(-100, 0, u32, 0, true);
err |= TEST_ONE_SHIFT(-10000, 0, s64, 0, true);
err |= TEST_ONE_SHIFT(-10000, 0, u64, 0, true);
TEST_ONE_SHIFT(-1, 0, s8, 0, true);
TEST_ONE_SHIFT(-1, 0, u8, 0, true);
TEST_ONE_SHIFT(-5, 0, s16, 0, true);
TEST_ONE_SHIFT(-5, 0, u16, 0, true);
TEST_ONE_SHIFT(-10, 0, int, 0, true);
TEST_ONE_SHIFT(-10, 0, unsigned int, 0, true);
TEST_ONE_SHIFT(-100, 0, s32, 0, true);
TEST_ONE_SHIFT(-100, 0, u32, 0, true);
TEST_ONE_SHIFT(-10000, 0, s64, 0, true);
TEST_ONE_SHIFT(-10000, 0, u64, 0, true);
/* Nonsense: negative shift values. */
err |= TEST_ONE_SHIFT(0, -5, s8, 0, true);
err |= TEST_ONE_SHIFT(0, -5, u8, 0, true);
err |= TEST_ONE_SHIFT(0, -10, s16, 0, true);
err |= TEST_ONE_SHIFT(0, -10, u16, 0, true);
err |= TEST_ONE_SHIFT(0, -15, int, 0, true);
err |= TEST_ONE_SHIFT(0, -15, unsigned int, 0, true);
err |= TEST_ONE_SHIFT(0, -20, s32, 0, true);
err |= TEST_ONE_SHIFT(0, -20, u32, 0, true);
err |= TEST_ONE_SHIFT(0, -30, s64, 0, true);
err |= TEST_ONE_SHIFT(0, -30, u64, 0, true);
TEST_ONE_SHIFT(0, -5, s8, 0, true);
TEST_ONE_SHIFT(0, -5, u8, 0, true);
TEST_ONE_SHIFT(0, -10, s16, 0, true);
TEST_ONE_SHIFT(0, -10, u16, 0, true);
TEST_ONE_SHIFT(0, -15, int, 0, true);
TEST_ONE_SHIFT(0, -15, unsigned int, 0, true);
TEST_ONE_SHIFT(0, -20, s32, 0, true);
TEST_ONE_SHIFT(0, -20, u32, 0, true);
TEST_ONE_SHIFT(0, -30, s64, 0, true);
TEST_ONE_SHIFT(0, -30, u64, 0, true);
/* Overflow: shifted at or beyond entire type's bit width. */
err |= TEST_ONE_SHIFT(0, 8, u8, 0, true);
err |= TEST_ONE_SHIFT(0, 9, u8, 0, true);
err |= TEST_ONE_SHIFT(0, 8, s8, 0, true);
err |= TEST_ONE_SHIFT(0, 9, s8, 0, true);
err |= TEST_ONE_SHIFT(0, 16, u16, 0, true);
err |= TEST_ONE_SHIFT(0, 17, u16, 0, true);
err |= TEST_ONE_SHIFT(0, 16, s16, 0, true);
err |= TEST_ONE_SHIFT(0, 17, s16, 0, true);
err |= TEST_ONE_SHIFT(0, 32, u32, 0, true);
err |= TEST_ONE_SHIFT(0, 33, u32, 0, true);
err |= TEST_ONE_SHIFT(0, 32, int, 0, true);
err |= TEST_ONE_SHIFT(0, 33, int, 0, true);
err |= TEST_ONE_SHIFT(0, 32, s32, 0, true);
err |= TEST_ONE_SHIFT(0, 33, s32, 0, true);
err |= TEST_ONE_SHIFT(0, 64, u64, 0, true);
err |= TEST_ONE_SHIFT(0, 65, u64, 0, true);
err |= TEST_ONE_SHIFT(0, 64, s64, 0, true);
err |= TEST_ONE_SHIFT(0, 65, s64, 0, true);
TEST_ONE_SHIFT(0, 8, u8, 0, true);
TEST_ONE_SHIFT(0, 9, u8, 0, true);
TEST_ONE_SHIFT(0, 8, s8, 0, true);
TEST_ONE_SHIFT(0, 9, s8, 0, true);
TEST_ONE_SHIFT(0, 16, u16, 0, true);
TEST_ONE_SHIFT(0, 17, u16, 0, true);
TEST_ONE_SHIFT(0, 16, s16, 0, true);
TEST_ONE_SHIFT(0, 17, s16, 0, true);
TEST_ONE_SHIFT(0, 32, u32, 0, true);
TEST_ONE_SHIFT(0, 33, u32, 0, true);
TEST_ONE_SHIFT(0, 32, int, 0, true);
TEST_ONE_SHIFT(0, 33, int, 0, true);
TEST_ONE_SHIFT(0, 32, s32, 0, true);
TEST_ONE_SHIFT(0, 33, s32, 0, true);
TEST_ONE_SHIFT(0, 64, u64, 0, true);
TEST_ONE_SHIFT(0, 65, u64, 0, true);
TEST_ONE_SHIFT(0, 64, s64, 0, true);
TEST_ONE_SHIFT(0, 65, s64, 0, true);
/*
* Corner case: for unsigned types, we fail when we've shifted
......@@ -473,13 +440,14 @@ static int __init test_overflow_shift(void)
* signed bit). So, for now, we will test this condition but
* mark it as not expected to overflow.
*/
err |= TEST_ONE_SHIFT(0, 7, s8, 0, false);
err |= TEST_ONE_SHIFT(0, 15, s16, 0, false);
err |= TEST_ONE_SHIFT(0, 31, int, 0, false);
err |= TEST_ONE_SHIFT(0, 31, s32, 0, false);
err |= TEST_ONE_SHIFT(0, 63, s64, 0, false);
return err;
TEST_ONE_SHIFT(0, 7, s8, 0, false);
TEST_ONE_SHIFT(0, 15, s16, 0, false);
TEST_ONE_SHIFT(0, 31, int, 0, false);
TEST_ONE_SHIFT(0, 31, s32, 0, false);
TEST_ONE_SHIFT(0, 63, s64, 0, false);
kunit_info(test, "%d shift tests finished\n", count);
#undef TEST_ONE_SHIFT
}
/*
......@@ -499,7 +467,7 @@ static int __init test_overflow_shift(void)
#define TEST_SIZE (5 * 4096)
#define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\
static int __init test_ ## func (void *arg) \
static void test_ ## func (struct kunit *test, void *arg) \
{ \
volatile size_t a = TEST_SIZE; \
volatile size_t b = (SIZE_MAX / TEST_SIZE) + 1; \
......@@ -507,31 +475,24 @@ static int __init test_ ## func (void *arg) \
\
/* Tiny allocation test. */ \
ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, 1);\
if (!ptr) { \
pr_warn(#func " failed regular allocation?!\n"); \
return 1; \
} \
KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, \
#func " failed regular allocation?!\n"); \
free ## want_arg (free_func, arg, ptr); \
\
/* Wrapped allocation test. */ \
ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \
a * b); \
if (!ptr) { \
pr_warn(#func " unexpectedly failed bad wrapping?!\n"); \
return 1; \
} \
KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, \
#func " unexpectedly failed bad wrapping?!\n"); \
free ## want_arg (free_func, arg, ptr); \
\
/* Saturated allocation test. */ \
ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \
array_size(a, b)); \
if (ptr) { \
pr_warn(#func " missed saturation!\n"); \
KUNIT_FAIL(test, #func " missed saturation!\n"); \
free ## want_arg (free_func, arg, ptr); \
return 1; \
} \
pr_info(#func " detected saturation\n"); \
return 0; \
}
/*
......@@ -544,10 +505,7 @@ DEFINE_TEST_ALLOC(kmalloc, kfree, 0, 1, 0);
DEFINE_TEST_ALLOC(kmalloc_node, kfree, 0, 1, 1);
DEFINE_TEST_ALLOC(kzalloc, kfree, 0, 1, 0);
DEFINE_TEST_ALLOC(kzalloc_node, kfree, 0, 1, 1);
DEFINE_TEST_ALLOC(vmalloc, vfree, 0, 0, 0);
DEFINE_TEST_ALLOC(vmalloc_node, vfree, 0, 0, 1);
DEFINE_TEST_ALLOC(vzalloc, vfree, 0, 0, 0);
DEFINE_TEST_ALLOC(vzalloc_node, vfree, 0, 0, 1);
DEFINE_TEST_ALLOC(__vmalloc, vfree, 0, 1, 0);
DEFINE_TEST_ALLOC(kvmalloc, kvfree, 0, 1, 0);
DEFINE_TEST_ALLOC(kvmalloc_node, kvfree, 0, 1, 1);
DEFINE_TEST_ALLOC(kvzalloc, kvfree, 0, 1, 0);
......@@ -555,60 +513,158 @@ DEFINE_TEST_ALLOC(kvzalloc_node, kvfree, 0, 1, 1);
DEFINE_TEST_ALLOC(devm_kmalloc, devm_kfree, 1, 1, 0);
DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0);
static int __init test_overflow_allocation(void)
static void overflow_allocation_test(struct kunit *test)
{
const char device_name[] = "overflow-test";
struct device *dev;
int err = 0;
int count = 0;
#define check_allocation_overflow(alloc) do { \
count++; \
test_ ## alloc(test, dev); \
} while (0)
/* Create dummy device for devm_kmalloc()-family tests. */
dev = root_device_register(device_name);
if (IS_ERR(dev)) {
pr_warn("Cannot register test device\n");
return 1;
}
err |= test_kmalloc(NULL);
err |= test_kmalloc_node(NULL);
err |= test_kzalloc(NULL);
err |= test_kzalloc_node(NULL);
err |= test_kvmalloc(NULL);
err |= test_kvmalloc_node(NULL);
err |= test_kvzalloc(NULL);
err |= test_kvzalloc_node(NULL);
err |= test_vmalloc(NULL);
err |= test_vmalloc_node(NULL);
err |= test_vzalloc(NULL);
err |= test_vzalloc_node(NULL);
err |= test_devm_kmalloc(dev);
err |= test_devm_kzalloc(dev);
KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev),
"Cannot register test device\n");
check_allocation_overflow(kmalloc);
check_allocation_overflow(kmalloc_node);
check_allocation_overflow(kzalloc);
check_allocation_overflow(kzalloc_node);
check_allocation_overflow(__vmalloc);
check_allocation_overflow(kvmalloc);
check_allocation_overflow(kvmalloc_node);
check_allocation_overflow(kvzalloc);
check_allocation_overflow(kvzalloc_node);
check_allocation_overflow(devm_kmalloc);
check_allocation_overflow(devm_kzalloc);
device_unregister(dev);
return err;
kunit_info(test, "%d allocation overflow tests finished\n", count);
#undef check_allocation_overflow
}
static int __init test_module_init(void)
struct __test_flex_array {
unsigned long flags;
size_t count;
unsigned long data[];
};
static void overflow_size_helpers_test(struct kunit *test)
{
int err = 0;
/* Make sure struct_size() can be used in a constant expression. */
u8 ce_array[struct_size((struct __test_flex_array *)0, data, 55)];
struct __test_flex_array *obj;
int count = 0;
int var;
volatile int unconst = 0;
/* Verify constant expression against runtime version. */
var = 55;
OPTIMIZER_HIDE_VAR(var);
KUNIT_EXPECT_EQ(test, sizeof(ce_array), struct_size(obj, data, var));
#define check_one_size_helper(expected, func, args...) do { \
size_t _r = func(args); \
KUNIT_EXPECT_EQ_MSG(test, _r, expected, \
"expected " #func "(" #args ") to return %zu but got %zu instead\n", \
(size_t)(expected), _r); \
count++; \
} while (0)
err |= test_overflow_calculation();
err |= test_overflow_shift();
err |= test_overflow_allocation();
var = 4;
check_one_size_helper(20, size_mul, var++, 5);
check_one_size_helper(20, size_mul, 4, var++);
check_one_size_helper(0, size_mul, 0, 3);
check_one_size_helper(0, size_mul, 3, 0);
check_one_size_helper(6, size_mul, 2, 3);
check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 1);
check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 3);
check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, -3);
var = 4;
check_one_size_helper(9, size_add, var++, 5);
check_one_size_helper(9, size_add, 4, var++);
check_one_size_helper(9, size_add, 9, 0);
check_one_size_helper(9, size_add, 0, 9);
check_one_size_helper(5, size_add, 2, 3);
check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 1);
check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 3);
check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, -3);
var = 4;
check_one_size_helper(1, size_sub, var--, 3);
check_one_size_helper(1, size_sub, 4, var--);
check_one_size_helper(1, size_sub, 3, 2);
check_one_size_helper(9, size_sub, 9, 0);
check_one_size_helper(SIZE_MAX, size_sub, 9, -3);
check_one_size_helper(SIZE_MAX, size_sub, 0, 9);
check_one_size_helper(SIZE_MAX, size_sub, 2, 3);
check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 0);
check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 10);
check_one_size_helper(SIZE_MAX, size_sub, 0, SIZE_MAX);
check_one_size_helper(SIZE_MAX, size_sub, 14, SIZE_MAX);
check_one_size_helper(SIZE_MAX - 2, size_sub, SIZE_MAX - 1, 1);
check_one_size_helper(SIZE_MAX - 4, size_sub, SIZE_MAX - 1, 3);
check_one_size_helper(1, size_sub, SIZE_MAX - 1, -3);
var = 4;
check_one_size_helper(4 * sizeof(*obj->data),
flex_array_size, obj, data, var++);
check_one_size_helper(5 * sizeof(*obj->data),
flex_array_size, obj, data, var++);
check_one_size_helper(0, flex_array_size, obj, data, 0 + unconst);
check_one_size_helper(sizeof(*obj->data),
flex_array_size, obj, data, 1 + unconst);
check_one_size_helper(7 * sizeof(*obj->data),
flex_array_size, obj, data, 7 + unconst);
check_one_size_helper(SIZE_MAX,
flex_array_size, obj, data, -1 + unconst);
check_one_size_helper(SIZE_MAX,
flex_array_size, obj, data, SIZE_MAX - 4 + unconst);
var = 4;
check_one_size_helper(sizeof(*obj) + (4 * sizeof(*obj->data)),
struct_size, obj, data, var++);
check_one_size_helper(sizeof(*obj) + (5 * sizeof(*obj->data)),
struct_size, obj, data, var++);
check_one_size_helper(sizeof(*obj), struct_size, obj, data, 0 + unconst);
check_one_size_helper(sizeof(*obj) + sizeof(*obj->data),
struct_size, obj, data, 1 + unconst);
check_one_size_helper(SIZE_MAX,
struct_size, obj, data, -3 + unconst);
check_one_size_helper(SIZE_MAX,
struct_size, obj, data, SIZE_MAX - 3 + unconst);
kunit_info(test, "%d overflow size helper tests finished\n", count);
#undef check_one_size_helper
}
if (err) {
pr_warn("FAIL!\n");
err = -EINVAL;
} else {
pr_info("all tests passed\n");
}
static struct kunit_case overflow_test_cases[] = {
KUNIT_CASE(u8_overflow_test),
KUNIT_CASE(s8_overflow_test),
KUNIT_CASE(u16_overflow_test),
KUNIT_CASE(s16_overflow_test),
KUNIT_CASE(u32_overflow_test),
KUNIT_CASE(s32_overflow_test),
#if BITS_PER_LONG == 64
KUNIT_CASE(u64_overflow_test),
KUNIT_CASE(s64_overflow_test),
#endif
KUNIT_CASE(overflow_shift_test),
KUNIT_CASE(overflow_allocation_test),
KUNIT_CASE(overflow_size_helpers_test),
{}
};
return err;
}
static struct kunit_suite overflow_test_suite = {
.name = "overflow",
.test_cases = overflow_test_cases,
};
static void __exit test_module_exit(void)
{ }
kunit_test_suite(overflow_test_suite);
module_init(test_module_init);
module_exit(test_module_exit);
MODULE_LICENSE("Dual MIT/GPL");
......@@ -2,76 +2,21 @@
/*
* Test cases for compiler-based stack variable zeroing via
* -ftrivial-auto-var-init={zero,pattern} or CONFIG_GCC_PLUGIN_STRUCTLEAK*.
* For example, see:
* https://www.kernel.org/doc/html/latest/dev-tools/kunit/kunit-tool.html#configuring-building-and-running-tests
* ./tools/testing/kunit/kunit.py run stackinit [--raw_output] \
* --make_option LLVM=1 \
* --kconfig_add CONFIG_INIT_STACK_ALL_ZERO=y
*
* External build example:
* clang -O2 -Wall -ftrivial-auto-var-init=pattern \
* -o test_stackinit test_stackinit.c
*/
#ifdef __KERNEL__
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <kunit/test.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#else
/* Userspace headers. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <sys/types.h>
/* Linux kernel-ism stubs for stand-alone userspace build. */
#define KBUILD_MODNAME "stackinit"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warn(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) fprintf(stdout, pr_fmt(fmt), ##__VA_ARGS__)
#define __init /**/
#define __exit /**/
#define __user /**/
#define noinline __attribute__((__noinline__))
#define __aligned(x) __attribute__((__aligned__(x)))
#ifdef __clang__
# define __compiletime_error(message) /**/
#else
# define __compiletime_error(message) __attribute__((__error__(message)))
#endif
#define __compiletime_assert(condition, msg, prefix, suffix) \
do { \
extern void prefix ## suffix(void) __compiletime_error(msg); \
if (!(condition)) \
prefix ## suffix(); \
} while (0)
#define _compiletime_assert(condition, msg, prefix, suffix) \
__compiletime_assert(condition, msg, prefix, suffix)
#define compiletime_assert(condition, msg) \
_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg)
#define BUILD_BUG_ON(condition) \
BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition)
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
#define module_init(func) static int (*do_init)(void) = func
#define module_exit(func) static void (*do_exit)(void) = func
#define MODULE_LICENSE(str) int main(void) { \
int rc; \
/* License: str */ \
rc = do_init(); \
if (rc == 0) \
do_exit(); \
return rc; \
}
#endif /* __KERNEL__ */
/* Exfiltration buffer. */
#define MAX_VAR_SIZE 128
static u8 check_buf[MAX_VAR_SIZE];
......@@ -201,7 +146,7 @@ static bool range_contains(char *haystack_start, size_t haystack_size,
*/
#define DEFINE_TEST_DRIVER(name, var_type, which, xfail) \
/* Returns 0 on success, 1 on failure. */ \
static noinline __init int test_ ## name (void) \
static noinline void test_ ## name (struct kunit *test) \
{ \
var_type zero INIT_CLONE_ ## which; \
int ignored; \
......@@ -220,10 +165,8 @@ static noinline __init int test_ ## name (void) \
/* Verify all bytes overwritten with 0xFF. */ \
for (sum = 0, i = 0; i < target_size; i++) \
sum += (check_buf[i] != 0xFF); \
if (sum) { \
pr_err(#name ": leaf fill was not 0xFF!?\n"); \
return 1; \
} \
KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
"leaf fill was not 0xFF!?\n"); \
/* Clear entire check buffer for later bit tests. */ \
memset(check_buf, 0x00, sizeof(check_buf)); \
/* Extract stack-defined variable contents. */ \
......@@ -231,32 +174,29 @@ static noinline __init int test_ ## name (void) \
FETCH_ARG_ ## which(zero)); \
\
/* Validate that compiler lined up fill and target. */ \
if (!range_contains(fill_start, fill_size, \
target_start, target_size)) { \
pr_err(#name ": stack fill missed target!?\n"); \
pr_err(#name ": fill %zu wide\n", fill_size); \
pr_err(#name ": target offset by %d\n", \
(int)((ssize_t)(uintptr_t)fill_start - \
(ssize_t)(uintptr_t)target_start)); \
return 1; \
} \
KUNIT_ASSERT_TRUE_MSG(test, \
range_contains(fill_start, fill_size, \
target_start, target_size), \
"stack fill missed target!? " \
"(fill %zu wide, target offset by %d)\n", \
fill_size, \
(int)((ssize_t)(uintptr_t)fill_start - \
(ssize_t)(uintptr_t)target_start)); \
\
/* Look for any bytes still 0xFF in check region. */ \
for (sum = 0, i = 0; i < target_size; i++) \
sum += (check_buf[i] == 0xFF); \
\
if (sum == 0) { \
pr_info(#name " ok\n"); \
return 0; \
} else { \
pr_warn(#name " %sFAIL (uninit bytes: %d)\n", \
(xfail) ? "X" : "", sum); \
return (xfail) ? 0 : 1; \
} \
if (sum != 0 && xfail) \
kunit_skip(test, \
"XFAIL uninit bytes: %d\n", \
sum); \
KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
"uninit bytes: %d\n", sum); \
}
#define DEFINE_TEST(name, var_type, which, init_level, xfail) \
/* no-op to force compiler into ignoring "uninitialized" vars */\
static noinline __init DO_NOTHING_TYPE_ ## which(var_type) \
static noinline DO_NOTHING_TYPE_ ## which(var_type) \
do_nothing_ ## name(var_type *ptr) \
{ \
/* Will always be true, but compiler doesn't know. */ \
......@@ -265,9 +205,8 @@ do_nothing_ ## name(var_type *ptr) \
else \
return DO_NOTHING_RETURN_ ## which(ptr + 1); \
} \
static noinline __init int leaf_ ## name(unsigned long sp, \
bool fill, \
var_type *arg) \
static noinline int leaf_ ## name(unsigned long sp, bool fill, \
var_type *arg) \
{ \
char buf[VAR_BUFFER]; \
var_type var \
......@@ -341,6 +280,27 @@ struct test_user {
unsigned long four;
};
#define ALWAYS_PASS WANT_SUCCESS
#define ALWAYS_FAIL XFAIL
#ifdef CONFIG_INIT_STACK_NONE
# define USER_PASS XFAIL
# define BYREF_PASS XFAIL
# define STRONG_PASS XFAIL
#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER)
# define USER_PASS WANT_SUCCESS
# define BYREF_PASS XFAIL
# define STRONG_PASS XFAIL
#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)
# define USER_PASS WANT_SUCCESS
# define BYREF_PASS WANT_SUCCESS
# define STRONG_PASS XFAIL
#else
# define USER_PASS WANT_SUCCESS
# define BYREF_PASS WANT_SUCCESS
# define STRONG_PASS WANT_SUCCESS
#endif
#define DEFINE_SCALAR_TEST(name, init, xfail) \
DEFINE_TEST(name ## _ ## init, name, SCALAR, \
init, xfail)
......@@ -364,27 +324,26 @@ struct test_user {
DEFINE_STRUCT_TEST(trailing_hole, init, xfail); \
DEFINE_STRUCT_TEST(packed, init, xfail)
#define DEFINE_STRUCT_INITIALIZER_TESTS(base) \
#define DEFINE_STRUCT_INITIALIZER_TESTS(base, xfail) \
DEFINE_STRUCT_TESTS(base ## _ ## partial, \
WANT_SUCCESS); \
DEFINE_STRUCT_TESTS(base ## _ ## all, \
WANT_SUCCESS)
xfail); \
DEFINE_STRUCT_TESTS(base ## _ ## all, xfail)
/* These should be fully initialized all the time! */
DEFINE_SCALAR_TESTS(zero, WANT_SUCCESS);
DEFINE_STRUCT_TESTS(zero, WANT_SUCCESS);
DEFINE_SCALAR_TESTS(zero, ALWAYS_PASS);
DEFINE_STRUCT_TESTS(zero, ALWAYS_PASS);
/* Struct initializers: padding may be left uninitialized. */
DEFINE_STRUCT_INITIALIZER_TESTS(static);
DEFINE_STRUCT_INITIALIZER_TESTS(dynamic);
DEFINE_STRUCT_INITIALIZER_TESTS(runtime);
DEFINE_STRUCT_INITIALIZER_TESTS(assigned_static);
DEFINE_STRUCT_INITIALIZER_TESTS(assigned_dynamic);
DEFINE_STRUCT_TESTS(assigned_copy, XFAIL);
DEFINE_STRUCT_INITIALIZER_TESTS(static, STRONG_PASS);
DEFINE_STRUCT_INITIALIZER_TESTS(dynamic, STRONG_PASS);
DEFINE_STRUCT_INITIALIZER_TESTS(runtime, STRONG_PASS);
DEFINE_STRUCT_INITIALIZER_TESTS(assigned_static, STRONG_PASS);
DEFINE_STRUCT_INITIALIZER_TESTS(assigned_dynamic, STRONG_PASS);
DEFINE_STRUCT_TESTS(assigned_copy, ALWAYS_FAIL);
/* No initialization without compiler instrumentation. */
DEFINE_SCALAR_TESTS(none, WANT_SUCCESS);
DEFINE_STRUCT_TESTS(none, WANT_SUCCESS);
DEFINE_SCALAR_TESTS(none, STRONG_PASS);
DEFINE_STRUCT_TESTS(none, BYREF_PASS);
/* Initialization of members with __user attribute. */
DEFINE_TEST(user, struct test_user, STRUCT, none, WANT_SUCCESS);
DEFINE_TEST(user, struct test_user, STRUCT, none, USER_PASS);
/*
* Check two uses through a variable declaration outside either path,
......@@ -398,7 +357,7 @@ static int noinline __leaf_switch_none(int path, bool fill)
* This is intentionally unreachable. To silence the
* warning, build with -Wno-switch-unreachable
*/
uint64_t var;
uint64_t var[10];
case 1:
target_start = &var;
......@@ -423,19 +382,19 @@ static int noinline __leaf_switch_none(int path, bool fill)
memcpy(check_buf, target_start, target_size);
break;
default:
var = 5;
return var & forced_mask;
var[1] = 5;
return var[1] & forced_mask;
}
return 0;
}
static noinline __init int leaf_switch_1_none(unsigned long sp, bool fill,
static noinline int leaf_switch_1_none(unsigned long sp, bool fill,
uint64_t *arg)
{
return __leaf_switch_none(1, fill);
}
static noinline __init int leaf_switch_2_none(unsigned long sp, bool fill,
static noinline int leaf_switch_2_none(unsigned long sp, bool fill,
uint64_t *arg)
{
return __leaf_switch_none(2, fill);
......@@ -447,68 +406,56 @@ static noinline __init int leaf_switch_2_none(unsigned long sp, bool fill,
* non-code areas (i.e. in a switch statement before the first "case").
* https://bugs.llvm.org/show_bug.cgi?id=44916
*/
DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, XFAIL);
DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, XFAIL);
static int __init test_stackinit_init(void)
{
unsigned int failures = 0;
#define test_scalars(init) do { \
failures += test_u8_ ## init (); \
failures += test_u16_ ## init (); \
failures += test_u32_ ## init (); \
failures += test_u64_ ## init (); \
failures += test_char_array_ ## init (); \
} while (0)
#define test_structs(init) do { \
failures += test_small_hole_ ## init (); \
failures += test_big_hole_ ## init (); \
failures += test_trailing_hole_ ## init (); \
failures += test_packed_ ## init (); \
} while (0)
DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, ALWAYS_FAIL);
DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, ALWAYS_FAIL);
#define KUNIT_test_scalars(init) \
KUNIT_CASE(test_u8_ ## init), \
KUNIT_CASE(test_u16_ ## init), \
KUNIT_CASE(test_u32_ ## init), \
KUNIT_CASE(test_u64_ ## init), \
KUNIT_CASE(test_char_array_ ## init)
#define KUNIT_test_structs(init) \
KUNIT_CASE(test_small_hole_ ## init), \
KUNIT_CASE(test_big_hole_ ## init), \
KUNIT_CASE(test_trailing_hole_ ## init),\
KUNIT_CASE(test_packed_ ## init) \
static struct kunit_case stackinit_test_cases[] = {
/* These are explicitly initialized and should always pass. */
test_scalars(zero);
test_structs(zero);
KUNIT_test_scalars(zero),
KUNIT_test_structs(zero),
/* Padding here appears to be accidentally always initialized? */
test_structs(dynamic_partial);
test_structs(assigned_dynamic_partial);
KUNIT_test_structs(dynamic_partial),
KUNIT_test_structs(assigned_dynamic_partial),
/* Padding initialization depends on compiler behaviors. */
test_structs(static_partial);
test_structs(static_all);
test_structs(dynamic_all);
test_structs(runtime_partial);
test_structs(runtime_all);
test_structs(assigned_static_partial);
test_structs(assigned_static_all);
test_structs(assigned_dynamic_all);
KUNIT_test_structs(static_partial),
KUNIT_test_structs(static_all),
KUNIT_test_structs(dynamic_all),
KUNIT_test_structs(runtime_partial),
KUNIT_test_structs(runtime_all),
KUNIT_test_structs(assigned_static_partial),
KUNIT_test_structs(assigned_static_all),
KUNIT_test_structs(assigned_dynamic_all),
/* Everything fails this since it effectively performs a memcpy(). */
test_structs(assigned_copy);
KUNIT_test_structs(assigned_copy),
/* STRUCTLEAK_BYREF_ALL should cover everything from here down. */
test_scalars(none);
failures += test_switch_1_none();
failures += test_switch_2_none();
KUNIT_test_scalars(none),
KUNIT_CASE(test_switch_1_none),
KUNIT_CASE(test_switch_2_none),
/* STRUCTLEAK_BYREF should cover from here down. */
test_structs(none);
KUNIT_test_structs(none),
/* STRUCTLEAK will only cover this. */
failures += test_user();
if (failures == 0)
pr_info("all tests passed!\n");
else
pr_err("failures: %u\n", failures);
KUNIT_CASE(test_user),
{}
};
return failures ? -EINVAL : 0;
}
module_init(test_stackinit_init);
static struct kunit_suite stackinit_test_suite = {
.name = "stackinit",
.test_cases = stackinit_test_cases,
};
static void __exit test_stackinit_exit(void)
{ }
module_exit(test_stackinit_exit);
kunit_test_suites(&stackinit_test_suite);
MODULE_LICENSE("GPL");
......@@ -10,6 +10,7 @@ CLANG_TARGET_FLAGS_powerpc := powerpc64le-linux-gnu
CLANG_TARGET_FLAGS_riscv := riscv64-linux-gnu
CLANG_TARGET_FLAGS_s390 := s390x-linux-gnu
CLANG_TARGET_FLAGS_x86 := x86_64-linux-gnu
CLANG_TARGET_FLAGS_um := $(CLANG_TARGET_FLAGS_$(SUBARCH))
CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(SRCARCH))
ifeq ($(CROSS_COMPILE),)
......