Commit dc189b8e authored by Linus Torvalds

Merge tag 'mm-hotfixes-stable-2024-03-27-11-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Various hotfixes. About half are cc:stable and the remainder address
  post-6.8 issues or aren't considered suitable for backporting.

  zswap figures prominently in the post-6.8 issues - follow-up fixes to the
  large number of changes we have just made to that code.

  Apart from that, all over the map"

* tag 'mm-hotfixes-stable-2024-03-27-11-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (21 commits)
  crash: use macro to add crashk_res into iomem early for specific arch
  mm: zswap: fix data loss on SWP_SYNCHRONOUS_IO devices
  selftests/mm: fix ARM related issue with fork after pthread_create
  hexagon: vmlinux.lds.S: handle attributes section
  userfaultfd: fix deadlock warning when locking src and dst VMAs
  tmpfs: fix race on handling dquot rbtree
  selftests/mm: sigbus-wp test requires UFFD_FEATURE_WP_HUGETLBFS_SHMEM
  mm: zswap: fix writeback shrinker GFP_NOIO/GFP_NOFS recursion
  ARM: prctl: reject PR_SET_MDWE on pre-ARMv6
  prctl: generalize PR_SET_MDWE support check to be per-arch
  MAINTAINERS: remove incorrect M: tag for dm-devel@lists.linux.dev
  mm: zswap: fix kernel BUG in sg_init_one
  selftests: mm: restore settings from only parent process
  tools/Makefile: remove cgroup target
  mm: cachestat: fix two shmem bugs
  mm: increase folio batch size
  mm,page_owner: fix recursion
  mailmap: update entry for Leonard Crestez
  init: open /initrd.image with O_LARGEFILE
  selftests/mm: Fix build with _FORTIFY_SOURCE
  ...
parents 96249052 32fbe524
@@ -340,7 +340,8 @@ Lee Jones <lee@kernel.org> <joneslee@google.com>
Lee Jones <lee@kernel.org> <lee.jones@canonical.com>
Lee Jones <lee@kernel.org> <lee.jones@linaro.org>
Lee Jones <lee@kernel.org> <lee@ubuntu.com>
Leonard Crestez <leonard.crestez@nxp.com> Leonard Crestez <cdleonard@gmail.com>
Leonard Crestez <cdleonard@gmail.com> <leonard.crestez@nxp.com>
Leonard Crestez <cdleonard@gmail.com> <leonard.crestez@intel.com>
Leonardo Bras <leobras.c@gmail.com> <leonardo@linux.ibm.com>
Leonard Göhrs <l.goehrs@pengutronix.de>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
......
@@ -6173,7 +6173,6 @@ F: include/uapi/linux/dm-*.h
DEVICE-MAPPER VDO TARGET
M: Matthew Sakai <msakai@redhat.com>
M: dm-devel@lists.linux.dev
L: dm-devel@lists.linux.dev
S: Maintained
F: Documentation/admin-guide/device-mapper/vdo*.rst
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MMAN_H__
#define __ASM_MMAN_H__
#include <asm/system_info.h>
#include <uapi/asm/mman.h>
static inline bool arch_memory_deny_write_exec_supported(void)
{
return cpu_architecture() >= CPU_ARCH_ARMv6;
}
#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
#endif /* __ASM_MMAN_H__ */
@@ -63,6 +63,7 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
ELF_DETAILS
.hexagon.attributes 0 : { *(.hexagon.attributes) }
DISCARDS
}
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MMAN_H__
#define __ASM_MMAN_H__
#include <uapi/asm/mman.h>
/* PARISC cannot allow mdwe as it needs writable stacks */
static inline bool arch_memory_deny_write_exec_supported(void)
{
return false;
}
#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
#endif /* __ASM_MMAN_H__ */
@@ -39,4 +39,6 @@ static inline unsigned long crash_low_size_default(void)
#endif
}
#define HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
#endif /* _X86_CRASH_RESERVE_H */
@@ -162,6 +162,14 @@ calc_vm_flag_bits(unsigned long flags)
unsigned long vm_commit_limit(void);
#ifndef arch_memory_deny_write_exec_supported
static inline bool arch_memory_deny_write_exec_supported(void)
{
return true;
}
#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
#endif
/*
* Denies creating a writable executable mapping or gaining executable permissions.
*
......
@@ -11,8 +11,8 @@
#include <linux/types.h>
/* 15 pointers + header align the folio_batch structure to a power of two */
#define PAGEVEC_SIZE 15
/* 31 pointers + header align the folio_batch structure to a power of two */
#define PAGEVEC_SIZE 31
struct folio;
......
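
A quick check of the sizing comment above: the batch header is three byte-sized fields padded to pointer alignment (8 bytes on 64-bit), so 31 pointers brings the whole structure to 256 bytes. A minimal standalone sketch; struct folio_batch_sketch is an illustrative copy of the kernel layout, not the real linux/pagevec.h definition:

#include <stdbool.h>
#include <stdio.h>

#define PAGEVEC_SIZE 31

struct folio;

/* Illustrative copy of struct folio_batch's layout, for sizing only. */
struct folio_batch_sketch {
    unsigned char nr;
    unsigned char i;
    bool percpu_pvec_drained;
    struct folio *folios[PAGEVEC_SIZE];   /* 31 * 8 bytes on 64-bit */
};

int main(void)
{
    /* 8-byte padded header + 248 bytes of pointers = 256, a power of two. */
    printf("%zu bytes\n", sizeof(struct folio_batch_sketch));
    return 0;
}
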
@@ -682,7 +682,7 @@ static void __init populate_initrd_image(char *err)
printk(KERN_INFO "rootfs image is not initramfs (%s); looks like an initrd\n",
err);
file = filp_open("/initrd.image", O_WRONLY | O_CREAT, 0700);
file = filp_open("/initrd.image", O_WRONLY|O_CREAT|O_LARGEFILE, 0700);
if (IS_ERR(file))
return;
......
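
The flag matters because opens without O_LARGEFILE cap the file at 2 GiB: writes past MAX_NON_LFS fail with EFBIG, which truncated large initrd images. A userspace sketch of the same pattern (the path is hypothetical; 64-bit userspace implies O_LARGEFILE, so the explicit flag mainly matters for 32-bit builds):

#define _GNU_SOURCE             /* exposes O_LARGEFILE in <fcntl.h> */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* Without O_LARGEFILE, a 32-bit process gets EFBIG writing past 2 GiB. */
    int fd = open("/tmp/initrd.image", O_WRONLY | O_CREAT | O_LARGEFILE, 0700);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    close(fd);
    return 0;
}
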
@@ -366,7 +366,9 @@ static int __init reserve_crashkernel_low(unsigned long long low_size)
crashk_low_res.start = low_base;
crashk_low_res.end = low_base + low_size - 1;
#ifdef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
insert_resource(&iomem_resource, &crashk_low_res);
#endif
#endif
return 0;
}
@@ -448,8 +450,12 @@ void __init reserve_crashkernel_generic(char *cmdline,
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
#ifdef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
insert_resource(&iomem_resource, &crashk_res);
#endif
}
#ifndef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
static __init int insert_crashkernel_resources(void)
{
if (crashk_res.start < crashk_res.end)
@@ -462,3 +468,4 @@ static __init int insert_crashkernel_resources(void)
}
early_initcall(insert_crashkernel_resources);
#endif
#endif
@@ -2408,8 +2408,11 @@ static inline int prctl_set_mdwe(unsigned long bits, unsigned long arg3,
if (bits & PR_MDWE_NO_INHERIT && !(bits & PR_MDWE_REFUSE_EXEC_GAIN))
return -EINVAL;
/* PARISC cannot allow mdwe as it needs writable stacks */
if (IS_ENABLED(CONFIG_PARISC))
/*
* EOPNOTSUPP might be more appropriate here in principle, but
* existing userspace depends on EINVAL specifically.
*/
if (!arch_memory_deny_write_exec_supported())
return -EINVAL;
current_bits = get_current_mdwe();
......
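
For reference, the hook gates the PR_SET_MDWE prctl from userspace; a minimal caller might look like the sketch below (the constants live in <linux/prctl.h>; the fallback define is for older headers and assumes the upstream value 65):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_MDWE
#define PR_SET_MDWE 65                  /* fallback for older headers */
#define PR_MDWE_REFUSE_EXEC_GAIN 1
#endif

int main(void)
{
    /* Deny writable+executable mappings for this process from now on. */
    if (prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0L, 0L, 0L)) {
        perror("PR_SET_MDWE");          /* now EINVAL on pre-ARMv6 and parisc */
        return 1;
    }
    return 0;
}
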
@@ -4197,7 +4197,23 @@ static void filemap_cachestat(struct address_space *mapping,
/* shmem file - in swap cache */
swp_entry_t swp = radix_to_swp_entry(folio);
/* swapin error results in poisoned entry */
if (non_swap_entry(swp))
goto resched;
/*
* Getting a swap entry from the shmem
* inode means we beat
* shmem_unuse(). rcu_read_lock()
* ensures swapoff waits for us before
* freeing the swapper space. However,
* we can race with swapping and
* invalidation, so there might not be
* a shadow in the swapcache (yet).
*/
shadow = get_shadow_from_swap_cache(swp);
if (!shadow)
goto resched;
}
#endif
if (workingset_test_recent(shadow, true, &workingset))
......
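
Both shmem bugs are reachable from userspace through the cachestat(2) syscall on a tmpfs file. A hedged sketch of such a caller; the syscall number 451 is x86_64's, the path is hypothetical, and the structs come from <linux/mman.h> in 6.5+ UAPI headers:

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/mman.h>         /* struct cachestat_range, struct cachestat */

#ifndef __NR_cachestat
#define __NR_cachestat 451      /* x86_64 */
#endif

int main(void)
{
    struct cachestat_range range = { 0, 0 };        /* len 0 = to end of file */
    struct cachestat cs;
    int fd = open("/dev/shm/testfile", O_RDONLY);   /* hypothetical tmpfs file */

    if (fd < 0 || syscall(__NR_cachestat, fd, &range, &cs, 0)) {
        perror("cachestat");
        return 1;
    }
    printf("cached %llu, evicted %llu\n",
           (unsigned long long)cs.nr_cache,
           (unsigned long long)cs.nr_evicted);
    return 0;
}
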
@@ -1536,7 +1536,9 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
arch_check_zapped_pte(vma, ptent);
tlb_remove_tlb_entry(tlb, pte, addr);
VM_WARN_ON_ONCE(userfaultfd_wp(vma));
if (userfaultfd_pte_wp(vma, ptent))
zap_install_uffd_wp_if_needed(vma, addr, pte, 1,
details, ptent);
ksm_might_unmap_zero_page(mm, ptent);
return 1;
}
......
@@ -54,6 +54,22 @@ static depot_stack_handle_t early_handle;
static void init_early_allocated_pages(void);
static inline void set_current_in_page_owner(void)
{
/*
* Avoid recursion.
*
* We might need to allocate more memory from page_owner code, so make
* sure to signal it in order to avoid recursion.
*/
current->in_page_owner = 1;
}
static inline void unset_current_in_page_owner(void)
{
current->in_page_owner = 0;
}
static int __init early_page_owner_param(char *buf)
{
int ret = kstrtobool(buf, &page_owner_enabled);
@@ -133,23 +149,16 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
depot_stack_handle_t handle;
unsigned int nr_entries;
/*
* Avoid recursion.
*
* Sometimes page metadata allocation tracking requires more
* memory to be allocated:
* - when new stack trace is saved to stack depot
*/
if (current->in_page_owner)
return dummy_handle;
current->in_page_owner = 1;
set_current_in_page_owner();
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
handle = stack_depot_save(entries, nr_entries, flags);
if (!handle)
handle = failure_handle;
unset_current_in_page_owner();
current->in_page_owner = 0;
return handle;
}
@@ -164,9 +173,13 @@ static void add_stack_record_to_list(struct stack_record *stack_record,
gfp_mask &= (GFP_ATOMIC | GFP_KERNEL);
gfp_mask |= __GFP_NOWARN;
set_current_in_page_owner();
stack = kmalloc(sizeof(*stack), gfp_mask);
if (!stack)
if (!stack) {
unset_current_in_page_owner();
return;
}
unset_current_in_page_owner();
stack->stack_record = stack_record;
stack->next = NULL;
......
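
The guard exists because page_owner's own bookkeeping allocates memory, and that allocation re-enters page_owner. The fix above routes every such allocation through the set/unset helpers so a nested entry bails out early. The pattern in miniature (a generic userspace sketch, with a thread-local flag standing in for current->in_page_owner):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static _Thread_local bool in_tracker;   /* stands in for current->in_page_owner */

static void *tracked_alloc(size_t n);

static void record_metadata(void)
{
    if (in_tracker)
        return;                         /* nested entry: bail, don't recurse */
    in_tracker = true;
    free(tracked_alloc(64));            /* bookkeeping allocation re-enters us */
    in_tracker = false;
}

static void *tracked_alloc(size_t n)
{
    void *p = malloc(n);

    record_metadata();                  /* every allocation gets tracked */
    return p;
}

int main(void)
{
    free(tracked_alloc(128));           /* terminates thanks to the guard */
    puts("no recursion");
    return 0;
}
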
@@ -116,7 +116,7 @@ static int shmem_free_file_info(struct super_block *sb, int type)
static int shmem_get_next_id(struct super_block *sb, struct kqid *qid)
{
struct mem_dqinfo *info = sb_dqinfo(sb, qid->type);
struct rb_node *node = ((struct rb_root *)info->dqi_priv)->rb_node;
struct rb_node *node;
qid_t id = from_kqid(&init_user_ns, *qid);
struct quota_info *dqopt = sb_dqopt(sb);
struct quota_id *entry = NULL;
@@ -126,6 +126,7 @@ static int shmem_get_next_id(struct super_block *sb, struct kqid *qid)
return -ESRCH;
down_read(&dqopt->dqio_sem);
node = ((struct rb_root *)info->dqi_priv)->rb_node;
while (node) {
entry = rb_entry(node, struct quota_id, node);
@@ -165,7 +166,7 @@ static int shmem_get_next_id(struct super_block *sb, struct kqid *qid)
static int shmem_acquire_dquot(struct dquot *dquot)
{
struct mem_dqinfo *info = sb_dqinfo(dquot->dq_sb, dquot->dq_id.type);
struct rb_node **n = &((struct rb_root *)info->dqi_priv)->rb_node;
struct rb_node **n;
struct shmem_sb_info *sbinfo = dquot->dq_sb->s_fs_info;
struct rb_node *parent = NULL, *new_node = NULL;
struct quota_id *new_entry, *entry;
@@ -176,6 +177,8 @@ static int shmem_acquire_dquot(struct dquot *dquot)
mutex_lock(&dquot->dq_lock);
down_write(&dqopt->dqio_sem);
n = &((struct rb_root *)info->dqi_priv)->rb_node;
while (*n) {
parent = *n;
entry = rb_entry(parent, struct quota_id, node);
@@ -264,7 +267,7 @@ static bool shmem_is_empty_dquot(struct dquot *dquot)
static int shmem_release_dquot(struct dquot *dquot)
{
struct mem_dqinfo *info = sb_dqinfo(dquot->dq_sb, dquot->dq_id.type);
struct rb_node *node = ((struct rb_root *)info->dqi_priv)->rb_node;
struct rb_node *node;
qid_t id = from_kqid(&init_user_ns, dquot->dq_id);
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
struct quota_id *entry = NULL;
@@ -275,6 +278,7 @@ static int shmem_release_dquot(struct dquot *dquot)
goto out_dqlock;
down_write(&dqopt->dqio_sem);
node = ((struct rb_root *)info->dqi_priv)->rb_node;
while (node) {
entry = rb_entry(node, struct quota_id, node);
......
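
All three hunks fix the same race: the rb-tree root hanging off info->dqi_priv was loaded before dqio_sem was taken, so a concurrent tree update could be observed half-done. The shape of the fix in a runnable miniature, with a pthread rwlock standing in for dqio_sem:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t dqio_sem = PTHREAD_RWLOCK_INITIALIZER;
static int root_storage = 42;
static int *tree_root = &root_storage;  /* stands in for dqi_priv's rb_node */

static int lookup(void)
{
    int val;

    /* Fixed ordering: take the lock first, then load the shared root, so
     * a concurrent writer cannot swap it out from under the walk. */
    pthread_rwlock_rdlock(&dqio_sem);
    val = *tree_root;
    pthread_rwlock_unlock(&dqio_sem);
    return val;
}

int main(void)
{
    printf("%d\n", lookup());
    return 0;
}
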
@@ -1444,7 +1444,8 @@ static int uffd_move_lock(struct mm_struct *mm,
*/
down_read(&(*dst_vmap)->vm_lock->lock);
if (*dst_vmap != *src_vmap)
down_read(&(*src_vmap)->vm_lock->lock);
down_read_nested(&(*src_vmap)->vm_lock->lock,
SINGLE_DEPTH_NESTING);
}
mmap_read_unlock(mm);
return err;
......
@@ -1080,7 +1080,17 @@ static void zswap_decompress(struct zswap_entry *entry, struct page *page)
mutex_lock(&acomp_ctx->mutex);
src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
if (acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) {
/*
* If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
* to do crypto_acomp_decompress() which might sleep. In such cases, we must
* resort to copying the buffer to a temporary one.
* Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
* such as a kmap address of high memory or even a vmap address.
* However, sg_init_one is only equipped to handle linearly mapped low memory.
* In such cases, we also must copy the buffer to a temporary and lowmem one.
*/
if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
!virt_addr_valid(src)) {
memcpy(acomp_ctx->buffer, src, entry->length);
src = acomp_ctx->buffer;
zpool_unmap_handle(zpool, entry->handle);
@@ -1094,7 +1104,7 @@ static void zswap_decompress(struct zswap_entry *entry, struct page *page)
BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
mutex_unlock(&acomp_ctx->mutex);
if (!acomp_ctx->is_sleepable || zpool_can_sleep_mapped(zpool))
if (src != acomp_ctx->buffer)
zpool_unmap_handle(zpool, entry->handle);
}
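
The virt_addr_valid() test matches what the scatterlist API can accept: sg_init_one() lands in sg_set_buf(), which converts the buffer with virt_to_page() and therefore only handles linear-map (lowmem) addresses, exactly what kmap or vmap returns are not. Paraphrased from include/linux/scatterlist.h:

static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
                              unsigned int buflen)
{
#ifdef CONFIG_DEBUG_SG
    BUG_ON(!virt_addr_valid(buf));      /* the BUG the fix title refers to */
#endif
    sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
}
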
@@ -1313,6 +1323,14 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
return 0;
/*
* The shrinker resumes swap writeback, which will enter block
* and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
* rules (may_enter_fs()), which apply on a per-folio basis.
*/
if (!gfp_has_io_fs(sc->gfp_mask))
return 0;
#ifdef CONFIG_MEMCG_KMEM
mem_cgroup_flush_stats(memcg);
nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
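
gfp_has_io_fs() is the existing helper in include/linux/gfp.h, shown below for reference: it requires both __GFP_IO and __GFP_FS, so allocations under GFP_NOIO or GFP_NOFS now skip the shrinker instead of recursing into block or fs code via swap writeback.

static inline bool gfp_has_io_fs(gfp_t gfp)
{
    return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
}
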
@@ -1618,6 +1636,7 @@ bool zswap_load(struct folio *folio)
swp_entry_t swp = folio->swap;
pgoff_t offset = swp_offset(swp);
struct page *page = &folio->page;
bool swapcache = folio_test_swapcache(folio);
struct zswap_tree *tree = swap_zswap_tree(swp);
struct zswap_entry *entry;
u8 *dst;
@@ -1630,6 +1649,19 @@ bool zswap_load(struct folio *folio)
spin_unlock(&tree->lock);
return false;
}
/*
* When reading into the swapcache, invalidate our entry. The
* swapcache can be the authoritative owner of the page and
* its mappings, and the pressure that results from having two
* in-memory copies outweighs any benefits of caching the
* compression work.
*
* (Most swapins go through the swapcache. The notable
* exception is the singleton fault on SWP_SYNCHRONOUS_IO
* files, which reads into a private page and may free it if
* the fault fails. We remain the primary owner of the entry.)
*/
if (swapcache)
zswap_rb_erase(&tree->rbroot, entry);
spin_unlock(&tree->lock);
@@ -1645,9 +1677,10 @@ bool zswap_load(struct folio *folio)
if (entry->objcg)
count_objcg_event(entry->objcg, ZSWPIN);
if (swapcache) {
zswap_entry_free(entry);
folio_mark_dirty(folio);
}
return true;
}
......
@@ -11,7 +11,6 @@ help:
@echo ''
@echo ' acpi - ACPI tools'
@echo ' bpf - misc BPF tools'
@echo ' cgroup - cgroup tools'
@echo ' counter - counter tools'
@echo ' cpupower - a tool for all things x86 CPU power'
@echo ' debugging - tools for debugging'
@@ -69,7 +68,7 @@ acpi: FORCE
cpupower: FORCE
$(call descend,power/$@)
cgroup counter firewire hv guest bootconfig spi usb virtio mm bpf iio gpio objtool leds wmi pci firmware debugging tracing: FORCE
counter firewire hv guest bootconfig spi usb virtio mm bpf iio gpio objtool leds wmi pci firmware debugging tracing: FORCE
$(call descend,$@)
bpf/%: FORCE
@@ -116,7 +115,7 @@ freefall: FORCE
kvm_stat: FORCE
$(call descend,kvm/$@)
all: acpi cgroup counter cpupower gpio hv firewire \
all: acpi counter cpupower gpio hv firewire \
perf selftests bootconfig spi turbostat usb \
virtio mm bpf x86_energy_perf_policy \
tmon freefall iio objtool kvm_stat wmi \
@@ -128,7 +127,7 @@ acpi_install:
cpupower_install:
$(call descend,power/$(@:_install=),install)
cgroup_install counter_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install mm_install bpf_install objtool_install wmi_install pci_install debugging_install tracing_install:
counter_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install mm_install bpf_install objtool_install wmi_install pci_install debugging_install tracing_install:
$(call descend,$(@:_install=),install)
selftests_install:
@@ -155,7 +154,7 @@ freefall_install:
kvm_stat_install:
$(call descend,kvm/$(@:_install=),install)
install: acpi_install cgroup_install counter_install cpupower_install gpio_install \
install: acpi_install counter_install cpupower_install gpio_install \
hv_install firewire_install iio_install \
perf_install selftests_install turbostat_install usb_install \
virtio_install mm_install bpf_install x86_energy_perf_policy_install \
@@ -169,7 +168,7 @@ acpi_clean:
cpupower_clean:
$(call descend,power/cpupower,clean)
cgroup_clean counter_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean mm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean tracing_clean:
counter_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean mm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean tracing_clean:
$(call descend,$(@:_clean=),clean)
libapi_clean:
@@ -209,7 +208,7 @@ freefall_clean:
build_clean:
$(call descend,build,clean)
clean: acpi_clean cgroup_clean counter_clean cpupower_clean hv_clean firewire_clean \
clean: acpi_clean counter_clean cpupower_clean hv_clean firewire_clean \
perf_clean selftests_clean turbostat_clean bootconfig_clean spi_clean usb_clean virtio_clean \
mm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
freefall_clean build_clean libbpf_clean libsubcmd_clean \
......
@@ -203,7 +203,7 @@ int main(int argc, char **argv)
ksft_print_header();
ksft_set_plan(nthreads);
filed = open(file, O_RDWR|O_CREAT);
filed = open(file, O_RDWR|O_CREAT, 0664);
if (filed < 0)
ksft_exit_fail_msg("Unable to open %s: %s\n", file, strerror(errno));
......
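
This open() fix and the two below are one bug: when O_CREAT is in flags, open(2) reads a third mode argument, and omitting it consumes uninitialized varargs (glibc's _FORTIFY_SOURCE wrappers reject the call at build time). A runnable illustration with a hypothetical path:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* Wrong, rejected under _FORTIFY_SOURCE:
     *     open("/tmp/testfile", O_RDWR | O_CREAT);
     * The mode would be read from garbage in the variadic slot. */
    int fd = open("/tmp/testfile", O_RDWR | O_CREAT, 0664);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    close(fd);
    unlink("/tmp/testfile");
    return 0;
}
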
@@ -1745,8 +1745,11 @@ void pkey_setup_shadow(void)
shadow_pkey_reg = __read_pkey_reg();
}
pid_t parent_pid;
void restore_settings_atexit(void)
{
if (parent_pid == getpid())
cat_into_file(buf, "/proc/sys/vm/nr_hugepages");
}
@@ -1773,6 +1776,7 @@ void save_settings(void)
exit(__LINE__);
}
parent_pid = getpid();
atexit(restore_settings_atexit);
close(fd);
}
......
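
The guard is needed because atexit(3) handlers registered before fork() also run in every exiting child; comparing getpid() against the recorded parent pid makes the restore run exactly once. The mechanism in miniature:

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static pid_t parent_pid;

static void restore_atexit(void)
{
    if (getpid() == parent_pid)     /* children inherit the handler; skip there */
        puts("restoring settings (parent only)");
}

int main(void)
{
    parent_pid = getpid();
    atexit(restore_atexit);

    if (fork() == 0)
        exit(0);                    /* child exit runs the handler as a no-op */
    wait(NULL);
    return 0;                       /* parent exit performs the real restore */
}
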
@@ -137,7 +137,7 @@ static void test_mprotect(int pagemap_fd, int pagesize, bool anon)
if (!map)
ksft_exit_fail_msg("anon mmap failed\n");
} else {
test_fd = open(fname, O_RDWR | O_CREAT);
test_fd = open(fname, O_RDWR | O_CREAT, 0664);
if (test_fd < 0) {
ksft_test_result_skip("Test %s open() file failed\n", __func__);
return;
......
@@ -223,7 +223,7 @@ void split_file_backed_thp(void)
ksft_exit_fail_msg("Fail to create file-backed THP split testing file\n");
}
fd = open(testfile, O_CREAT|O_WRONLY);
fd = open(testfile, O_CREAT|O_WRONLY, 0664);
if (fd == -1) {
ksft_perror("Cannot open testing file");
goto cleanup;
......
@@ -18,6 +18,7 @@ bool test_uffdio_wp = true;
unsigned long long *count_verify;
uffd_test_ops_t *uffd_test_ops;
uffd_test_case_ops_t *uffd_test_case_ops;
atomic_bool ready_for_fork;
static int uffd_mem_fd_create(off_t mem_size, bool hugetlb)
{
@@ -518,6 +519,8 @@ void *uffd_poll_thread(void *arg)
pollfd[1].fd = pipefd[cpu*2];
pollfd[1].events = POLLIN;
ready_for_fork = true;
for (;;) {
ret = poll(pollfd, 2, -1);
if (ret <= 0) {
......
@@ -32,6 +32,7 @@
#include <inttypes.h>
#include <stdint.h>
#include <sys/random.h>
#include <stdatomic.h>
#include "../kselftest.h"
#include "vm_util.h"
@@ -103,6 +104,7 @@ extern bool map_shared;
extern bool test_uffdio_wp;
extern unsigned long long *count_verify;
extern volatile bool test_uffdio_copy_eexist;
extern atomic_bool ready_for_fork;
extern uffd_test_ops_t anon_uffd_test_ops;
extern uffd_test_ops_t shmem_uffd_test_ops;
......
@@ -775,6 +775,8 @@ static void uffd_sigbus_test_common(bool wp)
char c;
struct uffd_args args = { 0 };
ready_for_fork = false;
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
if (uffd_register(uffd, area_dst, nr_pages * page_size,
@@ -790,6 +792,9 @@
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
err("uffd_poll_thread create");
while (!ready_for_fork)
; /* Wait for the poll_thread to start executing before forking */
pid = fork();
if (pid < 0)
err("fork");
@@ -829,6 +834,8 @@ static void uffd_events_test_common(bool wp)
char c;
struct uffd_args args = { 0 };
ready_for_fork = false;
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
if (uffd_register(uffd, area_dst, nr_pages * page_size,
true, wp, false))
@@ -838,6 +845,9 @@
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
err("uffd_poll_thread create");
while (!ready_for_fork)
; /* Wait for the poll_thread to start executing before forking */
pid = fork();
if (pid < 0)
err("fork");
@@ -1427,7 +1437,8 @@ uffd_test_case_t uffd_tests[] = {
.uffd_fn = uffd_sigbus_wp_test,
.mem_targets = MEM_ALL,
.uffd_feature_required = UFFD_FEATURE_SIGBUS |
UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_PAGEFAULT_FLAG_WP,
UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_PAGEFAULT_FLAG_WP |
UFFD_FEATURE_WP_HUGETLBFS_SHMEM,
},
{
.name = "events",
......
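
The handshake in miniature: the poll thread publishes readiness through a C11 atomic_bool, and the main thread spins until it is set before forking, so the child can never race with uffd_poll_thread's setup (the ARM failure this fixes). A standalone sketch; compile with -pthread:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool ready_for_fork;

static void *poll_thread(void *arg)
{
    (void)arg;
    /* ... pollfd/userfaultfd setup would happen here ... */
    ready_for_fork = true;          /* publish: safe to fork now */
    pause();                        /* park, like the selftest's poll loop */
    return NULL;
}

int main(void)
{
    pthread_t t;

    if (pthread_create(&t, NULL, poll_thread, NULL))
        return 1;
    while (!ready_for_fork)
        ;                           /* busy-wait, mirroring the selftest */
    puts("poll thread ready; safe to fork");
    return 0;
}
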