Commit 12ee72fe authored by Linus Torvalds

Merge tag 'mm-hotfixes-stable-2023-11-17-14-04' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Thirteen hotfixes. Seven are cc:stable and the remainder pertain to
  post-6.6 issues or aren't considered suitable for backporting"

* tag 'mm-hotfixes-stable-2023-11-17-14-04' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm: more ptep_get() conversion
  parisc: fix mmap_base calculation when stack grows upwards
  mm/damon/core.c: avoid unintentional filtering out of schemes
  mm: kmem: drop __GFP_NOFAIL when allocating objcg vectors
  mm/damon/sysfs-schemes: handle tried region directory allocation failure
  mm/damon/sysfs-schemes: handle tried regions sysfs directory allocation failure
  mm/damon/sysfs: check error from damon_sysfs_update_target()
  mm: fix for negative counter: nr_file_hugepages
  selftests/mm: add hugetlb_fault_after_madv to .gitignore
  selftests/mm: restore number of hugepages
  selftests: mm: fix some build warnings
  selftests: mm: skip whole test instead of failure
  mm/damon/sysfs: eliminate potential uninitialized variable warning
parents ffd75bc7 afccb080
@@ -140,11 +140,11 @@ config ARCH_MMAP_RND_COMPAT_BITS_MIN
 	default 8
 
 config ARCH_MMAP_RND_BITS_MAX
-	default 24 if 64BIT
-	default 17
+	default 18 if 64BIT
+	default 13
 
 config ARCH_MMAP_RND_COMPAT_BITS_MAX
-	default 17
+	default 13
 
 # unless you want to implement ACPI on PA-RISC ... ;-)
 config PM
...
@@ -349,15 +349,7 @@ struct pt_regs;	/* forward declaration... */
 
 #define ELF_HWCAP	0
 
-/* Masks for stack and mmap randomization */
-#define BRK_RND_MASK	(is_32bit_task() ? 0x07ffUL : 0x3ffffUL)
-#define MMAP_RND_MASK	(is_32bit_task() ? 0x1fffUL : 0x3ffffUL)
-#define STACK_RND_MASK	MMAP_RND_MASK
-
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *);
-#define arch_randomize_brk arch_randomize_brk
-
+#define STACK_RND_MASK	0x7ff	/* 8MB of VA */
 
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 struct linux_binprm;
...
@@ -47,6 +47,8 @@
 
 #ifndef __ASSEMBLY__
 
+struct rlimit;
+unsigned long mmap_upper_limit(struct rlimit *rlim_stack);
 unsigned long calc_max_stack_size(unsigned long stack_max);
 
 /*
...
@@ -77,7 +77,7 @@ unsigned long calc_max_stack_size(unsigned long stack_max)
  * indicating that "current" should be used instead of a passed-in
  * value from the exec bprm as done with arch_pick_mmap_layout().
  */
-static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
+unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
 {
 	unsigned long stack_base;
...
@@ -924,7 +924,7 @@ static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
 		matched = true;
 		break;
 	default:
-		break;
+		return false;
 	}
 
 	return matched == filter->matching;
...
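The fix matters because "matched" starts out false: with the old "break", an unhandled filter type fell through to the comparison below, and a filter whose "matching" field is false would then filter the region out unintentionally. A minimal standalone sketch of the decision logic (illustrative only, not the kernel code; the function and parameter names here are made up):

    #include <stdbool.h>

    /* Sketch: decide whether a region is filtered out.  "known" says the
     * filter type was recognised, "matched" is the comparison result, and
     * "matching" is the user's configured filter polarity.  Returning false
     * for an unknown type keeps the region, which is what the hunk above
     * restores.
     */
    static bool filter_out(bool known, bool matched, bool matching)
    {
            if (!known)
                    return false;           /* never drop regions on unknown types */
            return matched == matching;     /* same comparison as __damos_filter_out() */
    }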
@@ -162,6 +162,9 @@ damon_sysfs_scheme_regions_alloc(void)
 	struct damon_sysfs_scheme_regions *regions = kmalloc(sizeof(*regions),
 			GFP_KERNEL);
 
+	if (!regions)
+		return NULL;
+
 	regions->kobj = (struct kobject){};
 	INIT_LIST_HEAD(&regions->regions_list);
 	regions->nr_regions = 0;
@@ -1823,6 +1826,8 @@ static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx,
 		return 0;
 
 	region = damon_sysfs_scheme_region_alloc(r);
+	if (!region)
+		return 0;
 	list_add_tail(&region->list, &sysfs_regions->regions_list);
 	sysfs_regions->nr_regions++;
 	if (kobject_init_and_add(&region->kobj,
...
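Both hunks add the kmalloc() failure check that was missing before the object is touched. The idiom, sketched in isolation (the struct and function names below are hypothetical, not the DAMON ones):

    #include <linux/slab.h>     /* kmalloc(), GFP_KERNEL */

    struct regions_stub {
            int nr_regions;
    };

    static struct regions_stub *regions_alloc(void)
    {
            struct regions_stub *regions = kmalloc(sizeof(*regions), GFP_KERNEL);

            /* kmalloc() may return NULL under memory pressure; bail out
             * before dereferencing, as the hunks above now do. */
            if (!regions)
                    return NULL;

            regions->nr_regions = 0;
            return regions;
    }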
@@ -1172,7 +1172,7 @@ static int damon_sysfs_update_target(struct damon_target *target,
 		struct damon_ctx *ctx,
 		struct damon_sysfs_target *sys_target)
 {
-	int err;
+	int err = 0;
 
 	if (damon_target_has_pid(ctx)) {
 		err = damon_sysfs_update_target_pid(target, sys_target->pid);
@@ -1203,8 +1203,10 @@ static int damon_sysfs_set_targets(struct damon_ctx *ctx,
 
 	damon_for_each_target_safe(t, next, ctx) {
 		if (i < sysfs_targets->nr) {
-			damon_sysfs_update_target(t, ctx,
+			err = damon_sysfs_update_target(t, ctx,
 					sysfs_targets->targets_arr[i]);
+			if (err)
+				return err;
 		} else {
 			if (damon_target_has_pid(ctx))
 				put_pid(t->pid);
...
@@ -3443,7 +3443,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 		 * handled in the specific fault path, and it'll prohibit the
 		 * fault-around logic.
 		 */
-		if (!pte_none(vmf->pte[count]))
+		if (!pte_none(ptep_get(&vmf->pte[count])))
 			goto skip;
 
 		count++;
...
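This conversion (and the ksm and userfaultfd hunks below) replaces a direct dereference of a page-table entry with ptep_get(), which on most architectures reads the entry via READ_ONCE() so the load cannot be torn or silently repeated by the compiler. The pattern, roughly (fragment only; kernel types assumed, and "do_something" is a placeholder):

    static void example(pte_t *pte)
    {
            pte_t ptent = ptep_get(pte);    /* take one snapshot of the entry */

            if (!pte_none(ptent))           /* test the snapshot, not *pte */
                    do_something(ptent);
    }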
@@ -2769,15 +2769,17 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 			int nr = folio_nr_pages(folio);
 
 			xas_split(&xas, folio, folio_order(folio));
-			if (folio_test_swapbacked(folio)) {
-				__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS,
-							-nr);
-			} else {
-				__lruvec_stat_mod_folio(folio, NR_FILE_THPS,
-							-nr);
-				filemap_nr_thps_dec(mapping);
+			if (folio_test_pmd_mappable(folio)) {
+				if (folio_test_swapbacked(folio)) {
+					__lruvec_stat_mod_folio(folio,
+							NR_SHMEM_THPS, -nr);
+				} else {
+					__lruvec_stat_mod_folio(folio,
+							NR_FILE_THPS, -nr);
+					filemap_nr_thps_dec(mapping);
+				}
 			}
 		}
 
 		__split_huge_page(page, list, end);
 		ret = 0;
...
@@ -468,7 +468,7 @@ static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long nex
 		page = pfn_swap_entry_to_page(entry);
 	}
 	/* return 1 if the page is an normal ksm page or KSM-placed zero page */
-	ret = (page && PageKsm(page)) || is_ksm_zero_pte(*pte);
+	ret = (page && PageKsm(page)) || is_ksm_zero_pte(ptent);
 	pte_unmap_unlock(pte, ptl);
 	return ret;
 }
...
@@ -2936,7 +2936,8 @@ void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
  * Moreover, it should not come from DMA buffer and is not readily
  * reclaimable. So those GFP bits should be masked off.
  */
-#define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
+#define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
+				 __GFP_ACCOUNT | __GFP_NOFAIL)
 
 /*
  * mod_objcg_mlstate() may be called with irq enabled, so
...
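The objcg vector allocation inherits the GFP flags of the original slab allocation, and it can tolerate failure, so there is no reason to carry __GFP_NOFAIL over; passing it through can trip page-allocator warnings in contexts where NOFAIL cannot be honoured. The mask is applied at the allocation site roughly like this (simplified, assumed shape of the call site in mm/memcontrol.c; details may differ):

    gfp &= ~OBJCGS_CLEAR_MASK;      /* now also strips __GFP_NOFAIL */
    vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, node);
    if (!vec)
            return -ENOMEM;         /* failure of this small vector is acceptable */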
@@ -312,7 +312,7 @@ static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
 
 	ret = -EEXIST;
 	/* Refuse to overwrite any PTE, even a PTE marker (e.g. UFFD WP). */
-	if (!pte_none(*dst_pte))
+	if (!pte_none(ptep_get(dst_pte)))
 		goto out_unlock;
 
 	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
...
@@ -414,6 +414,15 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
 
 static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
 {
+#ifdef CONFIG_STACK_GROWSUP
+	/*
+	 * For an upwards growing stack the calculation is much simpler.
+	 * Memory for the maximum stack size is reserved at the top of the
+	 * task. mmap_base starts directly below the stack and grows
+	 * downwards.
+	 */
+	return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
+#else
 	unsigned long gap = rlim_stack->rlim_cur;
 	unsigned long pad = stack_guard_gap;
@@ -431,6 +440,7 @@ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
 		gap = MAX_GAP;
 
 	return PAGE_ALIGN(STACK_TOP - gap - rnd);
+#endif
 }
 
 void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
...
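With an upwards-growing stack the base is simply the top of the reserved stack area minus the random offset, rounded down to a page boundary; mappings then grow downwards from there. A tiny userspace-style illustration of the arithmetic (the constants and the _SK macros are made up for the example, not real parisc values):

    #define PAGE_SIZE_SK            4096UL
    #define PAGE_ALIGN_DOWN_SK(x)   ((x) & ~(PAGE_SIZE_SK - 1))

    unsigned long upper = 0xf8000000UL;                     /* pretend mmap_upper_limit() result */
    unsigned long rnd   = 0x2a000UL;                        /* pretend randomization offset */
    unsigned long base  = PAGE_ALIGN_DOWN_SK(upper - rnd);  /* mmap_base; mappings grow down from here */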
@@ -45,3 +45,4 @@ mdwe_test
 gup_longterm
 mkdirty
 va_high_addr_switch
+hugetlb_fault_after_madv
@@ -94,19 +94,19 @@ int init_uffd(void)
 
 	uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
 	if (uffd == -1)
-		ksft_exit_fail_msg("uffd syscall failed\n");
+		return uffd;
 
 	uffdio_api.api = UFFD_API;
 	uffdio_api.features = UFFD_FEATURE_WP_UNPOPULATED | UFFD_FEATURE_WP_ASYNC |
 			      UFFD_FEATURE_WP_HUGETLBFS_SHMEM;
 	if (ioctl(uffd, UFFDIO_API, &uffdio_api))
-		ksft_exit_fail_msg("UFFDIO_API\n");
+		return -1;
 
 	if (!(uffdio_api.api & UFFDIO_REGISTER_MODE_WP) ||
 	    !(uffdio_api.features & UFFD_FEATURE_WP_UNPOPULATED) ||
 	    !(uffdio_api.features & UFFD_FEATURE_WP_ASYNC) ||
 	    !(uffdio_api.features & UFFD_FEATURE_WP_HUGETLBFS_SHMEM))
-		ksft_exit_fail_msg("UFFDIO_API error %llu\n", uffdio_api.api);
+		return -1;
 
 	return 0;
 }
@@ -1151,7 +1151,7 @@ int sanity_tests(void)
 	/* 9. Memory mapped file */
 	fd = open(__FILE__, O_RDONLY);
 	if (fd < 0)
-		ksft_exit_fail_msg("%s Memory mapped file\n");
+		ksft_exit_fail_msg("%s Memory mapped file\n", __func__);
 
 	ret = stat(__FILE__, &sbuf);
 	if (ret < 0)
@@ -1159,7 +1159,7 @@ int sanity_tests(void)
 
 	fmem = mmap(NULL, sbuf.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
 	if (fmem == MAP_FAILED)
-		ksft_exit_fail_msg("error nomem %ld %s\n", errno, strerror(errno));
+		ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno));
 
 	tmp_buf = malloc(sbuf.st_size);
 	memcpy(tmp_buf, fmem, sbuf.st_size);
@@ -1189,7 +1189,7 @@ int sanity_tests(void)
 
 	fmem = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 	if (fmem == MAP_FAILED)
-		ksft_exit_fail_msg("error nomem %ld %s\n", errno, strerror(errno));
+		ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno));
 
 	wp_init(fmem, buf_size);
 	wp_addr_range(fmem, buf_size);
@@ -1479,6 +1479,10 @@ int main(void)
 	struct stat sbuf;
 
 	ksft_print_header();
+
+	if (init_uffd())
+		return ksft_exit_pass();
+
 	ksft_set_plan(115);
 
 	page_size = getpagesize();
@@ -1488,9 +1492,6 @@ int main(void)
 	if (pagemap_fd < 0)
 		return -EINVAL;
 
-	if (init_uffd())
-		ksft_exit_fail_msg("uffd init failed\n");
-
 	/* 1. Sanity testing */
 	sanity_tests_sd();
@@ -1595,7 +1596,7 @@ int main(void)
 
 	fmem = mmap(NULL, sbuf.st_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 	if (fmem == MAP_FAILED)
-		ksft_exit_fail_msg("error nomem %ld %s\n", errno, strerror(errno));
+		ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno));
 
 	wp_init(fmem, sbuf.st_size);
 	wp_addr_range(fmem, sbuf.st_size);
@@ -1623,7 +1624,7 @@ int main(void)
 
 	fmem = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 	if (fmem == MAP_FAILED)
-		ksft_exit_fail_msg("error nomem %ld %s\n", errno, strerror(errno));
+		ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno));
 
 	wp_init(fmem, buf_size);
 	wp_addr_range(fmem, buf_size);
...
@@ -223,9 +223,12 @@ CATEGORY="hugetlb" run_test ./hugepage-mremap
 CATEGORY="hugetlb" run_test ./hugepage-vmemmap
 CATEGORY="hugetlb" run_test ./hugetlb-madvise
 
+nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
 # For this test, we need one and just one huge page
 echo 1 > /proc/sys/vm/nr_hugepages
 CATEGORY="hugetlb" run_test ./hugetlb_fault_after_madv
+# Restore the previous number of huge pages, since further tests rely on it
+echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
 
 if test_selected "hugetlb"; then
 	echo "NOTE: These hugetlb tests provide minimal coverage. Use"
...