Commit 8e819a76 authored by Linus Torvalds

Merge tag 'mm-hotfixes-stable-2023-12-07-18-47' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "31 hotfixes. Ten of these address pre-6.6 issues and are marked
  cc:stable. The remainder address post-6.6 issues or aren't considered
  serious enough to justify backporting"

* tag 'mm-hotfixes-stable-2023-12-07-18-47' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (31 commits)
  mm/madvise: add cond_resched() in madvise_cold_or_pageout_pte_range()
  nilfs2: prevent WARNING in nilfs_sufile_set_segment_usage()
  mm/hugetlb: have CONFIG_HUGETLB_PAGE select CONFIG_XARRAY_MULTI
  scripts/gdb: fix lx-device-list-bus and lx-device-list-class
  MAINTAINERS: drop Antti Palosaari
  highmem: fix a memory copy problem in memcpy_from_folio
  nilfs2: fix missing error check for sb_set_blocksize call
  kernel/Kconfig.kexec: drop select of KEXEC for CRASH_DUMP
  units: add missing header
  drivers/base/cpu: crash data showing should depends on KEXEC_CORE
  mm/damon/sysfs-schemes: add timeout for update_schemes_tried_regions
  scripts/gdb/tasks: fix lx-ps command error
  mm/Kconfig: make userfaultfd a menuconfig
  selftests/mm: prevent duplicate runs caused by TEST_GEN_PROGS
  mm/damon/core: copy nr_accesses when splitting region
  lib/group_cpus.c: avoid acquiring cpu hotplug lock in group_cpus_evenly
  checkstack: fix printed address
  mm/memory_hotplug: fix error handling in add_memory_resource()
  mm/memory_hotplug: add missing mem_hotplug_lock
  .mailmap: add a new address mapping for Chester Lin
  ...
parents 5e3f5b81 b2f557a2
@@ -117,6 +117,7 @@ Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
 Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
 Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
 Chao Yu <chao@kernel.org> <yuchao0@huawei.com>
+Chester Lin <chester62515@gmail.com> <clin@suse.com>
 Chris Chiu <chris.chiu@canonical.com> <chiu@endlessm.com>
 Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org>
 Chris Lew <quic_clew@quicinc.com> <clew@codeaurora.org>
......
@@ -2944,6 +2944,14 @@ D: IPX development and support
 N: Venkatesh Pallipadi (Venki)
 D: x86/HPET

+N: Antti Palosaari
+E: crope@iki.fi
+D: Various DVB drivers
+W: https://palosaari.fi/linux/
+S: Yliopistokatu 1 D 513
+S: FI-90570 Oulu
+S: FINLAND
+
 N: Kyungmin Park
 E: kyungmin.park@samsung.com
 D: Samsung S5Pv210 and Exynos4210 mobile platforms
......
@@ -2,8 +2,6 @@
 #ifndef _ARM_KEXEC_H
 #define _ARM_KEXEC_H

-#ifdef CONFIG_KEXEC
-
 /* Maximum physical address we can use pages from */
 #define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
 /* Maximum address we can reach in physical address mode */
@@ -82,6 +80,4 @@ static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)

 #endif /* __ASSEMBLY__ */

-#endif /* CONFIG_KEXEC */
-
 #endif /* _ARM_KEXEC_H */
@@ -59,7 +59,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += entry-ftrace.o
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o patch.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o patch.o
 obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o
 # Main staffs in KPROBES are in arch/arm/probes/ .
 obj-$(CONFIG_KPROBES) += patch.o insn.o
 obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
......
@@ -144,7 +144,7 @@ static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
 #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
 #endif /* CONFIG_HOTPLUG_CPU */

-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 #include <linux/kexec.h>

 static ssize_t crash_notes_show(struct device *dev,
@@ -189,14 +189,14 @@ static const struct attribute_group crash_note_cpu_attr_group = {
 #endif

 static const struct attribute_group *common_cpu_attr_groups[] = {
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
        &crash_note_cpu_attr_group,
 #endif
        NULL
 };

 static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
        &crash_note_cpu_attr_group,
 #endif
        NULL
......
@@ -180,6 +180,9 @@ static inline unsigned long memblk_nr_poison(struct memory_block *mem)
 }
 #endif

+/*
+ * Must acquire mem_hotplug_lock in write mode.
+ */
 static int memory_block_online(struct memory_block *mem)
 {
        unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
@@ -204,10 +207,11 @@ static int memory_block_online(struct memory_block *mem)
        if (mem->altmap)
                nr_vmemmap_pages = mem->altmap->free;

+       mem_hotplug_begin();
        if (nr_vmemmap_pages) {
                ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
                if (ret)
-                       return ret;
+                       goto out;
        }

        ret = online_pages(start_pfn + nr_vmemmap_pages,
@@ -215,7 +219,7 @@ static int memory_block_online(struct memory_block *mem)
        if (ret) {
                if (nr_vmemmap_pages)
                        mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
-               return ret;
+               goto out;
        }

        /*
@@ -227,9 +231,14 @@ static int memory_block_online(struct memory_block *mem)
                                          nr_vmemmap_pages);

        mem->zone = zone;
+out:
+       mem_hotplug_done();
        return ret;
 }

+/*
+ * Must acquire mem_hotplug_lock in write mode.
+ */
 static int memory_block_offline(struct memory_block *mem)
 {
        unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
@@ -247,6 +256,7 @@ static int memory_block_offline(struct memory_block *mem)
        if (mem->altmap)
                nr_vmemmap_pages = mem->altmap->free;

+       mem_hotplug_begin();
        if (nr_vmemmap_pages)
                adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
                                          -nr_vmemmap_pages);
@@ -258,13 +268,15 @@ static int memory_block_offline(struct memory_block *mem)
                if (nr_vmemmap_pages)
                        adjust_present_page_count(pfn_to_page(start_pfn),
                                                  mem->group, nr_vmemmap_pages);
-               return ret;
+               goto out;
        }

        if (nr_vmemmap_pages)
                mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);

        mem->zone = NULL;
+out:
+       mem_hotplug_done();
        return ret;
 }
......
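The memory.c change above follows a common locking pattern: the whole online/offline operation now runs with mem_hotplug_lock held by the memory_block_* wrapper itself, and every early exit is funneled through a single out: label that drops the lock. Below is a minimal userspace sketch of that shape only, using a pthread mutex and hypothetical helper names (prepare_step/commit_step are stand-ins, not the kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t resource_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical helpers standing in for the kernel's per-stage work. */
static int prepare_step(int arg) { return arg < 0 ? -1 : 0; }
static int commit_step(int arg)  { return arg > 100 ? -1 : 0; }

/* The whole operation runs under the lock; every failure path exits through 'out'. */
static int do_online(int arg)
{
        int ret;

        pthread_mutex_lock(&resource_lock);

        ret = prepare_step(arg);
        if (ret)
                goto out;

        ret = commit_step(arg);
        if (ret)
                goto out;

out:
        /* Single unlock point: no early return can leak the lock. */
        pthread_mutex_unlock(&resource_lock);
        return ret;
}

int main(void)
{
        printf("ok=%d fail=%d\n", do_online(5), do_online(-1));
        return 0;
}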
@@ -268,6 +268,7 @@ config HUGETLBFS

 config HUGETLB_PAGE
        def_bool HUGETLBFS
+       select XARRAY_MULTI

 config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
        def_bool HUGETLB_PAGE
......
@@ -501,15 +501,38 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
        down_write(&NILFS_MDT(sufile)->mi_sem);
        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
-       if (!ret) {
-               mark_buffer_dirty(bh);
-               nilfs_mdt_mark_dirty(sufile);
+       if (ret)
+               goto out_sem;
+
        kaddr = kmap_atomic(bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
+       if (unlikely(nilfs_segment_usage_error(su))) {
+               struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
+
+               kunmap_atomic(kaddr);
+               brelse(bh);
+               if (nilfs_segment_is_active(nilfs, segnum)) {
+                       nilfs_error(sufile->i_sb,
+                                   "active segment %llu is erroneous",
+                                   (unsigned long long)segnum);
+               } else {
+                       /*
+                        * Segments marked erroneous are never allocated by
+                        * nilfs_sufile_alloc(); only active segments, ie,
+                        * the segments indexed by ns_segnum or ns_nextnum,
+                        * can be erroneous here.
+                        */
+                       WARN_ON_ONCE(1);
+               }
+               ret = -EIO;
+       } else {
                nilfs_segment_usage_set_dirty(su);
                kunmap_atomic(kaddr);
+               mark_buffer_dirty(bh);
+               nilfs_mdt_mark_dirty(sufile);
                brelse(bh);
        }
+out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
 }
@@ -536,9 +559,14 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
        kaddr = kmap_atomic(bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
-       WARN_ON(nilfs_segment_usage_error(su));
-       if (modtime)
+       if (modtime) {
+               /*
+                * Check segusage error and set su_lastmod only when updating
+                * this entry with a valid timestamp, not for cancellation.
+                */
+               WARN_ON_ONCE(nilfs_segment_usage_error(su));
                su->su_lastmod = cpu_to_le64(modtime);
+       }
        su->su_nblocks = cpu_to_le32(nblocks);
        kunmap_atomic(kaddr);
......
@@ -716,7 +716,11 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
                goto failed_sbh;
        }
        nilfs_release_super_block(nilfs);
-       sb_set_blocksize(sb, blocksize);
+       if (!sb_set_blocksize(sb, blocksize)) {
+               nilfs_err(sb, "bad blocksize %d", blocksize);
+               err = -EINVAL;
+               goto out;
+       }

        err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
        if (err)
......
@@ -1982,15 +1982,31 @@ static int pagemap_scan_test_walk(unsigned long start, unsigned long end,
        struct pagemap_scan_private *p = walk->private;
        struct vm_area_struct *vma = walk->vma;
        unsigned long vma_category = 0;
+       bool wp_allowed = userfaultfd_wp_async(vma) &&
+           userfaultfd_wp_use_markers(vma);

-       if (userfaultfd_wp_async(vma) && userfaultfd_wp_use_markers(vma))
-               vma_category |= PAGE_IS_WPALLOWED;
-       else if (p->arg.flags & PM_SCAN_CHECK_WPASYNC)
+       if (!wp_allowed) {
+               /* User requested explicit failure over wp-async capability */
+               if (p->arg.flags & PM_SCAN_CHECK_WPASYNC)
                        return -EPERM;
+               /*
+                * User requires wr-protect, and allows silently skipping
+                * unsupported vmas.
+                */
+               if (p->arg.flags & PM_SCAN_WP_MATCHING)
+                       return 1;
+               /*
+                * Then the request doesn't involve wr-protects at all,
+                * fall through to the rest checks, and allow vma walk.
+                */
+       }

        if (vma->vm_flags & VM_PFNMAP)
                return 1;

+       if (wp_allowed)
+               vma_category |= PAGE_IS_WPALLOWED;
+
        if (!pagemap_scan_is_interesting_vma(vma_category, p))
                return 1;
@@ -2140,7 +2156,7 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
                return 0;
        }

-       if (!p->vec_out) {
+       if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
                /* Fast path for performing exclusive WP */
                for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
                        if (pte_uffd_wp(ptep_get(pte)))
......
@@ -321,7 +321,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
                TRACE("Block @ 0x%llx, %scompressed size %d\n", index - 2,
                      compressed ? "" : "un", length);
        }
-       if (length < 0 || length > output->length ||
+       if (length <= 0 || length > output->length ||
            (index + length) > msblk->bytes_used) {
                res = -EIO;
                goto out;
......
@@ -454,7 +454,7 @@ static inline void memcpy_from_folio(char *to, struct folio *folio,
                memcpy(to, from, chunk);
                kunmap_local(from);

-               from += chunk;
+               to += chunk;
                offset += chunk;
                len -= chunk;
        } while (len > 0);
......
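The highmem.h fix above is a pointer-advance bug in a chunked copy: the destination must advance after each chunk, while the source position is recomputed from offset every iteration. A small self-contained C illustration of the corrected loop shape (plain memcpy over a flat buffer, not the folio/kmap API; assumes len > 0, as the kernel helper's do/while loop does):

#include <assert.h>
#include <string.h>

/* Copy 'len' bytes starting at 'offset' in 'src' into 'to', in small chunks. */
static void copy_chunked(char *to, const char *src, size_t offset, size_t len)
{
        do {
                size_t chunk = len < 3 ? len : 3;   /* tiny chunks just to exercise the loop */
                const char *from = src + offset;    /* source recomputed from offset */

                memcpy(to, from, chunk);
                to += chunk;                        /* the fix: advance the destination */
                offset += chunk;
                len -= chunk;
        } while (len > 0);
}

int main(void)
{
        char dst[8] = {0};

        copy_chunked(dst, "xxhello!", 2, 6);
        assert(memcmp(dst, "hello!", 6) == 0);
        return 0;
}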
@@ -1268,10 +1268,7 @@ static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
        return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
 }

-static inline bool __vma_private_lock(struct vm_area_struct *vma)
-{
-       return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
-}
+bool __vma_private_lock(struct vm_area_struct *vma);

 /*
  * Safe version of huge_pte_offset() to check the locks. See comments
......
@@ -2,6 +2,7 @@
 #ifndef _LINUX_UNITS_H
 #define _LINUX_UNITS_H

+#include <linux/bits.h>
 #include <linux/math.h>

 /* Metric prefixes in accordance with Système international (d'unités) */
......
@@ -97,7 +97,6 @@ config CRASH_DUMP
        depends on ARCH_SUPPORTS_KEXEC
        select CRASH_CORE
        select KEXEC_CORE
-       select KEXEC
        help
          Generate crash dump after being started by kexec.
          This should be normally only set in special crash dump kernels
......
@@ -366,13 +366,25 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
        if (!masks)
                goto fail_node_to_cpumask;

-       /* Stabilize the cpumasks */
-       cpus_read_lock();
        build_node_to_cpumask(node_to_cpumask);

+       /*
+        * Make a local cache of 'cpu_present_mask', so the two stages
+        * spread can observe consistent 'cpu_present_mask' without holding
+        * cpu hotplug lock, then we can reduce deadlock risk with cpu
+        * hotplug code.
+        *
+        * Here CPU hotplug may happen when reading `cpu_present_mask`, and
+        * we can live with the case because it only affects that hotplug
+        * CPU is handled in the 1st or 2nd stage, and either way is correct
+        * from API user viewpoint since 2-stage spread is sort of
+        * optimization.
+        */
+       cpumask_copy(npresmsk, data_race(cpu_present_mask));
+
        /* grouping present CPUs first */
        ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
-                                 cpu_present_mask, nmsk, masks);
+                                 npresmsk, nmsk, masks);
        if (ret < 0)
                goto fail_build_affinity;
        nr_present = ret;
@@ -387,15 +399,13 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
                curgrp = 0;
        else
                curgrp = nr_present;
-       cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
+       cpumask_andnot(npresmsk, cpu_possible_mask, npresmsk);
        ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
                                  npresmsk, nmsk, masks);
        if (ret >= 0)
                nr_others = ret;

 fail_build_affinity:
-       cpus_read_unlock();
-
        if (ret >= 0)
                WARN_ON(nr_present + nr_others < numgrps);
......
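The group_cpus.c comment above describes the key idea: take one local snapshot of the shared present mask, then run both spreading stages against that snapshot instead of holding the hotplug lock across the whole operation. A rough userspace analog of the two-stage split over a consistent snapshot (a plain bitmask, not the kernel cpumask API):

#include <stdint.h>
#include <stdio.h>

/* Shared, possibly concurrently-updated set of "present" items. */
static volatile uint64_t present_mask = 0x2f;
static const uint64_t possible_mask = 0xff;

static int count_bits(uint64_t m)
{
        int n = 0;

        for (; m; m &= m - 1)
                n++;
        return n;
}

int main(void)
{
        /* One snapshot up front; both stages below use it consistently. */
        uint64_t present_snapshot = present_mask;

        /* Stage 1: items present at snapshot time. */
        int nr_present = count_bits(present_snapshot);

        /* Stage 2: the rest, computed against the same snapshot. */
        uint64_t others = possible_mask & ~present_snapshot;
        int nr_others = count_bits(others);

        printf("present=%d others=%d total=%d\n",
               nr_present, nr_others, nr_present + nr_others);
        return 0;
}

An item that changes state after the snapshot simply lands in one stage or the other, which is what the original comment argues is acceptable.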
@@ -1201,13 +1201,6 @@ config ANON_VMA_NAME
          area from being merged with adjacent virtual memory areas due to the
          difference in their name.

-config USERFAULTFD
-       bool "Enable userfaultfd() system call"
-       depends on MMU
-       help
-         Enable the userfaultfd() system call that allows to intercept and
-         handle page faults in userland.
-
 config HAVE_ARCH_USERFAULTFD_WP
        bool
        help
@@ -1218,6 +1211,14 @@ config HAVE_ARCH_USERFAULTFD_MINOR
        help
          Arch has userfaultfd minor fault support

+menuconfig USERFAULTFD
+       bool "Enable userfaultfd() system call"
+       depends on MMU
+       help
+         Enable the userfaultfd() system call that allows to intercept and
+         handle page faults in userland.
+
+if USERFAULTFD
 config PTE_MARKER_UFFD_WP
        bool "Userfaultfd write protection support for shmem/hugetlbfs"
        default y
@@ -1227,6 +1228,7 @@ config PTE_MARKER_UFFD_WP
          Allows to create marker PTEs for userfaultfd write protection
          purposes. It is required to enable userfaultfd write protection on
          file-backed memory types like shmem and hugetlbfs.
+endif # USERFAULTFD

 # multi-gen LRU {
 config LRU_GEN
......
@@ -1225,6 +1225,7 @@ static void damon_split_region_at(struct damon_target *t,
        new->age = r->age;
        new->last_nr_accesses = r->last_nr_accesses;
        new->nr_accesses_bp = r->nr_accesses_bp;
+       new->nr_accesses = r->nr_accesses;

        damon_insert_region(new, r, damon_next_region(r), t);
 }
......
@@ -139,6 +139,13 @@ static const struct kobj_type damon_sysfs_scheme_region_ktype = {
  * damon_sysfs_before_damos_apply() understands the situation by showing the
  * 'finished' status and do nothing.
  *
+ * If DAMOS is not applied to any region due to any reasons including the
+ * access pattern, the watermarks, the quotas, and the filters,
+ * ->before_damos_apply() will not be called back. Until the situation is
+ * changed, the update will not be finished. To avoid this,
+ * damon_sysfs_after_sampling() set the status as 'finished' if more than two
+ * apply intervals of the scheme is passed while the state is 'idle'.
+ *
  * Finally, the tried regions request handling finisher function
  * (damon_sysfs_schemes_update_regions_stop()) unregisters the callbacks.
  */
@@ -154,6 +161,7 @@ struct damon_sysfs_scheme_regions {
        int nr_regions;
        unsigned long total_bytes;
        enum damos_sysfs_regions_upd_status upd_status;
+       unsigned long upd_timeout_jiffies;
 };

 static struct damon_sysfs_scheme_regions *
@@ -1854,7 +1862,9 @@ static int damon_sysfs_after_sampling(struct damon_ctx *ctx)
        for (i = 0; i < sysfs_schemes->nr; i++) {
                sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
                if (sysfs_regions->upd_status ==
-                               DAMOS_TRIED_REGIONS_UPD_STARTED)
+                               DAMOS_TRIED_REGIONS_UPD_STARTED ||
+                               time_after(jiffies,
+                                       sysfs_regions->upd_timeout_jiffies))
                        sysfs_regions->upd_status =
                                DAMOS_TRIED_REGIONS_UPD_FINISHED;
        }
@@ -1885,14 +1895,41 @@ int damon_sysfs_schemes_clear_regions(
        return 0;
 }

+static struct damos *damos_sysfs_nth_scheme(int n, struct damon_ctx *ctx)
+{
+       struct damos *scheme;
+       int i = 0;
+
+       damon_for_each_scheme(scheme, ctx) {
+               if (i == n)
+                       return scheme;
+               i++;
+       }
+       return NULL;
+}
+
 static void damos_tried_regions_init_upd_status(
-               struct damon_sysfs_schemes *sysfs_schemes)
+               struct damon_sysfs_schemes *sysfs_schemes,
+               struct damon_ctx *ctx)
 {
        int i;
+       struct damos *scheme;
+       struct damon_sysfs_scheme_regions *sysfs_regions;

-       for (i = 0; i < sysfs_schemes->nr; i++)
-               sysfs_schemes->schemes_arr[i]->tried_regions->upd_status =
-                       DAMOS_TRIED_REGIONS_UPD_IDLE;
+       for (i = 0; i < sysfs_schemes->nr; i++) {
+               sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
+               scheme = damos_sysfs_nth_scheme(i, ctx);
+               if (!scheme) {
+                       sysfs_regions->upd_status =
+                               DAMOS_TRIED_REGIONS_UPD_FINISHED;
+                       continue;
+               }
+               sysfs_regions->upd_status = DAMOS_TRIED_REGIONS_UPD_IDLE;
+               sysfs_regions->upd_timeout_jiffies = jiffies +
+                       2 * usecs_to_jiffies(scheme->apply_interval_us ?
+                                       scheme->apply_interval_us :
+                                       ctx->attrs.sample_interval);
+       }
 }

 /* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */
@@ -1902,7 +1939,7 @@ int damon_sysfs_schemes_update_regions_start(
 {
        damon_sysfs_schemes_clear_regions(sysfs_schemes, ctx);
        damon_sysfs_schemes_for_damos_callback = sysfs_schemes;
-       damos_tried_regions_init_upd_status(sysfs_schemes);
+       damos_tried_regions_init_upd_status(sysfs_schemes, ctx);
        damos_regions_upd_total_bytes_only = total_bytes_only;
        ctx->callback.before_damos_apply = damon_sysfs_before_damos_apply;
        ctx->callback.after_sampling = damon_sysfs_after_sampling;
......
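The DAMON change above arms a deadline of two apply intervals and declares the update 'finished' once time_after(jiffies, deadline) trips. For readers unfamiliar with the idiom, here is a small userspace sketch of a wraparound-safe deadline check in the same spirit (a plain unsigned tick counter, not kernel jiffies; the cast trick mirrors time_after()):

#include <stdbool.h>
#include <stdio.h>

/* Wraparound-safe "a is after b", same idea as the kernel's time_after(). */
static bool tick_after(unsigned long a, unsigned long b)
{
        return (long)(b - a) < 0;
}

int main(void)
{
        unsigned long now = (unsigned long)-5;   /* deliberately close to wraparound */
        unsigned long deadline = now + 2 * 10;   /* two "apply intervals" of 10 ticks */

        for (int i = 0; i < 30; i++, now++) {
                if (tick_after(now, deadline)) {
                        printf("timed out after %d ticks\n", i);
                        return 0;
                }
        }
        printf("finished before the deadline\n");
        return 0;
}

Because the comparison works on the difference, the check keeps giving the right answer even when the counter wraps between arming the deadline and testing it.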
@@ -3371,7 +3371,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
                }
        }

-       if (pmd_none(*vmf->pmd))
+       if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
                pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);

        return false;
......
@@ -1182,6 +1182,13 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
        return (get_vma_private_data(vma) & flag) != 0;
 }

+bool __vma_private_lock(struct vm_area_struct *vma)
+{
+       return !(vma->vm_flags & VM_MAYSHARE) &&
+               get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
+               is_vma_resv_set(vma, HPAGE_RESV_OWNER);
+}
+
 void hugetlb_dup_vma_private(struct vm_area_struct *vma)
 {
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
......
@@ -642,32 +642,16 @@ static struct kmemleak_object *__alloc_object(gfp_t gfp)
        if (!object) {
                pr_warn("Cannot allocate a kmemleak_object structure\n");
                kmemleak_disable();
+               return NULL;
        }

-       return object;
-}
-
-static int __link_object(struct kmemleak_object *object, unsigned long ptr,
-                        size_t size, int min_count, bool is_phys)
-{
-       struct kmemleak_object *parent;
-       struct rb_node **link, *rb_parent;
-       unsigned long untagged_ptr;
-       unsigned long untagged_objp;
-
        INIT_LIST_HEAD(&object->object_list);
        INIT_LIST_HEAD(&object->gray_list);
        INIT_HLIST_HEAD(&object->area_list);
        raw_spin_lock_init(&object->lock);
        atomic_set(&object->use_count, 1);
-       object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
-       object->pointer = ptr;
-       object->size = kfence_ksize((void *)ptr) ?: size;
        object->excess_ref = 0;
-       object->min_count = min_count;
        object->count = 0;                      /* white color initially */
-       object->jiffies = jiffies;
        object->checksum = 0;
        object->del_state = 0;
@@ -692,6 +676,24 @@ static int __link_object(struct kmemleak_object *object, unsigned long ptr,
        /* kernel backtrace */
        object->trace_handle = set_track_prepare();

+       return object;
+}
+
+static int __link_object(struct kmemleak_object *object, unsigned long ptr,
+                        size_t size, int min_count, bool is_phys)
+{
+       struct kmemleak_object *parent;
+       struct rb_node **link, *rb_parent;
+       unsigned long untagged_ptr;
+       unsigned long untagged_objp;
+
+       object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
+       object->pointer = ptr;
+       object->size = kfence_ksize((void *)ptr) ?: size;
+       object->min_count = min_count;
+       object->jiffies = jiffies;
+
        untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
        /*
         * Only update min_addr and max_addr with object
@@ -1150,6 +1152,7 @@ EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
 void __ref kmemleak_update_trace(const void *ptr)
 {
        struct kmemleak_object *object;
+       depot_stack_handle_t trace_handle;
        unsigned long flags;

        pr_debug("%s(0x%px)\n", __func__, ptr);
@@ -1166,8 +1169,9 @@ void __ref kmemleak_update_trace(const void *ptr)
                return;
        }

+       trace_handle = set_track_prepare();
        raw_spin_lock_irqsave(&object->lock, flags);
-       object->trace_handle = set_track_prepare();
+       object->trace_handle = trace_handle;
        raw_spin_unlock_irqrestore(&object->lock, flags);

        put_object(object);
......
@@ -335,6 +335,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
        struct folio *folio = NULL;
        LIST_HEAD(folio_list);
        bool pageout_anon_only_filter;
+       unsigned int batch_count = 0;

        if (fatal_signal_pending(current))
                return -EINTR;
@@ -416,6 +417,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 regular_folio:
 #endif
        tlb_change_page_size(tlb, PAGE_SIZE);
+restart:
        start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        if (!start_pte)
                return 0;
@@ -424,6 +426,15 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
        for (; addr < end; pte++, addr += PAGE_SIZE) {
                ptent = ptep_get(pte);

+               if (++batch_count == SWAP_CLUSTER_MAX) {
+                       batch_count = 0;
+                       if (need_resched()) {
+                               pte_unmap_unlock(start_pte, ptl);
+                               cond_resched();
+                               goto restart;
+                       }
+               }
+
                if (pte_none(ptent))
                        continue;
......
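The madvise fix bounds how long the page-table lock is held by counting iterations and, every SWAP_CLUSTER_MAX entries, dropping the lock, rescheduling, and restarting the walk. The following is a userspace analog of that shape only, with a pthread mutex and sched_yield() standing in for the PTE lock and cond_resched(); the kernel additionally has to re-map the page table after the restart, which a flat array does not need:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define BATCH 32   /* stand-in for SWAP_CLUSTER_MAX */

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void process_range(int *items, int n)
{
        int batch_count = 0;

        pthread_mutex_lock(&table_lock);
        for (int i = 0; i < n; i++) {
                if (++batch_count == BATCH) {
                        batch_count = 0;
                        /* Drop the lock periodically so others can run, then retake it. */
                        pthread_mutex_unlock(&table_lock);
                        sched_yield();
                        pthread_mutex_lock(&table_lock);
                }
                items[i] += 1;          /* the actual per-entry work */
        }
        pthread_mutex_unlock(&table_lock);
}

int main(void)
{
        int items[1000] = {0};

        process_range(items, 1000);
        printf("first=%d last=%d\n", items[0], items[999]);
        return 0;
}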
@@ -3166,6 +3166,7 @@ __always_inline struct obj_cgroup *current_obj_cgroup(void)
                return NULL;

 from_memcg:
+       objcg = NULL;
        for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
                /*
                 * Memcg pointer is protected by scope (see set_active_memcg())
@@ -3176,7 +3177,6 @@ __always_inline struct obj_cgroup *current_obj_cgroup(void)
                objcg = rcu_dereference_check(memcg->objcg, 1);
                if (likely(objcg))
                        break;
-               objcg = NULL;
        }

        return objcg;
......
@@ -1517,6 +1517,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                        continue;
                } else {
                        /* We should have covered all the swap entry types */
+                       pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
                        WARN_ON_ONCE(1);
                }
                pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
......
@@ -1129,6 +1129,9 @@ void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
        kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
 }

+/*
+ * Must be called with mem_hotplug_lock in write mode.
+ */
 int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
                       struct zone *zone, struct memory_group *group)
 {
@@ -1149,7 +1152,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
                         !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
                return -EINVAL;

-       mem_hotplug_begin();

        /* associate pfn range with the zone */
        move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
@@ -1208,7 +1210,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
        writeback_set_ratelimit();

        memory_notify(MEM_ONLINE, &arg);
-       mem_hotplug_done();
        return 0;

 failed_addition:
@@ -1217,7 +1218,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
                 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
        memory_notify(MEM_CANCEL_ONLINE, &arg);
        remove_pfn_range_from_zone(zone, pfn, nr_pages);
-       mem_hotplug_done();
        return ret;
 }

@@ -1458,7 +1458,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
        /* create memory block devices after memory was added */
        ret = create_memory_block_devices(start, size, params.altmap, group);
        if (ret) {
-               arch_remove_memory(start, size, NULL);
+               arch_remove_memory(start, size, params.altmap);
                goto error_free;
        }

@@ -1863,6 +1863,9 @@ static int count_system_ram_pages_cb(unsigned long start_pfn,
        return 0;
 }

+/*
+ * Must be called with mem_hotplug_lock in write mode.
+ */
 int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
                        struct zone *zone, struct memory_group *group)
 {
@@ -1885,8 +1888,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
                         !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)))
                return -EINVAL;

-       mem_hotplug_begin();
-
        /*
         * Don't allow to offline memory blocks that contain holes.
         * Consequently, memory blocks with holes can never get onlined
@@ -2031,7 +2032,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,

        memory_notify(MEM_OFFLINE, &arg);
        remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
-       mem_hotplug_done();
        return 0;

 failed_removal_isolated:
@@ -2046,7 +2046,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
                 (unsigned long long) start_pfn << PAGE_SHIFT,
                 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
                 reason);
-       mem_hotplug_done();
        return ret;
 }
......
@@ -138,15 +138,11 @@ $total_size = 0;
 while (my $line = <STDIN>) {
 	if ($line =~ m/$funcre/) {
 		$func = $1;
-		next if $line !~ m/^($xs*)/;
+		next if $line !~ m/^($x*)/;
 		if ($total_size > $min_stack) {
 			push @stack, "$intro$total_size\n";
 		}
-
-		$addr = $1;
-		$addr =~ s/ /0/g;
-		$addr = "0x$addr";
-
+		$addr = "0x$1";
 		$intro = "$addr $func [$file]:";
 		my $padlen = 56 - length($intro);
 		while ($padlen > 0) {
......
@@ -36,26 +36,26 @@ def for_each_bus():
     for kobj in kset_for_each_object(gdb.parse_and_eval('bus_kset')):
         subsys = container_of(kobj, kset_type.get_type().pointer(), 'kobj')
         subsys_priv = container_of(subsys, subsys_private_type.get_type().pointer(), 'subsys')
-        yield subsys_priv['bus']
+        yield subsys_priv


 def for_each_class():
     for kobj in kset_for_each_object(gdb.parse_and_eval('class_kset')):
         subsys = container_of(kobj, kset_type.get_type().pointer(), 'kobj')
         subsys_priv = container_of(subsys, subsys_private_type.get_type().pointer(), 'subsys')
-        yield subsys_priv['class']
+        yield subsys_priv


 def get_bus_by_name(name):
     for item in for_each_bus():
-        if item['name'].string() == name:
+        if item['bus']['name'].string() == name:
             return item
     raise gdb.GdbError("Can't find bus type {!r}".format(name))


 def get_class_by_name(name):
     for item in for_each_class():
-        if item['name'].string() == name:
+        if item['class']['name'].string() == name:
             return item
     raise gdb.GdbError("Can't find device class {!r}".format(name))
@@ -70,13 +70,13 @@ def klist_for_each(klist):

 def bus_for_each_device(bus):
-    for kn in klist_for_each(bus['p']['klist_devices']):
+    for kn in klist_for_each(bus['klist_devices']):
         dp = container_of(kn, device_private_type.get_type().pointer(), 'knode_bus')
         yield dp['device']


 def class_for_each_device(cls):
-    for kn in klist_for_each(cls['p']['klist_devices']):
+    for kn in klist_for_each(cls['klist_devices']):
         dp = container_of(kn, device_private_type.get_type().pointer(), 'knode_class')
         yield dp['device']
@@ -103,7 +103,7 @@ class LxDeviceListBus(gdb.Command):
     def invoke(self, arg, from_tty):
         if not arg:
             for bus in for_each_bus():
-                gdb.write('bus {}:\t{}\n'.format(bus['name'].string(), bus))
+                gdb.write('bus {}:\t{}\n'.format(bus['bus']['name'].string(), bus))
                 for dev in bus_for_each_device(bus):
                     _show_device(dev, level=1)
         else:
@@ -123,7 +123,7 @@ class LxDeviceListClass(gdb.Command):
     def invoke(self, arg, from_tty):
         if not arg:
             for cls in for_each_class():
-                gdb.write("class {}:\t{}\n".format(cls['name'].string(), cls))
+                gdb.write("class {}:\t{}\n".format(cls['class']['name'].string(), cls))
                 for dev in class_for_each_device(cls):
                     _show_device(dev, level=1)
         else:
......
@@ -13,7 +13,7 @@

 import gdb

-from linux import utils
+from linux import utils, lists


 task_type = utils.CachedType("struct task_struct")
@@ -22,19 +22,15 @@ task_type = utils.CachedType("struct task_struct")
 def task_lists():
     task_ptr_type = task_type.get_type().pointer()
     init_task = gdb.parse_and_eval("init_task").address
-    t = g = init_task
+    t = init_task

     while True:
-        while True:
-            yield t
+        thread_head = t['signal']['thread_head']
+        for thread in lists.list_for_each_entry(thread_head, task_ptr_type, 'thread_node'):
+            yield thread

-            t = utils.container_of(t['thread_group']['next'],
-                                   task_ptr_type, "thread_group")
-            if t == g:
-                break
-
-        t = g = utils.container_of(g['tasks']['next'],
-                                   task_ptr_type, "tasks")
+        t = utils.container_of(t['tasks']['next'],
+                               task_ptr_type, "tasks")
         if t == init_task:
             return
......
@@ -60,7 +60,7 @@ TEST_GEN_FILES += mrelease_test
 TEST_GEN_FILES += mremap_dontunmap
 TEST_GEN_FILES += mremap_test
 TEST_GEN_FILES += on-fault-limit
-TEST_GEN_PROGS += pagemap_ioctl
+TEST_GEN_FILES += pagemap_ioctl
 TEST_GEN_FILES += thuge-gen
 TEST_GEN_FILES += transhuge-stress
 TEST_GEN_FILES += uffd-stress
@@ -72,7 +72,7 @@ TEST_GEN_FILES += mdwe_test
 TEST_GEN_FILES += hugetlb_fault_after_madv

 ifneq ($(ARCH),arm64)
-TEST_GEN_PROGS += soft-dirty
+TEST_GEN_FILES += soft-dirty
 endif

 ifeq ($(ARCH),x86_64)
......
@@ -36,6 +36,7 @@ int pagemap_fd;
 int uffd;
 int page_size;
 int hpage_size;
+const char *progname;

 #define LEN(region) ((region.end - region.start)/page_size)
@@ -1149,11 +1150,11 @@ int sanity_tests(void)
        munmap(mem, mem_size);

        /* 9. Memory mapped file */
-       fd = open(__FILE__, O_RDONLY);
+       fd = open(progname, O_RDONLY);
        if (fd < 0)
                ksft_exit_fail_msg("%s Memory mapped file\n", __func__);

-       ret = stat(__FILE__, &sbuf);
+       ret = stat(progname, &sbuf);
        if (ret < 0)
                ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
@@ -1472,12 +1473,14 @@ static void transact_test(int page_size)
                  extra_thread_faults);
 }

-int main(void)
+int main(int argc, char *argv[])
 {
        int mem_size, shmid, buf_size, fd, i, ret;
        char *mem, *map, *fmem;
        struct stat sbuf;

+       progname = argv[0];
+
        ksft_print_header();

        if (init_uffd())
......
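The selftest fix above works because __FILE__ is a compile-time string (the source path as seen by the compiler on the build host), which generally does not exist on the machine running the binary, while argv[0] names the binary that is actually being executed. A tiny standalone illustration of the difference:

#include <stdio.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
        (void)argc;

        /* Baked in at compile time; often a build-tree path that is absent at run time. */
        printf("__FILE__ = %s (readable: %s)\n", __FILE__,
               access(__FILE__, R_OK) == 0 ? "yes" : "no");

        /* Known at run time; points at the program that is actually running. */
        printf("argv[0]  = %s (readable: %s)\n", argv[0],
               access(argv[0], R_OK) == 0 ? "yes" : "no");
        return 0;
}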