Commit 440b7895 authored by Linus Torvalds

Merge tag 'mm-hotfixes-stable-2022-10-20' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Seventeen hotfixes, mainly for MM.

  Five are cc:stable and the remainder address post-6.0 issues"

* tag 'mm-hotfixes-stable-2022-10-20' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  nouveau: fix migrate_to_ram() for faulting page
  mm/huge_memory: do not clobber swp_entry_t during THP split
  hugetlb: fix memory leak associated with vma_lock structure
  mm/page_alloc: reduce potential fragmentation in make_alloc_exact()
  mm: /proc/pid/smaps_rollup: fix maple tree search
  mm,hugetlb: take hugetlb_lock before decrementing h->resv_huge_pages
  mm/mmap: fix MAP_FIXED address return on VMA merge
  mm/mmap.c: __vma_adjust(): suppress uninitialized var warning
  mm/mmap: undo ->mmap() when mas_preallocate() fails
  init: Kconfig: fix spelling mistake "satify" -> "satisfy"
  ocfs2: clear dinode links count in case of error
  ocfs2: fix BUG when iput after ocfs2_mknod fails
  gcov: support GCC 12.1 and newer compilers
  zsmalloc: zs_destroy_pool: add size_class NULL check
  mm/mempolicy: fix mbind_range() arguments to vma_merge()
  mailmap: update email for Qais Yousef
  mailmap: update Dan Carpenter's email address
parents ce3d90a8 97061d44
@@ -104,6 +104,7 @@ Christoph Hellwig <hch@lst.de>
 Colin Ian King <colin.i.king@gmail.com> <colin.king@canonical.com>
 Corey Minyard <minyard@acm.org>
 Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Dan Carpenter <error27@gmail.com> <dan.carpenter@oracle.com>
 Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com>
 Daniel Borkmann <daniel@iogearbox.net> <danborkmann@iogearbox.net>
 Daniel Borkmann <daniel@iogearbox.net> <daniel.borkmann@tik.ee.ethz.ch>
@@ -353,7 +354,8 @@ Peter Oruba <peter@oruba.de>
 Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
 Praveen BP <praveenbp@ti.com>
 Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
-Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
+Qais Yousef <qyousef@layalina.io> <qais.yousef@imgtec.com>
+Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>
 Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com>
 Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
 Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
......
@@ -176,6 +176,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
 		.src = &src,
 		.dst = &dst,
 		.pgmap_owner = drm->dev,
+		.fault_page = vmf->page,
 		.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
 	};
......
@@ -232,6 +232,7 @@ static int ocfs2_mknod(struct user_namespace *mnt_userns,
 	handle_t *handle = NULL;
 	struct ocfs2_super *osb;
 	struct ocfs2_dinode *dirfe;
+	struct ocfs2_dinode *fe = NULL;
 	struct buffer_head *new_fe_bh = NULL;
 	struct inode *inode = NULL;
 	struct ocfs2_alloc_context *inode_ac = NULL;
@@ -382,6 +383,7 @@ static int ocfs2_mknod(struct user_namespace *mnt_userns,
 		goto leave;
 	}
 
+	fe = (struct ocfs2_dinode *) new_fe_bh->b_data;
 	if (S_ISDIR(mode)) {
 		status = ocfs2_fill_new_dir(osb, handle, dir, inode,
 					    new_fe_bh, data_ac, meta_ac);
@@ -454,8 +456,11 @@ static int ocfs2_mknod(struct user_namespace *mnt_userns,
 leave:
 	if (status < 0 && did_quota_inode)
 		dquot_free_inode(inode);
-	if (handle)
+	if (handle) {
+		if (status < 0 && fe)
+			ocfs2_set_links_count(fe, 0);
 		ocfs2_commit_trans(osb, handle);
+	}
 
 	ocfs2_inode_unlock(dir, 1);
 	if (did_block_signals)
@@ -632,18 +637,9 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
 		return status;
 	}
 
-	status = __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
+	return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
 				    parent_fe_bh, handle, inode_ac,
 				    fe_blkno, suballoc_loc, suballoc_bit);
-	if (status < 0) {
-		u64 bg_blkno = ocfs2_which_suballoc_group(fe_blkno, suballoc_bit);
-		int tmp = ocfs2_free_suballoc_bits(handle, inode_ac->ac_inode,
-				inode_ac->ac_bh, suballoc_bit, bg_blkno, 1);
-		if (tmp)
-			mlog_errno(tmp);
-	}
-
-	return status;
 }
 
 static int ocfs2_mkdir(struct user_namespace *mnt_userns,
@@ -2028,8 +2024,11 @@ static int ocfs2_symlink(struct user_namespace *mnt_userns,
 			ocfs2_clusters_to_bytes(osb->sb, 1));
 	if (status < 0 && did_quota_inode)
 		dquot_free_inode(inode);
-	if (handle)
+	if (handle) {
+		if (status < 0 && fe)
+			ocfs2_set_links_count(fe, 0);
 		ocfs2_commit_trans(osb, handle);
+	}
 
 	ocfs2_inode_unlock(dir, 1);
 	if (did_block_signals)
......
@@ -902,7 +902,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 		goto out_put_mm;
 
 	hold_task_mempolicy(priv);
-	vma = mas_find(&mas, 0);
+	vma = mas_find(&mas, ULONG_MAX);
 
 	if (unlikely(!vma))
 		goto empty_set;
......
@@ -66,7 +66,7 @@ config RUST_IS_AVAILABLE
 	  This shows whether a suitable Rust toolchain is available (found).
 
 	  Please see Documentation/rust/quick-start.rst for instructions on how
-	  to satify the build requirements of Rust support.
+	  to satisfy the build requirements of Rust support.
 
 	  In particular, the Makefile target 'rustavailable' is useful to check
 	  why the Rust toolchain is not being detected.
......
@@ -30,6 +30,13 @@
 #define GCOV_TAG_FUNCTION_LENGTH 3
 
+/* Since GCC 12.1 sizes are in BYTES and not in WORDS (4B). */
+#if (__GNUC__ >= 12)
+#define GCOV_UNIT_SIZE 4
+#else
+#define GCOV_UNIT_SIZE 1
+#endif
+
 static struct gcov_info *gcov_info_head;
 
 /**
@@ -383,12 +390,18 @@ size_t convert_to_gcda(char *buffer, struct gcov_info *info)
 	pos += store_gcov_u32(buffer, pos, info->version);
 	pos += store_gcov_u32(buffer, pos, info->stamp);
 
+#if (__GNUC__ >= 12)
+	/* Use zero as checksum of the compilation unit. */
+	pos += store_gcov_u32(buffer, pos, 0);
+#endif
+
 	for (fi_idx = 0; fi_idx < info->n_functions; fi_idx++) {
 		fi_ptr = info->functions[fi_idx];
 
 		/* Function record. */
 		pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION);
-		pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION_LENGTH);
+		pos += store_gcov_u32(buffer, pos,
+				      GCOV_TAG_FUNCTION_LENGTH * GCOV_UNIT_SIZE);
 		pos += store_gcov_u32(buffer, pos, fi_ptr->ident);
 		pos += store_gcov_u32(buffer, pos, fi_ptr->lineno_checksum);
 		pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum);
@@ -402,7 +415,8 @@ size_t convert_to_gcda(char *buffer, struct gcov_info *info)
 			/* Counter record. */
 			pos += store_gcov_u32(buffer, pos,
 					      GCOV_TAG_FOR_COUNTER(ct_idx));
-			pos += store_gcov_u32(buffer, pos, ci_ptr->num * 2);
+			pos += store_gcov_u32(buffer, pos,
+					      ci_ptr->num * 2 * GCOV_UNIT_SIZE);
 
 			for (cv_idx = 0; cv_idx < ci_ptr->num; cv_idx++) {
 				pos += store_gcov_u64(buffer, pos,
......
@@ -2455,7 +2455,16 @@ static void __split_huge_page_tail(struct page *head, int tail,
 			page_tail);
 	page_tail->mapping = head->mapping;
 	page_tail->index = head->index + tail;
-	page_tail->private = 0;
+
+	/*
+	 * page->private should not be set in tail pages with the exception
+	 * of swap cache pages that store the swp_entry_t in tail pages.
+	 * Fix up and warn once if private is unexpectedly set.
+	 */
+	if (!folio_test_swapcache(page_folio(head))) {
+		VM_WARN_ON_ONCE_PAGE(page_tail->private != 0, head);
+		page_tail->private = 0;
+	}
 
 	/* Page flags must be visible before we make the page non-compound. */
 	smp_wmb();
......
@@ -1014,15 +1014,23 @@ void hugetlb_dup_vma_private(struct vm_area_struct *vma)
 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 	/*
 	 * Clear vm_private_data
+	 * - For shared mappings this is a per-vma semaphore that may be
+	 *   allocated in a subsequent call to hugetlb_vm_op_open.
+	 *   Before clearing, make sure pointer is not associated with vma
+	 *   as this will leak the structure. This is the case when called
+	 *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
+	 *   been called to allocate a new structure.
 	 * - For MAP_PRIVATE mappings, this is the reserve map which does
 	 *   not apply to children. Faults generated by the children are
 	 *   not guaranteed to succeed, even if read-only.
-	 * - For shared mappings this is a per-vma semaphore that may be
-	 *   allocated in a subsequent call to hugetlb_vm_op_open.
 	 */
-	vma->vm_private_data = (void *)0;
-	if (!(vma->vm_flags & VM_MAYSHARE))
-		return;
+	if (vma->vm_flags & VM_MAYSHARE) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		if (vma_lock && vma_lock->vma != vma)
+			vma->vm_private_data = NULL;
+	} else
+		vma->vm_private_data = NULL;
 }
 
 /*
@@ -2924,11 +2932,11 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
 		if (!page)
 			goto out_uncharge_cgroup;
+		spin_lock_irq(&hugetlb_lock);
 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
 			SetHPageRestoreReserve(page);
 			h->resv_huge_pages--;
 		}
-		spin_lock_irq(&hugetlb_lock);
 		list_add(&page->lru, &h->hugepage_activelist);
 		set_page_refcounted(page);
 		/* Fall through */
@@ -4601,6 +4609,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
 	struct resv_map *resv = vma_resv_map(vma);
 
 	/*
+	 * HPAGE_RESV_OWNER indicates a private mapping.
 	 * This new VMA should share its siblings reservation map if present.
 	 * The VMA will only ever have a valid reservation map pointer where
 	 * it is being copied for another still existing VMA. As that VMA
@@ -4615,11 +4624,21 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
 	/*
 	 * vma_lock structure for sharable mappings is vma specific.
-	 * Clear old pointer (if copied via vm_area_dup) and create new.
+	 * Clear old pointer (if copied via vm_area_dup) and allocate
+	 * new structure. Before clearing, make sure vma_lock is not
+	 * for this vma.
 	 */
 	if (vma->vm_flags & VM_MAYSHARE) {
-		vma->vm_private_data = NULL;
-		hugetlb_vma_lock_alloc(vma);
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		if (vma_lock) {
+			if (vma_lock->vma != vma) {
+				vma->vm_private_data = NULL;
+				hugetlb_vma_lock_alloc(vma);
+			} else
+				pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
+		} else
+			hugetlb_vma_lock_alloc(vma);
 	}
 }
......
@@ -787,17 +787,22 @@ static int vma_replace_policy(struct vm_area_struct *vma,
 static int mbind_range(struct mm_struct *mm, unsigned long start,
 		       unsigned long end, struct mempolicy *new_pol)
 {
-	MA_STATE(mas, &mm->mm_mt, start - 1, start - 1);
+	MA_STATE(mas, &mm->mm_mt, start, start);
 	struct vm_area_struct *prev;
 	struct vm_area_struct *vma;
 	int err = 0;
 	pgoff_t pgoff;
 
-	prev = mas_find_rev(&mas, 0);
-	if (prev && (start < prev->vm_end))
-		vma = prev;
-	else
-		vma = mas_next(&mas, end - 1);
+	prev = mas_prev(&mas, 0);
+	if (unlikely(!prev))
+		mas_set(&mas, start);
+
+	vma = mas_find(&mas, end - 1);
+	if (WARN_ON(!vma))
+		return 0;
+
+	if (start > vma->vm_start)
+		prev = vma;
 
 	for (; vma; vma = mas_next(&mas, end - 1)) {
 		unsigned long vmstart = max(start, vma->vm_start);
......
@@ -618,7 +618,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 			  struct vm_area_struct *expand)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	struct vm_area_struct *next_next, *next = find_vma(mm, vma->vm_end);
+	struct vm_area_struct *next_next = NULL; /* uninit var warning */
+	struct vm_area_struct *next = find_vma(mm, vma->vm_end);
 	struct vm_area_struct *orig_vma = vma;
 	struct address_space *mapping = NULL;
 	struct rb_root_cached *root = NULL;
@@ -2625,14 +2626,14 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		if (error)
 			goto unmap_and_free_vma;
 
-		/* Can addr have changed??
-		 *
-		 * Answer: Yes, several device drivers can do it in their
-		 *         f_op->mmap method. -DaveM
+		/*
+		 * Expansion is handled above, merging is handled below.
+		 * Drivers should not alter the address of the VMA.
 		 */
-		WARN_ON_ONCE(addr != vma->vm_start);
-
-		addr = vma->vm_start;
+		if (WARN_ON((addr != vma->vm_start))) {
+			error = -EINVAL;
+			goto close_and_free_vma;
+		}
 		mas_reset(&mas);
 
 		/*
@@ -2654,7 +2655,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 			vm_area_free(vma);
 			vma = merge;
 			/* Update vm_flags to pick up the change. */
-			addr = vma->vm_start;
 			vm_flags = vma->vm_flags;
 			goto unmap_writable;
 		}
@@ -2681,7 +2681,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
 		error = -ENOMEM;
 		if (file)
-			goto unmap_and_free_vma;
+			goto close_and_free_vma;
 		else
 			goto free_vma;
 	}
......
@@ -5784,14 +5784,18 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
 		size_t size)
 {
 	if (addr) {
-		unsigned long alloc_end = addr + (PAGE_SIZE << order);
-		unsigned long used = addr + PAGE_ALIGN(size);
+		unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
+		struct page *page = virt_to_page((void *)addr);
+		struct page *last = page + nr;
 
-		split_page(virt_to_page((void *)addr), order);
-		while (used < alloc_end) {
-			free_page(used);
-			used += PAGE_SIZE;
-		}
+		split_page_owner(page, 1 << order);
+		split_page_memcg(page, 1 << order);
+		while (page < --last)
+			set_page_refcounted(last);
+
+		last = page + (1UL << order);
+		for (page += nr; page < last; page++)
+			__free_pages_ok(page, 0, FPI_TO_TAIL);
 	}
 	return (void *)addr;
 }
......
@@ -2311,6 +2311,9 @@ void zs_destroy_pool(struct zs_pool *pool)
 		int fg;
 		struct size_class *class = pool->size_class[i];
 
+		if (!class)
+			continue;
+
 		if (class->index != i)
 			continue;
......