Commit 4cee37b3 authored by Linus Torvalds

Merge tag 'mm-hotfixes-stable-2022-12-10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Nine hotfixes.

  Six for MM, three for other areas. Four of these patches address
  post-6.0 issues"

* tag 'mm-hotfixes-stable-2022-12-10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  memcg: fix possible use-after-free in memcg_write_event_control()
  MAINTAINERS: update Muchun Song's email
  mm/gup: fix gup_pud_range() for dax
  mmap: fix do_brk_flags() modifying obviously incorrect VMAs
  mm/swap: fix SWP_PFN_BITS with CONFIG_PHYS_ADDR_T_64BIT on 32bit
  tmpfs: fix data loss from failed fallocate
  kselftests: cgroup: update kmem test precision tolerance
  mm: do not BUG_ON missing brk mapping, because userspace can unmap it
  mailmap: update Matti Vaittinen's email address
parents 296a7b7e 4a7ba45b
--- a/.mailmap
+++ b/.mailmap
@@ -287,6 +287,7 @@ Matthew Wilcox <willy@infradead.org> <willy@linux.intel.com>
 Matthew Wilcox <willy@infradead.org> <willy@parisc-linux.org>
 Matthias Fuchs <socketcan@esd.eu> <matthias.fuchs@esd.eu>
 Matthieu CASTET <castet.matthieu@free.fr>
+Matti Vaittinen <mazziesaccount@gmail.com> <matti.vaittinen@fi.rohmeurope.com>
 Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
 Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com>
 Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
@@ -372,6 +373,8 @@ Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
 Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
 Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
 Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
+Muchun Song <muchun.song@linux.dev> <songmuchun@bytedance.com>
+Muchun Song <muchun.song@linux.dev> <smuchun@gmail.com>
 Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
 Rudolf Marek <R.Marek@sh.cvut.cz>
 Rui Saraiva <rmps@joel.ist.utl.pt>
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5299,7 +5299,7 @@ M: Johannes Weiner <hannes@cmpxchg.org>
 M: Michal Hocko <mhocko@kernel.org>
 M: Roman Gushchin <roman.gushchin@linux.dev>
 M: Shakeel Butt <shakeelb@google.com>
-R: Muchun Song <songmuchun@bytedance.com>
+R: Muchun Song <muchun.song@linux.dev>
 L: cgroups@vger.kernel.org
 L: linux-mm@kvack.org
 S: Maintained
@@ -9439,7 +9439,7 @@ F: drivers/net/ethernet/huawei/hinic/
 HUGETLB SUBSYSTEM
 M: Mike Kravetz <mike.kravetz@oracle.com>
-M: Muchun Song <songmuchun@bytedance.com>
+M: Muchun Song <muchun.song@linux.dev>
 L: linux-mm@kvack.org
 S: Maintained
 F: Documentation/ABI/testing/sysfs-kernel-mm-hugepages
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -33,11 +33,13 @@
  * can use the extra bits to store other information besides PFN.
  */
 #ifdef MAX_PHYSMEM_BITS
 #define SWP_PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
 #else  /* MAX_PHYSMEM_BITS */
-#define SWP_PFN_BITS		(BITS_PER_LONG - PAGE_SHIFT)
+#define SWP_PFN_BITS		min_t(int, \
+				      sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
+				      SWP_TYPE_SHIFT)
 #endif	/* MAX_PHYSMEM_BITS */
 #define SWP_PFN_MASK		(BIT(SWP_PFN_BITS) - 1)
 
 /**
  * Migration swap entry specific bitfield definitions. Layout:
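Worked numbers for the fallback (my arithmetic, not text from the patch): on a 32-bit kernel with 4 KiB pages (PAGE_SHIFT = 12) and no MAX_PHYSMEM_BITS, the old fallback gave BITS_PER_LONG - PAGE_SHIFT = 32 - 12 = 20 PFN bits, which can only address 4 GiB - 1 of physical memory, even though CONFIG_PHYS_ADDR_T_64BIT (e.g. x86 PAE) allows PFNs well above that. The new fallback starts from the width of phys_addr_t, sizeof(phys_addr_t) * 8 - PAGE_SHIFT = 64 - 12 = 52 bits, and clamps it with min_t() to SWP_TYPE_SHIFT so the PFN field can never exceed the maximum swap offset representable in a swp_entry_t.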
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2852,7 +2852,7 @@ static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned lo
 		next = pud_addr_end(addr, end);
 		if (unlikely(!pud_present(pud)))
 			return 0;
-		if (unlikely(pud_huge(pud))) {
+		if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
 			if (!gup_huge_pud(pud, pudp, addr, next, flags,
 					  pages, nr))
 				return 0;
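Why the extra pud_devmap() test matters (my reading of the fix, not text from the patch): pud_huge() alone does not reliably identify a huge DAX mapping. With CONFIG_HUGETLB_PAGE disabled it is stubbed out roughly as below, so a devmap PUD failed the old check and the walk fell through to gup_pmd_range(), which then dereferenced the huge PUD as if it were a PMD page table.

/* Paraphrased sketch of the !CONFIG_HUGETLB_PAGE stub from
 * include/linux/hugetlb.h: every PUD reports "not huge",
 * including a huge DAX devmap PUD. */
static inline int pud_huge(pud_t pud)
{
	return 0;
}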
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -226,8 +226,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 		/* Search one past newbrk */
 		mas_set(&mas, newbrk);
 		brkvma = mas_find(&mas, oldbrk);
-		BUG_ON(brkvma == NULL);
-		if (brkvma->vm_start >= oldbrk)
+		if (!brkvma || brkvma->vm_start >= oldbrk)
 			goto out; /* mapping intersects with an existing non-brk vma. */
 		/*
 		 * mm->brk must be protected by write mmap_lock.
@@ -2946,9 +2945,9 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
 	 * Expand the existing vma if possible; Note that singular lists do not
 	 * occur after forking, so the expand will only happen on new VMAs.
 	 */
-	if (vma &&
-	    (!vma->anon_vma || list_is_singular(&vma->anon_vma_chain)) &&
-	    ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
+	if (vma && vma->vm_end == addr && !vma_policy(vma) &&
+	    can_vma_merge_after(vma, flags, NULL, NULL,
+			addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
 		mas_set_range(mas, vma->vm_start, addr + len - 1);
 		if (mas_preallocate(mas, vma, GFP_KERNEL))
 			return -ENOMEM;
@@ -3035,11 +3034,6 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
 		goto munmap_failed;
 
 	vma = mas_prev(&mas, 0);
-	if (!vma || vma->vm_end != addr || vma_policy(vma) ||
-	    !can_vma_merge_after(vma, flags, NULL, NULL,
-				 addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL))
-		vma = NULL;
-
 	ret = do_brk_flags(&mas, vma, addr, len, flags);
 	populate = ((mm->def_flags & VM_LOCKED) != 0);
 	mmap_write_unlock(mm);
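The BUG_ON removal in the brk path exists because nothing stops userspace from unmapping the heap out from under the kernel's bookkeeping, as the patch subject says. A minimal, hypothetical userspace demonstration (not part of the patch):

/* Grow the heap with sbrk(), munmap() the page backing it, then move
 * the break again: the kernel must tolerate the missing brk VMA
 * instead of hitting a BUG_ON(). */
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	char *old = sbrk(pg);		/* extend heap; returns old break */
	char *page;

	if (old == (char *)-1)
		return 1;
	/* round up to the first page boundary inside the new heap space */
	page = (char *)(((unsigned long)old + pg - 1) & ~(unsigned long)(pg - 1));
	if (munmap(page, pg))		/* remove the brk VMA's backing page */
		perror("munmap");
	if (sbrk(pg) == (void *)-1)	/* brk must cope with the hole */
		perror("sbrk");
	printf("survived: brk mapping was unmapped and re-extended\n");
	return 0;
}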
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -948,6 +948,15 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			index++;
 		}
 
+	/*
+	 * When undoing a failed fallocate, we want none of the partial folio
+	 * zeroing and splitting below, but shall want to truncate the whole
+	 * folio when !uptodate indicates that it was added by this fallocate,
+	 * even when [lstart, lend] covers only a part of the folio.
+	 */
+	if (unfalloc)
+		goto whole_folios;
+
 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
 	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
 	if (folio) {
@@ -973,6 +982,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 		folio_put(folio);
 	}
 
+whole_folios:
+
 	index = start;
 	while (index < end) {
 		cond_resched();
--- a/tools/testing/selftests/cgroup/test_kmem.c
+++ b/tools/testing/selftests/cgroup/test_kmem.c
@@ -19,12 +19,12 @@
 
 /*
- * Memory cgroup charging is performed using percpu batches 32 pages
+ * Memory cgroup charging is performed using percpu batches 64 pages
  * big (look at MEMCG_CHARGE_BATCH), whereas memory.stat is exact. So
  * the maximum discrepancy between charge and vmstat entries is number
- * of cpus multiplied by 32 pages.
+ * of cpus multiplied by 64 pages.
  */
-#define MAX_VMSTAT_ERROR (4096 * 32 * get_nprocs())
+#define MAX_VMSTAT_ERROR (4096 * 64 * get_nprocs())
 
 static int alloc_dcache(const char *cgroup, void *arg)
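For scale (my arithmetic, not from the patch): with 4 KiB pages and MEMCG_CHARGE_BATCH at 64, each CPU can hold up to 64 * 4096 = 256 KiB of uncommitted charge in its percpu batch, so on, say, an 8-CPU machine the test now tolerates up to 4096 * 64 * 8 = 2 MiB of drift between the charge counter and the exact memory.stat entries.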