Commit 5972d97c authored by Liam R. Howlett, committed by Andrew Morton

mm/mmap: use PHYS_PFN in mmap_region()

Instead of shifting the length by PAGE_SHIFT, use PHYS_PFN.  Also use the
existing local variable everywhere instead of only in some places.

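The conversion relies on PHYS_PFN() being a thin wrapper around the same
shift (a cast plus ">> PAGE_SHIFT"; see include/linux/pfn.h), so pglen keeps
its value.  A minimal standalone sketch of that equivalence follows; the
PAGE_SHIFT value of 12 and the local PHYS_PFN copy are illustrative
assumptions rather than the kernel headers:

	/*
	 * Userspace sketch, not kernel code: PAGE_SHIFT and PHYS_PFN below
	 * are local stand-ins mirroring the common kernel definitions.
	 */
	#include <assert.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

	int main(void)
	{
		unsigned long len = 5UL << PAGE_SHIFT;	/* a 5-page mapping length */

		/* PHYS_PFN(len) computes the same value as len >> PAGE_SHIFT. */
		assert(PHYS_PFN(len) == (len >> PAGE_SHIFT));
		printf("pglen = %lu pages\n", PHYS_PFN(len));
		return 0;
	}
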
Link: https://lkml.kernel.org/r/20240830040101.822209-17-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Cc: Bert Karwatzki <spasswolf@web.de>
Cc: Jeff Xu <jeffxu@chromium.org>
Cc: Jiri Olsa <olsajiri@gmail.com>
Cc: Kees Cook <kees@kernel.org>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Paul Moore <paul@paul-moore.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 4f87153e
@@ -1364,7 +1364,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma = NULL;
 	struct vm_area_struct *next, *prev, *merge;
-	pgoff_t pglen = len >> PAGE_SHIFT;
+	pgoff_t pglen = PHYS_PFN(len);
 	unsigned long charged = 0;
 	struct vma_munmap_struct vms;
 	struct ma_state mas_detach;
@@ -1384,7 +1384,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	 * MAP_FIXED may remove pages of mappings that intersects with requested
 	 * mapping. Account for the pages it would unmap.
 	 */
-	if (!may_expand_vm(mm, vm_flags, (len >> PAGE_SHIFT) - nr_pages))
+	if (!may_expand_vm(mm, vm_flags, pglen - nr_pages))
 		return -ENOMEM;
 
 	/* Find the first overlapping VMA */
@@ -1413,7 +1413,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	 * Private writable mapping: check memory availability
 	 */
 	if (accountable_mapping(file, vm_flags)) {
-		charged = len >> PAGE_SHIFT;
+		charged = pglen;
 		charged -= nr_accounted;
 		if (security_vm_enough_memory_mm(mm, charged))
 			goto abort_munmap;
@@ -1574,14 +1574,14 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	/* Unmap any existing mapping in the area */
 	vms_complete_munmap_vmas(&vms, &mas_detach);
 
-	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
+	vm_stat_account(mm, vm_flags, pglen);
 	if (vm_flags & VM_LOCKED) {
 		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
 					is_vm_hugetlb_page(vma) ||
 					vma == get_gate_vma(current->mm))
 			vm_flags_clear(vma, VM_LOCKED_MASK);
 		else
-			mm->locked_vm += (len >> PAGE_SHIFT);
+			mm->locked_vm += pglen;
 	}
 
 	if (file)