Commit 1301f931 authored Jan 18, 2023 by Andrew Morton
Merge branch 'mm-hotfixes-stable' into mm-stable

Pull mm-hotfixes-stable dependencies into mm-stable.

Parents: 0e18a6b4, 7e3ce3f8
Showing 22 changed files with 163 additions and 96 deletions (+163, -96):
.mailmap                                          +1   -0
Documentation/admin-guide/mm/zswap.rst            +1   -3
MAINTAINERS                                       +3   -3
fs/nilfs2/btree.c                                 +12  -3
fs/userfaultfd.c                                  +22  -6
include/linux/mm.h                                +3   -3
include/linux/mm_inline.h                         +1   -2
include/linux/mm_types.h                          +1   -1
include/linux/page_ref.h                          +1   -1
init/Kconfig                                      +1   -1
lib/win_minmax.c                                  +1   -1
mm/hugetlb.c                                      +67  -35
mm/kasan/report.c                                 +1   -1
mm/khugepaged.c                                   +7   -9
mm/madvise.c                                      +1   -1
mm/memory.c                                       +7   -7
mm/mmap.c                                         +6   -2
mm/mprotect.c                                     +7   -1
mm/nommu.c                                        +6   -3
mm/shmem.c                                        +2   -4
tools/testing/selftests/proc/proc-empty-vm.c      +7   -5
tools/testing/selftests/proc/proc-pid-vm.c        +5   -4
.mailmap
@@ -371,6 +371,7 @@ Rémi Denis-Courmont <rdenis@simphalempin.com>
 Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com>
 Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org>
 Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
+Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org>
 Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
 Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
 Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
Documentation/admin-guide/mm/zswap.rst
@@ -70,9 +70,7 @@ e.g. ``zswap.zpool=zbud``. It can also be changed at runtime using the sysfs
 The zbud type zpool allocates exactly 1 page to store 2 compressed pages, which
 means the compression ratio will always be 2:1 or worse (because of half-full
 zbud pages). The zsmalloc type zpool has a more complex compressed page
-storage method, and it can achieve greater storage densities. However,
-zsmalloc does not implement compressed page eviction, so once zswap fills it
-cannot evict the oldest page, it can only reject new pages.
+storage method, and it can achieve greater storage densities.
 
 When a swap page is passed from frontswap to zswap, zswap maintains a mapping
 of the swap entry, a combination of the swap type and swap offset, to the zpool
MAINTAINERS
@@ -6948,7 +6948,7 @@ F: drivers/gpu/drm/atmel-hlcdc/
 DRM DRIVERS FOR BRIDGE CHIPS
 M:  Andrzej Hajda <andrzej.hajda@intel.com>
 M:  Neil Armstrong <neil.armstrong@linaro.org>
-M:  Robert Foss <robert.foss@linaro.org>
+M:  Robert Foss <rfoss@kernel.org>
 R:  Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
 R:  Jonas Karlman <jonas@kwiboo.se>
 R:  Jernej Skrabec <jernej.skrabec@gmail.com>
@@ -17238,7 +17238,7 @@ F: Documentation/devicetree/bindings/net/qcom,bam-dmux.yaml
 F:  drivers/net/wwan/qcom_bam_dmux.c
 
 QUALCOMM CAMERA SUBSYSTEM DRIVER
-M:  Robert Foss <robert.foss@linaro.org>
+M:  Robert Foss <rfoss@kernel.org>
 M:  Todor Tomov <todor.too@gmail.com>
 L:  linux-media@vger.kernel.org
 S:  Maintained
@@ -17318,7 +17318,7 @@ F: drivers/dma/qcom/hidma*
 
 QUALCOMM I2C CCI DRIVER
 M:  Loic Poulain <loic.poulain@linaro.org>
-M:  Robert Foss <robert.foss@linaro.org>
+M:  Robert Foss <rfoss@kernel.org>
 L:  linux-i2c@vger.kernel.org
 L:  linux-arm-msm@vger.kernel.org
 S:  Maintained
fs/nilfs2/btree.c
@@ -480,9 +480,18 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
     ret = nilfs_btnode_submit_block(btnc, ptr, 0, REQ_OP_READ, &bh,
                     &submit_ptr);
     if (ret) {
-        if (ret != -EEXIST)
-            return ret;
-        goto out_check;
+        if (likely(ret == -EEXIST))
+            goto out_check;
+        if (ret == -ENOENT) {
+            /*
+             * Block address translation failed due to invalid
+             * value of 'ptr'.  In this case, return internal code
+             * -EINVAL (broken bmap) to notify bmap layer of fatal
+             * metadata corruption.
+             */
+            ret = -EINVAL;
+        }
+        return ret;
     }
 
     if (ra) {
fs/userfaultfd.c
@@ -108,6 +108,21 @@ static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
     return ctx->features & UFFD_FEATURE_INITIALIZED;
 }
 
+static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
+                     vm_flags_t flags)
+{
+    const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;
+
+    vma->vm_flags = flags;
+    /*
+     * For shared mappings, we want to enable writenotify while
+     * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
+     * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
+     */
+    if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
+        vma_set_page_prot(vma);
+}
+
 static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
                      int wake_flags, void *key)
 {
@@ -618,7 +633,8 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
         for_each_vma(vmi, vma) {
             if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
                 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-                vma->vm_flags &= ~__VM_UFFD_FLAGS;
+                userfaultfd_set_vm_flags(vma,
+                             vma->vm_flags & ~__VM_UFFD_FLAGS);
             }
         }
         mmap_write_unlock(mm);
@@ -652,7 +668,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
     octx = vma->vm_userfaultfd_ctx.ctx;
     if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
         vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-        vma->vm_flags &= ~__VM_UFFD_FLAGS;
+        userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
         return 0;
     }
@@ -733,7 +749,7 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
     } else {
         /* Drop uffd context if remap feature not enabled */
         vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-        vma->vm_flags &= ~__VM_UFFD_FLAGS;
+        userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
     }
 }
@@ -895,7 +911,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
             prev = vma;
         }
-        vma->vm_flags = new_flags;
+        userfaultfd_set_vm_flags(vma, new_flags);
         vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
     }
     mmap_write_unlock(mm);
@@ -1463,7 +1479,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
          * the next vma was merged into the current one and
          * the current one has not been updated yet.
          */
-        vma->vm_flags = new_flags;
+        userfaultfd_set_vm_flags(vma, new_flags);
         vma->vm_userfaultfd_ctx.ctx = ctx;
 
         if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
@@ -1651,7 +1667,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
          * the next vma was merged into the current one and
          * the current one has not been updated yet.
          */
-        vma->vm_flags = new_flags;
+        userfaultfd_set_vm_flags(vma, new_flags);
         vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
 
 skip:
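The userfaultfd hunks above route every vm_flags update through the new userfaultfd_set_vm_flags() helper, so toggling VM_UFFD_WP on a VM_SHARED vma also recomputes vma->vm_page_prot via vma_set_page_prot() (write-notify; see the matching vma_wants_writenotify() hunk in mm/mmap.c below). As a rough illustration of the userspace configuration this serves, here is a hedged sketch, not part of the patch, that arms uffd-wp on a shared anonymous mapping; it assumes a kernel with uffd-wp support for shmem (5.19+) and may require /proc/sys/vm/unprivileged_userfaultfd=1 or suitable privileges:

/* Hedged sketch, not from the patch: arm userfaultfd write-protect on a
 * MAP_SHARED mapping, the case the helper above is concerned with. */
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    long uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (uffd < 0) { perror("userfaultfd"); return 1; }

    struct uffdio_api api = { .api = UFFD_API };
    if (ioctl(uffd, UFFDIO_API, &api)) { perror("UFFDIO_API"); return 1; }

    /* Shared anonymous memory: before this fix, wr-protecting such a VMA
     * did not force write-notify, so protection could be bypassed. */
    char *mem = mmap(NULL, page, PROT_READ | PROT_WRITE,
                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (mem == MAP_FAILED) { perror("mmap"); return 1; }
    mem[0] = 1;    /* fault the page in */

    struct uffdio_register reg = {
        .range = { .start = (unsigned long)mem, .len = page },
        .mode  = UFFDIO_REGISTER_MODE_WP,
    };
    if (ioctl(uffd, UFFDIO_REGISTER, &reg)) { perror("UFFDIO_REGISTER"); return 1; }

    struct uffdio_writeprotect wp = {
        .range = { .start = (unsigned long)mem, .len = page },
        .mode  = UFFDIO_WRITEPROTECT_MODE_WP,
    };
    if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp)) { perror("UFFDIO_WRITEPROTECT"); return 1; }

    printf("uffd-wp armed on shared page at %p; the next write should raise "
           "a UFFD_EVENT_PAGEFAULT with UFFD_PAGEFAULT_FLAG_WP set\n", mem);
    return 0;
}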
include/linux/mm.h
@@ -1270,10 +1270,10 @@ static inline void folio_put_refs(struct folio *folio, int refs)
         __folio_put(folio);
 }
 
-/**
- * release_pages - release an array of pages or folios
+/*
+ * union release_pages_arg - an array of pages or folios
  *
- * This just releases a simple array of multiple pages, and
+ * release_pages() releases a simple array of multiple pages, and
  * accepts various different forms of said page array: either
  * a regular old boring array of pages, an array of folios, or
  * an array of encoded page pointers.
include/linux/mm_inline.h
@@ -413,8 +413,7 @@ static inline void free_anon_vma_name(struct vm_area_struct *vma)
      * Not using anon_vma_name because it generates a warning if mmap_lock
      * is not held, which might be the case here.
      */
-    if (!vma->vm_file)
-        anon_vma_name_put(vma->anon_name);
+    anon_vma_name_put(vma->anon_name);
 }
 
 static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
include/linux/mm_types.h
@@ -581,7 +581,7 @@ struct vm_area_struct {
     /*
      * For private and shared anonymous mappings, a pointer to a null
      * terminated string containing the name given to the vma, or NULL if
-     * unnamed. Serialized by mmap_sem. Use anon_vma_name to access.
+     * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
      */
     struct anon_vma_name *anon_name;
 #endif
include/linux/page_ref.h
@@ -301,7 +301,7 @@ static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
  *
  * You can also use this function if you're holding a lock that prevents
  * pages being frozen & removed; eg the i_pages lock for the page cache
- * or the mmap_sem or page table lock for page tables. In this case,
+ * or the mmap_lock or page table lock for page tables. In this case,
  * it will always succeed, and you could have used a plain folio_get(),
  * but it's sometimes more convenient to have a common function called
  * from both locked and RCU-protected contexts.
init/Kconfig
@@ -776,7 +776,7 @@ config PRINTK_SAFE_LOG_BUF_SHIFT
     depends on PRINTK
     help
       Select the size of an alternate printk per-CPU buffer where messages
-      printed from usafe contexts are temporary stored. One example would
+      printed from unsafe contexts are temporary stored. One example would
       be NMI messages, another one - printk recursion. The messages are
       copied to the main log buffer in a safe context to avoid a deadlock.
       The value defines the size as a power of 2.
lib/win_minmax.c
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * lib/minmax.c: windowed min/max tracker
  *
  * Kathleen Nichols' algorithm for tracking the minimum (or maximum)
mm/hugetlb.c
@@ -94,6 +94,8 @@ static int hugetlb_acct_memory(struct hstate *h, long delta);
 static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
+static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+        unsigned long start, unsigned long end);
 
 static inline bool subpool_is_free(struct hugepage_subpool *spool)
 {
@@ -1181,7 +1183,7 @@ void hugetlb_dup_vma_private(struct vm_area_struct *vma)
 /*
  * Reset and decrement one ref on hugepage private reservation.
- * Called with mm->mmap_sem writer semaphore held.
+ * Called with mm->mmap_lock writer semaphore held.
  * This function should be only used by move_vma() and operate on
  * same sized vma. It should never come here with last ref on the
  * reservation.
@@ -4834,6 +4836,25 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
 {
     if (addr & ~(huge_page_mask(hstate_vma(vma))))
         return -EINVAL;
+
+    /*
+     * PMD sharing is only possible for PUD_SIZE-aligned address ranges
+     * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
+     * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
+     */
+    if (addr & ~PUD_MASK) {
+        /*
+         * hugetlb_vm_op_split is called right before we attempt to
+         * split the VMA. We will need to unshare PMDs in the old and
+         * new VMAs, so let's unshare before we split.
+         */
+        unsigned long floor = addr & PUD_MASK;
+        unsigned long ceil = floor + PUD_SIZE;
+
+        if (floor >= vma->vm_start && ceil <= vma->vm_end)
+            hugetlb_unshare_pmds(vma, floor, ceil);
+    }
+
     return 0;
 }
@@ -5030,6 +5051,9 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                 entry = huge_pte_clear_uffd_wp(entry);
             set_huge_pte_at(dst, addr, dst_pte, entry);
         } else if (unlikely(is_pte_marker(entry))) {
+            /* No swap on hugetlb */
+            WARN_ON_ONCE(
+                is_swapin_error_entry(pte_to_swp_entry(entry)));
             /*
              * We copy the pte marker only if the dst vma has
              * uffd-wp enabled.
@@ -5131,7 +5155,7 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
 
     /*
      * We don't have to worry about the ordering of src and dst ptlocks
-     * because exclusive mmap_sem (or the i_mmap_lock) prevents deadlock.
+     * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
      */
     if (src_ptl != dst_ptl)
         spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
@@ -6639,8 +6663,17 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         spinlock_t *ptl;
         ptep = huge_pte_offset(mm, address, psize);
         if (!ptep) {
-            address |= last_addr_mask;
-            continue;
+            if (!uffd_wp) {
+                address |= last_addr_mask;
+                continue;
+            }
+            /*
+             * Userfaultfd wr-protect requires pgtable
+             * pre-allocations to install pte markers.
+             */
+            ptep = huge_pte_alloc(mm, vma, address, psize);
+            if (!ptep)
+                break;
         }
         ptl = huge_pte_lock(h, mm, ptep);
         if (huge_pmd_unshare(mm, vma, address, ptep)) {
@@ -6658,16 +6691,13 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         }
         pte = huge_ptep_get(ptep);
         if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
-            spin_unlock(ptl);
-            continue;
-        }
-        if (unlikely(is_hugetlb_entry_migration(pte))) {
+            /* Nothing to do. */
+        } else if (unlikely(is_hugetlb_entry_migration(pte))) {
             swp_entry_t entry = pte_to_swp_entry(pte);
             struct page *page = pfn_swap_entry_to_page(entry);
+            pte_t newpte = pte;
 
-            if (!is_readable_migration_entry(entry)) {
-                pte_t newpte;
-
+            if (is_writable_migration_entry(entry)) {
                 if (PageAnon(page))
                     entry = make_readable_exclusive_migration_entry(
                                 swp_offset(entry));
@@ -6675,25 +6705,22 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                     entry = make_readable_migration_entry(
                                 swp_offset(entry));
                 newpte = swp_entry_to_pte(entry);
-                if (uffd_wp)
-                    newpte = pte_swp_mkuffd_wp(newpte);
-                else if (uffd_wp_resolve)
-                    newpte = pte_swp_clear_uffd_wp(newpte);
-                set_huge_pte_at(mm, address, ptep, newpte);
                 pages++;
             }
-            spin_unlock(ptl);
-            continue;
-        }
 
-        if (unlikely(pte_marker_uffd_wp(pte))) {
-            /*
-             * This is changing a non-present pte into a none pte,
-             * no need for huge_ptep_modify_prot_start/commit().
-             */
+            if (uffd_wp)
+                newpte = pte_swp_mkuffd_wp(newpte);
+            else if (uffd_wp_resolve)
+                newpte = pte_swp_clear_uffd_wp(newpte);
+            if (!pte_same(pte, newpte))
+                set_huge_pte_at(mm, address, ptep, newpte);
+        } else if (unlikely(is_pte_marker(pte))) {
+            /* No other markers apply for now. */
+            WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
             if (uffd_wp_resolve)
+                /* Safe to modify directly (non-present->none). */
                 huge_pte_clear(mm, address, ptep, psize);
-        }
-
-        if (!huge_pte_none(pte)) {
+        } else if (!huge_pte_none(pte)) {
             pte_t old_pte;
             unsigned int shift = huge_page_shift(hstate_vma(vma));
@@ -7328,26 +7355,21 @@ void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
     }
 }
 
-/*
- * This function will unconditionally remove all the shared pmd pgtable entries
- * within the specific vma for a hugetlbfs memory range.
- */
-void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
+static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+                 unsigned long start,
+                 unsigned long end)
 {
     struct hstate *h = hstate_vma(vma);
     unsigned long sz = huge_page_size(h);
     struct mm_struct *mm = vma->vm_mm;
     struct mmu_notifier_range range;
-    unsigned long address, start, end;
+    unsigned long address;
     spinlock_t *ptl;
     pte_t *ptep;
 
     if (!(vma->vm_flags & VM_MAYSHARE))
         return;
 
-    start = ALIGN(vma->vm_start, PUD_SIZE);
-    end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
-
     if (start >= end)
         return;
@@ -7379,6 +7401,16 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
     mmu_notifier_invalidate_range_end(&range);
 }
 
+/*
+ * This function will unconditionally remove all the shared pmd pgtable entries
+ * within the specific vma for a hugetlbfs memory range.
+ */
+void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
+{
+    hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
+            ALIGN_DOWN(vma->vm_end, PUD_SIZE));
+}
+
 #ifdef CONFIG_CMA
 static bool cma_reserve_called __initdata;
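The hugetlb_vm_op_split() hunk above unshares PMDs eagerly whenever a VMA split would break PUD_SIZE alignment, using the floor/ceil interval that the new hugetlb_unshare_pmds() consumes. A small userspace sketch of just that arithmetic (not from the patch; PUD_SIZE is assumed to be 1 GiB as on x86-64 with 4K pages, and the addresses are made up):

/* Hedged illustration only: the PUD alignment check performed by
 * hugetlb_vm_op_split() above, redone in userspace with made-up numbers. */
#include <stdint.h>
#include <stdio.h>

#define PUD_SIZE (1ULL << 30)          /* assumption: 1 GiB on x86-64/4K */
#define PUD_MASK (~(PUD_SIZE - 1))

int main(void)
{
    uint64_t vm_start = 0x7f0000000000ULL;    /* hypothetical VMA bounds */
    uint64_t vm_end   = 0x7f00c0000000ULL;
    uint64_t addr     = 0x7f0052000000ULL;    /* proposed split address */

    if (addr & ~PUD_MASK) {
        /* Split point is not PUD-aligned: PMD sharing around it would
         * straddle the new VMA boundary, so unshare that interval first. */
        uint64_t floor = addr & PUD_MASK;
        uint64_t ceil  = floor + PUD_SIZE;

        if (floor >= vm_start && ceil <= vm_end)
            printf("unshare PMDs in [%#llx, %#llx) before splitting\n",
                   (unsigned long long)floor, (unsigned long long)ceil);
    }
    return 0;
}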
mm/kasan/report.c
@@ -119,7 +119,7 @@ EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
  * Whether the KASAN KUnit test suite is currently being executed.
  * Updated in kasan_test.c.
  */
-bool kasan_kunit_executing;
+static bool kasan_kunit_executing;
 
 void kasan_kunit_test_suite_start(void)
 {
mm/khugepaged.c
@@ -1460,14 +1460,6 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
     if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
         return SCAN_VMA_CHECK;
 
-    /*
-     * Symmetry with retract_page_tables(): Exclude MAP_PRIVATE mappings
-     * that got written to. Without this, we'd have to also lock the
-     * anon_vma if one exists.
-     */
-    if (vma->anon_vma)
-        return SCAN_VMA_CHECK;
-
     /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
     if (userfaultfd_wp(vma))
         return SCAN_PTE_UFFD_WP;
@@ -1567,8 +1559,14 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
     }
 
     /* step 4: remove pte entries */
+    /* we make no change to anon, but protect concurrent anon page lookup */
+    if (vma->anon_vma)
+        anon_vma_lock_write(vma->anon_vma);
+
     collapse_and_free_pmd(mm, vma, haddr, pmd);
 
+    if (vma->anon_vma)
+        anon_vma_unlock_write(vma->anon_vma);
     i_mmap_unlock_write(vma->vm_file->f_mapping);
 
 maybe_install_pmd:
@@ -2649,7 +2647,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
             goto out_nolock;
         }
 
-        hend = vma->vm_end & HPAGE_PMD_MASK;
+        hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
     }
     mmap_assert_locked(mm);
     memset(cc->node_load, 0, sizeof(cc->node_load));
mm/madvise.c
@@ -130,7 +130,7 @@ static int replace_anon_vma_name(struct vm_area_struct *vma,
 #endif /* CONFIG_ANON_VMA_NAME */
 /*
  * Update the vm_flags on region of a vma, splitting it or merging it as
- * necessary. Must be called with mmap_sem held for writing;
+ * necessary. Must be called with mmap_lock held for writing;
  * Caller should ensure anon_name stability by raising its refcount even when
  * anon_name belongs to a valid vma because this function might free that vma.
  */
mm/memory.c
@@ -828,12 +828,8 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
             return -EBUSY;
         return -ENOENT;
     } else if (is_pte_marker_entry(entry)) {
-        /*
-         * We're copying the pgtable should only because dst_vma has
-         * uffd-wp enabled, do sanity check.
-         */
-        WARN_ON_ONCE(!userfaultfd_wp(dst_vma));
-        set_pte_at(dst_mm, addr, dst_pte, pte);
+        if (is_swapin_error_entry(entry) || userfaultfd_wp(dst_vma))
+            set_pte_at(dst_mm, addr, dst_pte, pte);
         return 0;
     }
     if (!userfaultfd_wp(dst_vma))
@@ -3629,8 +3625,12 @@ static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
     /*
      * Be careful so that we will only recover a special uffd-wp pte into a
      * none pte.  Otherwise it means the pte could have changed, so retry.
+     *
+     * This should also cover the case where e.g. the pte changed
+     * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_SWAPIN_ERROR.
+     * So is_pte_marker() check is not enough to safely drop the pte.
      */
-    if (is_pte_marker(*vmf->pte))
+    if (pte_same(vmf->orig_pte, *vmf->pte))
         pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
     pte_unmap_unlock(vmf->pte, vmf->ptl);
     return 0;
mm/mmap.c
@@ -1524,6 +1524,10 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
     if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
         return 1;
 
+    /* Do we need write faults for uffd-wp tracking? */
+    if (userfaultfd_wp(vma))
+        return 1;
+
     /* Specialty mapping? */
     if (vm_flags & VM_PFNMAP)
         return 0;
@@ -2290,7 +2294,7 @@ static inline int munmap_sidetree(struct vm_area_struct *vma,
  * @start: The aligned start address to munmap.
  * @end: The aligned end address to munmap.
  * @uf: The userfaultfd list_head
- * @downgrade: Set to true to attempt a write downgrade of the mmap_sem
+ * @downgrade: Set to true to attempt a write downgrade of the mmap_lock
  *
  * If @downgrade is true, check return code for potential release of the lock.
  */
@@ -2465,7 +2469,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
  * @len: The length of the range to munmap
  * @uf: The userfaultfd list_head
  * @downgrade: set to true if the user wants to attempt to write_downgrade the
- * mmap_sem
+ * mmap_lock
  *
  * This function takes a @mas that is either pointing to the previous VMA or set
  * to MA_START and sets it up to remove the mapping(s). The @len will be
mm/mprotect.c
@@ -245,7 +245,13 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
                     newpte = pte_swp_mksoft_dirty(newpte);
                 if (pte_swp_uffd_wp(oldpte))
                     newpte = pte_swp_mkuffd_wp(newpte);
-            } else if (pte_marker_entry_uffd_wp(entry)) {
+            } else if (is_pte_marker_entry(entry)) {
+                /*
+                 * Ignore swapin errors unconditionally,
+                 * because any access should sigbus anyway.
+                 */
+                if (is_swapin_error_entry(entry))
+                    continue;
                 /*
                  * If this is uffd-wp pte marker and we'd like
                  * to unprotect it, drop it; the next page
mm/nommu.c
@@ -559,7 +559,6 @@ void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
 
 static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
 {
-    mm->map_count++;
     vma->vm_mm = mm;
 
     /* add the VMA to the mapping */
@@ -587,6 +586,7 @@ static void mas_add_vma_to_mm(struct ma_state *mas, struct mm_struct *mm,
     BUG_ON(!vma->vm_region);
 
     setup_vma_to_mm(vma, mm);
+    mm->map_count++;
 
     /* add the VMA to the tree */
     vma_mas_store(vma, mas);
@@ -1240,6 +1240,7 @@ unsigned long do_mmap(struct file *file,
 error_just_free:
     up_write(&nommu_region_sem);
 error:
+    mas_destroy(&mas);
     if (region->vm_file)
         fput(region->vm_file);
     kmem_cache_free(vm_region_jar, region);
@@ -1250,7 +1251,6 @@ unsigned long do_mmap(struct file *file,
 sharing_violation:
     up_write(&nommu_region_sem);
-    mas_destroy(&mas);
     pr_warn("Attempt to share mismatched mappings\n");
     ret = -EINVAL;
     goto error;
@@ -1347,6 +1347,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
     if (vma->vm_file)
         return -ENOMEM;
 
+    mm = vma->vm_mm;
     if (mm->map_count >= sysctl_max_map_count)
         return -ENOMEM;
@@ -1398,6 +1399,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
     mas_set_range(&mas, vma->vm_start, vma->vm_end - 1);
     mas_store(&mas, vma);
     vma_mas_store(new, &mas);
+    mm->map_count++;
     return 0;
 
 err_mas_preallocate:
@@ -1509,7 +1511,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list
 erase_whole_vma:
     if (delete_vma_from_mm(vma))
         ret = -ENOMEM;
-    delete_vma(mm, vma);
+    else
+        delete_vma(mm, vma);
     return ret;
 }
mm/shmem.c
@@ -478,12 +478,10 @@ bool shmem_is_huge(struct vm_area_struct *vma, struct inode *inode,
     if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
         test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
         return false;
-    if (shmem_huge_force)
-        return true;
-    if (shmem_huge == SHMEM_HUGE_FORCE)
-        return true;
     if (shmem_huge == SHMEM_HUGE_DENY)
         return false;
+    if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
+        return true;
 
     switch (SHMEM_SB(inode->i_sb)->huge) {
     case SHMEM_HUGE_ALWAYS:
tools/testing/selftests/proc/proc-empty-vm.c
@@ -25,6 +25,7 @@
 #undef NDEBUG
 #include <assert.h>
 #include <errno.h>
+#include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -41,7 +42,7 @@
  * 1: vsyscall VMA is --xp     vsyscall=xonly
  * 2: vsyscall VMA is r-xp     vsyscall=emulate
  */
-static int g_vsyscall;
+static volatile int g_vsyscall;
 static const char *g_proc_pid_maps_vsyscall;
 static const char *g_proc_pid_smaps_vsyscall;
@@ -147,11 +148,12 @@ static void vsyscall(void)
 
     g_vsyscall = 0;
     /* gettimeofday(NULL, NULL); */
+    uint64_t rax = 0xffffffffff600000;
     asm volatile (
-        "call %P0"
-        :
-        : "i" (0xffffffffff600000), "D" (NULL), "S" (NULL)
-        : "rax", "rcx", "r11"
+        "call *%[rax]"
+        : [rax] "+a" (rax)
+        : "D" (NULL), "S" (NULL)
+        : "rcx", "r11"
     );
     g_vsyscall = 1;
tools/testing/selftests/proc/proc-pid-vm.c
@@ -257,11 +257,12 @@ static void vsyscall(void)
 
     g_vsyscall = 0;
     /* gettimeofday(NULL, NULL); */
+    uint64_t rax = 0xffffffffff600000;
     asm volatile (
-        "call %P0"
-        :
-        : "i" (0xffffffffff600000), "D" (NULL), "S" (NULL)
-        : "rax", "rcx", "r11"
+        "call *%[rax]"
+        : [rax] "+a" (rax)
+        : "D" (NULL), "S" (NULL)
+        : "rcx", "r11"
    );
    g_vsyscall = 1;
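Both selftests above switch the vsyscall probe from a "call %P0" with an immediate operand to an indirect call through rax, and make g_vsyscall volatile (presumably so the stores around the possibly-faulting call are neither reordered nor elided). A hedged standalone variant of the same probe, x86-64 only and not part of the patch, which forks so that a SIGSEGV under vsyscall=none does not kill the probing process:

/* Hedged sketch, x86-64 only: probe the legacy vsyscall page with the same
 * indirect-call form the selftests now use; the child may legitimately die
 * with SIGSEGV when the kernel was booted with vsyscall=none. */
#include <stdint.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    pid_t pid = fork();
    if (pid < 0)
        return 1;

    if (pid == 0) {
        /* gettimeofday(NULL, NULL) through the fixed vsyscall slot */
        uint64_t rax = 0xffffffffff600000;
        asm volatile (
            "call *%[rax]"
            : [rax] "+a" (rax)
            : "D" (NULL), "S" (NULL)
            : "rcx", "r11"
        );
        _exit(0);
    }

    int status;
    waitpid(pid, &status, 0);
    if (WIFEXITED(status) && WEXITSTATUS(status) == 0)
        puts("vsyscall page is callable (execute-only or emulate)");
    else
        puts("vsyscall page is not callable (likely vsyscall=none)");
    return 0;
}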