Commit 1f1183c4 authored Feb 23, 2024 by Andrew Morton

    merge mm-hotfixes-stable into mm-nonmm-stable to pick up stackdepot changes

parents 7d8cebb9 720da1e5
Showing 10 changed files with 197 additions and 225 deletions.
Changed files:

  MAINTAINERS              +11    -0
  include/linux/poison.h    +3    -0
  lib/stackdepot.c         +127  -123
  mm/debug_vm_pgtable.c     +8    -0
  mm/filemap.c             +26   -25
  mm/kasan/common.c         +3    -5
  mm/kasan/generic.c        +7   -61
  mm/kasan/kasan.h          +0   -10
  mm/kasan/quarantine.c     +4    -1
  mm/migrate.c              +8    -0
MAINTAINERS

@@ -14111,6 +14111,17 @@ F:	mm/
 F:	tools/mm/
 F:	tools/testing/selftests/mm/
 
+MEMORY MAPPING
+M:	Andrew Morton <akpm@linux-foundation.org>
+R:	Liam R. Howlett <Liam.Howlett@oracle.com>
+R:	Vlastimil Babka <vbabka@suse.cz>
+R:	Lorenzo Stoakes <lstoakes@gmail.com>
+L:	linux-mm@kvack.org
+S:	Maintained
+W:	http://www.linux-mm.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F:	mm/mmap.c
+
 MEMORY TECHNOLOGY DEVICES (MTD)
 M:	Miquel Raynal <miquel.raynal@bootlin.com>
 M:	Richard Weinberger <richard@nod.at>
include/linux/poison.h

@@ -92,4 +92,7 @@
 /********** VFS **********/
 #define VFS_PTR_POISON ((void *)(0xF5 + POISON_POINTER_DELTA))
 
+/********** lib/stackdepot.c **********/
+#define STACK_DEPOT_POISON ((void *)(0xD390 + POISON_POINTER_DELTA))
+
 #endif
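The new constant follows the kernel's usual poison-pointer convention: a distinctive, non-dereferenceable pointer value written into freed or invalid slots so that a stray use faults recognizably instead of corrupting memory. A minimal sketch of the pattern, with a hypothetical pool_slot type standing in for stackdepot's internal structures (the real users live in the collapsed lib/stackdepot.c diff below):

	#include <linux/poison.h>
	#include <linux/bug.h>

	/* Hypothetical slot type for illustration; not stackdepot's real layout. */
	struct pool_slot {
		void *ptr;
	};

	static void slot_free(struct pool_slot *slot)
	{
		/* Poison the slot so a later use-after-free is caught, not silent. */
		slot->ptr = STACK_DEPOT_POISON;
	}

	static void *slot_get(struct pool_slot *slot)
	{
		/* A poisoned value here means someone is using a freed slot. */
		BUG_ON(slot->ptr == STACK_DEPOT_POISON);
		return slot->ptr;
	}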
lib/stackdepot.c

(This diff is collapsed in the page view; +127 -123 per the summary above.)
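The collapsed stackdepot rework is what this merge exists to pick up: it changes how depot handles are retained. Under the 6.8-era API, kasan_save_stack() passes depot_flags_t flags down to stack_depot_save_flags(), where STACK_DEPOT_FLAG_GET makes the returned handle refcounted (each save must later be balanced by stack_depot_put()), while saving without it makes the entry permanent. A minimal sketch of that API, assuming only the flag semantics visible in the KASAN diffs below:

	#include <linux/kernel.h>
	#include <linux/stackdepot.h>
	#include <linux/stacktrace.h>

	static depot_stack_handle_t save_current_stack(gfp_t gfp)
	{
		unsigned long entries[64];
		unsigned int nr;

		nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

		/*
		 * STACK_DEPOT_FLAG_CAN_ALLOC lets the depot grow its pools here.
		 * Adding STACK_DEPOT_FLAG_GET would take a reference that must
		 * later be dropped with stack_depot_put(handle); without it the
		 * recorded entry is never evicted.
		 */
		return stack_depot_save_flags(entries, nr, gfp,
					      STACK_DEPOT_FLAG_CAN_ALLOC);
	}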
mm/debug_vm_pgtable.c

@@ -362,6 +362,12 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
 	vaddr &= HPAGE_PUD_MASK;
 
 	pud = pfn_pud(args->pud_pfn, args->page_prot);
+	/*
+	 * Some architectures have debug checks to make sure
+	 * huge pud mapping are only found with devmap entries
+	 * For now test with only devmap entries.
+	 */
+	pud = pud_mkdevmap(pud);
 	set_pud_at(args->mm, vaddr, args->pudp, pud);
 	flush_dcache_page(page);
 	pudp_set_wrprotect(args->mm, vaddr, args->pudp);
@@ -374,6 +380,7 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
 	WARN_ON(!pud_none(pud));
 #endif /* __PAGETABLE_PMD_FOLDED */
 	pud = pfn_pud(args->pud_pfn, args->page_prot);
+	pud = pud_mkdevmap(pud);
 	pud = pud_wrprotect(pud);
 	pud = pud_mkclean(pud);
 	set_pud_at(args->mm, vaddr, args->pudp, pud);
@@ -391,6 +398,7 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
 #endif /* __PAGETABLE_PMD_FOLDED */
 	pud = pfn_pud(args->pud_pfn, args->page_prot);
+	pud = pud_mkdevmap(pud);
 	pud = pud_mkyoung(pud);
 	set_pud_at(args->mm, vaddr, args->pudp, pud);
 	flush_dcache_page(page);
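The comment in the first hunk refers to architecture-side sanity checks that reject huge PUD mappings which are not devmap entries. A hypothetical sketch of the kind of assertion the test must now satisfy (the name arch_check_huge_pud and its placement are illustrative, not taken from this commit):

	/*
	 * Illustrative only: the sort of debug assertion an architecture may
	 * run from its set_pud_at() path, which pud_mkdevmap() now satisfies.
	 */
	static inline void arch_check_huge_pud(pud_t pud)
	{
		WARN_ON_ONCE(pud_trans_huge(pud) && !pud_devmap(pud));
	}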
mm/filemap.c

@@ -4111,18 +4111,25 @@ static void filemap_cachestat(struct address_space *mapping,
 	rcu_read_lock();
 	xas_for_each(&xas, folio, last_index) {
+		int order;
 		unsigned long nr_pages;
 		pgoff_t folio_first_index, folio_last_index;
 
+		/*
+		 * Don't deref the folio. It is not pinned, and might
+		 * get freed (and reused) underneath us.
+		 *
+		 * We *could* pin it, but that would be expensive for
+		 * what should be a fast and lightweight syscall.
+		 *
+		 * Instead, derive all information of interest from
+		 * the rcu-protected xarray.
+		 */
+
 		if (xas_retry(&xas, folio))
 			continue;
 
-		if (xa_is_value(folio)) {
-			/* page is evicted */
-			void *shadow = (void *)folio;
-			bool workingset; /* not used */
-			int order = xa_get_order(xas.xa, xas.xa_index);
-
-			nr_pages = 1 << order;
-			folio_first_index = round_down(xas.xa_index, 1 << order);
-			folio_last_index = folio_first_index + nr_pages - 1;
+		order = xa_get_order(xas.xa, xas.xa_index);
+		nr_pages = 1 << order;
+		folio_first_index = round_down(xas.xa_index, 1 << order);
+		folio_last_index = folio_first_index + nr_pages - 1;
@@ -4134,6 +4141,11 @@ static void filemap_cachestat(struct address_space *mapping,
 		if (folio_last_index > last_index)
 			nr_pages -= folio_last_index - last_index;
 
+		if (xa_is_value(folio)) {
+			/* page is evicted */
+			void *shadow = (void *)folio;
+			bool workingset; /* not used */
+
 			cs->nr_evicted += nr_pages;
 
 #ifdef CONFIG_SWAP	/* implies CONFIG_MMU */
@@ -4150,24 +4162,13 @@ static void filemap_cachestat(struct address_space *mapping,
 				goto resched;
 			}
 
-		nr_pages = folio_nr_pages(folio);
-		folio_first_index = folio_pgoff(folio);
-		folio_last_index = folio_first_index + nr_pages - 1;
-
-		/* Folios might straddle the range boundaries, only count covered pages */
-		if (folio_first_index < first_index)
-			nr_pages -= first_index - folio_first_index;
-
-		if (folio_last_index > last_index)
-			nr_pages -= folio_last_index - last_index;
-
 		/* page is in cache */
 		cs->nr_cache += nr_pages;
 
-		if (folio_test_dirty(folio))
+		if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
 			cs->nr_dirty += nr_pages;
 
-		if (folio_test_writeback(folio))
+		if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
 			cs->nr_writeback += nr_pages;
 
 resched:
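filemap_cachestat() is the backend of the cachestat(2) syscall (Linux 6.5+), and the rework above keeps the whole walk inside the RCU-protected xarray instead of dereferencing unpinned folios. A userspace sketch of the interface being fixed, assuming a libc/kernel-headers combination that exposes __NR_cachestat and the uapi structs from <linux/mman.h>:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/mman.h>

	int main(void)
	{
		int fd = open("/etc/hostname", O_RDONLY);
		struct cachestat_range range = { 0, 0 };	/* len == 0: query to EOF */
		struct cachestat cs;

		/* No glibc wrapper yet on many systems; call the syscall directly. */
		if (fd >= 0 && syscall(__NR_cachestat, fd, &range, &cs, 0) == 0)
			printf("cached=%llu dirty=%llu writeback=%llu evicted=%llu\n",
			       (unsigned long long)cs.nr_cache,
			       (unsigned long long)cs.nr_dirty,
			       (unsigned long long)cs.nr_writeback,
			       (unsigned long long)cs.nr_evicted);
		close(fd);
		return 0;
	}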
mm/kasan/common.c

@@ -65,8 +65,7 @@ void kasan_save_track(struct kasan_track *track, gfp_t flags)
 {
 	depot_stack_handle_t stack;
 
-	stack = kasan_save_stack(flags,
-			STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
+	stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC);
 	kasan_set_track(track, stack);
 }
 
@@ -266,10 +265,9 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 		return true;
 
 	/*
-	 * If the object is not put into quarantine, it will likely be quickly
-	 * reallocated. Thus, release its metadata now.
+	 * Note: Keep per-object metadata to allow KASAN print stack traces for
+	 * use-after-free-before-realloc bugs.
 	 */
-	kasan_release_object_meta(cache, object);
 
 	/* Let slab put the object onto the freelist. */
 	return false;
mm/kasan/generic.c

@@ -485,16 +485,6 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
 	if (alloc_meta) {
 		/* Zero out alloc meta to mark it as invalid. */
 		__memset(alloc_meta, 0, sizeof(*alloc_meta));
-
-		/*
-		 * Prepare the lock for saving auxiliary stack traces.
-		 * Temporarily disable KASAN bug reporting to allow instrumented
-		 * raw_spin_lock_init to access aux_lock, which resides inside
-		 * of a redzone.
-		 */
-		kasan_disable_current();
-		raw_spin_lock_init(&alloc_meta->aux_lock);
-		kasan_enable_current();
 	}
 
 	/*
@@ -506,18 +496,8 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
 static void release_alloc_meta(struct kasan_alloc_meta *meta)
 {
-	/* Evict the stack traces from stack depot. */
-	stack_depot_put(meta->alloc_track.stack);
-	stack_depot_put(meta->aux_stack[0]);
-	stack_depot_put(meta->aux_stack[1]);
-
-	/*
-	 * Zero out alloc meta to mark it as invalid but keep aux_lock
-	 * initialized to avoid having to reinitialize it when another object
-	 * is allocated in the same slot.
-	 */
-	__memset(&meta->alloc_track, 0, sizeof(meta->alloc_track));
-	__memset(meta->aux_stack, 0, sizeof(meta->aux_stack));
+	/* Zero out alloc meta to mark it as invalid. */
+	__memset(meta, 0, sizeof(*meta));
 }
 
 static void release_free_meta(const void *object, struct kasan_free_meta *meta)
@@ -529,27 +509,10 @@ static void release_free_meta(const void *object, struct kasan_free_meta *meta)
 	if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
 		return;
 
-	/* Evict the stack trace from the stack depot. */
-	stack_depot_put(meta->free_track.stack);
-
 	/* Mark free meta as invalid. */
 	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
 }
 
-void kasan_release_object_meta(struct kmem_cache *cache, const void *object)
-{
-	struct kasan_alloc_meta *alloc_meta;
-	struct kasan_free_meta *free_meta;
-
-	alloc_meta = kasan_get_alloc_meta(cache, object);
-	if (alloc_meta)
-		release_alloc_meta(alloc_meta);
-
-	free_meta = kasan_get_free_meta(cache, object);
-	if (free_meta)
-		release_free_meta(object, free_meta);
-}
-
 size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
 {
 	struct kasan_cache *info = &cache->kasan_info;
@@ -574,8 +537,6 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
 	struct kmem_cache *cache;
 	struct kasan_alloc_meta *alloc_meta;
 	void *object;
-	depot_stack_handle_t new_handle, old_handle;
-	unsigned long flags;
 
 	if (is_kfence_address(addr) || !slab)
 		return;
@@ -586,33 +547,18 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
 	if (!alloc_meta)
 		return;
 
-	new_handle = kasan_save_stack(0, depot_flags);
-
-	/*
-	 * Temporarily disable KASAN bug reporting to allow instrumented
-	 * spinlock functions to access aux_lock, which resides inside of a
-	 * redzone.
-	 */
-	kasan_disable_current();
-	raw_spin_lock_irqsave(&alloc_meta->aux_lock, flags);
-	old_handle = alloc_meta->aux_stack[1];
-	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
-	alloc_meta->aux_stack[0] = new_handle;
-	raw_spin_unlock_irqrestore(&alloc_meta->aux_lock, flags);
-	kasan_enable_current();
-
-	stack_depot_put(old_handle);
+	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
+	alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
 }
 
 void kasan_record_aux_stack(void *addr)
 {
-	return __kasan_record_aux_stack(addr,
-			STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
+	return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
 }
 
 void kasan_record_aux_stack_noalloc(void *addr)
 {
-	return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_GET);
+	return __kasan_record_aux_stack(addr, 0);
 }
 
 void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
@@ -623,7 +569,7 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
 	if (!alloc_meta)
 		return;
 
-	/* Evict previous stack traces (might exist for krealloc or mempool). */
+	/* Invalidate previous stack traces (might exist for krealloc or mempool). */
 	release_alloc_meta(alloc_meta);
 
 	kasan_save_track(&alloc_meta->alloc_track, flags);
@@ -637,7 +583,7 @@ void kasan_save_free_info(struct kmem_cache *cache, void *object)
 	if (!free_meta)
 		return;
 
-	/* Evict previous stack trace (might exist for mempool). */
+	/* Invalidate previous stack trace (might exist for mempool). */
 	release_free_meta(object, free_meta);
 
 	kasan_save_track(&free_meta->free_track, 0);
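With the lock and the refcounted handles gone, __kasan_record_aux_stack() is back to a plain two-slot shift: the newest auxiliary stack pushes the previous one down, and nothing needs a matching stack_depot_put(). Callers elsewhere in the kernel (workqueue and RCU queueing paths, for example) use these hooks to note where an object was handed off for deferred execution. A sketch with a hypothetical enqueue helper (my_enqueue is illustrative; real callers include kernel/workqueue.c):

	#include <linux/kasan.h>
	#include <linux/workqueue.h>

	static void my_enqueue(struct work_struct *work)
	{
		/*
		 * The _noalloc variant is for contexts that must not allocate;
		 * after this merge it maps to __kasan_record_aux_stack(addr, 0),
		 * and the recorded handle never needs a stack_depot_put().
		 */
		kasan_record_aux_stack_noalloc(work);

		/* ... hand the work off to a queue here ... */
	}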
mm/kasan/kasan.h

@@ -6,7 +6,6 @@
 #include <linux/kasan.h>
 #include <linux/kasan-tags.h>
 #include <linux/kfence.h>
-#include <linux/spinlock.h>
 #include <linux/stackdepot.h>
 
 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
@@ -265,13 +264,6 @@ struct kasan_global {
 struct kasan_alloc_meta {
 	struct kasan_track alloc_track;
 	/* Free track is stored in kasan_free_meta. */
-	/*
-	 * aux_lock protects aux_stack from accesses from concurrent
-	 * kasan_record_aux_stack calls. It is a raw spinlock to avoid sleeping
-	 * on RT kernels, as kasan_record_aux_stack_noalloc can be called from
-	 * non-sleepable contexts.
-	 */
-	raw_spinlock_t aux_lock;
 	depot_stack_handle_t aux_stack[2];
 };
@@ -398,10 +390,8 @@ struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
 struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
 						const void *object);
 void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
-void kasan_release_object_meta(struct kmem_cache *cache, const void *object);
 #else
 static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { }
-static inline void kasan_release_object_meta(struct kmem_cache *cache, const void *object) { }
 #endif
 
 depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags);
mm/kasan/quarantine.c

@@ -145,7 +145,10 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
 	void *object = qlink_to_object(qlink, cache);
 	struct kasan_free_meta *free_meta = kasan_get_free_meta(cache, object);
 
-	kasan_release_object_meta(cache, object);
+	/*
+	 * Note: Keep per-object metadata to allow KASAN print stack traces for
+	 * use-after-free-before-realloc bugs.
+	 */
 
 	/*
 	 * If init_on_free is enabled and KASAN's free metadata is stored in
mm/migrate.c

@@ -2522,6 +2522,14 @@ static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
 			if (managed_zone(pgdat->node_zones + z))
 				break;
 		}
+
+		/*
+		 * If there are no managed zones, it should not proceed
+		 * further.
+		 */
+		if (z < 0)
+			return 0;
+
 		wakeup_kswapd(pgdat->node_zones + z, 0,
 			      folio_order(folio), ZONE_MOVABLE);
 		return 0;
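For context, this hunk sits at the end of a downward zone scan in numamigrate_isolate_folio(): when the node's watermarks fail, the code walks from the highest zone index down looking for any managed zone, and the new check stops wakeup_kswapd() from being called with z == -1 on a node that has none. A sketch of how the surrounding logic reads after the patch (the loop header is not part of the hunk and is shown here as assumed context):

	int z;

	/* Scan from the highest zone down, looking for any managed zone. */
	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		if (managed_zone(pgdat->node_zones + z))
			break;
	}

	/* If there are no managed zones, it should not proceed further. */
	if (z < 0)
		return 0;

	wakeup_kswapd(pgdat->node_zones + z, 0, folio_order(folio), ZONE_MOVABLE);
	return 0;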