Kirill Smelkov / linux / Commits

Commit 46e387bb, authored Oct 22, 2010 by Andi Kleen

    Merge branch 'hwpoison-hugepages' into hwpoison

    Conflicts:
        mm/memory-failure.c

Parents: e9d08567, 3ef8fd7f

Showing 10 changed files with 551 additions and 125 deletions (+551, -125):
 arch/x86/mm/fault.c      | +13  -6
 fs/hugetlbfs/inode.c     | +15  -0
 include/linux/hugetlb.h  | +15  -2
 include/linux/migrate.h  | +16  -0
 include/linux/mm.h       | +10  -2
 mm/hugetlb.c             | +163 -70
 mm/memory-failure.c      | +93  -9
 mm/memory.c              | +2   -1
 mm/migrate.c             | +216 -18
 mm/rmap.c                | +8   -17
arch/x86/mm/fault.c  (+13 -6)

@@ -11,6 +11,7 @@
 #include <linux/kprobes.h>        /* __kprobes, ...        */
 #include <linux/mmiotrace.h>      /* kmmio_handler, ...    */
 #include <linux/perf_event.h>     /* perf_sw_event         */
+#include <linux/hugetlb.h>        /* hstate_index_to_shift */
 
 #include <asm/traps.h>            /* dotraplinkage, ...    */
 #include <asm/pgalloc.h>          /* pgd_*(), ...          */
@@ -160,15 +161,20 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 static void
 force_sig_info_fault(int si_signo, int si_code, unsigned long address,
-		     struct task_struct *tsk)
+		     struct task_struct *tsk, int fault)
 {
+	unsigned lsb = 0;
 	siginfo_t info;
 
 	info.si_signo	= si_signo;
 	info.si_errno	= 0;
 	info.si_code	= si_code;
 	info.si_addr	= (void __user *)address;
-	info.si_addr_lsb = si_code == BUS_MCEERR_AR ? PAGE_SHIFT : 0;
+	if (fault & VM_FAULT_HWPOISON_LARGE)
+		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
+	if (fault & VM_FAULT_HWPOISON)
+		lsb = PAGE_SHIFT;
+	info.si_addr_lsb = lsb;
 
 	force_sig_info(si_signo, &info, tsk);
 }
@@ -722,7 +728,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 	tsk->thread.error_code	= error_code | (address >= TASK_SIZE);
 	tsk->thread.trap_no	= 14;
 
-	force_sig_info_fault(SIGSEGV, si_code, address, tsk);
+	force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
 
 	return;
 }
@@ -807,14 +813,14 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 	tsk->thread.trap_no	= 14;
 
 #ifdef CONFIG_MEMORY_FAILURE
-	if (fault & VM_FAULT_HWPOISON) {
+	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
 		printk(KERN_ERR
 	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
 			tsk->comm, tsk->pid, address);
 		code = BUS_MCEERR_AR;
 	}
 #endif
-	force_sig_info_fault(SIGBUS, code, address, tsk);
+	force_sig_info_fault(SIGBUS, code, address, tsk, fault);
 }
 
 static noinline void
@@ -824,7 +830,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 	if (fault & VM_FAULT_OOM) {
 		out_of_memory(regs, error_code, address);
 	} else {
-		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON))
+		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+			     VM_FAULT_HWPOISON_LARGE))
			do_sigbus(regs, error_code, address, fault);
 		else
 			BUG();
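What the fault.c change buys userspace: the SIGBUS siginfo now reports the true size of the poisoned region, so si_addr_lsb becomes 21 for a 2MB hugepage (via hstate_index_to_shift()) instead of always PAGE_SHIFT. A minimal consumer sketch, not part of the commit, assuming a libc whose <signal.h> exposes si_addr_lsb and the BUS_MCEERR_* codes:

/* Sketch only: report how much memory around si_addr was lost.
 * fprintf() is not async-signal-safe; acceptable for a demo. */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void sigbus_handler(int sig, siginfo_t *si, void *ctx)
{
	if (si->si_code == BUS_MCEERR_AR || si->si_code == BUS_MCEERR_AO)
		fprintf(stderr, "hwpoison at %p, ~%lu bytes unusable\n",
			si->si_addr, 1UL << si->si_addr_lsb);
	_exit(EXIT_FAILURE);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = sigbus_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGBUS, &sa, NULL);

	/* ... touch memory that the kernel has marked hwpoisoned ... */
	pause();
	return 0;
}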
fs/hugetlbfs/inode.c  (+15 -0)

@@ -31,6 +31,7 @@
 #include <linux/statfs.h>
 #include <linux/security.h>
 #include <linux/magic.h>
+#include <linux/migrate.h>
 
 #include <asm/uaccess.h>
@@ -573,6 +574,19 @@ static int hugetlbfs_set_page_dirty(struct page *page)
 	return 0;
 }
 
+static int hugetlbfs_migrate_page(struct address_space *mapping,
+				struct page *newpage, struct page *page)
+{
+	int rc;
+
+	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
+	if (rc)
+		return rc;
+	migrate_page_copy(newpage, page);
+
+	return 0;
+}
+
 static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
@@ -659,6 +673,7 @@ static const struct address_space_operations hugetlbfs_aops = {
 	.write_begin	= hugetlbfs_write_begin,
 	.write_end	= hugetlbfs_write_end,
 	.set_page_dirty	= hugetlbfs_set_page_dirty,
+	.migratepage    = hugetlbfs_migrate_page,
 };
include/linux/hugetlb.h  (+15 -2)

@@ -43,7 +43,8 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to,
 						struct vm_area_struct *vma,
 						int acctflags);
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
-void __isolate_hwpoisoned_huge_page(struct page *page);
+int dequeue_hwpoisoned_huge_page(struct page *page);
+void copy_huge_page(struct page *dst, struct page *src);
 
 extern unsigned long hugepages_treat_as_movable;
 extern const unsigned long hugetlb_zero, hugetlb_infinity;
@@ -101,7 +102,10 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
 #define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
 #define huge_pte_offset(mm, address)	0
-#define __isolate_hwpoisoned_huge_page(page)	0
+#define dequeue_hwpoisoned_huge_page(page)	0
+static inline void copy_huge_page(struct page *dst, struct page *src)
+{
+}
 
 #define hugetlb_change_protection(vma, address, end, newprot)
@@ -228,6 +232,8 @@ struct huge_bootmem_page {
 	struct hstate *hstate;
 };
 
+struct page *alloc_huge_page_node(struct hstate *h, int nid);
+
 /* arch callback */
 int __init alloc_bootmem_huge_page(struct hstate *h);
@@ -301,8 +307,14 @@ static inline struct hstate *page_hstate(struct page *page)
 	return size_to_hstate(PAGE_SIZE << compound_order(page));
 }
 
+static inline unsigned hstate_index_to_shift(unsigned index)
+{
+	return hstates[index].order + PAGE_SHIFT;
+}
+
 #else
 struct hstate {};
+#define alloc_huge_page_node(h, nid) NULL
 #define alloc_bootmem_huge_page(h) NULL
 #define hstate_file(f) NULL
 #define hstate_vma(v) NULL
@@ -317,6 +329,7 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
 {
 	return 1;
 }
+#define hstate_index_to_shift(index) 0
 #endif
 
 #endif /* _LINUX_HUGETLB_H */
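The new hstate_index_to_shift() is just hstates[index].order + PAGE_SHIFT, so its output can be sanity-checked by hand. A throwaway userspace rendition (illustration only), with a hypothetical two-entry hstates[] standing in for x86_64's 2MB (order 9) and 1GB (order 18) pools:

#include <assert.h>

#define PAGE_SHIFT 12

struct hstate { unsigned int order; };

/* Hypothetical pools: 2MB (order 9) and 1GB (order 18) hugepages. */
static const struct hstate hstates[] = { { 9 }, { 18 } };

static unsigned int hstate_index_to_shift(unsigned int index)
{
	return hstates[index].order + PAGE_SHIFT;
}

int main(void)
{
	assert(hstate_index_to_shift(0) == 21);	/* log2(2MB) */
	assert(hstate_index_to_shift(1) == 30);	/* log2(1GB) */
	return 0;
}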
include/linux/migrate.h  (+16 -0)

@@ -14,6 +14,8 @@ extern int migrate_page(struct address_space *,
 			struct page *, struct page *);
 extern int migrate_pages(struct list_head *l, new_page_t x,
 			unsigned long private, int offlining);
+extern int migrate_huge_pages(struct list_head *l, new_page_t x,
+			unsigned long private, int offlining);
 
 extern int fail_migrate_page(struct address_space *,
 			struct page *, struct page *);
@@ -23,12 +25,17 @@ extern int migrate_prep_local(void);
 extern int migrate_vmas(struct mm_struct *mm,
 		const nodemask_t *from, const nodemask_t *to,
 		unsigned long flags);
+extern void migrate_page_copy(struct page *newpage, struct page *page);
+extern int migrate_huge_page_move_mapping(struct address_space *mapping,
+				  struct page *newpage, struct page *page);
 #else
 #define PAGE_MIGRATION 0
 
 static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
 		unsigned long private, int offlining) { return -ENOSYS; }
+static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
+		unsigned long private, int offlining) { return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
@@ -40,6 +47,15 @@ static inline int migrate_vmas(struct mm_struct *mm,
 	return -ENOSYS;
 }
 
+static inline void migrate_page_copy(struct page *newpage,
+				     struct page *page) {}
+
+static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
+				  struct page *newpage, struct page *page)
+{
+	return -ENOSYS;
+}
+
 /* Possible settings for the migrate_page() method in address_operations */
 #define migrate_page NULL
 #define fail_migrate_page NULL
include/linux/mm.h  (+10 -2)

@@ -718,12 +718,20 @@ static inline int page_mapped(struct page *page)
 #define VM_FAULT_SIGBUS	0x0002
 #define VM_FAULT_MAJOR	0x0004
 #define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
-#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned page */
+#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
+#define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
 
 #define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
 #define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
 
-#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON)
+#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
+
+#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
+			 VM_FAULT_HWPOISON_LARGE)
+
+/* Encode hstate index for a hwpoisoned large page */
+#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
+#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
 
 /*
  * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
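Bits 0-11 of the VM_FAULT_* space are already spoken for, so the hstate index rides in bits 12-15 (hence VM_FAULT_HWPOISON_LARGE_MASK == 0xf000, i.e. at most 16 hstates). The macros are plain bit packing and can be exercised stand-alone; the copy below is for illustration only, and index 1 is an arbitrary example value:

#include <assert.h>

#define VM_FAULT_HWPOISON_LARGE 0x0020
#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)

int main(void)
{
	/* Pack hstate index 1 next to the poison flag, then unpack it. */
	unsigned int fault = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(1);

	assert(fault & VM_FAULT_HWPOISON_LARGE);
	assert((fault & VM_FAULT_HWPOISON_LARGE_MASK) == 0x1000);
	assert(VM_FAULT_GET_HINDEX(fault) == 1);	/* round-trips */
	return 0;
}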
mm/hugetlb.c  (+163 -70)

(This diff is collapsed in the rendered page and is not reproduced here.)
mm/memory-failure.c  (+93 -9)

@@ -697,11 +697,10 @@ static int me_swapcache_clean(struct page *p, unsigned long pfn)
  * Issues:
  * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
  *   To narrow down kill region to one page, we need to break up pmd.
- * - To support soft-offlining for hugepage, we need to support hugepage
- *   migration.
  */
 static int me_huge_page(struct page *p, unsigned long pfn)
 {
+	int res = 0;
 	struct page *hpage = compound_head(p);
 	/*
 	 * We can safely recover from error on free or reserved (i.e.
@@ -714,7 +713,8 @@ static int me_huge_page(struct page *p, unsigned long pfn)
 	 * so there is no race between isolation and mapping/unmapping.
 	 */
 	if (!(page_mapping(hpage) || PageAnon(hpage))) {
-		__isolate_hwpoisoned_huge_page(hpage);
-		return RECOVERED;
+		res = dequeue_hwpoisoned_huge_page(hpage);
+		if (!res)
+			return RECOVERED;
 	}
 	return DELAYED;
@@ -972,7 +972,10 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 	 * We need/can do nothing about count=0 pages.
 	 * 1) it's a free page, and therefore in safe hand:
 	 *    prep_new_page() will be the gate keeper.
-	 * 2) it's part of a non-compound high order page.
+	 * 2) it's a free hugepage, which is also safe:
+	 *    an affected hugepage will be dequeued from hugepage freelist,
+	 *    so there's no concern about reusing it ever after.
+	 * 3) it's part of a non-compound high order page.
 	 *    Implies some kernel user: cannot stop them from
 	 *    R/W the page; let's pray that the page has been
 	 *    used and will be freed some time later.
@@ -984,6 +987,24 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 		if (is_free_buddy_page(p)) {
 			action_result(pfn, "free buddy", DELAYED);
 			return 0;
+		} else if (PageHuge(hpage)) {
+			/*
+			 * Check "just unpoisoned", "filter hit", and
+			 * "race with other subpage."
+			 */
+			lock_page_nosync(hpage);
+			if (!PageHWPoison(hpage)
+			    || (hwpoison_filter(p) && TestClearPageHWPoison(p))
+			    || (p != hpage && TestSetPageHWPoison(hpage))) {
+				atomic_long_sub(nr_pages, &mce_bad_pages);
+				return 0;
+			}
+			set_page_hwpoison_huge_page(hpage);
+			res = dequeue_hwpoisoned_huge_page(hpage);
+			action_result(pfn, "free huge",
+				      res ? IGNORED : DELAYED);
+			unlock_page(hpage);
+			return res;
 		} else {
 			action_result(pfn, "high order kernel", IGNORED);
 			return -EBUSY;
@@ -1145,6 +1166,16 @@ int unpoison_memory(unsigned long pfn)
 	nr_pages = 1 << compound_order(page);
 
 	if (!get_page_unless_zero(page)) {
+		/*
+		 * Since HWPoisoned hugepage should have non-zero refcount,
+		 * race between memory failure and unpoison seems to happen.
+		 * In such case unpoison fails and memory failure runs
+		 * to the end.
+		 */
+		if (PageHuge(page)) {
+			pr_debug("MCE: Memory failure is now running on free hugepage %#lx\n", pfn);
+			return 0;
+		}
 		if (TestClearPageHWPoison(p))
 			atomic_long_sub(nr_pages, &mce_bad_pages);
 		pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
@@ -1162,9 +1193,9 @@ int unpoison_memory(unsigned long pfn)
 		pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
 		atomic_long_sub(nr_pages, &mce_bad_pages);
 		freeit = 1;
-	}
-	if (PageHuge(page))
+		if (PageHuge(p))
 			clear_page_hwpoison_huge_page(page);
+	}
 	unlock_page(page);
 
 	put_page(page);
@@ -1178,6 +1209,10 @@ EXPORT_SYMBOL(unpoison_memory);
 static struct page *new_page(struct page *p, unsigned long private, int **x)
 {
 	int nid = page_to_nid(p);
-	return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
+	if (PageHuge(p))
+		return alloc_huge_page_node(page_hstate(compound_head(p)),
+						   nid);
+	else
+		return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
 }
@@ -1206,8 +1241,15 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags)
 	 * was free.
 	 */
 	set_migratetype_isolate(p);
+	/*
+	 * When the target page is a free hugepage, just remove it
+	 * from free hugepage list.
+	 */
 	if (!get_page_unless_zero(compound_head(p))) {
-		if (is_free_buddy_page(p)) {
+		if (PageHuge(p)) {
+			pr_info("get_any_page: %#lx free huge page\n", pfn);
+			ret = dequeue_hwpoisoned_huge_page(compound_head(p));
+		} else if (is_free_buddy_page(p)) {
 			pr_info("get_any_page: %#lx free buddy page\n", pfn);
 			/* Set hwpoison bit while page is still isolated */
 			SetPageHWPoison(p);
@@ -1226,6 +1268,45 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags)
 	return ret;
 }
 
+static int soft_offline_huge_page(struct page *page, int flags)
+{
+	int ret;
+	unsigned long pfn = page_to_pfn(page);
+	struct page *hpage = compound_head(page);
+	LIST_HEAD(pagelist);
+
+	ret = get_any_page(page, pfn, flags);
+	if (ret < 0)
+		return ret;
+	if (ret == 0)
+		goto done;
+
+	if (PageHWPoison(hpage)) {
+		put_page(hpage);
+		pr_debug("soft offline: %#lx hugepage already poisoned\n", pfn);
+		return -EBUSY;
+	}
+
+	/* Keep page count to indicate a given hugepage is isolated. */
+
+	list_add(&hpage->lru, &pagelist);
+	ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
+	if (ret) {
+		pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
+			 pfn, ret, page->flags);
+		if (ret > 0)
+			ret = -EIO;
+		return ret;
+	}
+done:
+	if (!PageHWPoison(hpage))
+		atomic_long_add(1 << compound_order(hpage), &mce_bad_pages);
+	set_page_hwpoison_huge_page(hpage);
+	dequeue_hwpoisoned_huge_page(hpage);
+	/* keep elevated page count for bad page */
+	return ret;
+}
+
 /**
  * soft_offline_page - Soft offline a page.
  * @page: page to offline
@@ -1253,6 +1334,9 @@ int soft_offline_page(struct page *page, int flags)
 	int ret;
 	unsigned long pfn = page_to_pfn(page);
 
+	if (PageHuge(page))
+		return soft_offline_huge_page(page, flags);
+
 	ret = get_any_page(page, pfn, flags);
 	if (ret < 0)
 		return ret;
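soft_offline_page() is reachable from userspace through madvise(MADV_SOFT_OFFLINE) and the /sys/devices/system/memory/soft_offline_page file, so with this series the same call on a hugetlb mapping now goes through soft_offline_huge_page() and migrate_huge_pages(). A hedged sketch (requires CAP_SYS_ADMIN, CONFIG_MEMORY_FAILURE, a hugepage reserved in the pool, and MAP_HUGETLB from 2.6.32+; the 2MB size is an assumption about the default hugepage size):

#include <stdio.h>
#include <sys/mman.h>

#ifndef MADV_SOFT_OFFLINE
#define MADV_SOFT_OFFLINE 101	/* <asm-generic/mman-common.h> */
#endif
#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000	/* x86 value; arch-specific */
#endif

#define HPAGE_SIZE (2UL * 1024 * 1024)	/* assumes 2MB default hugepages */

int main(void)
{
	char *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");	/* needs a hugepage reserved in the pool */
		return 1;
	}
	p[0] = 1;	/* fault the hugepage in */

	/* Contents migrate to a fresh hugepage; the old one is retired. */
	if (madvise(p, HPAGE_SIZE, MADV_SOFT_OFFLINE)) {
		perror("madvise");
		return 1;
	}
	return 0;
}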
mm/memory.c  (+2 -1)

@@ -1450,7 +1450,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				if (ret & VM_FAULT_OOM)
 					return i ? i : -ENOMEM;
 				if (ret &
-				    (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS))
+				    (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE|
+				     VM_FAULT_SIGBUS))
 					return i ? i : -EFAULT;
 				BUG();
mm/migrate.c  (+216 -18)

@@ -32,6 +32,7 @@
 #include <linux/security.h>
 #include <linux/memcontrol.h>
 #include <linux/syscalls.h>
+#include <linux/hugetlb.h>
 #include <linux/gfp.h>
 
 #include "internal.h"
@@ -95,6 +96,12 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 	pte_t *ptep, pte;
 	spinlock_t *ptl;
 
+	if (unlikely(PageHuge(new))) {
+		ptep = huge_pte_offset(mm, addr);
+		if (!ptep)
+			goto out;
+		ptl = &mm->page_table_lock;
+	} else {
 	pgd = pgd_offset(mm, addr);
 	if (!pgd_present(*pgd))
 		goto out;
@@ -115,6 +122,8 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 	}
 
 	ptl = pte_lockptr(mm, pmd);
+	}
+
 	spin_lock(ptl);
 	pte = *ptep;
 	if (!is_swap_pte(pte))
@@ -130,10 +139,19 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
 	if (is_write_migration_entry(entry))
 		pte = pte_mkwrite(pte);
+#ifdef CONFIG_HUGETLB_PAGE
+	if (PageHuge(new))
+		pte = pte_mkhuge(pte);
+#endif
 	flush_cache_page(vma, addr, pte_pfn(pte));
 	set_pte_at(mm, addr, ptep, pte);
 
-	if (PageAnon(new))
+	if (PageHuge(new)) {
+		if (PageAnon(new))
+			hugepage_add_anon_rmap(new, vma, addr);
+		else
+			page_dup_rmap(new);
+	} else if (PageAnon(new))
 		page_add_anon_rmap(new, vma, addr);
 	else
 		page_add_file_rmap(new);
@@ -275,11 +293,59 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	return 0;
 }
 
+/*
+ * The expected number of remaining references is the same as that
+ * of migrate_page_move_mapping().
+ */
+int migrate_huge_page_move_mapping(struct address_space *mapping,
+				   struct page *newpage, struct page *page)
+{
+	int expected_count;
+	void **pslot;
+
+	if (!mapping) {
+		if (page_count(page) != 1)
+			return -EAGAIN;
+		return 0;
+	}
+
+	spin_lock_irq(&mapping->tree_lock);
+
+	pslot = radix_tree_lookup_slot(&mapping->page_tree,
+					page_index(page));
+
+	expected_count = 2 + page_has_private(page);
+	if (page_count(page) != expected_count ||
+	    (struct page *)radix_tree_deref_slot(pslot) != page) {
+		spin_unlock_irq(&mapping->tree_lock);
+		return -EAGAIN;
+	}
+
+	if (!page_freeze_refs(page, expected_count)) {
+		spin_unlock_irq(&mapping->tree_lock);
+		return -EAGAIN;
+	}
+
+	get_page(newpage);
+
+	radix_tree_replace_slot(pslot, newpage);
+
+	page_unfreeze_refs(page, expected_count);
+
+	__put_page(page);
+
+	spin_unlock_irq(&mapping->tree_lock);
+	return 0;
+}
+
 /*
  * Copy the page to its new location
 */
-static void migrate_page_copy(struct page *newpage, struct page *page)
+void migrate_page_copy(struct page *newpage, struct page *page)
 {
-	copy_highpage(newpage, page);
+	if (PageHuge(page))
+		copy_huge_page(newpage, page);
+	else
+		copy_highpage(newpage, page);
 
 	if (PageError(page))
@@ -723,6 +789,92 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	return rc;
 }
 
+/*
+ * Counterpart of unmap_and_move_page() for hugepage migration.
+ *
+ * This function doesn't wait the completion of hugepage I/O
+ * because there is no race between I/O and migration for hugepage.
+ * Note that currently hugepage I/O occurs only in direct I/O
+ * where no lock is held and PG_writeback is irrelevant,
+ * and writeback status of all subpages are counted in the reference
+ * count of the head page (i.e. if all subpages of a 2MB hugepage are
+ * under direct I/O, the reference of the head page is 512 and a bit more.)
+ * This means that when we try to migrate hugepage whose subpages are
+ * doing direct I/O, some references remain after try_to_unmap() and
+ * hugepage migration fails without data corruption.
+ *
+ * There is also no race when direct I/O is issued on the page under migration,
+ * because then pte is replaced with migration swap entry and direct I/O code
+ * will wait in the page fault for migration to complete.
+ */
+static int unmap_and_move_huge_page(new_page_t get_new_page,
+				unsigned long private, struct page *hpage,
+				int force, int offlining)
+{
+	int rc = 0;
+	int *result = NULL;
+	struct page *new_hpage = get_new_page(hpage, private, &result);
+	int rcu_locked = 0;
+	struct anon_vma *anon_vma = NULL;
+
+	if (!new_hpage)
+		return -ENOMEM;
+
+	rc = -EAGAIN;
+
+	if (!trylock_page(hpage)) {
+		if (!force)
+			goto out;
+		lock_page(hpage);
+	}
+
+	if (PageAnon(hpage)) {
+		rcu_read_lock();
+		rcu_locked = 1;
+
+		if (page_mapped(hpage)) {
+			anon_vma = page_anon_vma(hpage);
+			atomic_inc(&anon_vma->external_refcount);
+		}
+	}
+
+	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+
+	if (!page_mapped(hpage))
+		rc = move_to_new_page(new_hpage, hpage, 1);
+
+	if (rc)
+		remove_migration_ptes(hpage, hpage);
+
+	if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount,
+					    &anon_vma->lock)) {
+		int empty = list_empty(&anon_vma->head);
+		spin_unlock(&anon_vma->lock);
+		if (empty)
+			anon_vma_free(anon_vma);
+	}
+
+	if (rcu_locked)
+		rcu_read_unlock();
+out:
+	unlock_page(hpage);
+
+	if (rc != -EAGAIN) {
+		list_del(&hpage->lru);
+		put_page(hpage);
+	}
+
+	put_page(new_hpage);
+
+	if (result) {
+		if (rc)
+			*result = rc;
+		else
+			*result = page_to_nid(new_hpage);
+	}
+	return rc;
+}
+
 /*
 * migrate_pages
 *
@@ -788,6 +940,52 @@ int migrate_pages(struct list_head *from,
 	return nr_failed + retry;
 }
 
+int migrate_huge_pages(struct list_head *from,
+		new_page_t get_new_page, unsigned long private, int offlining)
+{
+	int retry = 1;
+	int nr_failed = 0;
+	int pass = 0;
+	struct page *page;
+	struct page *page2;
+	int rc;
+
+	for (pass = 0; pass < 10 && retry; pass++) {
+		retry = 0;
+
+		list_for_each_entry_safe(page, page2, from, lru) {
+			cond_resched();
+
+			rc = unmap_and_move_huge_page(get_new_page,
+					private, page, pass > 2, offlining);
+
+			switch(rc) {
+			case -ENOMEM:
+				goto out;
+			case -EAGAIN:
+				retry++;
+				break;
+			case 0:
+				break;
+			default:
+				/* Permanent failure */
+				nr_failed++;
+				break;
+			}
+		}
+	}
+	rc = 0;
+out:
+	list_for_each_entry_safe(page, page2, from, lru)
+		put_page(page);
+
+	if (rc)
+		return rc;
+
+	return nr_failed + retry;
+}
+
 #ifdef CONFIG_NUMA
 /*
  * Move a list of individual pages
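The arithmetic behind the unmap_and_move_huge_page() comment: direct I/O pins each 4KB subpage by taking a reference on the hugepage head, so a 2MB hugepage fully under direct I/O holds about 512 extra references, while migrate_huge_page_move_mapping() will only accept 2 + page_has_private(page). The freeze therefore fails with -EAGAIN and migration backs off without corrupting in-flight I/O. Back-of-envelope check, illustration only:

#include <stdio.h>

int main(void)
{
	unsigned long hpage_size = 2UL << 20;	/* 2MB hugepage */
	unsigned long page_size = 4UL << 10;	/* 4KB base page */

	printf("head-page refs while fully under direct I/O: ~%lu\n",
	       hpage_size / page_size);		/* 512 */
	return 0;
}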
mm/rmap.c  (+8 -17)

@@ -780,10 +780,10 @@ void page_move_anon_rmap(struct page *page,
 }
 
 /**
- * __page_set_anon_rmap - setup new anonymous rmap
- * @page:	the page to add the mapping to
- * @vma:	the vm area in which the mapping is added
- * @address:	the user virtual address mapped
+ * __page_set_anon_rmap - set up new anonymous rmap
+ * @page:	Page to add to rmap
+ * @vma:	VM area to add page to.
+ * @address:	User virtual address of the mapping
  * @exclusive:	the page is exclusively owned by the current process
 */
 static void __page_set_anon_rmap(struct page *page,
@@ -793,25 +793,16 @@ static void __page_set_anon_rmap(struct page *page,
 	BUG_ON(!anon_vma);
 
+	if (PageAnon(page))
+		return;
+
 	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
-	if (!exclusive) {
-		if (PageAnon(page))
-			return;
+	if (!exclusive)
 		anon_vma = anon_vma->root;
-	} else {
-		/*
-		 * In this case, swapped-out-but-not-discarded swap-cache
-		 * is remapped. So, no need to update page->mapping here.
-		 * We convice anon_vma poitned by page->mapping is not obsolete
-		 * because vma->anon_vma is necessary to be a family of it.
-		 */
-		if (PageAnon(page))
-			return;
-	}
 
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	page->mapping = (struct address_space *) anon_vma;