// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a multi-bit ECC memory or cache
 * failure.
 *
 * In addition there is a "soft offline" entry point that allows stopping the
 * use of not-yet-corrupted (but suspicious) pages without killing anything.
 *
 * Handles page cache pages in various states.	The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures could happen anytime and
 * anywhere. This could violate some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, as in get the standard locks, even if that means
 * the error handling takes potentially a long time.
 *
 * It can be very tempting to add handling for obscure cases here.
 * In general any code for handling new cases should only be added iff:
 * - You know how to test it.
 * - You have a test that can be added to mce-test
 *   https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
 * - The case actually shows up as a frequent (top 10) page state in
 *   tools/vm/page-types when running a real workload.
 *
 * There are several operations here with exponential complexity because
 * of unsuitable VM data structures. For example the operation to map back
 * from RMAP chains to processes has to walk the complete process list and
 * has non-linear complexity in the number of processes. But since memory
 * corruptions are rare we hope to get away with this. This avoids impacting
 * the core VM.
 */
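
/*
 * Testing note (a sketch, not part of the handlers themselves): with
 * CONFIG_MEMORY_FAILURE enabled, a privileged test program can drive the
 * hard offline path with madvise(MADV_HWPOISON) and the soft offline path
 * with madvise(MADV_SOFT_OFFLINE), roughly:
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p[0] = 1;				// fault the page in
 *	madvise(p, 4096, MADV_HWPOISON);	// or MADV_SOFT_OFFLINE
 */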

#define pr_fmt(fmt) "Memory failure: " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/dax.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include <linux/memremap.h>
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include <linux/page-isolation.h>
#include <linux/pagewalk.h>
#include <linux/shmem_fs.h>
#include "swap.h"
#include "internal.h"
#include "ras/ras_event.h"

int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;
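
/*
 * Both knobs are exposed under /proc/sys/vm/ and are usually tuned from
 * userspace with sysctl(8), for example:
 *
 *	sysctl -w vm.memory_failure_early_kill=1
 *	sysctl -w vm.memory_failure_recovery=1
 */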

atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);

static bool hw_memory_failure __read_mostly = false;

/*
 * Return values:
 *   1:   the page is dissolved (if needed) and taken off from buddy,
 *   0:   the page is dissolved (if needed) and not taken off from buddy,
 *   < 0: failed to dissolve.
 */
static int __page_handle_poison(struct page *page)
{
	int ret;

	zone_pcp_disable(page_zone(page));
	ret = dissolve_free_huge_page(page);
	if (!ret)
		ret = take_page_off_buddy(page);
	zone_pcp_enable(page_zone(page));

	return ret;
}

static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
{
	if (hugepage_or_freepage) {
		/*
		 * Doing this check for free pages is also fine since dissolve_free_huge_page
		 * returns 0 for non-hugetlb pages as well.
		 */
		if (__page_handle_poison(page) <= 0)
			/*
			 * We could fail to take off the target page from buddy
			 * for example due to racy page allocation, but that's
			 * acceptable because a soft-offlined page is not broken
			 * and if someone really wants to use it, they should
			 * take it.
			 */
			return false;
	}

	SetPageHWPoison(page);
	if (release)
		put_page(page);
	page_ref_inc(page);
	num_poisoned_pages_inc();

	return true;
}

#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)

u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
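
/*
 * These filter values are normally set through the hwpoison-inject debugfs
 * interface (CONFIG_HWPOISON_INJECT, mm/hwpoison-inject.c); roughly:
 *
 *	echo 1 > /sys/kernel/debug/hwpoison/corrupt-filter-enable
 *	echo $pfn > /sys/kernel/debug/hwpoison/corrupt-pfn
 */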

static int hwpoison_filter_dev(struct page *p)
{
	struct address_space *mapping;
	dev_t dev;

	if (hwpoison_filter_dev_major == ~0U &&
	    hwpoison_filter_dev_minor == ~0U)
		return 0;

	mapping = page_mapping(p);
	if (mapping == NULL || mapping->host == NULL)
		return -EINVAL;

	dev = mapping->host->i_sb->s_dev;
	if (hwpoison_filter_dev_major != ~0U &&
	    hwpoison_filter_dev_major != MAJOR(dev))
		return -EINVAL;
	if (hwpoison_filter_dev_minor != ~0U &&
	    hwpoison_filter_dev_minor != MINOR(dev))
		return -EINVAL;

	return 0;
}

static int hwpoison_filter_flags(struct page *p)
{
	if (!hwpoison_filter_flags_mask)
		return 0;

	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
				    hwpoison_filter_flags_value)
		return 0;
	else
		return -EINVAL;
}

/*
 * This allows stress tests to limit test scope to a collection of tasks
 * by putting them under some memcg. This prevents killing unrelated/important
 * processes such as /sbin/init. Note that the target task may share clean
 * pages with init (e.g. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. Finally, due to race conditions this filter
 * can only guarantee that the page either belongs to the memcg tasks, or is
 * a freed page.
 */
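/*
 * A typical test setup (sketch following the hwpoison kernel documentation;
 * the exact cgroup paths depend on the hierarchy in use) writes the memcg's
 * inode number into the filter:
 *
 *	mkdir /sys/fs/cgroup/memory/hwpoison
 *	echo $TEST_PID > /sys/fs/cgroup/memory/hwpoison/tasks
 *	stat -c %i /sys/fs/cgroup/memory/hwpoison > \
 *		/sys/kernel/debug/hwpoison/corrupt-filter-memcg
 */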
#ifdef CONFIG_MEMCG
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
	if (!hwpoison_filter_memcg)
		return 0;

	if (page_cgroup_ino(p) != hwpoison_filter_memcg)
		return -EINVAL;

	return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif

int hwpoison_filter(struct page *p)
{
	if (!hwpoison_filter_enable)
		return 0;

	if (hwpoison_filter_dev(p))
		return -EINVAL;

	if (hwpoison_filter_flags(p))
		return -EINVAL;

	if (hwpoison_filter_task(p))
		return -EINVAL;

	return 0;
}
#else
int hwpoison_filter(struct page *p)
{
	return 0;
}
#endif

EXPORT_SYMBOL_GPL(hwpoison_filter);

/*
 * Kill all processes that have a poisoned page mapped and then isolate
 * the page.
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually freed yet.
 * Then stash the page away
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * Also there are some races possible while we get from the
 * error detection to actually handle it.
 */

struct to_kill {
	struct list_head nd;
	struct task_struct *tsk;
	unsigned long addr;
	short size_shift;
};

/*
 * Send all the processes that have the page mapped a signal.
 * ``action optional'' if they are not immediately affected by the error
 * ``action required'' if the error happened in the current execution context
 */
static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
{
	struct task_struct *t = tk->tsk;
	short addr_lsb = tk->size_shift;
	int ret = 0;

	pr_err("%#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
			pfn, t->comm, t->pid);

	if ((flags & MF_ACTION_REQUIRED) && (t == current))
		ret = force_sig_mceerr(BUS_MCEERR_AR,
				 (void __user *)tk->addr, addr_lsb);
	else
		/*
		 * Signal other processes sharing the page if they have
		 * PF_MCE_EARLY set.
		 * Don't use force here, it's convenient if the signal
		 * can be temporarily blocked.
		 * This could cause a loop when the user sets SIGBUS
		 * to SIG_IGN, but hopefully no one will do that?
		 */
		ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
				      addr_lsb, t);  /* synchronous? */
	if (ret < 0)
		pr_info("Error sending signal to %s:%d: %d\n",
			t->comm, t->pid, ret);
	return ret;
}

/*
 * Unknown page type encountered. Try to check whether it can turn PageLRU by
 * lru_add_drain_all.
 */
void shake_page(struct page *p)
{
	if (PageHuge(p))
		return;

	if (!PageSlab(p)) {
		lru_add_drain_all();
		if (PageLRU(p) || is_free_buddy_page(p))
			return;
	}

	/*
	 * TODO: Could shrink slab caches here if a lightweight range-based
	 * shrinker will be available.
	 */
}
EXPORT_SYMBOL_GPL(shake_page);

static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
		unsigned long address)
{
	unsigned long ret = 0;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	VM_BUG_ON_VMA(address == -EFAULT, vma);
	pgd = pgd_offset(vma->vm_mm, address);
	if (!pgd_present(*pgd))
		return 0;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return 0;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return 0;
	if (pud_devmap(*pud))
		return PUD_SHIFT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;
	if (pmd_devmap(*pmd))
		return PMD_SHIFT;
	pte = pte_offset_map(pmd, address);
	if (pte_present(*pte) && pte_devmap(*pte))
		ret = PAGE_SHIFT;
	pte_unmap(pte);
	return ret;
}

/*
 * Failure handling: if we can't find or can't kill a process there's
 * not much we can do.	We just print a message and ignore otherwise.
 */

/*
 * Schedule a process for later kill.
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 *
 * Notice: @fsdax_pgoff is used only when @p is a fsdax page.
 *   In other cases, such as anonymous and file-backed pages, the address to be
 *   killed can be calculated by @p itself.
 */
static void add_to_kill(struct task_struct *tsk, struct page *p,
			pgoff_t fsdax_pgoff, struct vm_area_struct *vma,
			struct list_head *to_kill)
{
	struct to_kill *tk;

	tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
	if (!tk) {
		pr_err("Out of memory while machine check handling\n");
		return;
	}

	tk->addr = page_address_in_vma(p, vma);
	if (is_zone_device_page(p)) {
		/*
		 * Since page->mapping is not used for fsdax, we need to
		 * calculate the address based on the vma.
		 */
		if (p->pgmap->type == MEMORY_DEVICE_FS_DAX)
			tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
		tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
	} else
		tk->size_shift = page_shift(compound_head(p));

	/*
	 * Send SIGKILL if "tk->addr == -EFAULT". Also, as
	 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
	 * so "tk->size_shift == 0" effectively checks no mapping on
	 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
	 * to a process' address space, it's possible not all N VMAs
	 * contain mappings for the page, but at least one VMA does.
	 * Only deliver SIGBUS with payload derived from the VMA that
	 * has a mapping for the page.
	 */
	if (tk->addr == -EFAULT) {
		pr_info("Unable to find user space address %lx in %s\n",
			page_to_pfn(p), tsk->comm);
	} else if (tk->size_shift == 0) {
		kfree(tk);
		return;
	}

	get_task_struct(tsk);
	tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
}

/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when FORCEKILL is set, otherwise just free the
 * list (this is used for clean pages which do not need killing)
 * Also when FAIL is set do a force kill because something went
 * wrong earlier.
 */
static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
		unsigned long pfn, int flags)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe (tk, next, to_kill, nd) {
		if (forcekill) {
			/*
			 * In case something went wrong with munmapping
			 * make sure the process doesn't catch the
			 * signal and then access the memory. Just kill it.
			 */
			if (fail || tk->addr == -EFAULT) {
				pr_err("%#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
				do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
						 tk->tsk, PIDTYPE_PID);
			}

			/*
			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			 * process anyways.
			 */
			else if (kill_proc(tk, pfn, flags) < 0)
				pr_err("%#lx: Cannot send advisory machine check signal to %s:%d\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
		}
		put_task_struct(tk->tsk);
		kfree(tk);
	}
}

/*
 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
 * on behalf of the thread group. Return task_struct of the (first found)
 * dedicated thread if found, and return NULL otherwise.
 *
 * We already hold read_lock(&tasklist_lock) in the caller, so we don't
 * have to call rcu_read_lock/unlock() in this function.
 */
static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
{
	struct task_struct *t;

	for_each_thread(tsk, t) {
		if (t->flags & PF_MCE_PROCESS) {
			if (t->flags & PF_MCE_EARLY)
				return t;
		} else {
			if (sysctl_memory_failure_early_kill)
				return t;
		}
	}
	return NULL;
}

/*
 * Determine whether a given process is "early kill" process which expects
 * to be signaled when some page under the process is hwpoisoned.
 * Return task_struct of the dedicated thread (main thread unless explicitly
 * specified) if the process is "early kill" and otherwise returns NULL.
 *
 * Note that the above is true for the Action Optional case. For the Action
 * Required case, it's only meaningful to the current thread, which needs to be
 * signaled with SIGBUS. The error is Action Optional for other non-current
 * processes sharing the same error page; if such a process is "early kill",
 * the task_struct of the dedicated thread will also be returned.
 */
static struct task_struct *task_early_kill(struct task_struct *tsk,
					   int force_early)
{
	if (!tsk->mm)
		return NULL;
	/*
	 * Comparing ->mm here because current task might represent
	 * a subthread, while tsk always points to the main thread.
	 */
	if (force_early && tsk->mm == current->mm)
		return current;

	return find_early_kill_thread(tsk);
}

/*
 * Collect processes when the error hit an anonymous page.
 */
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
				int force_early)
{
	struct folio *folio = page_folio(page);
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct anon_vma *av;
	pgoff_t pgoff;

	av = folio_lock_anon_vma_read(folio, NULL);
	if (av == NULL)	/* Not actually mapped anymore */
		return;

	pgoff = page_to_pgoff(page);
	read_lock(&tasklist_lock);
	for_each_process (tsk) {
		struct anon_vma_chain *vmac;
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
					       pgoff, pgoff) {
			vma = vmac->vma;
			if (!page_mapped_in_vma(page, vma))
				continue;
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, 0, vma, to_kill);
		}
	}
	read_unlock(&tasklist_lock);
	page_unlock_anon_vma_read(av);
}

/*
 * Collect processes when the error hit a file mapped page.
 */
static void collect_procs_file(struct page *page, struct list_head *to_kill,
				int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff;

	i_mmap_lock_read(mapping);
	read_lock(&tasklist_lock);
	pgoff = page_to_pgoff(page);
	for_each_process(tsk) {
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
				      pgoff) {
			/*
			 * Send early kill signal to tasks where a vma covers
			 * the page but the corrupted page is not necessarily
			 * mapped in its pte.
			 * Assume applications who requested early kill want
			 * to be informed of all such data corruptions.
			 */
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, 0, vma, to_kill);
		}
	}
	read_unlock(&tasklist_lock);
	i_mmap_unlock_read(mapping);
}

#ifdef CONFIG_FS_DAX
/*
 * Collect processes when the error hit a fsdax page.
 */
static void collect_procs_fsdax(struct page *page,
		struct address_space *mapping, pgoff_t pgoff,
		struct list_head *to_kill)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;

	i_mmap_lock_read(mapping);
	read_lock(&tasklist_lock);
	for_each_process(tsk) {
		struct task_struct *t = task_early_kill(tsk, true);

		if (!t)
			continue;
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, pgoff, vma, to_kill);
		}
	}
	read_unlock(&tasklist_lock);
	i_mmap_unlock_read(mapping);
}
#endif /* CONFIG_FS_DAX */

/*
 * Collect the processes who have the corrupted page mapped to kill.
 */
static void collect_procs(struct page *page, struct list_head *tokill,
				int force_early)
{
	if (!page->mapping)
		return;

	if (PageAnon(page))
		collect_procs_anon(page, tokill, force_early);
	else
		collect_procs_file(page, tokill, force_early);
}

struct hwp_walk {
	struct to_kill tk;
	unsigned long pfn;
	int flags;
};

static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift)
{
	tk->addr = addr;
	tk->size_shift = shift;
}

static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
				unsigned long poisoned_pfn, struct to_kill *tk)
{
	unsigned long pfn = 0;

	if (pte_present(pte)) {
		pfn = pte_pfn(pte);
	} else {
		swp_entry_t swp = pte_to_swp_entry(pte);

		if (is_hwpoison_entry(swp))
			pfn = hwpoison_entry_to_pfn(swp);
	}

	if (!pfn || pfn != poisoned_pfn)
		return 0;

	set_to_kill(tk, addr, shift);
	return 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
				      struct hwp_walk *hwp)
{
	pmd_t pmd = *pmdp;
	unsigned long pfn;
	unsigned long hwpoison_vaddr;

	if (!pmd_present(pmd))
		return 0;
	pfn = pmd_pfn(pmd);
	if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {
		hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT);
		set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT);
		return 1;
	}
	return 0;
}
#else
static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
				      struct hwp_walk *hwp)
{
	return 0;
}
#endif

static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
			      unsigned long end, struct mm_walk *walk)
{
	struct hwp_walk *hwp = walk->private;
	int ret = 0;
	pte_t *ptep, *mapped_pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmdp, walk->vma);
	if (ptl) {
		ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp);
		spin_unlock(ptl);
		goto out;
	}

	if (pmd_trans_unstable(pmdp))
		goto out;

	mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp,
						addr, &ptl);
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		ret = check_hwpoisoned_entry(*ptep, addr, PAGE_SHIFT,
					     hwp->pfn, &hwp->tk);
		if (ret == 1)
			break;
	}
	pte_unmap_unlock(mapped_pte, ptl);
out:
	cond_resched();
	return ret;
}

#ifdef CONFIG_HUGETLB_PAGE
static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
			    unsigned long addr, unsigned long end,
			    struct mm_walk *walk)
{
	struct hwp_walk *hwp = walk->private;
	pte_t pte = huge_ptep_get(ptep);
	struct hstate *h = hstate_vma(walk->vma);

	return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
				      hwp->pfn, &hwp->tk);
}
#else
#define hwpoison_hugetlb_range	NULL
#endif

static const struct mm_walk_ops hwp_walk_ops = {
	.pmd_entry = hwpoison_pte_range,
	.hugetlb_entry = hwpoison_hugetlb_range,
};

/*
 * Sends SIGBUS to the current process with error info.
 *
 * This function is intended to handle "Action Required" MCEs on already
 * hardware poisoned pages. They could happen, for example, when
 * memory_failure() failed to unmap the error page at the first call, or
 * when multiple local machine checks happened on different CPUs.
 *
 * MCE handler currently has no easy access to the error virtual address,
 * so this function walks the page tables to find it. The returned virtual address
 * is proper in most cases, but it could be wrong when the application
 * process has multiple entries mapping the error page.
 */
static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
				  int flags)
{
	int ret;
	struct hwp_walk priv = {
		.pfn = pfn,
	};
	priv.tk.tsk = p;

	mmap_read_lock(p->mm);
	ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops,
			      (void *)&priv);
	if (ret == 1 && priv.tk.addr)
		kill_proc(&priv.tk, pfn, flags);
	else
		ret = 0;
	mmap_read_unlock(p->mm);
	return ret > 0 ? -EHWPOISON : -EFAULT;
}

static const char *action_name[] = {
	[MF_IGNORED] = "Ignored",
	[MF_FAILED] = "Failed",
	[MF_DELAYED] = "Delayed",
	[MF_RECOVERED] = "Recovered",
};

static const char * const action_page_types[] = {
	[MF_MSG_KERNEL]			= "reserved kernel page",
	[MF_MSG_KERNEL_HIGH_ORDER]	= "high-order kernel page",
	[MF_MSG_SLAB]			= "kernel slab page",
	[MF_MSG_DIFFERENT_COMPOUND]	= "different compound page after locking",
	[MF_MSG_HUGE]			= "huge page",
	[MF_MSG_FREE_HUGE]		= "free huge page",
	[MF_MSG_UNMAP_FAILED]		= "unmapping failed page",
	[MF_MSG_DIRTY_SWAPCACHE]	= "dirty swapcache page",
	[MF_MSG_CLEAN_SWAPCACHE]	= "clean swapcache page",
	[MF_MSG_DIRTY_MLOCKED_LRU]	= "dirty mlocked LRU page",
	[MF_MSG_CLEAN_MLOCKED_LRU]	= "clean mlocked LRU page",
	[MF_MSG_DIRTY_UNEVICTABLE_LRU]	= "dirty unevictable LRU page",
	[MF_MSG_CLEAN_UNEVICTABLE_LRU]	= "clean unevictable LRU page",
	[MF_MSG_DIRTY_LRU]		= "dirty LRU page",
	[MF_MSG_CLEAN_LRU]		= "clean LRU page",
	[MF_MSG_TRUNCATED_LRU]		= "already truncated LRU page",
	[MF_MSG_BUDDY]			= "free buddy page",
	[MF_MSG_DAX]			= "dax page",
	[MF_MSG_UNSPLIT_THP]		= "unsplit thp",
	[MF_MSG_UNKNOWN]		= "unknown page",
};

/*
 * XXX: It is possible that a page is isolated from LRU cache,
 * and then kept in swap cache or failed to remove from page cache.
 * The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
 */
static int delete_from_lru_cache(struct page *p)
{
	if (!isolate_lru_page(p)) {
		/*
		 * Clear sensible page flags, so that the buddy system won't
		 * complain when the page is unpoison-and-freed.
		 */
		ClearPageActive(p);
		ClearPageUnevictable(p);

		/*
		 * Poisoned page might never drop its ref count to 0 so we have
		 * to uncharge it manually from its memcg.
		 */
		mem_cgroup_uncharge(page_folio(p));

		/*
		 * drop the page count elevated by isolate_lru_page()
		 */
		put_page(p);
		return 0;
	}
	return -EIO;
}

static int truncate_error_page(struct page *p, unsigned long pfn,
				struct address_space *mapping)
{
	int ret = MF_FAILED;

	if (mapping->a_ops->error_remove_page) {
		int err = mapping->a_ops->error_remove_page(mapping, p);

		if (err != 0) {
			pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
		} else if (page_has_private(p) &&
			   !try_to_release_page(p, GFP_NOIO)) {
			pr_info("%#lx: failed to release buffers\n", pfn);
		} else {
			ret = MF_RECOVERED;
		}
	} else {
		/*
		 * If the file system doesn't support it, just invalidate.
		 * This fails on dirty pages or anything with private data.
		 */
		if (invalidate_inode_page(p))
			ret = MF_RECOVERED;
		else
			pr_info("%#lx: Failed to invalidate\n",	pfn);
	}

	return ret;
}

struct page_state {
	unsigned long mask;
	unsigned long res;
	enum mf_action_page_type type;

	/* Callback ->action() has to unlock the relevant page inside it. */
	int (*action)(struct page_state *ps, struct page *p);
};

/*
 * Return true if page is still referenced by others, otherwise return
 * false.
 *
 * The extra_pins is true when one extra refcount is expected.
 */
static bool has_extra_refcount(struct page_state *ps, struct page *p,
			       bool extra_pins)
{
	int count = page_count(p) - 1;

	if (extra_pins)
		count -= 1;

	if (count > 0) {
		pr_err("%#lx: %s still referenced by %d users\n",
		       page_to_pfn(p), action_page_types[ps->type], count);
		return true;
	}

	return false;
}

/*
 * Error hit kernel page.
 * Do nothing, try to be lucky and not touch this instead. For a few cases we
 * could be more sophisticated.
 */
static int me_kernel(struct page_state *ps, struct page *p)
{
	unlock_page(p);
	return MF_IGNORED;
}

/*
 * Page in unknown state. Do nothing.
 */
static int me_unknown(struct page_state *ps, struct page *p)
{
	pr_err("%#lx: Unknown page state\n", page_to_pfn(p));
	unlock_page(p);
	return MF_FAILED;
}

/*
 * Clean (or cleaned) page cache page.
 */
static int me_pagecache_clean(struct page_state *ps, struct page *p)
{
	int ret;
	struct address_space *mapping;
	bool extra_pins;

	delete_from_lru_cache(p);

	/*
	 * For anonymous pages we're done; the only reference left
	 * should be the one m_f() holds.
	 */
	if (PageAnon(p)) {
		ret = MF_RECOVERED;
		goto out;
	}

	/*
	 * Now truncate the page in the page cache. This is really
	 * more like a "temporary hole punch"
	 * Don't do this for block devices when someone else
	 * has a reference, because it could be file system metadata
	 * and that's not safe to truncate.
	 */
	mapping = page_mapping(p);
	if (!mapping) {
		/*
		 * Page has been torn down in the meanwhile
		 */
		ret = MF_FAILED;
		goto out;
	}

	/*
	 * The shmem page is kept in page cache instead of being truncated,
	 * so it is expected to have an extra refcount after error-handling.
	 */
	extra_pins = shmem_mapping(mapping);

	/*
	 * Truncation is a bit tricky. Enable it per file system for now.
	 *
	 * Open: to take i_rwsem or not for this? Right now we don't.
	 */
	ret = truncate_error_page(p, page_to_pfn(p), mapping);
	if (has_extra_refcount(ps, p, extra_pins))
		ret = MF_FAILED;

out:
	unlock_page(p);

	return ret;
}

/*
 * Dirty pagecache page
 * Issues: when the error hit a hole page the error is not properly
 * propagated.
 */
static int me_pagecache_dirty(struct page_state *ps, struct page *p)
{
	struct address_space *mapping = page_mapping(p);

	SetPageError(p);
	/* TBD: print more information about the file. */
	if (mapping) {
		/*
		 * IO error will be reported by write(), fsync(), etc.
		 * who check the mapping.
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 *
		 * There's one open issue:
		 *
		 * The EIO will be only reported on the next IO
		 * operation and then cleared through the IO map.
		 * Normally Linux has two mechanisms to pass IO error
		 * first through the AS_EIO flag in the address space
		 * and then through the PageError flag in the page.
		 * Since we drop pages on memory failure handling the
		 * only mechanism open to use is through AS_EIO.
		 *
		 * This has the disadvantage that it gets cleared on
		 * the first operation that returns an error, while
		 * the PageError bit is more sticky and only cleared
		 * when the page is reread or dropped.  If an
		 * application assumes it will always get error on
		 * fsync, but does other operations on the fd before
		 * and the page is dropped in between, then the error
		 * will not be properly reported.
		 *
		 * This can already happen even without hwpoisoned
		 * pages: first on metadata IO errors (which only
		 * report through AS_EIO) or when the page is dropped
		 * at the wrong time.
		 *
		 * So right now we assume that the application DTRT on
		 * the first EIO, but we're not worse than other parts
		 * of the kernel.
		 */
		mapping_set_error(mapping, -EIO);
	}

	return me_pagecache_clean(ps, p);
}

/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache page is tricky to handle. The page could live both in page
 * cache and swap cache (i.e. the page is freshly swapped in). So it could be
 * referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 *      - clear dirty bit to prevent IO
 *      - remove from LRU
 *      - but keep in the swap cache, so that when we return to it on
 *        a later page fault, we know the application is accessing
 *        corrupted data and shall be killed (we installed simple
 *        interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page_state *ps, struct page *p)
{
	int ret;
	bool extra_pins = false;

	ClearPageDirty(p);
	/* Trigger EIO in shmem: */
	ClearPageUptodate(p);

	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
	unlock_page(p);

	if (ret == MF_DELAYED)
		extra_pins = true;

	if (has_extra_refcount(ps, p, extra_pins))
		ret = MF_FAILED;

	return ret;
}

static int me_swapcache_clean(struct page_state *ps, struct page *p)
{
	struct folio *folio = page_folio(p);
	int ret;

	delete_from_swap_cache(folio);

	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
	folio_unlock(folio);

	if (has_extra_refcount(ps, p, false))
		ret = MF_FAILED;

	return ret;
}

/*
 * Huge pages. Needs work.
 * Issues:
 * - Error on hugepage is contained in hugepage unit (not in raw page unit).
 *   To narrow down kill region to one page, we need to break up pmd.
 */
static int me_huge_page(struct page_state *ps, struct page *p)
{
	int res;
	struct page *hpage = compound_head(p);
	struct address_space *mapping;

	if (!PageHuge(hpage))
		return MF_DELAYED;

	mapping = page_mapping(hpage);
	if (mapping) {
		res = truncate_error_page(hpage, page_to_pfn(p), mapping);
		unlock_page(hpage);
	} else {
		unlock_page(hpage);
		/*
		 * migration entry prevents later access on error hugepage,
		 * so we can free and dissolve it into buddy to save healthy
		 * subpages.
		 */
		put_page(hpage);
		if (__page_handle_poison(p) >= 0) {
			page_ref_inc(p);
			res = MF_RECOVERED;
		} else {
			res = MF_FAILED;
		}
	}

	if (has_extra_refcount(ps, p, false))
		res = MF_FAILED;

	return res;
}

/*
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.
 * The table matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access the page at any time
 * in its life cycle, so all accesses have to be extremely careful.
 *
 * This is not complete. More states could be added.
 * For any missing state don't attempt recovery.
 */

#define dirty		(1UL << PG_dirty)
#define sc		((1UL << PG_swapcache) | (1UL << PG_swapbacked))
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define lru		(1UL << PG_lru)
#define head		(1UL << PG_head)
#define slab		(1UL << PG_slab)
#define reserved	(1UL << PG_reserved)

static struct page_state error_states[] = {
	{ reserved,	reserved,	MF_MSG_KERNEL,	me_kernel },
	/*
	 * free pages are specially detected outside this table:
	 * PG_buddy pages only make a small fraction of all free pages.
	 */

	/*
	 * Could in theory check if slab page is free or if we can drop
	 * currently unused objects without touching them. But just
	 * treat it as standard kernel for now.
	 */
	{ slab,		slab,		MF_MSG_SLAB,	me_kernel },

	{ head,		head,		MF_MSG_HUGE,		me_huge_page },

	{ sc|dirty,	sc|dirty,	MF_MSG_DIRTY_SWAPCACHE,	me_swapcache_dirty },
	{ sc|dirty,	sc,		MF_MSG_CLEAN_SWAPCACHE,	me_swapcache_clean },

	{ mlock|dirty,	mlock|dirty,	MF_MSG_DIRTY_MLOCKED_LRU,	me_pagecache_dirty },
	{ mlock|dirty,	mlock,		MF_MSG_CLEAN_MLOCKED_LRU,	me_pagecache_clean },

	{ unevict|dirty, unevict|dirty,	MF_MSG_DIRTY_UNEVICTABLE_LRU,	me_pagecache_dirty },
	{ unevict|dirty, unevict,	MF_MSG_CLEAN_UNEVICTABLE_LRU,	me_pagecache_clean },

	{ lru|dirty,	lru|dirty,	MF_MSG_DIRTY_LRU,	me_pagecache_dirty },
	{ lru|dirty,	lru,		MF_MSG_CLEAN_LRU,	me_pagecache_clean },

	/*
	 * Catchall entry: must be at end.
	 */
	{ 0,		0,		MF_MSG_UNKNOWN,	me_unknown },
};

#undef dirty
#undef sc
#undef unevict
#undef mlock
#undef lru
#undef head
#undef slab
#undef reserved

/*
 * "Dirty/Clean" indication is not 100% accurate due to the possibility of
 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
 */
static void action_result(unsigned long pfn, enum mf_action_page_type type,
			  enum mf_result result)
{
	trace_memory_failure_event(pfn, type, result);

	num_poisoned_pages_inc();
	pr_err("%#lx: recovery action for %s: %s\n",
		pfn, action_page_types[type], action_name[result]);
}

static int page_action(struct page_state *ps, struct page *p,
			unsigned long pfn)
{
	int result;

	/* page p should be unlocked after returning from ps->action().  */
	result = ps->action(ps, p);

	action_result(pfn, ps->type, result);

	/* Could do more checks here if page looks ok */
	/*
	 * Could adjust zone counters here to correct for the missing page.
	 */

	return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
}

static inline bool PageHWPoisonTakenOff(struct page *page)
{
	return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON;
}

void SetPageHWPoisonTakenOff(struct page *page)
{
	set_page_private(page, MAGIC_HWPOISON);
}

void ClearPageHWPoisonTakenOff(struct page *page)
{
	if (PageHWPoison(page))
		set_page_private(page, 0);
}

/*
 * Return true if the page type of a given page is supported by hwpoison
 * mechanism (while handling could fail), otherwise false.  This function
 * does not return true for hugetlb or device memory pages, so it's assumed
 * to be called only in the context where we never have such pages.
 */
static inline bool HWPoisonHandlable(struct page *page, unsigned long flags)
{
	/* Soft offline could migrate non-LRU movable pages */
	if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page))
		return true;

	return PageLRU(page) || is_free_buddy_page(page);
}

static int __get_hwpoison_page(struct page *page, unsigned long flags)
{
	struct page *head = compound_head(page);
	int ret = 0;
	bool hugetlb = false;

	ret = get_hwpoison_huge_page(head, &hugetlb);
	if (hugetlb)
		return ret;

	/*
	 * This check prevents calling get_page_unless_zero()
	 * for any unsupported type of page in order to reduce the risk of
	 * unexpected races caused by taking a page refcount.
	 */
	if (!HWPoisonHandlable(head, flags))
		return -EBUSY;

	if (get_page_unless_zero(head)) {
		if (head == compound_head(page))
			return 1;

		pr_info("%#lx cannot catch tail\n", page_to_pfn(page));
		put_page(head);
	}

	return 0;
}

static int get_any_page(struct page *p, unsigned long flags)
{
	int ret = 0, pass = 0;
	bool count_increased = false;

	if (flags & MF_COUNT_INCREASED)
		count_increased = true;

try_again:
	if (!count_increased) {
		ret = __get_hwpoison_page(p, flags);
		if (!ret) {
			if (page_count(p)) {
				/* We raced with an allocation, retry. */
				if (pass++ < 3)
					goto try_again;
				ret = -EBUSY;
			} else if (!PageHuge(p) && !is_free_buddy_page(p)) {
				/* We raced with put_page, retry. */
				if (pass++ < 3)
					goto try_again;
				ret = -EIO;
			}
			goto out;
		} else if (ret == -EBUSY) {
1289 1290 1291 1292 1293
			/*
			 * We raced with (possibly temporary) unhandlable
			 * page, retry.
			 */
			if (pass++ < 3) {
				shake_page(p);
				goto try_again;
			}
			ret = -EIO;
			goto out;
		}
	}

	if (PageHuge(p) || HWPoisonHandlable(p, flags)) {
		ret = 1;
	} else {
		/*
		 * A page we cannot handle. Check whether we can turn
		 * it into something we can handle.
		 */
		if (pass++ < 3) {
			put_page(p);
			shake_page(p);
			count_increased = false;
			goto try_again;
		}
		put_page(p);
		ret = -EIO;
	}
out:
	if (ret == -EIO)
		pr_err("%#lx: unhandlable page.\n", page_to_pfn(p));

	return ret;
}

static int __get_unpoison_page(struct page *page)
{
	struct page *head = compound_head(page);
	int ret = 0;
	bool hugetlb = false;

	ret = get_hwpoison_huge_page(head, &hugetlb);
	if (hugetlb)
		return ret;

	/*
	 * PageHWPoisonTakenOff pages are not only marked as PG_hwpoison,
	 * but also isolated from buddy freelist, so need to identify the
	 * state and have to cancel both operations to unpoison.
	 */
	if (PageHWPoisonTakenOff(page))
		return -EHWPOISON;

	return get_page_unless_zero(page) ? 1 : 0;
}

/**
 * get_hwpoison_page() - Get refcount for memory error handling
 * @p:		Raw error page (hit by memory error)
 * @flags:	Flags controlling behavior of error handling
 *
 * get_hwpoison_page() takes a page refcount of an error page to handle memory
 * error on it, after checking that the error page is in a well-defined state
 * (defined as a page-type we can successfully handle the memory error on it,
 * such as LRU page and hugetlb page).
 *
 * Memory error handling could be triggered at any time on any type of page,
 * so it's prone to race with typical memory management lifecycle (like
 * allocation and free).  So to avoid such races, get_hwpoison_page() takes
 * extra care for the error page's state (as done in __get_hwpoison_page()),
 * and has some retry logic in get_any_page().
 *
 * When called from unpoison_memory(), the caller should already ensure that
 * the given page has PG_hwpoison. So it's never reused for other page
 * allocations, and __get_unpoison_page() never races with them.
 *
 * Return: 0 on failure,
 *         1 on success for in-use pages in a well-defined state,
 *         -EIO for pages on which we can not handle memory errors,
 *         -EBUSY when get_hwpoison_page() has raced with page lifecycle
 *         operations like allocation and free,
 *         -EHWPOISON when the page is hwpoisoned and taken off from buddy.
 */
static int get_hwpoison_page(struct page *p, unsigned long flags)
{
	int ret;

	zone_pcp_disable(page_zone(p));
	if (flags & MF_UNPOISON)
		ret = __get_unpoison_page(p);
	else
		ret = get_any_page(p, flags);
	zone_pcp_enable(page_zone(p));

	return ret;
}

/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
				  int flags, struct page *hpage)
{
	struct folio *folio = page_folio(hpage);
	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC;
	struct address_space *mapping;
	LIST_HEAD(tokill);
	bool unmap_success;
	int kill = 1, forcekill;
	bool mlocked = PageMlocked(hpage);

	/*
	 * Here we are interested only in user-mapped pages, so skip any
	 * other types of pages.
	 */
	if (PageReserved(p) || PageSlab(p))
		return true;
	if (!(PageLRU(hpage) || PageHuge(p)))
		return true;

	/*
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	 */
	if (!page_mapped(hpage))
		return true;

	if (PageKsm(p)) {
		pr_err("%#lx: can't handle KSM pages.\n", pfn);
		return false;
	}

	if (PageSwapCache(p)) {
		pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
		ttu |= TTU_IGNORE_HWPOISON;
	}

	/*
	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide if we should kill or just drop the page.
	 * XXX: the dirty test could be racy: set_page_dirty() may not always
	 * be called inside page lock (it's recommended but not enforced).
	 */
	mapping = page_mapping(hpage);
	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
	    mapping_can_writeback(mapping)) {
		if (page_mkclean(hpage)) {
			SetPageDirty(hpage);
		} else {
			kill = 0;
			ttu |= TTU_IGNORE_HWPOISON;
			pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
				pfn);
		}
	}

	/*
	 * First collect all the processes that have the page
	 * mapped in dirty form.  This has to be done before try_to_unmap,
	 * because ttu takes the rmap data structures down.
	 *
	 * Error handling: We ignore errors here because
	 * there's nothing that can be done.
	 */
	if (kill)
		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);

	if (PageHuge(hpage) && !PageAnon(hpage)) {
		/*
		 * For hugetlb pages in shared mappings, try_to_unmap
		 * could potentially call huge_pmd_unshare.  Because of
		 * this, take semaphore in write mode here and set
		 * TTU_RMAP_LOCKED to indicate we have taken the lock
		 * at this higher level.
		 */
		mapping = hugetlb_page_mapping_lock_write(hpage);
		if (mapping) {
			try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
			i_mmap_unlock_write(mapping);
		} else
			pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
	} else {
		try_to_unmap(folio, ttu);
	}

	unmap_success = !page_mapped(hpage);
	if (!unmap_success)
		pr_err("%#lx: failed to unmap page (mapcount=%d)\n",
		       pfn, page_mapcount(hpage));

	/*
	 * try_to_unmap() might put mlocked page in lru cache, so call
	 * shake_page() again to ensure that it's flushed.
	 */
	if (mlocked)
		shake_page(hpage);

	/*
	 * Now that the dirty bit has been propagated to the
	 * struct page and all unmaps done we can decide if
	 * killing is needed or not.  Only kill when the page
	 * was dirty or the process is not restartable,
	 * otherwise the tokill list is merely
	 * freed.  When there was a problem unmapping earlier
	 * use a more forceful uncatchable kill to prevent
	 * any accesses to the poisoned memory.
	 */
	forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
	kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);

	return unmap_success;
}

static int identify_page_state(unsigned long pfn, struct page *p,
				unsigned long page_flags)
{
	struct page_state *ps;

	/*
	 * The first check uses the current page flags which may not have any
	 * relevant information. The second check with the saved page flags is
	 * carried out only if the first check can't determine the page status.
	 */
	for (ps = error_states;; ps++)
		if ((p->flags & ps->mask) == ps->res)
			break;

	page_flags |= (p->flags & (1UL << PG_dirty));

	if (!ps->mask)
		for (ps = error_states;; ps++)
			if ((page_flags & ps->mask) == ps->res)
				break;
	return page_action(ps, p, pfn);
}

static int try_to_split_thp_page(struct page *page, const char *msg)
{
	lock_page(page);
	if (unlikely(split_huge_page(page))) {
		unsigned long pfn = page_to_pfn(page);

		unlock_page(page);
		pr_info("%s: %#lx: thp split failed\n", msg, pfn);
		put_page(page);
		return -EBUSY;
	}
	unlock_page(page);

	return 0;
}

static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
		struct address_space *mapping, pgoff_t index, int flags)
{
	struct to_kill *tk;
	unsigned long size = 0;

	list_for_each_entry(tk, to_kill, nd)
		if (tk->size_shift)
			size = max(size, 1UL << tk->size_shift);

	if (size) {
		/*
		 * Unmap the largest mapping to avoid breaking up device-dax
		 * mappings which are constant size. The actual size of the
		 * mapping being torn down is communicated in siginfo, see
		 * kill_proc()
		 */
		loff_t start = (index << PAGE_SHIFT) & ~(size - 1);

		unmap_mapping_range(mapping, start, size, 0);
	}

	kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
}

static int mf_generic_kill_procs(unsigned long long pfn, int flags,
		struct dev_pagemap *pgmap)
{
	struct page *page = pfn_to_page(pfn);
	LIST_HEAD(to_kill);
	dax_entry_t cookie;
	int rc = 0;

	/*
	 * Pages instantiated by device-dax (not filesystem-dax)
	 * may be compound pages.
	 */
	page = compound_head(page);

	/*
	 * Prevent the inode from being freed while we are interrogating
	 * the address_space, typically this would be handled by
	 * lock_page(), but dax pages do not use the page lock. This
	 * also prevents changes to the mapping of this pfn until
	 * poison signaling is complete.
	 */
	cookie = dax_lock_page(page);
	if (!cookie)
		return -EBUSY;

	if (hwpoison_filter(page)) {
		rc = -EOPNOTSUPP;
		goto unlock;
	}

	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
	case MEMORY_DEVICE_COHERENT:
		/*
		 * TODO: Handle device pages which may need coordination
		 * with device-side memory.
		 */
		rc = -ENXIO;
		goto unlock;
	default:
		break;
	}

	/*
	 * Use this flag as an indication that the dax page has been
	 * remapped UC to prevent speculative consumption of poison.
	 */
	SetPageHWPoison(page);

	/*
	 * Unlike System-RAM there is no possibility to swap in a
	 * different physical page at a given virtual address, so all
	 * userspace consumption of ZONE_DEVICE memory necessitates
	 * SIGBUS (i.e. MF_MUST_KILL)
	 */
	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
	collect_procs(page, &to_kill, true);

	unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags);
unlock:
	dax_unlock_page(page, cookie);
	return rc;
}

#ifdef CONFIG_FS_DAX
/**
 * mf_dax_kill_procs - Collect and kill processes who are using this file range
 * @mapping:	address_space of the file in use
 * @index:	start pgoff of the range within the file
 * @count:	length of the range, in unit of PAGE_SIZE
 * @mf_flags:	memory failure flags
 */
int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
		unsigned long count, int mf_flags)
{
	LIST_HEAD(to_kill);
	dax_entry_t cookie;
	struct page *page;
	size_t end = index + count;

	mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;

	for (; index < end; index++) {
		page = NULL;
		cookie = dax_lock_mapping_entry(mapping, index, &page);
		if (!cookie)
			return -EBUSY;
		if (!page)
			goto unlock;

		SetPageHWPoison(page);

		collect_procs_fsdax(page, mapping, index, &to_kill);
		unmap_and_kill(&to_kill, page_to_pfn(page), mapping,
				index, mf_flags);
unlock:
		dax_unlock_mapping_entry(mapping, index, cookie);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mf_dax_kill_procs);
#endif /* CONFIG_FS_DAX */

#ifdef CONFIG_HUGETLB_PAGE
/*
 * Struct raw_hwp_page represents information about "raw error page",
 * and the structs are kept in a singly linked list originating from the
 * ->private field of the SUBPAGE_INDEX_HWPOISON-th tail page.
 */
struct raw_hwp_page {
	struct llist_node node;
	struct page *page;
};

static inline struct llist_head *raw_hwp_list_head(struct page *hpage)
{
	return (struct llist_head *)&page_private(hpage + SUBPAGE_INDEX_HWPOISON);
}

static unsigned long __free_raw_hwp_pages(struct page *hpage, bool move_flag)
{
	struct llist_head *head;
	struct llist_node *t, *tnode;
	unsigned long count = 0;

	head = raw_hwp_list_head(hpage);
	llist_for_each_safe(tnode, t, head->first) {
		struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node);

		if (move_flag)
			SetPageHWPoison(p->page);
		kfree(p);
		count++;
	}
	llist_del_all(head);
	return count;
}

static int hugetlb_set_page_hwpoison(struct page *hpage, struct page *page)
{
	struct llist_head *head;
	struct raw_hwp_page *raw_hwp;
	struct llist_node *t, *tnode;
	int ret = TestSetPageHWPoison(hpage) ? -EHWPOISON : 0;

	/*
	 * Once the hwpoison hugepage has lost reliable raw error info,
	 * there is little point in keeping additional error info precisely,
	 * so skip adding additional raw error info.
	 */
	if (HPageRawHwpUnreliable(hpage))
		return -EHWPOISON;
	head = raw_hwp_list_head(hpage);
	llist_for_each_safe(tnode, t, head->first) {
		struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node);

		if (p->page == page)
			return -EHWPOISON;
	}

	raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC);
	if (raw_hwp) {
		raw_hwp->page = page;
		llist_add(&raw_hwp->node, head);
		/* the first error event will be counted in action_result(). */
		if (ret)
			num_poisoned_pages_inc();
	} else {
		/*
		 * Failed to save raw error info.  We no longer trace all
		 * hwpoisoned subpages, and we need to refuse to free/dissolve
		 * this hwpoisoned hugepage.
		 */
		SetHPageRawHwpUnreliable(hpage);
		/*
		 * Once HPageRawHwpUnreliable is set, raw_hwp_page is not
		 * used any more, so free it.
		 */
		__free_raw_hwp_pages(hpage, false);
	}
	return ret;
}

static unsigned long free_raw_hwp_pages(struct page *hpage, bool move_flag)
{
	/*
	 * HPageVmemmapOptimized hugepages can't be freed because struct
	 * pages for tail pages are required but they don't exist.
	 */
	if (move_flag && HPageVmemmapOptimized(hpage))
		return 0;

	/*
	 * HPageRawHwpUnreliable hugepages shouldn't be unpoisoned by
	 * definition.
	 */
	if (HPageRawHwpUnreliable(hpage))
		return 0;

	return __free_raw_hwp_pages(hpage, move_flag);
}

void hugetlb_clear_page_hwpoison(struct page *hpage)
{
	if (HPageRawHwpUnreliable(hpage))
		return;
	ClearPageHWPoison(hpage);
	free_raw_hwp_pages(hpage, true);
}

/*
 * Called from hugetlb code with hugetlb_lock held.
 *
 * Return values:
 *   0             - free hugepage
 *   1             - in-use hugepage
 *   2             - not a hugepage
 *   -EBUSY        - the hugepage is busy (try to retry)
 *   -EHWPOISON    - the hugepage is already hwpoisoned
 */
int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
{
	struct page *page = pfn_to_page(pfn);
	struct page *head = compound_head(page);
	int ret = 2;	/* fallback to normal page handling */
	bool count_increased = false;

	if (!PageHeadHuge(head))
		goto out;

	if (flags & MF_COUNT_INCREASED) {
		ret = 1;
		count_increased = true;
	} else if (HPageFreed(head)) {
		ret = 0;
	} else if (HPageMigratable(head)) {
		ret = get_page_unless_zero(head);
		if (ret)
			count_increased = true;
	} else {
		ret = -EBUSY;
		if (!(flags & MF_NO_RETRY))
			goto out;
	}

	if (hugetlb_set_page_hwpoison(head, page)) {
		ret = -EHWPOISON;
		goto out;
	}

	return ret;
out:
	if (count_increased)
		put_page(head);
	return ret;
}

/*
 * Taking a refcount on a hugetlb page needs extra care about race conditions
 * with basic operations like hugepage allocation/free/demotion.
 * So some of the prechecks for hwpoison (pinning, and testing/setting
 * PageHWPoison) should be done within a single hugetlb_lock critical section.
 */
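/*
 * try_memory_failure_hugetlb() implements the hugetlb leg of memory_failure().
 * On return, *hugetlb is 0 when @pfn turned out not to be a hugetlb page and
 * the caller should fall through to normal page handling; otherwise the
 * return value is the final result for this error.
 */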
static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
{
	int res;
	struct page *p = pfn_to_page(pfn);
	struct page *head;
	unsigned long page_flags;

	*hugetlb = 1;
retry:
	res = get_huge_page_for_hwpoison(pfn, flags);
	if (res == 2) { /* fallback to normal page handling */
		*hugetlb = 0;
		return 0;
	} else if (res == -EHWPOISON) {
		pr_err("%#lx: already hardware poisoned\n", pfn);
		if (flags & MF_ACTION_REQUIRED) {
			head = compound_head(p);
			res = kill_accessing_process(current, page_to_pfn(head), flags);
		}
		return res;
	} else if (res == -EBUSY) {
		if (!(flags & MF_NO_RETRY)) {
			flags |= MF_NO_RETRY;
			goto retry;
		}
		action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
		return res;
	}

	head = compound_head(p);
	lock_page(head);

	if (hwpoison_filter(p)) {
		hugetlb_clear_page_hwpoison(head);
		res = -EOPNOTSUPP;
		goto out;
	}

	/*
	 * Handling free hugepage.  The possible race with hugepage allocation
	 * or demotion can be prevented by PageHWPoison flag.
	 */
	if (res == 0) {
		unlock_page(head);
		if (__page_handle_poison(p) >= 0) {
			page_ref_inc(p);
			res = MF_RECOVERED;
		} else {
			res = MF_FAILED;
		}
		action_result(pfn, MF_MSG_FREE_HUGE, res);
		return res == MF_RECOVERED ? 0 : -EBUSY;
	}

	page_flags = head->flags;

	if (!hwpoison_user_mappings(p, pfn, flags, head)) {
		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
		res = -EBUSY;
		goto out;
	}

	return identify_page_state(pfn, p, page_flags);
out:
	unlock_page(head);
	return res;
}

#else
static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
{
	return 0;
}

static inline unsigned long free_raw_hwp_pages(struct page *hpage, bool flag)
{
	return 0;
}
#endif	/* CONFIG_HUGETLB_PAGE */

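/*
 * Handle a memory failure on a ZONE_DEVICE page: give the owning driver's
 * ->memory_failure() handler the first chance, fall back to the generic
 * mf_generic_kill_procs() path otherwise, and report the outcome as
 * MF_MSG_DAX.
 */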
static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
		struct dev_pagemap *pgmap)
{
	struct page *page = pfn_to_page(pfn);
	int rc = -ENXIO;

	if (flags & MF_COUNT_INCREASED)
		/*
		 * Drop the extra refcount in case we come from madvise().
		 */
		put_page(page);

	/* device metadata space is not recoverable */
	if (!pgmap_pfn_valid(pgmap, pfn))
		goto out;

	/*
	 * Call driver's implementation to handle the memory failure, otherwise
	 * fall back to generic handler.
	 */
	if (pgmap->ops->memory_failure) {
		rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
		/*
		 * Fall back to generic handler too if operation is not
		 * supported inside the driver/device/filesystem.
		 */
		if (rc != -EOPNOTSUPP)
			goto out;
	}

	rc = mf_generic_kill_procs(pfn, flags, pgmap);
out:
	/* drop pgmap ref acquired in caller */
	put_dev_pagemap(pgmap);
	action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
	return rc;
}

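/*
 * mf_mutex serializes memory_failure(), unpoison_memory() and
 * soft_offline_page() against each other.
 */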
static DEFINE_MUTEX(mf_mutex);

/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: fine tune action taken
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 *
 * Return: 0 if the memory error was successfully handled,
 *         -EOPNOTSUPP if hwpoison_filter() filtered the error event,
 *         < 0 (except -EOPNOTSUPP) on failure.
 */
int memory_failure(unsigned long pfn, int flags)
{
	struct page *p;
	struct page *hpage;
	struct dev_pagemap *pgmap;
	int res = 0;
	unsigned long page_flags;
	bool retry = true;
	int hugetlb = 0;

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure on page %lx", pfn);

	mutex_lock(&mf_mutex);

	if (!(flags & MF_SW_SIMULATED))
		hw_memory_failure = true;

	p = pfn_to_online_page(pfn);
	if (!p) {
		res = arch_memory_failure(pfn, flags);
		if (res == 0)
			goto unlock_mutex;

		if (pfn_valid(pfn)) {
			pgmap = get_dev_pagemap(pfn, NULL);
			if (pgmap) {
				res = memory_failure_dev_pagemap(pfn, flags,
								 pgmap);
				goto unlock_mutex;
			}
		}
		pr_err("%#lx: memory outside kernel control\n", pfn);
		res = -ENXIO;
		goto unlock_mutex;
	}

try_again:
	res = try_memory_failure_hugetlb(pfn, flags, &hugetlb);
	if (hugetlb)
		goto unlock_mutex;

	if (TestSetPageHWPoison(p)) {
		pr_err("%#lx: already hardware poisoned\n", pfn);
		res = -EHWPOISON;
		if (flags & MF_ACTION_REQUIRED)
			res = kill_accessing_process(current, pfn, flags);
		if (flags & MF_COUNT_INCREASED)
			put_page(p);
		goto unlock_mutex;
	}

	hpage = compound_head(p);

	/*
	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hands:
	 *    prep_new_page() will be the gate keeper.
	 * 2) it's part of a non-compound high order page.
	 *    Implies some kernel user: cannot stop them from
	 *    R/W the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 * In fact it's dangerous to directly bump up page count from 0,
	 * that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
	 */
	if (!(flags & MF_COUNT_INCREASED)) {
		res = get_hwpoison_page(p, flags);
		if (!res) {
			if (is_free_buddy_page(p)) {
				if (take_page_off_buddy(p)) {
					page_ref_inc(p);
					res = MF_RECOVERED;
				} else {
					/* We lost the race, try again */
					if (retry) {
						ClearPageHWPoison(p);
						retry = false;
						goto try_again;
					}
					res = MF_FAILED;
				}
				action_result(pfn, MF_MSG_BUDDY, res);
				res = res == MF_RECOVERED ? 0 : -EBUSY;
			} else {
				action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
				res = -EBUSY;
			}
			goto unlock_mutex;
		} else if (res < 0) {
			action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
			res = -EBUSY;
			goto unlock_mutex;
		}
	}

	if (PageTransHuge(hpage)) {
		/*
		 * The flag must be set after the refcount is bumped
		 * otherwise it may race with THP split.
		 * And the flag can't be set in get_hwpoison_page() since
		 * it is called by soft offline too and it is just called
		 * for !MF_COUNT_INCREASED.  So here seems to be the best
		 * place.
		 *
		 * There's no need to care about the above error handling
		 * paths for get_hwpoison_page() since they handle either
		 * free page or unhandlable page.  The refcount is bumped
		 * iff the page is a valid handlable page.
		 */
		SetPageHasHWPoisoned(hpage);
		if (try_to_split_thp_page(p, "Memory Failure") < 0) {
			action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
			res = -EBUSY;
			goto unlock_mutex;
		}
		VM_BUG_ON_PAGE(!page_count(p), p);
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __SetPageLocked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.
	 */
	shake_page(p);

	lock_page(p);

	/*
	 * We only intend to deal with non-compound pages here.  However,
	 * the page could have become part of a compound page due to a race
	 * window.  If this happens, we may try again to hopefully handle
	 * the page in the next round.
	 */
	if (PageCompound(p)) {
		if (retry) {
			ClearPageHWPoison(p);
			unlock_page(p);
			put_page(p);
			flags &= ~MF_COUNT_INCREASED;
			retry = false;
			goto try_again;
		}
		action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
		res = -EBUSY;
		goto unlock_page;
	}

	/*
	 * We use page flags to determine what action should be taken, but
	 * the flags can be modified by the error containment action.  One
	 * example is an mlocked page, where PG_mlocked is cleared by
	 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
	 * correctly, we save a copy of the page flags at this time.
	 */
	page_flags = p->flags;

	if (hwpoison_filter(p)) {
		TestClearPageHWPoison(p);
		unlock_page(p);
		put_page(p);
		res = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	/*
	 * __munlock_pagevec may clear a writeback page's LRU flag without
	 * page_lock.  We need to wait for writeback completion of this page
	 * or it may trigger a vfs BUG while evicting the inode.
	 */
	if (!PageLRU(p) && !PageWriteback(p))
		goto identify_page_state;

	/*
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.
	 */
	wait_on_page_writeback(p);

	/*
	 * Now take care of user space mappings.
	 * Abort on fail: __filemap_remove_folio() assumes unmapped page.
	 */
	if (!hwpoison_user_mappings(p, pfn, flags, p)) {
		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
		res = -EBUSY;
		goto unlock_page;
	}

	/*
	 * Torn down by someone else?
	 */
	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
		action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
		res = -EBUSY;
		goto unlock_page;
	}

identify_page_state:
	res = identify_page_state(pfn, p, page_flags);
	mutex_unlock(&mf_mutex);
	return res;
unlock_page:
	unlock_page(p);
unlock_mutex:
	mutex_unlock(&mf_mutex);
	return res;
}
EXPORT_SYMBOL_GPL(memory_failure);

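/*
 * A minimal usage sketch (hypothetical caller, not part of this file): an
 * architecture's machine check handler that has already decoded the failing
 * physical address would report the page from process context, e.g.:
 *
 *	unsigned long pfn = paddr >> PAGE_SHIFT;
 *	int ret = memory_failure(pfn, MF_ACTION_REQUIRED);
 *
 * MF_ACTION_REQUIRED is meant for errors consumed by the current execution
 * context; errors found asynchronously (e.g. by a scrubber) are reported
 * with flags == 0, typically via memory_failure_queue() below.
 */
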
#define MEMORY_FAILURE_FIFO_ORDER	4
#define MEMORY_FAILURE_FIFO_SIZE	(1 << MEMORY_FAILURE_FIFO_ORDER)

struct memory_failure_entry {
	unsigned long pfn;
	int flags;
};

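/*
 * memory_failure_queue() may be called from contexts that cannot run the
 * (sleeping) recovery code directly, so events are buffered in a small
 * per-CPU fifo and handled later from a work item in process context.
 */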
struct memory_failure_cpu {
	DECLARE_KFIFO(fifo, struct memory_failure_entry,
		      MEMORY_FAILURE_FIFO_SIZE);
	spinlock_t lock;
	struct work_struct work;
};

static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);

/**
 * memory_failure_queue - Schedule handling memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: Flags for memory failure handling
 *
 * This function is called by the low level hardware error handler
 * when it detects hardware memory corruption of a page. It schedules
 * the recovery of the error page, including dropping pages, killing
 * processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Can run in IRQ context.
 */
void memory_failure_queue(unsigned long pfn, int flags)
{
	struct memory_failure_cpu *mf_cpu;
	unsigned long proc_flags;
	struct memory_failure_entry entry = {
		.pfn =		pfn,
		.flags =	flags,
	};

	mf_cpu = &get_cpu_var(memory_failure_cpu);
	spin_lock_irqsave(&mf_cpu->lock, proc_flags);
	if (kfifo_put(&mf_cpu->fifo, entry))
		schedule_work_on(smp_processor_id(), &mf_cpu->work);
	else
		pr_err("buffer overflow when queuing memory failure at %#lx\n",
		       pfn);
	spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
	put_cpu_var(memory_failure_cpu);
}
EXPORT_SYMBOL_GPL(memory_failure_queue);
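
/*
 * Usage sketch (hypothetical caller): an error handler running in IRQ
 * context queues the pfn and returns immediately, e.g.:
 *
 *	memory_failure_queue(pfn, 0);
 *
 * The entry is dropped with an error message if the per-CPU fifo
 * (MEMORY_FAILURE_FIFO_SIZE entries) is already full.
 */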

static void memory_failure_work_func(struct work_struct *work)
{
	struct memory_failure_cpu *mf_cpu;
	struct memory_failure_entry entry = { 0, };
	unsigned long proc_flags;
	int gotten;

	mf_cpu = container_of(work, struct memory_failure_cpu, work);
	for (;;) {
		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
		gotten = kfifo_get(&mf_cpu->fifo, &entry);
		spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
		if (!gotten)
			break;
		if (entry.flags & MF_SOFT_OFFLINE)
			soft_offline_page(entry.pfn, entry.flags);
		else
			memory_failure(entry.pfn, entry.flags);
	}
	}
}

/*
 * Process memory_failure work queued on the specified CPU.
 * Used to avoid return-to-userspace racing with the memory_failure workqueue.
 */
void memory_failure_queue_kick(int cpu)
{
	struct memory_failure_cpu *mf_cpu;

	mf_cpu = &per_cpu(memory_failure_cpu, cpu);
	cancel_work_sync(&mf_cpu->work);
	memory_failure_work_func(&mf_cpu->work);
}

static int __init memory_failure_init(void)
{
	struct memory_failure_cpu *mf_cpu;
	int cpu;

	for_each_possible_cpu(cpu) {
		mf_cpu = &per_cpu(memory_failure_cpu, cpu);
		spin_lock_init(&mf_cpu->lock);
		INIT_KFIFO(mf_cpu->fifo);
		INIT_WORK(&mf_cpu->work, memory_failure_work_func);
	}

	return 0;
}
core_initcall(memory_failure_init);

#undef pr_fmt
#define pr_fmt(fmt)	"" fmt
#define unpoison_pr_info(fmt, pfn, rs)			\
({							\
	if (__ratelimit(rs))				\
		pr_info(fmt, pfn);			\
})

/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the to be unpoisoned page
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software-level, so it only works
 * for linux injected failures, not real hardware failures
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct page *page;
	struct page *p;
	int ret = -EBUSY;
	int freeit = 0;
	unsigned long count = 1;
	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	page = compound_head(p);

	mutex_lock(&mf_mutex);

	if (hw_memory_failure) {
		unpoison_pr_info("Unpoison: Disabled after HW memory failure %#lx\n",
				 pfn, &unpoison_rs);
		ret = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	if (!PageHWPoison(p)) {
		unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (page_count(page) > 1) {
		unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (page_mapped(page)) {
		unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (page_mapping(page)) {
		unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (PageSlab(page) || PageTable(page))
		goto unlock_mutex;

	ret = get_hwpoison_page(p, MF_UNPOISON);
	if (!ret) {
		if (PageHuge(p)) {
			count = free_raw_hwp_pages(page, false);
			if (count == 0) {
				ret = -EBUSY;
				goto unlock_mutex;
			}
		}
		ret = TestClearPageHWPoison(page) ? 0 : -EBUSY;
	} else if (ret < 0) {
		if (ret == -EHWPOISON) {
			ret = put_page_back_buddy(p) ? 0 : -EBUSY;
		} else
			unpoison_pr_info("Unpoison: failed to grab page %#lx\n",
					 pfn, &unpoison_rs);
	} else {
		if (PageHuge(p)) {
			count = free_raw_hwp_pages(page, false);
			if (count == 0) {
				ret = -EBUSY;
				goto unlock_mutex;
			}
		}
		freeit = !!TestClearPageHWPoison(p);

		put_page(page);
		if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1)) {
			put_page(page);
			ret = 0;
		}
	}

unlock_mutex:
	mutex_unlock(&mf_mutex);
	if (!ret || freeit) {
		num_poisoned_pages_sub(count);
		unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
				 page_to_pfn(p), &unpoison_rs);
	}
	return ret;
}
EXPORT_SYMBOL(unpoison_memory);
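
/*
 * Note: unpoison_memory() is normally driven from the hwpoison test/debug
 * machinery (e.g. the debugfs hwpoison/unpoison-pfn interface when
 * CONFIG_HWPOISON_INJECT is enabled); unpoisoning is disabled entirely
 * (returns -EOPNOTSUPP) once a real hardware error has been handled and
 * hw_memory_failure is set.
 */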

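/*
 * Isolate @page (hugetlb, LRU or movable) onto @pagelist in preparation for
 * migration.  The reference the caller obtained is always dropped here,
 * whether or not isolation succeeded.
 */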
static bool isolate_page(struct page *page, struct list_head *pagelist)
{
	bool isolated = false;
	bool lru = PageLRU(page);

	if (PageHuge(page)) {
		isolated = !isolate_hugetlb(page, pagelist);
	} else {
		if (lru)
			isolated = !isolate_lru_page(page);
		else
			isolated = !isolate_movable_page(page, ISOLATE_UNEVICTABLE);

		if (isolated)
			list_add(&page->lru, pagelist);
	}

	if (isolated && lru)
		inc_node_page_state(page, NR_ISOLATED_ANON +
				    page_is_file_lru(page));

	/*
	 * If we succeed to isolate the page, we grabbed another refcount on
	 * the page, so we can safely drop the one we got from get_any_pages().
	 * If we failed to isolate the page, it means that we cannot go further
	 * and we will return an error, so drop the reference we got from
	 * get_any_pages() as well.
	 */
	put_page(page);
	return isolated;
}

/*
 * __soft_offline_page handles hugetlb-pages and non-hugetlb pages.
 * If the page is a non-dirty unmapped page-cache page, it simply invalidates.
 * If the page is mapped, it migrates the contents over.
 */
static int __soft_offline_page(struct page *page)
{
	long ret = 0;
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_head(page);
	char const *msg_page[] = {"page", "hugepage"};
	bool huge = PageHuge(page);
	LIST_HEAD(pagelist);
	struct migration_target_control mtc = {
		.nid = NUMA_NO_NODE,
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	lock_page(page);
	if (!PageHuge(page))
		wait_on_page_writeback(page);
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_page(page);
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return 0;
	}

	if (!PageHuge(page) && PageLRU(page) && !PageSwapCache(page))
		/*
		 * Try to invalidate first. This should work for
		 * non dirty unmapped page cache pages.
		 */
		ret = invalidate_inode_page(page);
	unlock_page(page);

	if (ret) {
		pr_info("soft_offline: %#lx: invalidated\n", pfn);
		page_handle_poison(page, false, true);
		return 0;
	}

	if (isolate_page(hpage, &pagelist)) {
		ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
		if (!ret) {
			bool release = !huge;

			if (!page_handle_poison(page, huge, release))
				ret = -EBUSY;
		} else {
			if (!list_empty(&pagelist))
				putback_movable_pages(&pagelist);

			pr_info("soft offline: %#lx: %s migration failed %ld, type %pGp\n",
				pfn, msg_page[huge], ret, &page->flags);
			if (ret > 0)
				ret = -EBUSY;
		}
	} else {
		pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n",
			pfn, msg_page[huge], page_count(page), &page->flags);
		ret = -EBUSY;
	}
	return ret;
}

static int soft_offline_in_use_page(struct page *page)
{
	struct page *hpage = compound_head(page);

	if (!PageHuge(page) && PageTransHuge(hpage))
		if (try_to_split_thp_page(page, "soft offline") < 0)
			return -EBUSY;
	return __soft_offline_page(page);
}

static int soft_offline_free_page(struct page *page)
{
	int rc = 0;

	if (!page_handle_poison(page, true, false))
		rc = -EBUSY;

	return rc;
}

static void put_ref_page(struct page *page)
{
	if (page)
		put_page(page);
}

/**
 * soft_offline_page - Soft offline a page.
 * @pfn: pfn to soft-offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success,
 *         -EOPNOTSUPP if hwpoison_filter() filtered the error event,
 *         < 0 otherwise negated errno.
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(unsigned long pfn, int flags)
{
	int ret;
	bool try_again = true;
	struct page *page, *ref_page = NULL;

	WARN_ON_ONCE(!pfn_valid(pfn) && (flags & MF_COUNT_INCREASED));

	if (!pfn_valid(pfn))
		return -ENXIO;
	if (flags & MF_COUNT_INCREASED)
		ref_page = pfn_to_page(pfn);

	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
	page = pfn_to_online_page(pfn);
	if (!page) {
		put_ref_page(ref_page);
		return -EIO;
	}

	mutex_lock(&mf_mutex);

	if (PageHWPoison(page)) {
		pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
		put_ref_page(ref_page);
		mutex_unlock(&mf_mutex);
		return 0;
	}

retry:
	get_online_mems();
	ret = get_hwpoison_page(page, flags | MF_SOFT_OFFLINE);
	put_online_mems();

	if (hwpoison_filter(page)) {
		if (ret > 0)
			put_page(page);
		else
			put_ref_page(ref_page);

		mutex_unlock(&mf_mutex);
		return -EOPNOTSUPP;
	}

	if (ret > 0) {
		ret = soft_offline_in_use_page(page);
	} else if (ret == 0) {
		if (soft_offline_free_page(page) && try_again) {
			try_again = false;
			flags &= ~MF_COUNT_INCREASED;
			goto retry;
		}
	}

	mutex_unlock(&mf_mutex);

	return ret;
}
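
/*
 * For orientation: soft_offline_page() is typically reached via
 * madvise(MADV_SOFT_OFFLINE) or the /sys/devices/system/memory/soft_offline_page
 * interface, and within this file from memory_failure_work_func() for
 * MF_SOFT_OFFLINE events.
 */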

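/*
 * Forget the hwpoison state of the pages backed by @memmap (e.g. when the
 * underlying memory is being removed), keeping num_poisoned_pages in sync.
 */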
void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	/*
	 * A further optimization is to have per section refcounted
	 * num_poisoned_pages.  But that would need more space per memmap, so
	 * for now just do a quick global check to speed up this routine in the
	 * absence of bad pages.
	 */
	if (atomic_long_read(&num_poisoned_pages) == 0)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			num_poisoned_pages_dec();
			ClearPageHWPoison(&memmap[i]);
		}
	}
}