Commit ac87ca0e authored by Dan Williams, committed by Andrew Morton

mm/memory-failure: fall back to vma_address() when ->notify_failure() fails

In the case where a filesystem is polled to take over the memory failure
and receives -EOPNOTSUPP, it indicates that page->index and page->mapping
are valid for reverse mapping the failure address.  Introduce
FSDAX_INVALID_PGOFF to distinguish when add_to_kill() is being called from
mf_dax_kill_procs() by a filesystem vs the typical memory_failure() path.

Otherwise, vma_pgoff_address() is called with an invalid fsdax_pgoff which
then trips this failing signature:

 kernel BUG at mm/memory-failure.c:319!
 invalid opcode: 0000 [#1] PREEMPT SMP PTI
 CPU: 13 PID: 1262 Comm: dax-pmd Tainted: G           OE    N 6.0.0-rc2+ #62
 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
 RIP: 0010:add_to_kill.cold+0x19d/0x209
 [..]
 Call Trace:
  <TASK>
  collect_procs.part.0+0x2c4/0x460
  memory_failure+0x71b/0xba0
  ? _printk+0x58/0x73
  do_madvise.part.0.cold+0xaf/0xc5
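
As an aside, the sentinel-based fallback can be illustrated with a small
self-contained sketch (plain user-space C; the helper names and addresses
below are hypothetical, and only the FSDAX_INVALID_PGOFF test mirrors the
actual add_to_kill() change in mm/memory-failure.c):

#include <limits.h>
#include <stdio.h>

/* Sentinel meaning "no filesystem-provided page offset is available". */
#define FSDAX_INVALID_PGOFF ULONG_MAX

/*
 * Hypothetical stand-ins for the kernel helpers: resolve a user address
 * either from the page's own reverse mapping (page->index/page->mapping)
 * or from the pgoff handed in by a filesystem that claimed the failure.
 */
static unsigned long addr_from_page_rmap(void)
{
        return 0x7f0000001000UL;
}

static unsigned long addr_from_fsdax_pgoff(unsigned long pgoff)
{
        return 0x7f0000000000UL + (pgoff << 12);
}

static unsigned long resolve_failure_addr(unsigned long fsdax_pgoff)
{
        /*
         * Only trust fsdax_pgoff when the caller (mf_dax_kill_procs())
         * actually supplied one; the generic memory_failure() collectors
         * pass the sentinel and fall back to the page's reverse mapping.
         */
        if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
                return addr_from_fsdax_pgoff(fsdax_pgoff);
        return addr_from_page_rmap();
}

int main(void)
{
        /* Filesystem-claimed failure: a real pgoff is available. */
        printf("fsdax path:   %#lx\n", resolve_failure_addr(3));
        /* Generic path: fall back to the page's own reverse mapping. */
        printf("generic path: %#lx\n",
               resolve_failure_addr(FSDAX_INVALID_PGOFF));
        return 0;
}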

Link: https://lkml.kernel.org/r/166153429427.2758201.14605968329933175594.stgit@dwillia2-xfh.jf.intel.com
Fixes: c36e2024 ("mm: introduce mf_dax_kill_procs() for fsdax case")
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Shiyang Ruan <ruansy.fnst@fujitsu.com>
Cc: Darrick J. Wong <djwong@kernel.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Goldwyn Rodrigues <rgoldwyn@suse.de>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Ritesh Harjani <riteshh@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 65d3440e
mm/memory-failure.c
@@ -345,13 +345,17 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
  * not much we can do. We just print a message and ignore otherwise.
  */
 
+#define FSDAX_INVALID_PGOFF	ULONG_MAX
+
 /*
  * Schedule a process for later kill.
  * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
  *
- * Notice: @fsdax_pgoff is used only when @p is a fsdax page.
- *   In other cases, such as anonymous and file-backend page, the address to be
- *   killed can be caculated by @p itself.
+ * Note: @fsdax_pgoff is used only when @p is a fsdax page and a
+ * filesystem with a memory failure handler has claimed the
+ * memory_failure event. In all other cases, page->index and
+ * page->mapping are sufficient for mapping the page back to its
+ * corresponding user virtual address.
  */
 static void add_to_kill(struct task_struct *tsk, struct page *p,
                         pgoff_t fsdax_pgoff, struct vm_area_struct *vma,
@@ -367,11 +371,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
 
         tk->addr = page_address_in_vma(p, vma);
         if (is_zone_device_page(p)) {
-                /*
-                 * Since page->mapping is not used for fsdax, we need
-                 * calculate the address based on the vma.
-                 */
-                if (p->pgmap->type == MEMORY_DEVICE_FS_DAX)
+                if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
                         tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
                 tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
         } else
@@ -523,7 +523,8 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
                         if (!page_mapped_in_vma(page, vma))
                                 continue;
                         if (vma->vm_mm == t->mm)
-                                add_to_kill(t, page, 0, vma, to_kill);
+                                add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma,
+                                            to_kill);
                 }
         }
         read_unlock(&tasklist_lock);
@@ -559,7 +560,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
                          * to be informed of all such data corruptions.
                          */
                         if (vma->vm_mm == t->mm)
-                                add_to_kill(t, page, 0, vma, to_kill);
+                                add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma,
+                                            to_kill);
                 }
         }
         read_unlock(&tasklist_lock);