Commit 4d0bd657 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew Morton)

Merge fixes from Andrew Morton:
 "10 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  Josh has moved
  kexec: export free_huge_page to VMCOREINFO
  mm: fix filemap.c pagecache_get_page() kernel-doc warnings
  mm: debugfs: move rounddown_pow_of_two() out from do_fault path
  memcg: oom_notify use-after-free fix
  hwpoison: call action_result() in failure path of hwpoison_user_mappings()
  hwpoison: fix hugetlbfs/thp precheck in hwpoison_user_mappings()
  rapidio/tsi721_dma: fix failure to obtain transaction descriptor
  mm, thp: do not allow thp faults to avoid cpuset restrictions
  mm/page-writeback.c: fix divide by zero in bdi_dirty_limits()
parents 26bcd8b7 e0198b29
@@ -62,6 +62,11 @@ Jeff Garzik <jgarzik@pretzel.yyz.us>
Jens Axboe <axboe@suse.de>
Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
John Stultz <johnstul@us.ibm.com>
<josh@joshtriplett.org> <josh@freedesktop.org>
<josh@joshtriplett.org> <josh@kernel.org>
<josh@joshtriplett.org> <josht@linux.vnet.ibm.com>
<josh@joshtriplett.org> <josht@us.ibm.com>
<josh@joshtriplett.org> <josht@vnet.ibm.com>
Juha Yrjola <at solidboot.com>
Juha Yrjola <juha.yrjola@nokia.com>
Juha Yrjola <juha.yrjola@solidboot.com>
@@ -3511,10 +3511,11 @@ S: MacGregor A.C.T 2615
S: Australia
N: Josh Triplett
E: josh@freedesktop.org
P: 1024D/D0FE7AFB B24A 65C9 1D71 2AC2 DE87 CA26 189B 9946 D0FE 7AFB
D: rcutorture maintainer
E: josh@joshtriplett.org
P: 4096R/8AFF873D 758E 5042 E397 4BA3 3A9C 1E67 0ED9 A3DF 8AFF 873D
D: RCU and rcutorture
D: lock annotations, finding and fixing lock bugs
D: kernel tinification
N: Winfried Trümper
E: winni@xpilot.org
@@ -7424,7 +7424,7 @@ S: Orphan
F: drivers/net/wireless/ray*
RCUTORTURE MODULE
M: Josh Triplett <josh@freedesktop.org>
M: Josh Triplett <josh@joshtriplett.org>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
L: linux-kernel@vger.kernel.org
S: Supported
@@ -287,6 +287,12 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
"desc %p not ACKed\n", tx_desc);
}
if (ret == NULL) {
dev_dbg(bdma_chan->dchan.device->dev,
"%s: unable to obtain tx descriptor\n", __func__);
goto err_out;
}
i = bdma_chan->wr_count_next % bdma_chan->bd_num;
if (i == bdma_chan->bd_num - 1) {
i = 0;
@@ -297,7 +303,7 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
tx_desc->txd.phys = bdma_chan->bd_phys +
i * sizeof(struct tsi721_dma_desc);
tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
err_out:
spin_unlock_bh(&bdma_chan->lock);
return ret;
@@ -80,6 +80,7 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
bool is_hugepage_active(struct page *page);
void free_huge_page(struct page *page);
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -33,6 +33,7 @@
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <asm/page.h>
#include <asm/uaccess.h>
@@ -1619,6 +1620,7 @@ static int __init crash_save_vmcoreinfo_init(void)
#endif
VMCOREINFO_NUMBER(PG_head_mask);
VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
VMCOREINFO_SYMBOL(free_huge_page);
arch_crash_save_vmcoreinfo();
update_vmcoreinfo_note();
@@ -18,7 +18,7 @@
* Copyright (C) IBM Corporation, 2005, 2006
*
* Authors: Paul E. McKenney <paulmck@us.ibm.com>
* Josh Triplett <josh@freedesktop.org>
* Josh Triplett <josh@joshtriplett.org>
*
* See also: Documentation/RCU/torture.txt
*/
......@@ -51,7 +51,7 @@
#include <linux/torture.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
torture_param(int, fqs_duration, 0,
@@ -1031,18 +1031,21 @@ EXPORT_SYMBOL(find_lock_entry);
* @mapping: the address_space to search
* @offset: the page index
* @fgp_flags: PCG flags
* @gfp_mask: gfp mask to use if a page is to be allocated
* @cache_gfp_mask: gfp mask to use for the page cache data page allocation
* @radix_gfp_mask: gfp mask to use for radix tree node allocation
*
* Looks up the page cache slot at @mapping & @offset.
*
* PCG flags modify how the page is returned
* PCG flags modify how the page is returned.
*
* FGP_ACCESSED: the page will be marked accessed
* FGP_LOCK: Page is return locked
* FGP_CREAT: If page is not present then a new page is allocated using
* @gfp_mask and added to the page cache and the VM's LRU
* list. The page is returned locked and with an increased
* refcount. Otherwise, %NULL is returned.
* @cache_gfp_mask and added to the page cache and the VM's LRU
* list. If radix tree nodes are allocated during page cache
* insertion then @radix_gfp_mask is used. The page is returned
* locked and with an increased refcount. Otherwise, %NULL is
* returned.
*
* If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
* if the GFP flags specified for FGP_CREAT are atomic.
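For reference, a minimal sketch of how a caller could use the two masks documented above, assuming the five-argument pagecache_get_page() form this kernel-doc describes; the mapping/index variables and the error handling are illustrative only, and the flag combination follows the FGP flags listed in the comment.

	/*
	 * Illustrative sketch only: look up the page at @index, creating it
	 * if absent.  cache_gfp_mask is used for the data page itself,
	 * radix_gfp_mask for radix tree nodes added during insertion.
	 */
	struct page *page;

	page = pagecache_get_page(mapping, index,
				  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				  mapping_gfp_mask(mapping),	/* cache_gfp_mask */
				  GFP_KERNEL);			/* radix_gfp_mask */
	if (!page)
		return -ENOMEM;

	/* ... operate on the locked page ... */

	unlock_page(page);
	page_cache_release(page);	/* drop the elevated refcount */
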
@@ -856,7 +856,7 @@ struct hstate *size_to_hstate(unsigned long size)
return NULL;
}
static void free_huge_page(struct page *page)
void free_huge_page(struct page *page)
{
/*
* Can't pass hstate in here because it is called from the
@@ -5415,8 +5415,12 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{
struct mem_cgroup_eventfd_list *ev;
spin_lock(&memcg_oom_lock);
list_for_each_entry(ev, &memcg->oom_notify, list)
eventfd_signal(ev->eventfd, 1);
spin_unlock(&memcg_oom_lock);
return 0;
}
@@ -895,7 +895,13 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
struct page *hpage = *hpagep;
struct page *ppage;
if (PageReserved(p) || PageSlab(p) || !PageLRU(p))
/*
* Here we are interested only in user-mapped pages, so skip any
* other types of pages.
*/
if (PageReserved(p) || PageSlab(p))
return SWAP_SUCCESS;
if (!(PageLRU(hpage) || PageHuge(p)))
return SWAP_SUCCESS;
/*
@@ -905,8 +911,10 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
if (!page_mapped(hpage))
return SWAP_SUCCESS;
if (PageKsm(p))
if (PageKsm(p)) {
pr_err("MCE %#lx: can't handle KSM pages.\n", pfn);
return SWAP_FAIL;
}
if (PageSwapCache(p)) {
printk(KERN_ERR
@@ -1229,7 +1237,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
*/
if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
!= SWAP_SUCCESS) {
printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
action_result(pfn, "unmapping failed", IGNORED);
res = -EBUSY;
goto out;
}
@@ -2758,23 +2758,18 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
update_mmu_cache(vma, address, pte);
}
static unsigned long fault_around_bytes = 65536;
static unsigned long fault_around_bytes = rounddown_pow_of_two(65536);
/*
* fault_around_pages() and fault_around_mask() round down fault_around_bytes
* to nearest page order. It's what do_fault_around() expects to see.
*/
static inline unsigned long fault_around_pages(void)
{
return rounddown_pow_of_two(fault_around_bytes) / PAGE_SIZE;
return fault_around_bytes >> PAGE_SHIFT;
}
static inline unsigned long fault_around_mask(void)
{
return ~(rounddown_pow_of_two(fault_around_bytes) - 1) & PAGE_MASK;
return ~(fault_around_bytes - 1) & PAGE_MASK;
}
#ifdef CONFIG_DEBUG_FS
static int fault_around_bytes_get(void *data, u64 *val)
{
@@ -2782,11 +2777,19 @@ static int fault_around_bytes_get(void *data, u64 *val)
return 0;
}
/*
* fault_around_pages() and fault_around_mask() expects fault_around_bytes
* rounded down to nearest page order. It's what do_fault_around() expects to
* see.
*/
static int fault_around_bytes_set(void *data, u64 val)
{
if (val / PAGE_SIZE > PTRS_PER_PTE)
return -EINVAL;
fault_around_bytes = val;
if (val > PAGE_SIZE)
fault_around_bytes = rounddown_pow_of_two(val);
else
fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops,
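A small standalone sketch of the rounding the setter above now performs, so that fault_around_pages() and fault_around_mask() can rely on a power-of-two value without calling rounddown_pow_of_two() on every fault. The local rounddown_pow2() helper and the hard-coded PAGE_SIZE are stand-ins for the kernel's versions, and the PTRS_PER_PTE upper-bound check from fault_around_bytes_set() is omitted.

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	/* Stand-in for the kernel's rounddown_pow_of_two(), which is undefined for 0. */
	static unsigned long rounddown_pow2(unsigned long n)
	{
		while (n & (n - 1))
			n &= n - 1;	/* clear low bits until one remains */
		return n;
	}

	/* Mirrors the logic of the new fault_around_bytes_set() above. */
	static unsigned long set_fault_around_bytes(unsigned long val)
	{
		if (val > PAGE_SIZE)
			return rounddown_pow2(val);
		return PAGE_SIZE;	/* rounddown_pow_of_two(0) is undefined */
	}

	int main(void)
	{
		unsigned long vals[] = { 0, 4096, 65536, 70000 };

		for (unsigned i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
			printf("val=%lu -> fault_around_bytes=%lu\n",
			       vals[i], set_fault_around_bytes(vals[i]));
		return 0;
	}
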
@@ -1306,9 +1306,9 @@ static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
*bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
if (bdi_bg_thresh)
*bdi_bg_thresh = div_u64((u64)*bdi_thresh *
background_thresh,
dirty_thresh);
*bdi_bg_thresh = dirty_thresh ? div_u64((u64)*bdi_thresh *
background_thresh,
dirty_thresh) : 0;
/*
* In order to avoid the stacked BDI deadlock we need
@@ -2447,7 +2447,7 @@ static inline int
gfp_to_alloc_flags(gfp_t gfp_mask)
{
int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
const gfp_t wait = gfp_mask & __GFP_WAIT;
const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
@@ -2456,20 +2456,20 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
* The caller may dip into page reserves a bit more if the caller
* cannot run direct reclaim, or if the caller has realtime scheduling
* policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
* set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
* set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
*/
alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
if (!wait) {
if (atomic) {
/*
* Not worth trying to allocate harder for
* __GFP_NOMEMALLOC even if it can't schedule.
* Not worth trying to allocate harder for __GFP_NOMEMALLOC even
* if it can't schedule.
*/
if (!(gfp_mask & __GFP_NOMEMALLOC))
if (!(gfp_mask & __GFP_NOMEMALLOC))
alloc_flags |= ALLOC_HARDER;
/*
* Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
* See also cpuset_zone_allowed() comment in kernel/cpuset.c.
* Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
* comment for __cpuset_node_allowed_softwall().
*/
alloc_flags &= ~ALLOC_CPUSET;
} else if (unlikely(rt_task(current)) && !in_interrupt())
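A hedged illustration of what the new predicate changes, tied to the "mm, thp: do not allow thp faults to avoid cpuset restrictions" patch in this series: GFP_ATOMIC sets neither __GFP_WAIT nor __GFP_NO_KSWAPD and is still treated as atomic, while a THP fault mask carries __GFP_NO_KSWAPD even when defrag clears __GFP_WAIT, so it is no longer treated as atomic and keeps ALLOC_CPUSET. The bit values and mask names below are illustrative, not the kernel's actual gfp encoding.

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative bit values only. */
	#define __GFP_WAIT	  (1u << 0)
	#define __GFP_HIGH	  (1u << 1)
	#define __GFP_NO_KSWAPD	  (1u << 2)

	#define GFP_ATOMIC	  (__GFP_HIGH)		/* no __GFP_WAIT */
	#define GFP_THP_NODEFRAG  (__GFP_NO_KSWAPD)	/* THP fault, defrag off */
	#define GFP_KERNEL_LIKE	  (__GFP_WAIT)

	/* The predicate introduced by the hunk above. */
	static bool treated_as_atomic(unsigned int gfp_mask)
	{
		return !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
	}

	int main(void)
	{
		printf("GFP_ATOMIC      -> atomic=%d (cpuset ignored)\n",
		       treated_as_atomic(GFP_ATOMIC));
		printf("THP, defrag off -> atomic=%d (cpuset now honoured)\n",
		       treated_as_atomic(GFP_THP_NODEFRAG));
		printf("GFP_KERNEL-like -> atomic=%d\n",
		       treated_as_atomic(GFP_KERNEL_LIKE));
		return 0;
	}
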