Commit 50a53bbe authored by Linus Torvalds

Merge branch 'akpm' (Fixes from Andrew)

Merge misc fixes from Andrew Morton:
 "Seven fixes, some of them fingers-crossed :("

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (7 patches)
  drivers/rtc/rtc-tps65910.c: fix invalid pointer access on _remove()
  mm: soft offline: split thp at the beginning of soft_offline_page()
  mm: avoid waking kswapd for THP allocations when compaction is deferred or contended
  revert "Revert "mm: remove __GFP_NO_KSWAPD""
  mm: vmscan: fix endless loop in kswapd balancing
  mm/vmemmap: fix wrong use of virt_to_page
  mm: compaction: fix return value of capture_free_page()
parents 73efd00d 1430e178
@@ -1077,8 +1077,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
  * until the request succeeds or until the allocation size falls below
  * the system page size. This attempts to make sure it does not adversely
  * impact system performance, so when allocating more than one page, we
- * ask the memory allocator to avoid re-trying, swapping, writing back
- * or performing I/O.
+ * ask the memory allocator to avoid re-trying.
  *
  * Note, this function also makes sure that the allocated buffer is aligned to
  * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
@@ -1092,8 +1091,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
  */
 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
 {
-	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
-		       __GFP_NORETRY | __GFP_NO_KSWAPD;
+	gfp_t flags = __GFP_NOWARN | __GFP_WAIT | __GFP_NORETRY;
 	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
 	void *kbuf;
......
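The comment above describes mtd_kmalloc_up_to()'s strategy: ask for the full buffer with flags that forbid retrying, and halve the request on failure until it reaches the minimum I/O size. Below is a minimal userspace sketch of that halving loop, assuming malloc() as a stand-in for kmalloc() and a hard-coded 4096-byte page size; the mtd->writesize alignment that the real helper also performs is simplified away.

#include <stdio.h>
#include <stdlib.h>

#define FAKE_PAGE_SIZE 4096UL	/* assumption: stands in for PAGE_SIZE */

/* Try the requested size first; on failure halve the request and retry,
 * stopping once the request would drop below min_alloc. */
static void *alloc_up_to(size_t min_alloc, size_t *size)
{
	void *buf;

	while (*size > min_alloc) {
		buf = malloc(*size);	/* the kernel helper uses kmalloc(*size, flags) */
		if (buf)
			return buf;
		*size >>= 1;		/* halve the request and try again */
	}

	/* last resort: fall back to the minimum allowed size */
	*size = min_alloc;
	return malloc(min_alloc);
}

int main(void)
{
	size_t size = 1UL << 20;	/* start by asking for 1 MiB */
	void *buf = alloc_up_to(FAKE_PAGE_SIZE, &size);

	printf("got %zu bytes\n", buf ? size : 0);
	free(buf);
	return 0;
}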
@@ -288,11 +288,11 @@ static int __devinit tps65910_rtc_probe(struct platform_device *pdev)
 static int __devexit tps65910_rtc_remove(struct platform_device *pdev)
 {
 	/* leave rtc running, but disable irqs */
-	struct rtc_device *rtc = platform_get_drvdata(pdev);
+	struct tps65910_rtc *tps_rtc = platform_get_drvdata(pdev);
 
-	tps65910_rtc_alarm_irq_enable(&rtc->dev, 0);
+	tps65910_rtc_alarm_irq_enable(&pdev->dev, 0);
 
-	rtc_device_unregister(rtc);
+	rtc_device_unregister(tps_rtc->rtc);
 
 	return 0;
 }
......
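The bug fixed in this hunk is a type mismatch: probe() stores a struct tps65910_rtc in the platform drvdata, but the old remove() read it back as a struct rtc_device. The small userspace sketch below models only that pattern; platform_device and the set/get helpers are mocked as plain pointers, and the struct names mirror the driver without being the real platform API.

#include <stdio.h>

struct rtc_device {
	const char *name;
};

struct tps65910_rtc {			/* the type probe() stores in drvdata */
	struct rtc_device *rtc;
	int irq;
};

struct platform_device {		/* mocked: just carries the opaque pointer */
	void *drvdata;
};

static void platform_set_drvdata(struct platform_device *pdev, void *data)
{
	pdev->drvdata = data;
}

static void *platform_get_drvdata(struct platform_device *pdev)
{
	return pdev->drvdata;
}

/* remove() must read back the same type that probe() stored */
static int mock_remove(struct platform_device *pdev)
{
	struct tps65910_rtc *tps_rtc = platform_get_drvdata(pdev);

	printf("unregistering %s\n", tps_rtc->rtc->name);
	return 0;
}

int main(void)
{
	struct rtc_device rtc = { .name = "rtc-tps65910" };
	struct tps65910_rtc tps_rtc = { .rtc = &rtc, .irq = 5 };
	struct platform_device pdev = { .drvdata = NULL };

	platform_set_drvdata(&pdev, &tps_rtc);	/* what probe() does */
	return mock_remove(&pdev);
}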
@@ -30,10 +30,9 @@ struct vm_area_struct;
 #define ___GFP_HARDWALL		0x20000u
 #define ___GFP_THISNODE		0x40000u
 #define ___GFP_RECLAIMABLE	0x80000u
-#define ___GFP_NOTRACK		0x200000u
-#define ___GFP_NO_KSWAPD	0x400000u
-#define ___GFP_OTHER_NODE	0x800000u
-#define ___GFP_WRITE		0x1000000u
+#define ___GFP_NOTRACK		0x100000u
+#define ___GFP_OTHER_NODE	0x200000u
+#define ___GFP_WRITE		0x400000u
 
 /*
  * GFP bitmasks..
@@ -86,7 +85,6 @@ struct vm_area_struct;
 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
 #define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */
-#define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
 #define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)	/* Allocator intends to dirty page */
@@ -96,7 +94,7 @@ struct vm_area_struct;
  */
 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
 
-#define __GFP_BITS_SHIFT 25	/* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 23	/* Room for N __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
@@ -116,8 +114,7 @@ struct vm_area_struct;
 			 __GFP_MOVABLE)
 #define GFP_IOFS	(__GFP_IO | __GFP_FS)
 #define GFP_TRANSHUGE	(GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
-			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
-			 __GFP_NO_KSWAPD)
+			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN)
 
 #ifdef CONFIG_NUMA
 #define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
......
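Removing ___GFP_NO_KSWAPD renumbers the remaining bits and shrinks __GFP_BITS_SHIFT from 25 to 23. The standalone C11 snippet below (values copied from the new side of the hunk, nothing pulled from kernel headers) sanity-checks at compile time that the renumbered flags stay distinct and that the highest one still fits under the new shift.

#include <stdio.h>

/* Values taken from the new side of the hunk above. */
#define ___GFP_RECLAIMABLE	0x80000u
#define ___GFP_NOTRACK		0x100000u
#define ___GFP_OTHER_NODE	0x200000u
#define ___GFP_WRITE		0x400000u
#define __GFP_BITS_SHIFT	23

/* The renumbered flags must not share bits with one another... */
_Static_assert((___GFP_NOTRACK & ___GFP_OTHER_NODE) == 0 &&
	       (___GFP_NOTRACK & ___GFP_WRITE) == 0 &&
	       (___GFP_OTHER_NODE & ___GFP_WRITE) == 0,
	       "GFP bits overlap");
/* ...and the highest flag must still fit below 1 << __GFP_BITS_SHIFT. */
_Static_assert(___GFP_WRITE < (1u << __GFP_BITS_SHIFT),
	       "__GFP_BITS_SHIFT is too small");

int main(void)
{
	printf("highest flag 0x%x, mask 0x%lx\n",
	       ___GFP_WRITE, (1UL << __GFP_BITS_SHIFT) - 1);
	return 0;
}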
@@ -36,7 +36,6 @@
 	{(unsigned long)__GFP_RECLAIMABLE,	"GFP_RECLAIMABLE"},	\
 	{(unsigned long)__GFP_MOVABLE,		"GFP_MOVABLE"},		\
 	{(unsigned long)__GFP_NOTRACK,		"GFP_NOTRACK"},		\
-	{(unsigned long)__GFP_NO_KSWAPD,	"GFP_NO_KSWAPD"},	\
 	{(unsigned long)__GFP_OTHER_NODE,	"GFP_OTHER_NODE"}	\
 	) : "GFP_NOWAIT"
@@ -1476,9 +1476,17 @@ int soft_offline_page(struct page *page, int flags)
 {
 	int ret;
 	unsigned long pfn = page_to_pfn(page);
+	struct page *hpage = compound_trans_head(page);
 
 	if (PageHuge(page))
 		return soft_offline_huge_page(page, flags);
+	if (PageTransHuge(hpage)) {
+		if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
+			pr_info("soft offline: %#lx: failed to split THP\n",
+				pfn);
+			return -EBUSY;
+		}
+	}
 
 	ret = get_any_page(page, pfn, flags);
 	if (ret < 0)
......
@@ -1422,7 +1422,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 		}
 	}
 
-	return 1UL << order;
+	return 1UL << alloc_order;
 }
@@ -2378,6 +2378,15 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
 }
 
+/* Returns true if the allocation is likely for THP */
+static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
+{
+	if (order == pageblock_order &&
+	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
+		return true;
+
+	return false;
+}
+
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
@@ -2416,7 +2425,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 restart:
-	if (!(gfp_mask & __GFP_NO_KSWAPD))
+	/* The decision whether to wake kswapd for THP is made later */
+	if (!is_thp_alloc(gfp_mask, order))
 		wake_all_kswapd(order, zonelist, high_zoneidx,
 						zone_idx(preferred_zone));
@@ -2488,16 +2498,22 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 			goto got_pg;
 		sync_migration = true;
 
-	/*
-	 * If compaction is deferred for high-order allocations, it is because
-	 * sync compaction recently failed. In this is the case and the caller
-	 * requested a movable allocation that does not heavily disrupt the
-	 * system then fail the allocation instead of entering direct reclaim.
-	 */
-	if ((deferred_compaction || contended_compaction) &&
-						(gfp_mask & __GFP_NO_KSWAPD))
-		goto nopage;
+	if (is_thp_alloc(gfp_mask, order)) {
+		/*
+		 * If compaction is deferred for high-order allocations, it is
+		 * because sync compaction recently failed. If this is the case
+		 * and the caller requested a movable allocation that does not
+		 * heavily disrupt the system then fail the allocation instead
+		 * of entering direct reclaim.
+		 */
+		if (deferred_compaction || contended_compaction)
+			goto nopage;
+
+		/* If process is willing to reclaim/compact then wake kswapd */
+		wake_all_kswapd(order, zonelist, high_zoneidx,
+					zone_idx(preferred_zone));
+	}
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
 					zonelist, high_zoneidx,
......
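With __GFP_NO_KSWAPD gone, the slowpath identifies THP requests heuristically: pageblock order, __GFP_MOVABLE set, __GFP_REPEAT clear (the distinction relies on hugetlbfs allocations passing __GFP_REPEAT while THP allocations do not). The userspace sketch below reproduces just that predicate; the flag values and pageblock order are illustrative stand-ins rather than the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

#define __GFP_MOVABLE	0x08u		/* stand-in flag values for illustration */
#define __GFP_REPEAT	0x400u
#define PAGEBLOCK_ORDER	9		/* typical value for 2 MiB huge pages */

/* Mirrors the is_thp_alloc() heuristic added in the hunk above. */
static bool is_thp_alloc(unsigned int gfp_mask, unsigned int order)
{
	return order == PAGEBLOCK_ORDER &&
	       (gfp_mask & (__GFP_MOVABLE | __GFP_REPEAT)) == __GFP_MOVABLE;
}

int main(void)
{
	/* GFP_TRANSHUGE-style request: movable, huge order, no __GFP_REPEAT */
	printf("THP-like request:     %d\n",
	       is_thp_alloc(__GFP_MOVABLE, PAGEBLOCK_ORDER));
	/* __GFP_REPEAT set (hugetlbfs-style): kswapd is still woken up front */
	printf("hugetlb-like request: %d\n",
	       is_thp_alloc(__GFP_MOVABLE | __GFP_REPEAT, PAGEBLOCK_ORDER));
	/* ordinary order-0 movable allocation */
	printf("order-0 request:      %d\n", is_thp_alloc(__GFP_MOVABLE, 0));
	return 0;
}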
@@ -617,7 +617,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 {
 	return; /* XXX: Not implemented yet */
 }
-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
 {
 }
 #else
@@ -658,10 +658,11 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 			get_order(sizeof(struct page) * nr_pages));
 }
 
-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
 {
 	unsigned long maps_section_nr, removing_section_nr, i;
 	unsigned long magic;
+	struct page *page = virt_to_page(memmap);
 
 	for (i = 0; i < nr_pages; i++, page++) {
 		magic = (unsigned long) page->lru.next;
@@ -710,13 +711,10 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 	 */
 	if (memmap) {
-		struct page *memmap_page;
-		memmap_page = virt_to_page(memmap);
-
 		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
 			>> PAGE_SHIFT;
 
-		free_map_bootmem(memmap_page, nr_pages);
+		free_map_bootmem(memmap, nr_pages);
 	}
 }
......
@@ -2414,6 +2414,19 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc)
 	} while (memcg);
 }
 
+static bool zone_balanced(struct zone *zone, int order,
+			  unsigned long balance_gap, int classzone_idx)
+{
+	if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
+				    balance_gap, classzone_idx, 0))
+		return false;
+
+	if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
+		return false;
+
+	return true;
+}
+
 /*
  * pgdat_balanced is used when checking if a node is balanced for high-order
  * allocations. Only zones that meet watermarks and are in a zone allowed
@@ -2492,8 +2505,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
 			continue;
 		}
 
-		if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
-							i, 0))
+		if (!zone_balanced(zone, order, 0, i))
 			all_zones_ok = false;
 		else
 			balanced += zone->present_pages;
@@ -2602,8 +2614,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				break;
 			}
 
-			if (!zone_watermark_ok_safe(zone, order,
-					high_wmark_pages(zone), 0, 0)) {
+			if (!zone_balanced(zone, order, 0, 0)) {
 				end_zone = i;
 				break;
 			} else {
@@ -2679,9 +2690,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				testorder = 0;
 
 			if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
-			    !zone_watermark_ok_safe(zone, testorder,
-					high_wmark_pages(zone) + balance_gap,
-					end_zone, 0)) {
+			    !zone_balanced(zone, testorder,
+					   balance_gap, end_zone)) {
 				shrink_zone(zone, &sc);
 
 				reclaim_state->reclaimed_slab = 0;
@@ -2708,8 +2718,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				continue;
 			}
 
-			if (!zone_watermark_ok_safe(zone, testorder,
-					high_wmark_pages(zone), end_zone, 0)) {
+			if (!zone_balanced(zone, testorder, 0, end_zone)) {
 				all_zones_ok = 0;
 				/*
 				 * We are still under min water mark. This
......
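The vmscan change routes every "is this zone balanced?" decision through the single zone_balanced() predicate, so the watermark test and the compaction_suitable() test can no longer disagree between the sleep check and the balancing loop. A minimal userspace model of that predicate, with the two kernel checks mocked as plain booleans:

#include <stdbool.h>
#include <stdio.h>

struct fake_zone {
	bool meets_high_watermark;	/* stands in for zone_watermark_ok_safe() */
	bool compaction_suitable;	/* stands in for compaction_suitable() */
};

/* A zone counts as balanced only if it clears the watermark and, for
 * order > 0, also looks suitable for compaction. */
static bool zone_balanced(const struct fake_zone *zone, int order)
{
	if (!zone->meets_high_watermark)
		return false;
	if (order && !zone->compaction_suitable)
		return false;
	return true;
}

int main(void)
{
	struct fake_zone z = {
		.meets_high_watermark = true,
		.compaction_suitable = false,
	};

	/* Over the watermark but unsuitable for compaction: balanced for an
	 * order-0 request, not balanced for a high-order one. */
	printf("order-2 balanced? %d\n", zone_balanced(&z, 2));
	printf("order-0 balanced? %d\n", zone_balanced(&z, 0));
	return 0;
}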