Commit f46edbd1 authored by Dan Williams, committed by Linus Torvalds

mm/sparsemem: add helpers to track active portions of a section at boot

Prepare for hot{plug,remove} of sub-ranges of a section by tracking a
sub-section active bitmask, each bit representing a PMD_SIZE span of the
architecture's memory hotplug section size.
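
To make the geometry concrete: with x86_64 defaults (4KiB pages, 2MiB
PMDs, 128MiB sections) a section splits into 64 sub-sections, so the
whole bitmap fits in a single unsigned long. A stand-alone sketch of
that arithmetic; the constants mirror the kernel's names, but the
program itself is illustrative only:

#include <stdio.h>

#define PAGE_SHIFT              12      /* 4KiB pages */
#define PMD_SHIFT               21      /* 2MiB PMD span */
#define SECTION_SIZE_BITS       27      /* 128MiB hotplug sections */

#define PAGES_PER_SECTION       (1UL << (SECTION_SIZE_BITS - PAGE_SHIFT))
#define PAGES_PER_SUBSECTION    (1UL << (PMD_SHIFT - PAGE_SHIFT))
#define SUBSECTIONS_PER_SECTION (PAGES_PER_SECTION / PAGES_PER_SUBSECTION)

int main(void)
{
        /* 64 sub-sections, so the whole map is one unsigned long */
        printf("sub-sections per section: %lu\n", SUBSECTIONS_PER_SECTION);
        printf("bitmap size: %lu bytes\n", SUBSECTIONS_PER_SECTION / 8);
        return 0;
}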

The implication of a partially populated section is that pfn_valid()
needs to go beyond a valid_section() check and either determine that the
section is an "early section", or read the sub-section active ranges
from the bitmask.  The expectation is that the bitmask (subsection_map)
fits in the same cacheline as the valid_section() / early_section()
data, so the incremental performance overhead to pfn_valid() should be
negligible.

The rationale for using early_section() to short-circuit the
subsection_map check is that there are legacy code paths that use
pfn_valid() at section granularity before validating the pfn against
pgdat data.  So, the early_section() check allows those traditional
assumptions to persist while also permitting subsection_map to tell the
truth for purposes of populating the unused portions of early sections
with PMEM and other ZONE_DEVICE mappings.
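
The resulting check order (section-number bounds check, valid_section(),
then the early_section() short-circuit or a subsection_map bit test) can
be modeled in user space. This is a minimal sketch; the struct below and
its data are synthetic stand-ins for the kernel's mem_section machinery,
not the real layout:

#include <stdbool.h>
#include <stdio.h>

#define PAGES_PER_SUBSECTION    512UL   /* 2MiB of 4KiB pages */
#define PAGES_PER_SECTION       32768UL /* 128MiB section */
#define PAGE_SECTION_MASK       (~(PAGES_PER_SECTION - 1))

struct model_section {
        bool valid;                     /* stands in for valid_section() */
        bool early;                     /* stands in for early_section() */
        unsigned long subsection_map;   /* 64 bits, one per sub-section */
};

static int subsection_map_index(unsigned long pfn)
{
        return (pfn & ~PAGE_SECTION_MASK) / PAGES_PER_SUBSECTION;
}

static bool model_pfn_valid(struct model_section *ms, unsigned long pfn)
{
        if (!ms->valid)
                return false;
        /* early sections answer for their whole span, as before */
        if (ms->early)
                return true;
        return ms->subsection_map & (1UL << subsection_map_index(pfn));
}

int main(void)
{
        /* a hot-added section with only its first two sub-sections active */
        struct model_section ms = {
                .valid = true, .early = false, .subsection_map = 0x3,
        };

        printf("pfn 0:    %d\n", model_pfn_valid(&ms, 0));      /* 1 */
        printf("pfn 1024: %d\n", model_pfn_valid(&ms, 1024));   /* 0 */
        return 0;
}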

Link: http://lkml.kernel.org/r/156092350874.979959.18185938451405518285.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reported-by: Qian Cai <cai@lca.pw>
Tested-by: Jane Chu <jane.chu@oracle.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>	[ppc64]
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: Wei Yang <richardw.yang@linux.intel.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 326e1b8f
include/linux/mmzone.h:

@@ -1178,6 +1178,8 @@ struct mem_section_usage {
 	unsigned long pageblock_flags[0];
 };
 
+void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
+
 struct page;
 struct page_ext;
 struct mem_section {
@@ -1321,12 +1323,40 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn)
 
 extern unsigned long __highest_present_section_nr;
 
+static inline int subsection_map_index(unsigned long pfn)
+{
+	return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
+}
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+{
+	int idx = subsection_map_index(pfn);
+
+	return test_bit(idx, ms->usage->subsection_map);
+}
+#else
+static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+{
+	return 1;
+}
+#endif
+
 #ifndef CONFIG_HAVE_ARCH_PFN_VALID
 static inline int pfn_valid(unsigned long pfn)
 {
+	struct mem_section *ms;
+
 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
 		return 0;
-	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
+	ms = __nr_to_section(pfn_to_section_nr(pfn));
+	if (!valid_section(ms))
+		return 0;
+	/*
+	 * Traditionally early sections always returned pfn_valid() for
+	 * the entire section-sized span.
+	 */
+	return early_section(ms) || pfn_section_valid(ms, pfn);
 }
 #endif
@@ -1358,6 +1388,7 @@ void sparse_init(void);
 #define sparse_init()	do {} while (0)
 #define sparse_index_init(_sec, _nid)	do {} while (0)
 #define pfn_present pfn_valid
+#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
 #endif /* CONFIG_SPARSEMEM */
 
 /*
mm/page_alloc.c:

@@ -7351,12 +7351,18 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 			(u64)zone_movable_pfn[i] << PAGE_SHIFT);
 	}
 
-	/* Print out the early node map */
+	/*
+	 * Print out the early node map, and initialize the
+	 * subsection-map relative to active online memory ranges to
+	 * enable future "sub-section" extensions of the memory map.
+	 */
 	pr_info("Early memory node ranges\n");
-	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
 			(u64)start_pfn << PAGE_SHIFT,
 			((u64)end_pfn << PAGE_SHIFT) - 1);
+		subsection_map_init(start_pfn, end_pfn - start_pfn);
+	}
 
 	/* Initialise every node */
 	mminit_verify_pageflags_layout();
mm/sparse.c:

@@ -210,6 +210,41 @@ static inline unsigned long first_present_section_nr(void)
 	return next_present_section_nr(-1);
 }
 
+void subsection_mask_set(unsigned long *map, unsigned long pfn,
+		unsigned long nr_pages)
+{
+	int idx = subsection_map_index(pfn);
+	int end = subsection_map_index(pfn + nr_pages - 1);
+
+	bitmap_set(map, idx, end - idx + 1);
+}
+
+void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
+{
+	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
+	int i, start_sec = pfn_to_section_nr(pfn);
+
+	if (!nr_pages)
+		return;
+
+	for (i = start_sec; i <= end_sec; i++) {
+		struct mem_section *ms;
+		unsigned long pfns;
+
+		pfns = min(nr_pages, PAGES_PER_SECTION
+				- (pfn & ~PAGE_SECTION_MASK));
+		ms = __nr_to_section(i);
+		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);
+
+		pr_debug("%s: sec: %d pfns: %ld set(%d, %d)\n", __func__, i,
+				pfns, subsection_map_index(pfn),
+				subsection_map_index(pfn + pfns - 1));
+
+		pfn += pfns;
+		nr_pages -= pfns;
+	}
+}
+
 /* Record a memory area against a node. */
 void __init memory_present(int nid, unsigned long start, unsigned long end)
 {
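
As a usage illustration, the splitting loop in subsection_map_init() can
be exercised in user space. This sketch assumes the x86_64-style
constants from the earlier examples and replaces bitmap_set() with a
plain loop; a 1024-page range starting 512 pages shy of a section
boundary ends up setting the last bit of one section's map and the
first bit of the next:

#include <stdio.h>

#define PAGES_PER_SUBSECTION    512UL
#define PAGES_PER_SECTION       32768UL
#define PAGE_SECTION_MASK       (~(PAGES_PER_SECTION - 1))

static int subsection_map_index(unsigned long pfn)
{
        return (pfn & ~PAGE_SECTION_MASK) / PAGES_PER_SUBSECTION;
}

/* same bit-range semantics as bitmap_set(map, idx, end - idx + 1) */
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
                unsigned long nr_pages)
{
        int idx = subsection_map_index(pfn);
        int end = subsection_map_index(pfn + nr_pages - 1);

        while (idx <= end)
                *map |= 1UL << idx++;
}

int main(void)
{
        unsigned long map[2] = { 0, 0 };        /* two sections' maps */
        unsigned long pfn = 32256, nr_pages = 1024; /* straddles a boundary */

        while (nr_pages) {
                /* clamp to the end of the current section, as above */
                unsigned long pfns = PAGES_PER_SECTION -
                                (pfn & ~PAGE_SECTION_MASK);
                if (pfns > nr_pages)
                        pfns = nr_pages;
                subsection_mask_set(&map[pfn / PAGES_PER_SECTION], pfn, pfns);
                pfn += pfns;
                nr_pages -= pfns;
        }

        printf("section 0 map: %#lx\n", map[0]); /* 0x8000000000000000 */
        printf("section 1 map: %#lx\n", map[1]); /* 0x1 */
        return 0;
}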