Commit ce0c0e50 authored by Andi Kleen's avatar Andi Kleen Committed by Ingo Molnar

x86, generic: CPA add statistics about state of direct mapping v4

Add information about the mapping state of the direct mapping to
/proc/meminfo. I chose /proc/meminfo because that is where all the other
memory statistics are too and it is a generally useful metric even
outside debugging situations. A lot of split kernel pages means the
kernel will run slower.

This way we can see how many large pages are really used for the direct
mapping and how many have been split up.

Useful for general insight into the kernel.

v2: Add hotplug locking to 64bit to plug a very obscure theoretical race.
    32bit doesn't need it because it doesn't support hotadd for lowmem.
    Fix some typos
v3: Rename dpages_cnt
    Add CONFIG ifdef for count update as requested by tglx
    Expand description
v4: Fix stupid bugs added in v3
    Move update_page_count to pageattr.c
Signed-off-by: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 1b40a895
...@@ -162,6 +162,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) ...@@ -162,6 +162,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
pgd_t *pgd; pgd_t *pgd;
pmd_t *pmd; pmd_t *pmd;
pte_t *pte; pte_t *pte;
unsigned pages_2m = 0, pages_4k = 0;
pgd_idx = pgd_index(PAGE_OFFSET); pgd_idx = pgd_index(PAGE_OFFSET);
pgd = pgd_base + pgd_idx; pgd = pgd_base + pgd_idx;
...@@ -197,6 +198,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) ...@@ -197,6 +198,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
is_kernel_text(addr2)) is_kernel_text(addr2))
prot = PAGE_KERNEL_LARGE_EXEC; prot = PAGE_KERNEL_LARGE_EXEC;
pages_2m++;
set_pmd(pmd, pfn_pmd(pfn, prot)); set_pmd(pmd, pfn_pmd(pfn, prot));
pfn += PTRS_PER_PTE; pfn += PTRS_PER_PTE;
...@@ -213,11 +215,14 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) ...@@ -213,11 +215,14 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
if (is_kernel_text(addr)) if (is_kernel_text(addr))
prot = PAGE_KERNEL_EXEC; prot = PAGE_KERNEL_EXEC;
pages_4k++;
set_pte(pte, pfn_pte(pfn, prot)); set_pte(pte, pfn_pte(pfn, prot));
} }
max_pfn_mapped = pfn; max_pfn_mapped = pfn;
} }
} }
update_page_count(PG_LEVEL_2M, pages_2m);
update_page_count(PG_LEVEL_4K, pages_4k);
} }
static inline int page_kills_ppro(unsigned long pagenr) static inline int page_kills_ppro(unsigned long pagenr)
......
...@@ -312,6 +312,8 @@ __meminit void early_iounmap(void *addr, unsigned long size) ...@@ -312,6 +312,8 @@ __meminit void early_iounmap(void *addr, unsigned long size)
static unsigned long __meminit static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end) phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{ {
unsigned long pages = 0;
int i = pmd_index(address); int i = pmd_index(address);
for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) { for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
...@@ -328,9 +330,11 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end) ...@@ -328,9 +330,11 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
if (pmd_val(*pmd)) if (pmd_val(*pmd))
continue; continue;
pages++;
set_pte((pte_t *)pmd, set_pte((pte_t *)pmd,
pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
} }
update_page_count(PG_LEVEL_2M, pages);
return address; return address;
} }
...@@ -350,6 +354,7 @@ phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end) ...@@ -350,6 +354,7 @@ phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
static unsigned long __meminit static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end) phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{ {
unsigned long pages = 0;
unsigned long last_map_addr = end; unsigned long last_map_addr = end;
int i = pud_index(addr); int i = pud_index(addr);
...@@ -374,6 +379,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end) ...@@ -374,6 +379,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
} }
if (direct_gbpages) { if (direct_gbpages) {
pages++;
set_pte((pte_t *)pud, set_pte((pte_t *)pud,
pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
last_map_addr = (addr & PUD_MASK) + PUD_SIZE; last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
...@@ -390,6 +396,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end) ...@@ -390,6 +396,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
unmap_low_page(pmd); unmap_low_page(pmd);
} }
__flush_tlb_all(); __flush_tlb_all();
update_page_count(PG_LEVEL_1G, pages);
return last_map_addr >> PAGE_SHIFT; return last_map_addr >> PAGE_SHIFT;
} }
......
...@@ -34,6 +34,19 @@ struct cpa_data { ...@@ -34,6 +34,19 @@ struct cpa_data {
unsigned force_split : 1; unsigned force_split : 1;
}; };
/*
 * Per-level page counts for the kernel direct mapping, indexed by
 * PG_LEVEL_* (4k/2M, plus 1G on 64bit).  Reported via /proc/meminfo
 * (arch_report_meminfo) so the split state of the direct mapping is
 * visible outside the kernel.
 */
static unsigned long direct_pages_count[PG_LEVEL_NUM];

/*
 * Account @pages pages of mapping size @level added to the direct
 * mapping.  Called from the init-time mapping setup paths; compiled to
 * a no-op accounting-wise when /proc is not configured.
 */
void __meminit update_page_count(int level, unsigned long pages)
{
#ifdef CONFIG_PROC_FS
	unsigned long flags;

	/*
	 * Protect against CPA: split_large_page() also adjusts these
	 * counters, presumably under pgd_lock — taking the same lock
	 * here avoids lost updates (NOTE(review): confirm the CPA path
	 * holds pgd_lock at the adjustment site).
	 */
	spin_lock_irqsave(&pgd_lock, flags);
	direct_pages_count[level] += pages;
	spin_unlock_irqrestore(&pgd_lock, flags);
#endif
}
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
static inline unsigned long highmap_start_pfn(void) static inline unsigned long highmap_start_pfn(void)
...@@ -500,6 +513,12 @@ static int split_large_page(pte_t *kpte, unsigned long address) ...@@ -500,6 +513,12 @@ static int split_large_page(pte_t *kpte, unsigned long address)
for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc) for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
set_pte(&pbase[i], pfn_pte(pfn, ref_prot)); set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
if (address >= (unsigned long)__va(0) &&
address < (unsigned long)__va(max_pfn_mapped << PAGE_SHIFT)) {
direct_pages_count[level]--;
direct_pages_count[level - 1] += PTRS_PER_PTE;
}
/* /*
* Install the new, split up pagetable. Important details here: * Install the new, split up pagetable. Important details here:
* *
...@@ -1029,6 +1048,22 @@ bool kernel_page_present(struct page *page) ...@@ -1029,6 +1048,22 @@ bool kernel_page_present(struct page *page)
#endif /* CONFIG_DEBUG_PAGEALLOC */ #endif /* CONFIG_DEBUG_PAGEALLOC */
#ifdef CONFIG_PROC_FS
/*
 * Emit the DirectMap* statistics for /proc/meminfo.
 * Returns the number of characters written into @page.
 */
int arch_report_meminfo(char *page)
{
	int len = sprintf(page, "DirectMap4k: %8lu\n"
				"DirectMap2M: %8lu\n",
			  direct_pages_count[PG_LEVEL_4K],
			  direct_pages_count[PG_LEVEL_2M]);

#ifdef CONFIG_X86_64
	/* 1G mappings only exist on 64bit. */
	len += sprintf(page + len, "DirectMap1G: %8lu\n",
		       direct_pages_count[PG_LEVEL_1G]);
#endif

	return len;
}
#endif /* CONFIG_PROC_FS */
/* /*
* The testcases use internal knowledge of the implementation that shouldn't * The testcases use internal knowledge of the implementation that shouldn't
* be exposed to the rest of the kernel. Include these directly here. * be exposed to the rest of the kernel. Include these directly here.
......
...@@ -123,6 +123,11 @@ static int uptime_read_proc(char *page, char **start, off_t off, ...@@ -123,6 +123,11 @@ static int uptime_read_proc(char *page, char **start, off_t off,
return proc_calc_metrics(page, start, off, count, eof, len); return proc_calc_metrics(page, start, off, count, eof, len);
} }
/*
 * Generic no-op fallback: architectures that expose extra memory
 * statistics override this weak symbol; everyone else contributes
 * nothing (zero bytes) to /proc/meminfo.
 */
int __attribute__((weak)) arch_report_meminfo(char *page)
{
	(void)page;	/* unused in the generic stub */
	return 0;
}
static int meminfo_read_proc(char *page, char **start, off_t off, static int meminfo_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data) int count, int *eof, void *data)
{ {
...@@ -221,6 +226,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off, ...@@ -221,6 +226,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
len += hugetlb_report_meminfo(page + len); len += hugetlb_report_meminfo(page + len);
len += arch_report_meminfo(page + len);
return proc_calc_metrics(page, start, off, count, eof, len); return proc_calc_metrics(page, start, off, count, eof, len);
#undef K #undef K
} }
......
...@@ -369,8 +369,11 @@ enum { ...@@ -369,8 +369,11 @@ enum {
PG_LEVEL_4K, PG_LEVEL_4K,
PG_LEVEL_2M, PG_LEVEL_2M,
PG_LEVEL_1G, PG_LEVEL_1G,
PG_LEVEL_NUM
}; };
void update_page_count(int level, unsigned long pages);
/* /*
* Helper function that returns the kernel pagetable entry controlling * Helper function that returns the kernel pagetable entry controlling
* the virtual address 'address'. NULL means no pagetable entry present. * the virtual address 'address'. NULL means no pagetable entry present.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment