Commit 0921af6c authored by Bibo Mao, committed by Huacai Chen

LoongArch: Use static defined zero page rather than allocated

On LoongArch systems only one zero page is needed, since there are no
cache synonyms (aliases). Therefore zero_page_mask is useless and the
macro __HAVE_COLOR_ZERO_PAGE is unnecessary.

As on other popular architectures, it is simpler to define the zero page
in the kernel BSS segment than to allocate it dynamically at boot.
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
parent 2bb20d29
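For context before the diff: dropping __HAVE_COLOR_ZERO_PAGE also switches the generic zero-pfn helpers over to their simpler form. Below is a condensed, paraphrased sketch of the relevant branch in include/linux/pgtable.h (not part of this diff; details vary by kernel version):

#ifdef __HAVE_COLOR_ZERO_PAGE
/* Several coloured zero pages: any pfn inside the block counts. */
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;

	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}
#else
/* Single zero page: a plain equality test suffices. */
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;

	return pfn == zero_pfn;
}
#endif

With only one zero page and no cache colouring, LoongArch now takes the #else branch, which is both simpler and cheaper.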
arch/loongarch/include/asm/mmzone.h
@@ -13,6 +13,4 @@ extern struct pglist_data *node_data[];
 
 #define NODE_DATA(nid)		(node_data[(nid)])
 
-extern void setup_zero_pages(void);
-
 #endif /* _ASM_MMZONE_H_ */
arch/loongarch/include/asm/pgtable.h
@@ -70,12 +70,9 @@ struct vm_area_struct;
  * ZERO_PAGE is a global shared page that is always zero; used
  * for zero-mapped memory areas etc..
  */
-extern unsigned long empty_zero_page;
-extern unsigned long zero_page_mask;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 
-#define ZERO_PAGE(vaddr) \
-	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
-#define __HAVE_COLOR_ZERO_PAGE
+#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)
 
 /*
  * TLB refill handlers may also map the vmalloc area into xkvrange.
arch/loongarch/kernel/numa.c
@@ -438,7 +438,6 @@ void __init mem_init(void)
 {
 	high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
 	memblock_free_all();
-	setup_zero_pages();	/* This comes from node 0 */
 }
 
 int pcibus_to_node(struct pci_bus *bus)
arch/loongarch/mm/init.c
@@ -35,33 +35,8 @@
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 
-/*
- * We have up to 8 empty zeroed pages so we can map one of the right colour
- * when needed. Since page is never written to after the initialization we
- * don't have to care about aliases on other CPUs.
- */
-unsigned long empty_zero_page, zero_page_mask;
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(zero_page_mask);
-
-void setup_zero_pages(void)
-{
-	unsigned int order, i;
-	struct page *page;
-
-	order = 0;
-
-	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
-	if (!empty_zero_page)
-		panic("Oh boy, that early out of memory?");
-
-	page = virt_to_page((void *)empty_zero_page);
-	split_page(page, order);
-	for (i = 0; i < (1 << order); i++, page++)
-		mark_page_reserved(page);
-
-	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
-}
 
 void copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
@@ -106,7 +81,6 @@ void __init mem_init(void)
 {
 	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
 	memblock_free_all();
-	setup_zero_pages();	/* Setup zeroed pages. */
 }
 
 #endif /* !CONFIG_NUMA */
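Note on the replacement: __page_aligned_bss (from include/linux/linkage.h) places the array in the page-aligned BSS section, which early boot code clears, so the page is guaranteed zero-filled without any runtime allocation. Roughly (paraphrased; the exact definition may differ between kernel versions):

#define __page_aligned_bss	__section(".bss..page_aligned") __aligned(PAGE_SIZE)

/* So the new declaration expands to approximately: */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
		__section(".bss..page_aligned") __aligned(PAGE_SIZE);

This also removes the boot-time __get_free_pages() call and the need to mark the page reserved by hand.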