Commit e81f2d22 authored by Zhang Zhen, committed by Linus Torvalds

mm/hugetlb: reduce arch dependent code about huge_pmd_unshare

Currently we have many duplicates in definitions of huge_pmd_unshare.  In
all architectures this function just returns 0 when
CONFIG_ARCH_WANT_HUGE_PMD_SHARE is N.

This patch puts the default implementation in mm/hugetlb.c and lets these
architectures use the common code.
Signed-off-by: Zhang Zhen <zhenzhang.zhang@huawei.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: David Rientjes <rientjes@google.com>
Cc: James Yang <James.Yang@freescale.com>
Cc: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 36f88188
...@@ -41,11 +41,6 @@ int pud_huge(pud_t pud) ...@@ -41,11 +41,6 @@ int pud_huge(pud_t pud)
return 0; return 0;
} }
/*
 * Stub: this configuration does not implement hugetlb PMD sharing
 * (CONFIG_ARCH_WANT_HUGE_PMD_SHARE), so there is never a shared PMD
 * to unshare.  Returning 0 tells the caller nothing was unshared.
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
int pmd_huge(pmd_t pmd) int pmd_huge(pmd_t pmd)
{ {
return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
......
...@@ -31,13 +31,6 @@ ...@@ -31,13 +31,6 @@
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
/*
 * Without CONFIG_ARCH_WANT_HUGE_PMD_SHARE no hugetlb PMDs are ever
 * shared, so unsharing is a no-op: report 0 (nothing unshared).
 */
#ifndef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
#endif
int pmd_huge(pmd_t pmd) int pmd_huge(pmd_t pmd)
{ {
return !(pmd_val(pmd) & PMD_TABLE_BIT); return !(pmd_val(pmd) & PMD_TABLE_BIT);
......
...@@ -65,11 +65,6 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr) ...@@ -65,11 +65,6 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
return pte; return pte;
} }
/*
 * Stub: PMD sharing is not supported in this configuration, so there
 * is never anything to unshare; always report 0 (nothing unshared).
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; } #define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
/* /*
......
...@@ -89,11 +89,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) ...@@ -89,11 +89,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
return pte; return pte;
} }
/*
 * Stub: PMD sharing is not supported in this configuration, so there
 * is never anything to unshare; always report 0 (nothing unshared).
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
int pmd_huge(pmd_t pmd) int pmd_huge(pmd_t pmd)
{ {
return pmd_page_shift(pmd) > PAGE_SHIFT; return pmd_page_shift(pmd) > PAGE_SHIFT;
......
...@@ -51,11 +51,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) ...@@ -51,11 +51,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
return (pte_t *) pmd; return (pte_t *) pmd;
} }
/*
 * Stub: PMD sharing is not supported in this configuration, so there
 * is never anything to unshare; always report 0 (nothing unshared).
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
/* /*
* This function checks for proper alignment of input addr and len parameters. * This function checks for proper alignment of input addr and len parameters.
*/ */
......
...@@ -439,11 +439,6 @@ int alloc_bootmem_huge_page(struct hstate *hstate) ...@@ -439,11 +439,6 @@ int alloc_bootmem_huge_page(struct hstate *hstate)
} }
#endif #endif
/*
 * Stub: PMD sharing is not supported in this configuration, so there
 * is never anything to unshare; always report 0 (nothing unshared).
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
#ifdef CONFIG_PPC_FSL_BOOK3E #ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_FREELIST_SIZE \ #define HUGEPD_FREELIST_SIZE \
((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t)) ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
......
...@@ -193,11 +193,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) ...@@ -193,11 +193,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
return (pte_t *) pmdp; return (pte_t *) pmdp;
} }
/*
 * Stub: PMD sharing is not supported in this configuration, so there
 * is never anything to unshare; always report 0 (nothing unshared).
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
int pmd_huge(pmd_t pmd) int pmd_huge(pmd_t pmd)
{ {
if (!MACHINE_HAS_HPAGE) if (!MACHINE_HAS_HPAGE)
......
...@@ -62,11 +62,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) ...@@ -62,11 +62,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
return pte; return pte;
} }
/*
 * Stub: PMD sharing is not supported in this configuration, so there
 * is never anything to unshare; always report 0 (nothing unshared).
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
int pmd_huge(pmd_t pmd) int pmd_huge(pmd_t pmd)
{ {
return 0; return 0;
......
...@@ -172,11 +172,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) ...@@ -172,11 +172,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
return pte; return pte;
} }
/*
 * Stub: PMD sharing is not supported in this configuration, so there
 * is never anything to unshare; always report 0 (nothing unshared).
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry) pte_t *ptep, pte_t entry)
{ {
......
...@@ -160,11 +160,6 @@ int pud_huge(pud_t pud) ...@@ -160,11 +160,6 @@ int pud_huge(pud_t pud)
return !!(pud_val(pud) & _PAGE_HUGE_PAGE); return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
} }
/*
 * Stub: PMD sharing is not supported in this configuration, so there
 * is never anything to unshare; always report 0 (nothing unshared).
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
unsigned long addr, unsigned long len, unsigned long addr, unsigned long len,
......
...@@ -3789,6 +3789,11 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) ...@@ -3789,6 +3789,11 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{ {
return NULL; return NULL;
} }
/*
 * Common default when CONFIG_ARCH_WANT_HUGE_PMD_SHARE is disabled
 * (this sits in the #else branch closed at the #endif below): no
 * hugetlb PMDs are ever shared, so unsharing is a no-op that always
 * reports 0, meaning nothing was unshared.
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
#define want_pmd_share() (0) #define want_pmd_share() (0)
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.