Commit d016bf7e authored by Kirill A. Shutemov, committed by Linus Torvalds

mm: make FIRST_USER_ADDRESS unsigned long on all archs

LKP has triggered a compiler warning after my recent patch "mm: account
pmd page tables to the process":

    mm/mmap.c: In function 'exit_mmap':
 >> mm/mmap.c:2857:2: warning: right shift count >= width of type [enabled by default]

The code:

 > 2857                WARN_ON(mm_nr_pmds(mm) >
   2858                                round_up(FIRST_USER_ADDRESS, PUD_SIZE) >> PUD_SHIFT);

Here, on tile, FIRST_USER_ADDRESS is defined as plain 0, an int, so round_up() evaluates to an
int as well; right-shifting that int by PUD_SHIFT, which on this configuration is no smaller
than the width of int, is what triggers the warning.

I think the best way to fix it is to define FIRST_USER_ADDRESS as unsigned
long on every arch, for consistency.
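
To make the type issue concrete, here is a minimal user-space sketch (not the kernel build
itself): round_up()/__round_mask() below mirror the shape of the kernel helpers, while
PUD_SHIFT, PUD_SIZE and the two *_FIRST_USER_ADDRESS names are stand-ins invented for the
example; an LP64 host, where unsigned long is wider than int, is assumed.

```c
/* Standalone sketch of the warning; PUD_SHIFT is a made-up stand-in value,
 * chosen only so that it is >= the width of int. */
#include <stdio.h>

/* Same shape as the kernel helpers: the mask is cast to typeof(x), so the
 * whole round_up() expression takes the type of its first argument. */
#define __round_mask(x, y)	((__typeof__(x))((y) - 1))
#define round_up(x, y)		((((x) - 1) | __round_mask(x, y)) + 1)

#define PUD_SHIFT	32			/* stand-in; >= 32 is what matters */
#define PUD_SIZE	(1UL << PUD_SHIFT)

#define OLD_FIRST_USER_ADDRESS	0		/* plain int, as on tile before    */
#define NEW_FIRST_USER_ADDRESS	0UL		/* unsigned long, after this patch */

int main(void)
{
	/* round_up() takes typeof(0) == int here, so this right-shifts a
	 * 32-bit int by 32: gcc warns "right shift count >= width of type". */
	unsigned long before = round_up(OLD_FIRST_USER_ADDRESS, PUD_SIZE) >> PUD_SHIFT;

	/* With 0UL the whole expression is unsigned long; no warning. */
	unsigned long after = round_up(NEW_FIRST_USER_ADDRESS, PUD_SIZE) >> PUD_SHIFT;

	printf("%lu %lu\n", before, after);
	return 0;
}
```

Compiling this with gcc should reproduce the warning on the `before` line only; it is merely an
illustration of the type promotion, not the actual mm/mmap.c check.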
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reported-by: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3ae3ad4e
@@ -45,7 +45,7 @@ struct vm_area_struct;
 #define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))
 #define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3))
 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 /* Number of pointers that fit on a page: this will go away. */
 #define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3))
......
@@ -211,7 +211,7 @@
  * No special requirements for lowest virtual address we permit any user space
  * mapping to be mapped at.
  */
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 /****************************************************************
......
@@ -85,7 +85,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VMALLOC_START 0UL
 #define VMALLOC_END 0xffffffffUL
-#define FIRST_USER_ADDRESS (0)
+#define FIRST_USER_ADDRESS 0UL
 #include <asm-generic/pgtable.h>
......
@@ -45,7 +45,7 @@
 #define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #ifndef __ASSEMBLY__
 extern void __pte_error(const char *file, int line, unsigned long val);
......
@@ -30,7 +30,7 @@
 #define PGDIR_MASK (~(PGDIR_SIZE-1))
 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #ifndef __ASSEMBLY__
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
......
@@ -67,7 +67,7 @@ extern void paging_init(void);
 */
 #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 /* zero page used for uninitialized stuff */
 #ifndef __ASSEMBLY__
......
@@ -140,7 +140,7 @@ extern unsigned long empty_zero_page;
 #define PTRS_PER_PTE 4096
 #define USER_PGDS_IN_LAST_PML4 (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - USER_PGD_PTRS)
......
@@ -171,7 +171,7 @@ extern unsigned long _dflt_cache_att;
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */
 /* Seems to be zero even in architectures where the zero page is firewalled? */
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #define pte_special(pte) 0
 #define pte_mkspecial(pte) (pte)
......
@@ -127,7 +127,7 @@
 #define PTRS_PER_PGD_SHIFT PTRS_PER_PTD_SHIFT
 #define PTRS_PER_PGD (1UL << PTRS_PER_PGD_SHIFT)
 #define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 /*
  * All the normal masks have the "page accessed" bits on, as any time
......
@@ -53,7 +53,7 @@ extern unsigned long empty_zero_page[1024];
 #define PGDIR_MASK (~(PGDIR_SIZE - 1))
 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #ifndef __ASSEMBLY__
 /* Just any arbitrary offset to the start of the vmalloc VM area: the
......
@@ -66,7 +66,7 @@
 #define PTRS_PER_PGD 128
 #endif
 #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 /* Virtual address region for use by kernel_map() */
 #ifdef CONFIG_SUN3
......
@@ -72,7 +72,7 @@ extern int mem_init_done;
 #include <asm/mmu.h>
 #include <asm/page.h>
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 extern unsigned long va_to_phys(unsigned long address);
 extern pte_t *va_to_pte(unsigned long address);
......
@@ -57,7 +57,7 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 #define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
 #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #define VMALLOC_START MAP_BASE
......
@@ -65,7 +65,7 @@ extern void paging_init(void);
 #define PGDIR_MASK (~(PGDIR_SIZE - 1))
 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - USER_PGD_PTRS)
......
@@ -24,7 +24,7 @@
 #include <asm/pgtable-bits.h>
 #include <asm-generic/pgtable-nopmd.h>
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #define VMALLOC_START CONFIG_NIOS2_KERNEL_MMU_REGION_BASE
 #define VMALLOC_END (CONFIG_NIOS2_KERNEL_REGION_BASE - 1)
......
@@ -77,7 +77,7 @@ extern void paging_init(void);
 */
 #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 /*
  * Kernels own virtual memory area.
......
@@ -134,7 +134,7 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
  * pgd entries used up by user/kernel:
  */
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 /* NB: The tlb miss handlers make certain assumptions about the order */
 /* of the following bits, so be careful (One example, bits 25-31 */
......
@@ -45,7 +45,7 @@ extern int icache_44x_need_flush;
 #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #define pte_ERROR(e) \
 pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
......
@@ -12,7 +12,7 @@
 #endif
 #include <asm/barrier.h>
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 /*
  * Size of EA range mapped by our pagetables.
......
@@ -99,7 +99,7 @@ extern unsigned long zero_page_mask;
 #endif /* CONFIG_64BIT */
 #define PTRS_PER_PGD 2048
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #define pte_ERROR(e) \
 printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
......
@@ -27,7 +27,7 @@ extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
 #define PTRS_PER_PTE 1024
 #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #define VMALLOC_START (0xc0000000UL)
......
@@ -62,7 +62,7 @@ static inline unsigned long long neff_sign_extend(unsigned long val)
 /* Entries per level */
 #define PTRS_PER_PTE (PAGE_SIZE / (1 << PTE_MAGNITUDE))
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #define PHYS_ADDR_MASK29 0x1fffffff
 #define PHYS_ADDR_MASK32 0xffffffff
......
@@ -44,7 +44,7 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
 #define PTRS_PER_PMD SRMMU_PTRS_PER_PMD
 #define PTRS_PER_PGD SRMMU_PTRS_PER_PGD
 #define USER_PTRS_PER_PGD PAGE_OFFSET / SRMMU_PGDIR_SIZE
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #define PTE_SIZE (PTRS_PER_PTE*4)
 #define PAGE_NONE SRMMU_PAGE_NONE
......
@@ -93,7 +93,7 @@ bool kern_addr_valid(unsigned long addr);
 #define PTRS_PER_PGD (1UL << PGDIR_BITS)
 /* Kernel has a separate 44bit address space. */
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #define pmd_ERROR(e) \
 pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
......
@@ -67,7 +67,7 @@ extern void pgtable_cache_init(void);
 extern void paging_init(void);
 extern void set_page_homes(void);
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #define _PAGE_PRESENT HV_PTE_PRESENT
 #define _PAGE_HUGE_PAGE HV_PTE_PAGE
......
@@ -23,7 +23,7 @@
 #define PTRS_PER_PTE 1024
 #define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
 #define PTRS_PER_PGD 1024
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #define pte_ERROR(e) \
 printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
......
@@ -41,7 +41,7 @@
 #endif
 #define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #define pte_ERROR(e) \
 printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
......
@@ -4,7 +4,7 @@
 #include <linux/const.h>
 #include <asm/page_types.h>
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #define _PAGE_BIT_PRESENT 0 /* is present */
 #define _PAGE_BIT_RW 1 /* writeable */
......
@@ -57,7 +57,7 @@
 #define PTRS_PER_PGD 1024
 #define PGD_ORDER 0
 #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
 #define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
 /*
......