Commit 1335183b authored by David Mosberger, committed by Tony Luck

[IA64] sparse "long" constant cleanup patch

Sparse wants us to be clear about (unsigned) long constants.
Make it so.
Signed-off-by: David Mosberger-Tang <davidm@hpl.hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent c8cc6377
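
For context on what sparse complains about: ia64 is an LP64 target, so these constants already had 64-bit type, but sparse wants any constant that does not fit in a plain int to carry an explicit suffix rather than be typed long silently. A minimal sketch of the before/after pattern (hypothetical names, not from the patch; the warning wording is approximate):

	/* before: sparse warns, roughly "constant 0x3fffffffff is so big it is long" */
	unsigned long old_way = ~0x3fffffffff;
	/* after: explicit suffix, same value, no warning */
	unsigned long new_way = ~0x3fffffffffUL;

For constants in headers that are also pulled into assembly sources (PAGE_OFFSET, GATE_ADDR, KERNEL_START, DEFAULT_MAP_BASE below), the patch uses the __IA64_UL_CONST() wrapper instead of a bare UL suffix, so the same definition can expand to a plain constant in assembly, where a UL suffix would not parse.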
@@ -344,7 +344,7 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
 	__get_user(lo, (unsigned int *)&save->cw);
 	num64 = mxcsr & 0xff10;
 	num64 = (num64 << 32) | (lo & 0x1f3f);
-	fcr = (fcr & (~0xff1000001f3f)) | num64;
+	fcr = (fcr & (~0xff1000001f3fUL)) | num64;
 	/* setting bits 0..31 with sw and tag and 32..37 from mxcsr */
 	__get_user(lo, (unsigned int *)&save->sw);
@@ -355,21 +355,21 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
 	num64 = mxcsr & 0x3f;
 	num64 = (num64 << 16) | (hi & 0xffff);
 	num64 = (num64 << 16) | (lo & 0xffff);
-	fsr = (fsr & (~0x3fffffffff)) | num64;
+	fsr = (fsr & (~0x3fffffffffUL)) | num64;
 	/* setting bits 0..47 with cssel and ipoff */
 	__get_user(lo, (unsigned int *)&save->ipoff);
 	__get_user(hi, (unsigned int *)&save->cssel);
 	num64 = hi & 0xffff;
 	num64 = (num64 << 32) | lo;
-	fir = (fir & (~0xffffffffffff)) | num64;
+	fir = (fir & (~0xffffffffffffUL)) | num64;
 	/* setting bits 0..47 with datasel and dataoff */
 	__get_user(lo, (unsigned int *)&save->dataoff);
 	__get_user(hi, (unsigned int *)&save->datasel);
 	num64 = hi & 0xffff;
 	num64 = (num64 << 32) | lo;
-	fdr = (fdr & (~0xffffffffffff)) | num64;
+	fdr = (fdr & (~0xffffffffffffUL)) | num64;
 	ia64_setreg(_IA64_REG_AR_FSR, fsr);
 	ia64_setreg(_IA64_REG_AR_FCR, fcr);
...
@@ -439,7 +439,7 @@ void ia64_elf32_init(struct pt_regs *regs);
 	 | ((((sd) >> IA32_SEG_DB) & 0x1) << SEG_DB)	\
 	 | ((((sd) >> IA32_SEG_G) & 0x1) << SEG_G))
-#define IA32_IOBASE	0x2000000000000000 /* Virtual address for I/O space */
+#define IA32_IOBASE	0x2000000000000000UL /* Virtual address for I/O space */
 #define IA32_CR0	0x80000001	/* Enable PG and PE bits */
 #define IA32_CR4	0x600		/* MMXEX and FXSR on */
...
@@ -1763,9 +1763,9 @@ restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct *sa
 	__get_user(mxcsr, (unsigned int *)&save->mxcsr);
 	num64 = mxcsr & 0xff10;
-	tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000)) | (num64<<32);
+	tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000UL)) | (num64<<32);
 	num64 = mxcsr & 0x3f;
-	tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000)) | (num64<<32);
+	tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000UL)) | (num64<<32);
 	for (i = 0; i < 8; i++) {
 		copy_from_user(num128, &save->xmm_space[0] + 4*i, sizeof(struct _xmmreg_ia32));
...
@@ -195,10 +195,10 @@ apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
 		printk(KERN_ERR "%s: value %li out of IMM22 range\n", mod->name, (int64_t)val);
 		return 0;
 	}
-	ia64_patch((u64) insn, 0x01fffcfe000, ( ((val & 0x200000) << 15) /* bit 21 -> 36 */
-					      | ((val & 0x1f0000) <<  6) /* bit 16 -> 22 */
-					      | ((val & 0x00ff80) << 20) /* bit  7 -> 27 */
-					      | ((val & 0x00007f) << 13) /* bit  0 -> 13 */));
+	ia64_patch((u64) insn, 0x01fffcfe000UL, ( ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
+						| ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
+						| ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
+						| ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */));
 	return 1;
 }
@@ -209,8 +209,8 @@ apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
 		printk(KERN_ERR "%s: value %li out of IMM21b range\n", mod->name, (int64_t)val);
 		return 0;
 	}
-	ia64_patch((u64) insn, 0x11ffffe000, ( ((val & 0x100000) << 16) /* bit 20 -> 36 */
-					     | ((val & 0x0fffff) << 13) /* bit  0 -> 13 */));
+	ia64_patch((u64) insn, 0x11ffffe000UL, ( ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
+					       | ((val & 0x0fffffUL) << 13) /* bit  0 -> 13 */));
 	return 1;
 }
@@ -253,9 +253,9 @@ plt_target (struct plt_entry *plt)
 	long off;
 	b0 = b[0]; b1 = b[1];
-	off = ( ((b1 & 0x00fffff000000000) >> 36)		/* imm20b -> bit 0 */
-	       | ((b0 >> 48) << 20) | ((b1 & 0x7fffff) << 36)	/* imm39 -> bit 20 */
-	       | ((b1 & 0x0800000000000000) << 0));		/* i -> bit 59 */
+	off = ( ((b1 & 0x00fffff000000000UL) >> 36)		/* imm20b -> bit 0 */
+	       | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36)	/* imm39 -> bit 20 */
+	       | ((b1 & 0x0800000000000000UL) << 0));		/* i -> bit 59 */
 	return (long) plt->bundle[1] + 16*off;
 }
@@ -739,7 +739,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
 	if (gp_addressable(mod, val)) {
 		/* turn "ld8" into "mov": */
 		DEBUGP("%s: patching ld8 at %p to mov\n", __FUNCTION__, location);
-		ia64_patch((u64) location, 0x1fff80fe000, 0x10000000000);
+		ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
 	}
 	return 0;
...
@@ -65,21 +65,21 @@ void
 ia64_patch_imm64 (u64 insn_addr, u64 val)
 {
 	ia64_patch(insn_addr,
-		   0x01fffefe000, ( ((val & 0x8000000000000000) >> 27) /* bit 63 -> 36 */
-				  | ((val & 0x0000000000200000) <<  0) /* bit 21 -> 21 */
-				  | ((val & 0x00000000001f0000) <<  6) /* bit 16 -> 22 */
-				  | ((val & 0x000000000000ff80) << 20) /* bit  7 -> 27 */
-				  | ((val & 0x000000000000007f) << 13) /* bit  0 -> 13 */));
-	ia64_patch(insn_addr - 1, 0x1ffffffffff, val >> 22);
+		   0x01fffefe000UL, ( ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */
+				    | ((val & 0x0000000000200000UL) <<  0) /* bit 21 -> 21 */
+				    | ((val & 0x00000000001f0000UL) <<  6) /* bit 16 -> 22 */
+				    | ((val & 0x000000000000ff80UL) << 20) /* bit  7 -> 27 */
+				    | ((val & 0x000000000000007fUL) << 13) /* bit  0 -> 13 */));
+	ia64_patch(insn_addr - 1, 0x1ffffffffffUL, val >> 22);
 }
 void
 ia64_patch_imm60 (u64 insn_addr, u64 val)
 {
 	ia64_patch(insn_addr,
-		   0x011ffffe000, ( ((val & 0x0800000000000000) >> 23) /* bit 59 -> 36 */
-				  | ((val & 0x00000000000fffff) << 13) /* bit  0 -> 13 */));
-	ia64_patch(insn_addr - 1, 0x1fffffffffc, val >> 18);
+		   0x011ffffe000UL, ( ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */
+				    | ((val & 0x00000000000fffffUL) << 13) /* bit  0 -> 13 */));
+	ia64_patch(insn_addr - 1, 0x1fffffffffcUL, val >> 18);
 }
 /*
@@ -130,10 +130,10 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
 	while (offp < (s32 *) end) {
 		wp = (u64 *) ia64_imva((char *) offp + *offp);
-		wp[0] = 0x0000000100000000;	/* nop.m 0; nop.i 0; nop.i 0 */
-		wp[1] = 0x0004000000000200;
-		wp[2] = 0x0000000100000011;	/* nop.m 0; nop.i 0; br.ret.sptk.many b6 */
-		wp[3] = 0x0084006880000200;
+		wp[0] = 0x0000000100000000UL;	/* nop.m 0; nop.i 0; nop.i 0 */
+		wp[1] = 0x0004000000000200UL;
+		wp[2] = 0x0000000100000011UL;	/* nop.m 0; nop.i 0; br.ret.sptk.many b6 */
+		wp[3] = 0x0084006880000200UL;
 		ia64_fc(wp); ia64_fc(wp + 2);
 		++offp;
 	}
...
@@ -833,7 +833,7 @@ access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data
 	      case PT_CFM:
 		urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
 		if (write_access) {
-			if (((cfm ^ *data) & 0x3fffffffffU) != 0) {
+			if (((cfm ^ *data) & 0x3fffffffffUL) != 0) {
 				if (ia64_sync_user_rbs(child, sw,
 						       pt->ar_bspstore, urbs_end) < 0)
 					return -1;
...
@@ -1205,7 +1205,7 @@ desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word
 static inline unw_hash_index_t
 hash (unsigned long ip)
 {
-#	define hashmagic	0x9e3779b97f4a7c16	/* based on (sqrt(5)/2-1)*2^64 */
+#	define hashmagic	0x9e3779b97f4a7c16UL	/* based on (sqrt(5)/2-1)*2^64 */
 	return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE);
 #undef hashmagic
...
@@ -176,7 +176,7 @@ ia64_tlb_init (void)
 	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
 		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld;"
 		       "defaulting to architected purge page-sizes.\n", status);
-		purge.mask = 0x115557000;
+		purge.mask = 0x115557000UL;
 	}
 	purge.max_bits = ia64_fls(purge.mask);
...
@@ -40,7 +40,7 @@
  * the way of the program that it will "exec", and that there is
  * sufficient room for the brk.
  */
-#define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x800000000)
+#define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x800000000UL)
 #define PT_IA_64_UNWIND		0x70000001
...
@@ -23,7 +23,7 @@
 #define __SLOW_DOWN_IO	do { } while (0)
 #define SLOW_DOWN_IO	do { } while (0)
-#define __IA64_UNCACHED_OFFSET	0xc000000000000000	/* region 6 */
+#define __IA64_UNCACHED_OFFSET	0xc000000000000000UL	/* region 6 */
 /*
  * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
...
@@ -110,7 +110,7 @@ reload_context (mm_context_t context)
 	unsigned long rid_incr = 0;
 	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;
-	old_rr4 = ia64_get_rr(0x8000000000000000);
+	old_rr4 = ia64_get_rr(0x8000000000000000UL);
 	rid = context << 3;	/* make space for encoding the region number */
 	rid_incr = 1 << 8;
@@ -124,11 +124,11 @@ reload_context (mm_context_t context)
 	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);
 #endif
-	ia64_set_rr(0x0000000000000000, rr0);
-	ia64_set_rr(0x2000000000000000, rr1);
-	ia64_set_rr(0x4000000000000000, rr2);
-	ia64_set_rr(0x6000000000000000, rr3);
-	ia64_set_rr(0x8000000000000000, rr4);
+	ia64_set_rr(0x0000000000000000UL, rr0);
+	ia64_set_rr(0x2000000000000000UL, rr1);
+	ia64_set_rr(0x4000000000000000UL, rr2);
+	ia64_set_rr(0x6000000000000000UL, rr3);
+	ia64_set_rr(0x8000000000000000UL, rr4);
 	ia64_srlz_i();			/* srlz.i implies srlz.d */
 }
...
@@ -123,7 +123,7 @@ typedef union ia64_va {
 #define REGION_KERNEL	7
 #ifdef CONFIG_HUGETLB_PAGE
-# define htlbpage_to_page(x)	((REGION_NUMBER(x) << 61)			\
+# define htlbpage_to_page(x)	(((unsigned long) REGION_NUMBER(x) << 61)	\
 				 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
 # define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 # define is_hugepage_only_range(addr, len)	\
@@ -186,7 +186,7 @@ get_order (unsigned long size)
 # define __pgprot(x)	(x)
 #endif /* !STRICT_MM_TYPECHECKS */
-#define PAGE_OFFSET	0xe000000000000000
+#define PAGE_OFFSET	__IA64_UL_CONST(0xe000000000000000)
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE |		\
 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |	\
...
@@ -206,18 +206,18 @@ ia64_phys_addr_valid (unsigned long addr)
 #define RGN_SIZE	(1UL << 61)
 #define RGN_KERNEL	7
-#define VMALLOC_START		0xa000000200000000
+#define VMALLOC_START		0xa000000200000000UL
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-# define VMALLOC_END_INIT	(0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
+# define VMALLOC_END_INIT	(0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
 # define VMALLOC_END		vmalloc_end
 	extern unsigned long vmalloc_end;
 #else
-# define VMALLOC_END		(0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
+# define VMALLOC_END		(0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
 #endif
 /* fs/proc/kcore.c */
-#define kc_vaddr_to_offset(v) ((v) - 0xa000000000000000)
-#define kc_offset_to_vaddr(o) ((o) + 0xa000000000000000)
+#define kc_vaddr_to_offset(v) ((v) - 0xa000000000000000UL)
+#define kc_offset_to_vaddr(o) ((o) + 0xa000000000000000UL)
 /*
  * Conversion functions: convert page frame number (pfn) and a protection value to a page
...
@@ -28,8 +28,8 @@
 #define IA64_NUM_PMC_REGS	32
 #define IA64_NUM_PMD_REGS	32
-#define DEFAULT_MAP_BASE	0x2000000000000000
-#define DEFAULT_TASK_SIZE	0xa000000000000000
+#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
+#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)
 /*
  * TASK_SIZE really is a mis-named.  It really is the maximum user
...
@@ -19,12 +19,12 @@
 #include <asm/pal.h>
 #include <asm/percpu.h>
-#define GATE_ADDR		(0xa000000000000000)
+#define GATE_ADDR		__IA64_UL_CONST(0xa000000000000000)
 /*
  * 0xa000000000000000+2*PERCPU_PAGE_SIZE
  * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
  */
-#define KERNEL_START		0xa000000100000000
+#define KERNEL_START		__IA64_UL_CONST(0xa000000100000000)
 #define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)
 #ifndef __ASSEMBLY__
...