Commit 0dccb048 authored by Chris Metcalf

arch/tile: support newer binutils assembler shift semantics

This change supports building the kernel with newer binutils where
a shift of greater than the word size is no longer interpreted
silently as modulo the word size, but instead generates a warning.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
parent 325d1605
...@@ -16,10 +16,11 @@ ...@@ -16,10 +16,11 @@
#define __ARCH_INTERRUPTS_H__ #define __ARCH_INTERRUPTS_H__
/** Mask for an interrupt. */ /** Mask for an interrupt. */
#ifdef __ASSEMBLER__
/* Note: must handle breaking interrupts into high and low words manually. */ /* Note: must handle breaking interrupts into high and low words manually. */
#define INT_MASK(intno) (1 << (intno)) #define INT_MASK_LO(intno) (1 << (intno))
#else #define INT_MASK_HI(intno) (1 << ((intno) - 32))
#ifndef __ASSEMBLER__
#define INT_MASK(intno) (1ULL << (intno)) #define INT_MASK(intno) (1ULL << (intno))
#endif #endif
...@@ -89,6 +90,7 @@ ...@@ -89,6 +90,7 @@
#define NUM_INTERRUPTS 49 #define NUM_INTERRUPTS 49
#ifndef __ASSEMBLER__
#define QUEUED_INTERRUPTS ( \ #define QUEUED_INTERRUPTS ( \
INT_MASK(INT_MEM_ERROR) | \ INT_MASK(INT_MEM_ERROR) | \
INT_MASK(INT_DMATLB_MISS) | \ INT_MASK(INT_DMATLB_MISS) | \
...@@ -301,4 +303,5 @@ ...@@ -301,4 +303,5 @@
INT_MASK(INT_DOUBLE_FAULT) | \ INT_MASK(INT_DOUBLE_FAULT) | \
INT_MASK(INT_AUX_PERF_COUNT) | \ INT_MASK(INT_AUX_PERF_COUNT) | \
0) 0)
#endif /* !__ASSEMBLER__ */
#endif /* !__ARCH_INTERRUPTS_H__ */ #endif /* !__ARCH_INTERRUPTS_H__ */
...@@ -18,12 +18,24 @@ ...@@ -18,12 +18,24 @@
#include <arch/interrupts.h> #include <arch/interrupts.h>
#include <arch/chip.h> #include <arch/chip.h>
#if !defined(__tilegx__) && defined(__ASSEMBLY__)
/* /*
* The set of interrupts we want to allow when interrupts are nominally * The set of interrupts we want to allow when interrupts are nominally
* disabled. The remainder are effectively "NMI" interrupts from * disabled. The remainder are effectively "NMI" interrupts from
* the point of view of the generic Linux code. Note that synchronous * the point of view of the generic Linux code. Note that synchronous
* interrupts (aka "non-queued") are not blocked by the mask in any case. * interrupts (aka "non-queued") are not blocked by the mask in any case.
*/ */
#if CHIP_HAS_AUX_PERF_COUNTERS()
#define LINUX_MASKABLE_INTERRUPTS_HI \
(~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
#else
#define LINUX_MASKABLE_INTERRUPTS_HI \
(~(INT_MASK_HI(INT_PERF_COUNT)))
#endif
#else
#if CHIP_HAS_AUX_PERF_COUNTERS() #if CHIP_HAS_AUX_PERF_COUNTERS()
#define LINUX_MASKABLE_INTERRUPTS \ #define LINUX_MASKABLE_INTERRUPTS \
(~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT))) (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
...@@ -32,6 +44,8 @@ ...@@ -32,6 +44,8 @@
(~(INT_MASK(INT_PERF_COUNT))) (~(INT_MASK(INT_PERF_COUNT)))
#endif #endif
#endif
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */ /* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */
...@@ -224,11 +238,11 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); ...@@ -224,11 +238,11 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#define IRQ_DISABLE(tmp0, tmp1) \ #define IRQ_DISABLE(tmp0, tmp1) \
{ \ { \
movei tmp0, -1; \ movei tmp0, -1; \
moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS) \ moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \
}; \ }; \
{ \ { \
mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp0; \ mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp0; \
auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS) \ auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS_HI) \
}; \ }; \
mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp1 mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp1
......
...@@ -145,7 +145,7 @@ ENTRY(empty_zero_page) ...@@ -145,7 +145,7 @@ ENTRY(empty_zero_page)
.endif .endif
.word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \ .word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
(HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
.word (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN) .word (\bits1) | (HV_CPA_TO_PFN(\cpa) << (HV_PTE_INDEX_PFN - 32))
.endm .endm
__PAGE_ALIGNED_DATA __PAGE_ALIGNED_DATA
...@@ -158,12 +158,14 @@ ENTRY(swapper_pg_dir) ...@@ -158,12 +158,14 @@ ENTRY(swapper_pg_dir)
*/ */
.set addr, 0 .set addr, 0
.rept (MEM_USER_INTRPT - PAGE_OFFSET) >> PGDIR_SHIFT .rept (MEM_USER_INTRPT - PAGE_OFFSET) >> PGDIR_SHIFT
PTE addr + PAGE_OFFSET, addr, HV_PTE_READABLE | HV_PTE_WRITABLE PTE addr + PAGE_OFFSET, addr, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
(1 << (HV_PTE_INDEX_WRITABLE - 32))
.set addr, addr + PGDIR_SIZE .set addr, addr + PGDIR_SIZE
.endr .endr
/* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */ /* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
PTE MEM_SV_INTRPT, 0, HV_PTE_READABLE | HV_PTE_EXECUTABLE PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
(1 << (HV_PTE_INDEX_EXECUTABLE - 32))
.org swapper_pg_dir + HV_L1_SIZE .org swapper_pg_dir + HV_L1_SIZE
END(swapper_pg_dir) END(swapper_pg_dir)
...@@ -176,6 +178,7 @@ ENTRY(swapper_pg_dir) ...@@ -176,6 +178,7 @@ ENTRY(swapper_pg_dir)
__INITDATA __INITDATA
.align CHIP_L2_LINE_SIZE() .align CHIP_L2_LINE_SIZE()
ENTRY(swapper_pgprot) ENTRY(swapper_pgprot)
PTE 0, 0, HV_PTE_READABLE | HV_PTE_WRITABLE, 1 PTE 0, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
(1 << (HV_PTE_INDEX_WRITABLE - 32)), 1
.align CHIP_L2_LINE_SIZE() .align CHIP_L2_LINE_SIZE()
END(swapper_pgprot) END(swapper_pgprot)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment