Commit 73c0d752 authored by Linus Torvalds

Merge branch 'stable' of...

Merge branch 'stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile into akpm

Pull tile bugfixes from Chris Metcalf:
 "This includes a variety of minor bug fixes, mostly to do with testing
  "make allyesconfig", "make allmodconfig", "make allnoconfig", inspired
  to Tejun Heo's observation about Kconfig.freezer not being included.

  The largest changes are just syntax changes removing the tile-specific
  use of a macro named INT_MASK, which is way too commonly redefined
  throughout driver code"

* 'stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  tile: tag some code with #ifdef CONFIG_COMPAT
  tile: fix memcpy_*io functions for allnoconfig
  tile: export a handful of symbols appropriately
  drm: fix compile failure by including <linux/swiotlb.h>
  tile: avoid defining INT_MASK macro in <arch/interrupts.h>
  tile: provide "screen_info" when enabling VT
  drivers/input/joystick/analog.c: enable precise timer
  tile: include kernel/Kconfig.freezer in tile Kconfig
  tile: remove an unused variable in copy_thread()
parents 983ca836 570fd501
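
The INT_MASK cleanup the message refers to boils down to replacing an
arch-private helper macro with an explicit 64-bit shift at each use.  A
minimal userspace sketch of that conversion (not code from this commit;
INT_PERF_COUNT below is just an illustrative value, not taken from the
real headers):

#include <stdio.h>

#define INT_PERF_COUNT 33                       /* illustrative interrupt number */

#define INT_MASK(intno) (1ULL << (intno))       /* old tile-only helper */

int main(void)
{
        unsigned long long old_way = INT_MASK(INT_PERF_COUNT);
        unsigned long long new_way = 1ULL << INT_PERF_COUNT;   /* new style */

        printf("%#llx %#llx\n", old_way, new_way);      /* same mask twice */
        return old_way == new_way ? 0 : 1;
}

Dropping the macro means no driver that happens to define its own
INT_MASK can collide with the tile headers.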
@@ -140,6 +140,8 @@ config ARCH_DEFCONFIG
 source "init/Kconfig"
+source "kernel/Kconfig.freezer"
 menu "Tilera-specific configuration"
 config NR_CPUS
......
@@ -250,7 +250,9 @@ static inline void writeq(u64 val, unsigned long addr)
 #define iowrite32 writel
 #define iowrite64 writeq
-static inline void memset_io(void *dst, int val, size_t len)
+#if CHIP_HAS_MMIO() || defined(CONFIG_PCI)
+static inline void memset_io(volatile void *dst, int val, size_t len)
 {
         int x;
         BUG_ON((unsigned long)dst & 0x3);
@@ -277,6 +279,8 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
                 writel(*(u32 *)(src + x), dst + x);
 }
+#endif
 /*
  * The Tile architecture does not support IOPORT, even with PCI.
  * Unfortunately we can't yet simply not declare these methods,
......
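
The io.h change only compiles memset_io()/memcpy_toio() when the chip
has MMIO or PCI is configured, since they funnel everything through
writel().  For the shape of that word-at-a-time loop outside a kernel
build, here is a hedged userspace sketch; write32() and
memset_io_sketch() are stand-ins invented for the example, not kernel
APIs:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static void write32(uint32_t val, volatile void *addr)
{
        *(volatile uint32_t *)addr = val;       /* stands in for writel() */
}

static void memset_io_sketch(volatile void *dst, int val, size_t len)
{
        size_t x;
        uint32_t word;

        /* 32-bit alignment check, as in the BUG_ON() in the hunk above;
         * this sketch also insists the length is a multiple of 4. */
        assert(((uintptr_t)dst & 0x3) == 0);
        assert((len & 0x3) == 0);

        /* Replicate the byte value into all four bytes of a word. */
        word = (uint8_t)val;
        word |= word << 8;
        word |= word << 16;

        /* Word-at-a-time stores, one 32-bit write per iteration. */
        for (x = 0; x < len; x += 4)
                write32(word, (volatile char *)dst + x);
}

int main(void)
{
        static uint32_t buf[4];

        memset_io_sketch(buf, 0xab, sizeof(buf));
        return buf[0] == 0xabababab ? 0 : 1;
}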
@@ -18,32 +18,20 @@
 #include <arch/interrupts.h>
 #include <arch/chip.h>
-#if !defined(__tilegx__) && defined(__ASSEMBLY__)
 /*
  * The set of interrupts we want to allow when interrupts are nominally
  * disabled. The remainder are effectively "NMI" interrupts from
  * the point of view of the generic Linux code. Note that synchronous
  * interrupts (aka "non-queued") are not blocked by the mask in any case.
  */
-#if CHIP_HAS_AUX_PERF_COUNTERS()
-#define LINUX_MASKABLE_INTERRUPTS_HI \
-        (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
-#else
-#define LINUX_MASKABLE_INTERRUPTS_HI \
-        (~(INT_MASK_HI(INT_PERF_COUNT)))
-#endif
-#else
-#if CHIP_HAS_AUX_PERF_COUNTERS()
-#define LINUX_MASKABLE_INTERRUPTS \
-        (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
-#else
 #define LINUX_MASKABLE_INTERRUPTS \
-        (~(INT_MASK(INT_PERF_COUNT)))
-#endif
+        (~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT)))
+#if CHIP_HAS_SPLIT_INTR_MASK()
+/* The same macro, but for the two 32-bit SPRs separately. */
+#define LINUX_MASKABLE_INTERRUPTS_LO (-1)
+#define LINUX_MASKABLE_INTERRUPTS_HI \
+        (~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32))))
 #endif
 #ifndef __ASSEMBLY__
@@ -126,7 +114,7 @@
  * to know our current state.
  */
 DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
-#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR)
+#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
 /* Disable interrupts. */
 #define arch_local_irq_disable() \
@@ -165,7 +153,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 /* Prevent the given interrupt from being enabled next time we enable irqs. */
 #define arch_local_irq_mask(interrupt) \
-        (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt))
+        (__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt)))
 /* Prevent the given interrupt from being enabled immediately. */
 #define arch_local_irq_mask_now(interrupt) do { \
@@ -175,7 +163,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 /* Allow the given interrupt to be enabled next time we enable irqs. */
 #define arch_local_irq_unmask(interrupt) \
-        (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt))
+        (__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt)))
 /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
 #define arch_local_irq_unmask_now(interrupt) do { \
@@ -250,7 +238,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 /* Disable interrupts. */
 #define IRQ_DISABLE(tmp0, tmp1) \
 { \
-        movei tmp0, -1; \
+        movei tmp0, LINUX_MASKABLE_INTERRUPTS_LO; \
         moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \
 }; \
 { \
......
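
After this change LINUX_MASKABLE_INTERRUPTS is a single 64-bit constant,
and the _LO/_HI pair exists only on chips whose interrupt mask is split
across two 32-bit SPRs.  The userspace sketch below (made-up interrupt
numbers, plain 1ULL shifts instead of the kernel's _AC() macro) just
checks the invariant that the split form recombines to the 64-bit form,
which is what IRQ_DISABLE relies on:

#include <assert.h>
#include <stdint.h>

#define INT_PERF_COUNT      33          /* hypothetical: lives in the HI word */
#define INT_AUX_PERF_COUNT  34

#define MASKABLE_64 \
        (~((1ULL << INT_PERF_COUNT) | (1ULL << INT_AUX_PERF_COUNT)))

/* Same mask, expressed per 32-bit word as in the split-SPR case. */
#define MASKABLE_LO ((uint32_t)-1)
#define MASKABLE_HI \
        ((uint32_t)~((1U << (INT_PERF_COUNT - 32)) | \
                     (1U << (INT_AUX_PERF_COUNT - 32))))

int main(void)
{
        uint64_t recombined = ((uint64_t)MASKABLE_HI << 32) | MASKABLE_LO;

        assert(recombined == MASKABLE_64);      /* HI:LO == the 64-bit mask */
        return 0;
}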
(Two further file diffs are collapsed in this view and not shown.)
@@ -1035,7 +1035,9 @@ handle_syscall:
        /* Ensure that the syscall number is within the legal range. */
        {
         moveli r20, hw2(sys_call_table)
+#ifdef CONFIG_COMPAT
         blbs r30, .Lcompat_syscall
+#endif
        }
        {
         cmpltu r21, TREG_SYSCALL_NR_NAME, r21
@@ -1093,6 +1095,7 @@ handle_syscall:
         j .Lresume_userspace /* jump into middle of interrupt_return */
        }
+#ifdef CONFIG_COMPAT
 .Lcompat_syscall:
        /*
         * Load the base of the compat syscall table in r20, and
@@ -1117,6 +1120,7 @@ handle_syscall:
        { move r15, r4; addxi r4, r4, 0 }
        { move r16, r5; addxi r5, r5, 0 }
        j .Lload_syscall_pointer
+#endif
 .Linvalid_syscall:
        /* Report an invalid syscall back to the user program */
......
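
These intvec_64.S hunks make the branch to the compat path, and the
compat path itself, disappear when CONFIG_COMPAT is off, so a build
without compat support still assembles.  As a rough C analogue
(hypothetical names throughout; the real dispatcher is the assembly
above, not this code), the decision being guarded looks like:

#include <stdio.h>

#define MY_CONFIG_COMPAT 1              /* stand-in for CONFIG_COMPAT */

typedef long (*syscall_fn)(long, long, long);

static long sys_native_hello(long a, long b, long c)
{
        (void)a; (void)b; (void)c;
        return 64;
}

static long compat_sys_hello(long a, long b, long c)
{
        (void)a; (void)b; (void)c;
        return 32;
}

static syscall_fn sys_call_table[] = { sys_native_hello };
#if MY_CONFIG_COMPAT
static syscall_fn compat_sys_call_table[] = { compat_sys_hello };
#endif

static long dispatch(unsigned nr, int task_is_compat)
{
        syscall_fn *table = sys_call_table;

#if MY_CONFIG_COMPAT
        /* Mirrors the "blbs r30, .Lcompat_syscall" branch in the hunk. */
        if (task_is_compat)
                table = compat_sys_call_table;
#else
        (void)task_is_compat;           /* no compat tasks exist in this build */
#endif
        if (nr >= 1)
                return -1;              /* .Linvalid_syscall analogue */
        return table[nr](0, 0, 0);
}

int main(void)
{
        printf("%ld %ld\n", dispatch(0, 0), dispatch(0, 1));   /* 64 32 */
        return 0;
}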
@@ -159,7 +159,7 @@ static void save_arch_state(struct thread_struct *t);
 int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p)
 {
-        struct pt_regs *childregs = task_pt_regs(p), *regs = current_pt_regs();
+        struct pt_regs *childregs = task_pt_regs(p);
         unsigned long ksp;
         unsigned long *callee_regs;
......
@@ -16,6 +16,7 @@
 #include <linux/reboot.h>
 #include <linux/smp.h>
 #include <linux/pm.h>
+#include <linux/export.h>
 #include <asm/page.h>
 #include <asm/setup.h>
 #include <hv/hypervisor.h>
@@ -49,3 +50,4 @@ void machine_restart(char *cmd)
 /* No interesting distinction to be made here. */
 void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
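
Exporting pm_power_off matters for "make allmodconfig" because modular
drivers install their own power-off handler through it.  A hedged
sketch of such a module (the "demo_poweroff" driver is invented for
illustration and is not part of this commit):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/printk.h>

static void (*saved_handler)(void);

static void demo_power_off(void)
{
        pr_emerg("demo_poweroff: would turn the machine off here\n");
}

static int __init demo_init(void)
{
        saved_handler = pm_power_off;
        pm_power_off = demo_power_off;  /* only links if the symbol is exported */
        return 0;
}

static void __exit demo_exit(void)
{
        pm_power_off = saved_handler;
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");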
@@ -31,6 +31,7 @@
 #include <linux/timex.h>
 #include <linux/hugetlb.h>
 #include <linux/start_kernel.h>
+#include <linux/screen_info.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
@@ -49,6 +50,10 @@ static inline int ABS(int x) { return x >= 0 ? x : -x; }
 /* Chip information */
 char chip_model[64] __write_once;
+#ifdef CONFIG_VT
+struct screen_info screen_info;
+#endif
 struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
......
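
When CONFIG_VT is enabled, console code expects a global
struct screen_info to exist at link time; the setup.c hunk provides it
for tile.  The userspace sketch below models only that one-definition
contract, using a simplified stand-in for the real structure from
<linux/screen_info.h>:

#include <stdio.h>

struct screen_info {
        unsigned short orig_video_cols;
        unsigned short orig_video_lines;
};

/* "Arch" side: the one definition the linker needs (cf. the hunk above). */
struct screen_info screen_info = {
        .orig_video_cols  = 80,
        .orig_video_lines = 25,
};

/* "Console" side: only ever declares it extern and reads it. */
extern struct screen_info screen_info;

int main(void)
{
        printf("%ux%u console\n",
               (unsigned)screen_info.orig_video_cols,
               (unsigned)screen_info.orig_video_lines);
        return 0;
}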
@@ -112,7 +112,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
                        p->pc, p->sp, p->ex1);
                p = NULL;
        }
-       if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
+       if (!kbt->profile || ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) == 0)
                return p;
        return NULL;
 }
@@ -484,6 +484,7 @@ void save_stack_trace(struct stack_trace *trace)
 {
        save_stack_trace_tsk(NULL, trace);
 }
+EXPORT_SYMBOL_GPL(save_stack_trace);
 #endif
......
@@ -12,6 +12,7 @@
  * more details.
  */
+#include <linux/export.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 #include <arch/icache.h>
@@ -165,3 +166,4 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
        __insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf);
 #endif
 }
+EXPORT_SYMBOL_GPL(finv_buffer_remote);
@@ -16,6 +16,7 @@
 #include <linux/ctype.h>
 #include <linux/errno.h>
 #include <linux/smp.h>
+#include <linux/export.h>
 /*
  * Allow cropping out bits beyond the end of the array.
@@ -50,3 +51,4 @@ int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits)
        } while (*bp != '\0' && *bp != '\n');
        return 0;
 }
+EXPORT_SYMBOL(bitmap_parselist_crop);
@@ -55,6 +55,8 @@ EXPORT_SYMBOL(hv_dev_poll_cancel);
 EXPORT_SYMBOL(hv_dev_close);
 EXPORT_SYMBOL(hv_sysconf);
 EXPORT_SYMBOL(hv_confstr);
+EXPORT_SYMBOL(hv_get_rtc);
+EXPORT_SYMBOL(hv_set_rtc);
 /* libgcc.a */
 uint32_t __udivsi3(uint32_t dividend, uint32_t divisor);
......
@@ -408,6 +408,7 @@ void homecache_change_page_home(struct page *page, int order, int home)
                __set_pte(ptep, pte_set_home(pteval, home));
        }
 }
+EXPORT_SYMBOL(homecache_change_page_home);
 struct page *homecache_alloc_pages(gfp_t gfp_mask,
                                   unsigned int order, int home)
......
@@ -28,6 +28,7 @@
  */
 #include <core/engine.h>
+#include <linux/swiotlb.h>
 #include <subdev/fb.h>
 #include <subdev/vm.h>
......
@@ -38,6 +38,7 @@
 #include <drm/radeon_drm.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
+#include <linux/swiotlb.h>
 #include "radeon_reg.h"
 #include "radeon.h"
......
@@ -162,7 +162,7 @@ static unsigned int get_time_pit(void)
 #define GET_TIME(x) do { x = get_cycles(); } while (0)
 #define DELTA(x,y) ((y)-(x))
 #define TIME_NAME "PCC"
-#elif defined(CONFIG_MN10300)
+#elif defined(CONFIG_MN10300) || defined(CONFIG_TILE)
 #define GET_TIME(x) do { x = get_cycles(); } while (0)
 #define DELTA(x, y) ((x) - (y))
 #define TIME_NAME "TSC"
......
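
The analog joystick driver times pulse widths with whatever free-running
counter the architecture offers; adding CONFIG_TILE to this branch lets
it use get_cycles() on tile.  A small userspace sketch of the
GET_TIME()/DELTA() idiom follows; read_cycles() is a stand-in invented
for the example (clock_gettime() instead of a cycle-count SPR):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t read_cycles(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Same shape as the macros in the MN10300/TILE branch of the hunk above. */
#define GET_TIME(x) do { (x) = read_cycles(); } while (0)
#define DELTA(x, y) ((x) - (y))

int main(void)
{
        uint64_t start, end;
        volatile unsigned long spin;

        GET_TIME(start);
        for (spin = 0; spin < 1000000; spin++)
                ;                       /* the "pulse" being timed */
        GET_TIME(end);

        /* Later reading first, matching the DELTA(x, y) = x - y form. */
        printf("elapsed: %llu ticks\n", (unsigned long long)DELTA(end, start));
        return 0;
}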