Commit 57e6bbcb authored by Linus Torvalds

Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
 "A number of fixes for the merge window, fixing a number of cases
  missed when testing the uaccess code, particularly cases which only
  show up with certain compiler versions"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: 8431/1: fix alignment of __bug_table section entries
  arm/xen: Enable user access to the kernel before issuing a privcmd call
  ARM: domains: add memory dependencies to get_domain/set_domain
  ARM: domains: thread_info.h no longer needs asm/domains.h
  ARM: uaccess: fix undefined instruction on ARMv7M/noMMU
  ARM: uaccess: remove unneeded uaccess_save_and_disable macro
  ARM: swpan: fix nwfpe for uaccess changes
  ARM: 8429/1: disable GCC SRA optimization
parents 6ff33f39 c2172ce2
...@@ -54,6 +54,14 @@ AS += -EL
LD += -EL
endif
#
# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
# later may result in code being generated that handles signed short and signed
# char struct members incorrectly. So disable it.
# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
#
KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra)
# This selects which instruction set is used.
# Note that GCC does not numerically define an architecture version
# macro, but instead defines a whole series of macros which makes
......
...@@ -491,11 +491,6 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#endif
.endm
.macro uaccess_save_and_disable, tmp
uaccess_save \tmp
uaccess_disable \tmp
.endm
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
...
...@@ -40,6 +40,7 @@ do { \
"2:\t.asciz " #__file "\n" \
".popsection\n" \
".pushsection __bug_table,\"a\"\n" \
".align 2\n" \
"3:\t.word 1b, 2b\n" \
"\t.hword " #__line ", 0\n" \
".popsection"); \
...
...@@ -12,6 +12,7 @@
#ifndef __ASSEMBLY__
#include <asm/barrier.h>
#include <asm/thread_info.h>
#endif
/*
...@@ -89,7 +90,8 @@ static inline unsigned int get_domain(void)
asm(
"mrc p15, 0, %0, c3, c0 @ get domain"
: "=r" (domain)
: "m" (current_thread_info()->cpu_domain));
return domain;
}
...@@ -98,7 +100,7 @@ static inline void set_domain(unsigned val)
{
asm volatile(
"mcr p15, 0, %0, c3, c0 @ set domain"
: : "r" (val) : "memory");
isb();
}
...
...@@ -25,7 +25,6 @@
struct task_struct;
#include <asm/types.h>
#include <asm/domain.h>
typedef unsigned long mm_segment_t;
...
...@@ -226,6 +226,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
#ifdef CONFIG_CPU_USE_DOMAINS
/*
* Copy the initial value of the domain access control register
* from the current thread: thread->addr_limit will have been
...@@ -233,6 +234,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
* kernel/fork.c
*/
thread->cpu_domain = get_domain();
#endif
if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs();
...
...@@ -95,9 +95,10 @@ emulate: ...@@ -95,9 +95,10 @@ emulate:
reteq r4 @ no, return failure reteq r4 @ no, return failure
next: next:
uaccess_enable r3
.Lx1: ldrt r6, [r5], #4 @ get the next instruction and .Lx1: ldrt r6, [r5], #4 @ get the next instruction and
@ increment PC @ increment PC
uaccess_disable r3
and r2, r6, #0x0F000000 @ test for FP insns and r2, r6, #0x0F000000 @ test for FP insns
teq r2, #0x0C000000 teq r2, #0x0C000000
teqne r2, #0x0D000000 teqne r2, #0x0D000000
......
...@@ -98,8 +98,23 @@ ENTRY(privcmd_call)
mov r1, r2
mov r2, r3
ldr r3, [sp, #8]
/*
* Privcmd calls are issued by the userspace. We need to allow the
* kernel to access the userspace memory before issuing the hypercall.
*/
uaccess_enable r4
/* r4 is loaded now as we use it as scratch register before */
ldr r4, [sp, #4]
__HVC(XEN_IMM)
/*
* Disable userspace access from kernel. This is fine to do it
* unconditionally as no set_fs(KERNEL_DS)/set_fs(get_ds()) is
* called before.
*/
uaccess_disable r4
ldm sp!, {r4}
ret lr
ENDPROC(privcmd_call);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment