Commit a8282bf0 authored by Linus Torvalds

Merge tag 'powerpc-5.2-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "This is a frustratingly large batch at rc5. Some of these were sent
  earlier but were missed by me due to being distracted by other things,
  and some took a while to track down due to needing manual bisection on
  old hardware. But still we clearly need to improve our testing of KVM,
  and of 32-bit, so that we catch these earlier.

  Summary: seven fixes, all for bugs introduced this cycle.

   - The commit to add KASAN support broke booting on 32-bit SMP
     machines, due to a refactoring that moved some setup out of the
     secondary CPU path.

   - A fix for another 32-bit SMP bug introduced by the fast syscall
     entry implementation for 32-bit BOOKE. And a build fix for the same
     commit.

   - Our change to allow the DAWR to be force enabled on Power9
     introduced a bug in KVM, where we clobber r3 leading to a host
     crash.

   - The same commit also exposed a previously unreachable bug in the
     nested KVM handling of DAWR, which could lead to an oops in a
     nested host.

   - One of the DMA reworks broke the b43legacy WiFi driver on some
     people's powermacs, fix it by enabling a 30-bit ZONE_DMA on 32-bit.

   - A fix for TLB flushing in KVM introduced a new bug, as it neglected
     to also flush the ERAT, this could lead to memory corruption in the
     guest.

  Thanks to: Aaro Koskinen, Christoph Hellwig, Christophe Leroy, Larry
  Finger, Michael Neuling, Suraj Jitindar Singh"

* tag 'powerpc-5.2-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  KVM: PPC: Book3S HV: Invalidate ERAT when flushing guest TLB entries
  powerpc: enable a 30-bit ZONE_DMA for 32-bit pmac
  KVM: PPC: Book3S HV: Only write DAWR[X] when handling h_set_dawr in real mode
  KVM: PPC: Book3S HV: Fix r3 corruption in h_set_dabr()
  powerpc/32: fix build failure on book3e with KVM
  powerpc/booke: fix fast syscall entry on SMP
  powerpc/32s: fix initial setup of segment registers on secondary CPU
parents 693cd8ce 50087112
...@@ -319,6 +319,13 @@ struct vm_area_struct; ...@@ -319,6 +319,13 @@ struct vm_area_struct;
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#include <asm/slice.h> #include <asm/slice.h>
/*
* Allow 30-bit DMA for very limited Broadcom wifi chips on many powerbooks.
*/
#ifdef CONFIG_PPC32
#define ARCH_ZONE_DMA_BITS 30
#else
#define ARCH_ZONE_DMA_BITS 31 #define ARCH_ZONE_DMA_BITS 31
#endif
#endif /* _ASM_POWERPC_PAGE_H */ #endif /* _ASM_POWERPC_PAGE_H */
...@@ -752,6 +752,7 @@ __secondary_start: ...@@ -752,6 +752,7 @@ __secondary_start:
stw r0,0(r3) stw r0,0(r3)
/* load up the MMU */ /* load up the MMU */
bl load_segment_registers
bl load_up_mmu bl load_up_mmu
/* ptr to phys current thread */ /* ptr to phys current thread */
......
...@@ -83,7 +83,7 @@ END_BTB_FLUSH_SECTION ...@@ -83,7 +83,7 @@ END_BTB_FLUSH_SECTION
SAVE_4GPRS(3, r11); \ SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11) SAVE_2GPRS(7, r11)
.macro SYSCALL_ENTRY trapno intno .macro SYSCALL_ENTRY trapno intno srr1
mfspr r10, SPRN_SPRG_THREAD mfspr r10, SPRN_SPRG_THREAD
#ifdef CONFIG_KVM_BOOKE_HV #ifdef CONFIG_KVM_BOOKE_HV
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
...@@ -94,7 +94,7 @@ BEGIN_FTR_SECTION ...@@ -94,7 +94,7 @@ BEGIN_FTR_SECTION
mfspr r11, SPRN_SRR1 mfspr r11, SPRN_SRR1
mtocrf 0x80, r11 /* check MSR[GS] without clobbering reg */ mtocrf 0x80, r11 /* check MSR[GS] without clobbering reg */
bf 3, 1975f bf 3, 1975f
b kvmppc_handler_BOOKE_INTERRUPT_\intno\()_SPRN_SRR1 b kvmppc_handler_\intno\()_\srr1
1975: 1975:
mr r12, r13 mr r12, r13
lwz r13, THREAD_NORMSAVE(2)(r10) lwz r13, THREAD_NORMSAVE(2)(r10)
...@@ -145,9 +145,9 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) ...@@ -145,9 +145,9 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
tophys(r11,r11) tophys(r11,r11)
addi r11,r11,global_dbcr0@l addi r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
lwz r9,TASK_CPU(r2) lwz r10, TASK_CPU(r2)
slwi r9,r9,3 slwi r10, r10, 3
add r11,r11,r9 add r11, r11, r10
#endif #endif
lwz r12,0(r11) lwz r12,0(r11)
mtspr SPRN_DBCR0,r12 mtspr SPRN_DBCR0,r12
......
...@@ -409,7 +409,7 @@ interrupt_base: ...@@ -409,7 +409,7 @@ interrupt_base:
/* System Call Interrupt */ /* System Call Interrupt */
START_EXCEPTION(SystemCall) START_EXCEPTION(SystemCall)
SYSCALL_ENTRY 0xc00 SYSCALL SYSCALL_ENTRY 0xc00 BOOKE_INTERRUPT_SYSCALL SPRN_SRR1
/* Auxiliary Processor Unavailable Interrupt */ /* Auxiliary Processor Unavailable Interrupt */
EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, \ EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, \
......
...@@ -830,6 +830,7 @@ static void flush_guest_tlb(struct kvm *kvm) ...@@ -830,6 +830,7 @@ static void flush_guest_tlb(struct kvm *kvm)
} }
} }
asm volatile("ptesync": : :"memory"); asm volatile("ptesync": : :"memory");
asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
} }
void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu, void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
......
...@@ -2500,17 +2500,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) ...@@ -2500,17 +2500,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
LOAD_REG_ADDR(r11, dawr_force_enable) LOAD_REG_ADDR(r11, dawr_force_enable)
lbz r11, 0(r11) lbz r11, 0(r11)
cmpdi r11, 0 cmpdi r11, 0
bne 3f
li r3, H_HARDWARE li r3, H_HARDWARE
beqlr blr
3:
/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */ /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
rlwimi r5, r4, 2, DAWRX_WT rlwimi r5, r4, 2, DAWRX_WT
clrrdi r4, r4, 3 clrrdi r4, r4, 3
std r4, VCPU_DAWR(r3) std r4, VCPU_DAWR(r3)
std r5, VCPU_DAWRX(r3) std r5, VCPU_DAWRX(r3)
/*
* If came in through the real mode hcall handler then it is necessary
* to write the registers since the return path won't. Otherwise it is
* sufficient to store then in the vcpu struct as they will be loaded
* next time the vcpu is run.
*/
mfmsr r6
andi. r6, r6, MSR_DR /* in real mode? */
bne 4f
mtspr SPRN_DAWR, r4 mtspr SPRN_DAWR, r4
mtspr SPRN_DAWRX, r5 mtspr SPRN_DAWRX, r5
li r3, 0 4: li r3, 0
blr blr
_GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
......
...@@ -248,7 +248,8 @@ void __init paging_init(void) ...@@ -248,7 +248,8 @@ void __init paging_init(void)
(long int)((top_of_ram - total_ram) >> 20)); (long int)((top_of_ram - total_ram) >> 20));
#ifdef CONFIG_ZONE_DMA #ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = min(max_low_pfn, 0x7fffffffUL >> PAGE_SHIFT); max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
((1UL << ARCH_ZONE_DMA_BITS) - 1) >> PAGE_SHIFT);
#endif #endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn; max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
......
...@@ -7,6 +7,7 @@ config PPC_PMAC ...@@ -7,6 +7,7 @@ config PPC_PMAC
select PPC_INDIRECT_PCI if PPC32 select PPC_INDIRECT_PCI if PPC32
select PPC_MPC106 if PPC32 select PPC_MPC106 if PPC32
select PPC_NATIVE select PPC_NATIVE
select ZONE_DMA if PPC32
default y default y
config PPC_PMAC64 config PPC_PMAC64
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment