Commit 1acfd2bd authored by Linus Torvalds

Merge tag 'powerpc-6.6-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

 - Fix stale propagated yield_cpu in qspinlocks leading to lockups

 - Fix broken hugepages on some configs due to ARCH_FORCE_MAX_ORDER

 - Fix a spurious warning when copros are in use at exit time

Thanks to Nicholas Piggin, Christophe Leroy, Nysal Jan K.A, Sachin Sant,
and Shrikanth Hegde.

* tag 'powerpc-6.6-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/qspinlock: Fix stale propagated yield_cpu
  powerpc/64s/radix: Don't warn on copros in radix__tlb_flush()
  powerpc/mm: Allow ARCH_FORCE_MAX_ORDER up to 12
parents d537ae43 f9bc9bbe
arch/powerpc/Kconfig
@@ -910,7 +910,7 @@ config ARCH_FORCE_MAX_ORDER
 	default "6" if PPC32 && PPC_64K_PAGES
 	range 4 10 if PPC32 && PPC_256K_PAGES
 	default "4" if PPC32 && PPC_256K_PAGES
-	range 10 10
+	range 10 12
 	default "10"
 	help
 	  The kernel page allocator limits the size of maximal physically
...
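For context on this hunk: the buddy allocator's largest allocation is 2^MAX_ORDER base pages, so pinning MAX_ORDER at 10 breaks configurations whose hugepage size needs a larger contiguous block, which is the hugepage breakage the merge text mentions. A minimal userspace sketch of the arithmetic (the 4K base-page size is an illustrative assumption, not taken from this diff):

  /* Largest buddy-allocator block for a given MAX_ORDER;
   * assumes 4K base pages purely for illustration. */
  #include <stdio.h>

  int main(void)
  {
          const unsigned long page_size = 4096; /* assumed */

          for (int order = 10; order <= 12; order++)
                  printf("MAX_ORDER %d -> largest block %lu MB\n",
                         order, (page_size << order) >> 20);
          return 0;
  }

With these assumptions the output is 4, 8 and 16 MB: raising the ceiling from 10 to 12 lets a 16MB hugepage be satisfied by a single max-order block.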
arch/powerpc/lib/qspinlock.c
@@ -406,6 +406,9 @@ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
 	if ((yield_count & 1) == 0)
 		goto yield_prev; /* owner vcpu is running */
 
+	if (get_owner_cpu(READ_ONCE(lock->val)) != yield_cpu)
+		goto yield_prev; /* re-sample lock owner */
+
 	spin_end();
 
 	preempted = true;
...
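For context on this hunk: under paravirt spinlocks, the lock owner's CPU number (yield_cpu) is propagated down the queue of waiters so each waiter can direct its yield at the vCPU actually holding the lock. If ownership changes after the value is propagated, a waiter can keep yielding to a vCPU that no longer owns the lock and forward progress stalls; that is the lockup named in the merge text. The added lines re-sample the lock word and bail out when the propagated value has gone stale. An illustrative sketch of the check (names mirror arch/powerpc/lib/qspinlock.c, but the helper below is hypothetical, not upstream code):

  /* Hypothetical helper: is the propagated yield_cpu still the owner?
   * Re-reads lock->val because ownership may have changed since the
   * value travelled down the waiter queue. */
  static bool yield_cpu_still_owner(struct qspinlock *lock, int yield_cpu)
  {
          return get_owner_cpu(READ_ONCE(lock->val)) == yield_cpu;
  }

Spending a yield only when this holds prevents a waiter from repeatedly boosting a vCPU that released the lock long ago.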
arch/powerpc/mm/book3s64/radix_tlb.c
@@ -1212,14 +1212,7 @@ void radix__tlb_flush(struct mmu_gather *tlb)
 
 			smp_mb(); /* see radix__flush_tlb_mm */
 			exit_flush_lazy_tlbs(mm);
-			_tlbiel_pid(mm->context.id, RIC_FLUSH_ALL);
-
-			/*
-			 * It should not be possible to have coprocessors still
-			 * attached here.
-			 */
-			if (WARN_ON_ONCE(atomic_read(&mm->context.copros) > 0))
-				__flush_all_mm(mm, true);
+			__flush_all_mm(mm, true);
 
 			preempt_enable();
 		} else {
...
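For context on this hunk: the fullmm exit path previously did a local-only flush via _tlbiel_pid() and asserted that no coprocessors could still be attached, falling back to the copro-aware flush only after tripping WARN_ON_ONCE(). Since coprocessors can in fact still hold a reference at exit time, the warning fired spuriously; the fix flushes through __flush_all_mm(mm, true) unconditionally. A condensed before/after sketch (assumed shape, not the full radix__tlb_flush()):

  static void exit_time_flush(struct mm_struct *mm) /* hypothetical */
  {
          /* before:
           *   _tlbiel_pid(mm->context.id, RIC_FLUSH_ALL);
           *   if (WARN_ON_ONCE(atomic_read(&mm->context.copros) > 0))
           *           __flush_all_mm(mm, true);
           * after: always take the copro-safe path, no warning needed.
           */
          __flush_all_mm(mm, true);
  }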