Commit fdf880a6 authored by Cyril Bur, committed by Michael Ellerman

powerpc: Fix checkstop in native_hpte_clear() with lockdep

native_hpte_clear() is called in real mode from two places:
- Early in boot during htab initialisation if firmware assisted dump is
  active.
- Late in the kexec path.

In both contexts there is no need to disable interrupts as they are
already disabled. Furthermore, locking around the tlbie() is only required
for pre POWER5 hardware.

On POWER5 or newer hardware concurrent tlbie()s work as expected, while on
pre POWER5 hardware they could result in deadlock. The kexec path is only
executed at crashdump time, during which all bets are off: concurrent
tlbie()s are unlikely and taking locks is unsafe, so the best course of
action is to simply do nothing. Concurrent tlbie()s are not possible in the
first case, as secondary CPUs have not come up yet.
Signed-off-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 4108efb0
@@ -61,8 +61,13 @@ struct machdep_calls {
 					     unsigned long addr,
 					     unsigned char *hpte_slot_array,
 					     int psize, int ssize, int local);
-	/* special for kexec, to be called in real mode, linear mapping is
-	 * destroyed as well */
+	/*
+	 * Special for kexec.
+	 * To be called in real mode with interrupts disabled. No locks are
+	 * taken as such, concurrent access on pre POWER5 hardware could result
+	 * in a deadlock.
+	 * The linear mapping is destroyed as well.
+	 */
 	void		(*hpte_clear_all)(void);
 
 	void __iomem *	(*ioremap)(phys_addr_t addr, unsigned long size,
...
@@ -582,13 +582,21 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
  * be when they isi), and we are the only one left. We rely on our kernel
  * mapping being 0xC0's and the hardware ignoring those two real bits.
  *
+ * This must be called with interrupts disabled.
+ *
+ * Taking the native_tlbie_lock is unsafe here due to the possibility of
+ * lockdep being on. On pre POWER5 hardware, not taking the lock could
+ * cause deadlock. POWER5 and newer not taking the lock is fine. This only
+ * gets called during boot before secondary CPUs have come up and during
+ * crashdump and all bets are off anyway.
+ *
  * TODO: add batching support when enabled. remember, no dynamic memory here,
  * athough there is the control page available...
  */
 static void native_hpte_clear(void)
 {
 	unsigned long vpn = 0;
-	unsigned long slot, slots, flags;
+	unsigned long slot, slots;
 	struct hash_pte *hptep = htab_address;
 	unsigned long hpte_v;
 	unsigned long pteg_count;
@@ -596,13 +604,6 @@ static void native_hpte_clear(void)
 
 	pteg_count = htab_hash_mask + 1;
 
-	local_irq_save(flags);
-
-	/* we take the tlbie lock and hold it. Some hardware will
-	 * deadlock if we try to tlbie from two processors at once.
-	 */
-	raw_spin_lock(&native_tlbie_lock);
-
 	slots = pteg_count * HPTES_PER_GROUP;
 
 	for (slot = 0; slot < slots; slot++, hptep++) {
@@ -614,8 +615,8 @@ static void native_hpte_clear(void)
 		hpte_v = be64_to_cpu(hptep->v);
 
 		/*
-		 * Call __tlbie() here rather than tlbie() since we
-		 * already hold the native_tlbie_lock.
+		 * Call __tlbie() here rather than tlbie() since we can't take the
+		 * native_tlbie_lock.
 		 */
 		if (hpte_v & HPTE_V_VALID) {
 			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
@@ -625,8 +626,6 @@ static void native_hpte_clear(void)
 	}
 
 	asm volatile("eieio; tlbsync; ptesync":::"memory");
-	raw_spin_unlock(&native_tlbie_lock);
-	local_irq_restore(flags);
 }
 
 /*
...
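
For reference, a minimal sketch of how native_hpte_clear() reads after this change, reconstructed from the visible hunks above. The body of the HPTE_V_VALID branch (clearing hptep->v and issuing __tlbie()) and the psize/apsize/ssize declarations are not shown in the diff context and are assumed here for illustration only:

static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;	/* assumed, outside visible hunks */

	pteg_count = htab_hash_mask + 1;

	/*
	 * No local_irq_save() and no raw_spin_lock(&native_tlbie_lock):
	 * interrupts are already off in both callers, and taking the lock
	 * is unsafe when lockdep is enabled (see the comment added above).
	 */
	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call __tlbie() here rather than tlbie() since we can't take the
		 * native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			/* Invalidate the HPTE and its TLB entry (assumed from
			 * surrounding code, not shown in the visible hunks). */
			hptep->v = 0;
			__tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
}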