Commit f892c21d authored by Christophe Leroy, committed by Michael Ellerman

powerpc/32s: Make local symbols non visible in hash_low.

In hash_low.S, many named local symbols are used instead of numbered labels
to make the code easier to read. However, they don't need to be visible in
the symbol table.

To ease kprobe blacklisting of functions that run with the MMU disabled,
rename these symbols to .L-prefixed symbols so that they are hidden just
like numbered labels.
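
As an illustrative sketch of the mechanism (my_func, .Lmy_loop and
old_style_loop below are hypothetical labels, not taken from hash_low.S):
with the GNU assembler, a symbol whose name starts with ".L" is treated as
an assembler-local label and is not emitted into the object's symbol table,
just like a numbered label, whereas an ordinary named label is kept:

	.globl	my_func
my_func:
.Lmy_loop:			/* ".L" prefix: local, not emitted to the symbol table */
	nop
	bdnz	.Lmy_loop	/* decrement CTR, branch back while non-zero (CTR assumed set by caller) */
old_style_loop:			/* plain named label: visible via nm / kallsyms */
	blr

Running nm on the assembled object lists my_func and old_style_loop but not
.Lmy_loop, so after the rename only the enclosing function symbols remain to
be handled when blacklisting them for kprobes.
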
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Acked-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/90c430d9e0f7af772a58aaeaf17bcc6321265340.1585670437.git.christophe.leroy@c-s.fr
parent a64371b5
@@ -81,7 +81,7 @@ _GLOBAL(hash_page)
rlwinm. r8,r8,0,0,20 /* extract pt base address */
#endif
#ifdef CONFIG_SMP
- beq- hash_page_out /* return if no mapping */
+ beq- .Lhash_page_out /* return if no mapping */
#else
/* XXX it seems like the 601 will give a machine fault on the
rfi if its alignment is wrong (bottom 4 bits of address are
@@ -109,11 +109,11 @@ _GLOBAL(hash_page)
#if (PTE_FLAGS_OFFSET != 0)
addi r8,r8,PTE_FLAGS_OFFSET
#endif
- retry:
+ .Lretry:
lwarx r6,0,r8 /* get linux-style pte, flag word */
andc. r5,r3,r6 /* check access & ~permission */
#ifdef CONFIG_SMP
- bne- hash_page_out /* return if access not permitted */
+ bne- .Lhash_page_out /* return if access not permitted */
#else
bnelr-
#endif
@@ -128,7 +128,7 @@ retry:
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
stwcx. r5,0,r8 /* attempt to update PTE */
- bne- retry /* retry if someone got there first */
+ bne- .Lretry /* retry if someone got there first */
mfsrin r3,r4 /* get segment reg for segment */
#ifndef CONFIG_VMAP_STACK
@@ -156,7 +156,7 @@ retry:
#endif
#ifdef CONFIG_SMP
- hash_page_out:
+ .Lhash_page_out:
eieio
lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha
li r0,0
@@ -360,7 +360,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
1: LDPTEu r6,HPTE_SIZE(r4) /* get next PTE */
CMPPTE 0,r6,r5
bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
- beq+ found_slot
+ beq+ .Lfound_slot
patch_site 0f, patch__hash_page_B
/* Search the secondary PTEG for a matching PTE */
@@ -372,7 +372,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
2: LDPTEu r6,HPTE_SIZE(r4)
CMPPTE 0,r6,r5
bdnzf 2,2b
- beq+ found_slot
+ beq+ .Lfound_slot
xori r5,r5,PTE_H /* clear H bit again */
/* Search the primary PTEG for an empty slot */
@@ -381,7 +381,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
1: LDPTEu r6,HPTE_SIZE(r4) /* get next PTE */
TST_V(r6) /* test valid bit */
bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
- beq+ found_empty
+ beq+ .Lfound_empty
/* update counter of times that the primary PTEG is full */
lis r4, (primary_pteg_full - PAGE_OFFSET)@ha
@@ -399,7 +399,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
2: LDPTEu r6,HPTE_SIZE(r4)
TST_V(r6)
bdnzf 2,2b
- beq+ found_empty
+ beq+ .Lfound_empty
xori r5,r5,PTE_H /* clear H bit again */
/*
@@ -437,9 +437,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
#ifndef CONFIG_SMP
/* Store PTE in PTEG */
- found_empty:
+ .Lfound_empty:
STPTE r5,0(r4)
- found_slot:
+ .Lfound_slot:
STPTE r8,HPTE_SIZE/2(r4)
#else /* CONFIG_SMP */
@@ -460,8 +460,8 @@ found_slot:
* We do however have to make sure that the PTE is never in an invalid
* state with the V bit set.
*/
- found_empty:
- found_slot:
+ .Lfound_empty:
+ .Lfound_slot:
CLR_V(r5,r0) /* clear V (valid) bit in PTE */
STPTE r5,0(r4)
sync