Commit a395f7a6 authored by David S. Miller, committed by Greg Kroah-Hartman

sparc: Don't leak context bits into thread->fault_address

[ Upstream commit 4f6deb8c ]

On pre-Niagara systems, we fetch the fault address on data TLB
exceptions from the TLB_TAG_ACCESS register.  But this register also
contains the context ID associated with the fault in the low 13 bits
of the register value.

This propagates into current_thread_info()->fault_address and can
cause trouble later on.

So clear the low 13 bits out of the TLB_TAG_ACCESS value in the cases
where it matters.
Reported-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 4e772c53
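
The commit message above describes stripping the context ID out of the TLB_TAG_ACCESS value before it is stored as a fault address. As a rough illustration only (not part of the patch), the C sketch below mimics what the added srlx/sllx pairs do: shifting the value down and back up by PAGE_SHIFT (13 on sparc64, matching both the 8KB base page size and the width of the context ID field) leaves a page-aligned virtual address with the low 13 context bits cleared. The helper name and sample values are hypothetical.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 13	/* sparc64 8KB base pages; also the width of the context ID field */

/* Hypothetical helper: drop the context ID, keep the page-aligned VA --
 * the same effect as the srlx/sllx instruction pairs added by the patch.
 */
static uint64_t tag_access_to_fault_address(uint64_t tag_access)
{
	return (tag_access >> PAGE_SHIFT) << PAGE_SHIFT;
}

int main(void)
{
	/* Example: a fault at VA 0x7f0000002000 taken in context 0x5a3. */
	uint64_t tag = 0x7f0000002000ULL | 0x5a3ULL;

	printf("raw TLB_TAG_ACCESS value: 0x%016llx\n", (unsigned long long)tag);
	printf("fault address:            0x%016llx\n",
	       (unsigned long long)tag_access_to_fault_address(tag));
	return 0;
}

Without this masking, the 0x5a3 context bits would land in current_thread_info()->fault_address alongside the real address, which is exactly the leak the patch fixes.
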
arch/sparc/kernel/dtlb_prot.S
@@ -25,13 +25,13 @@
 
 /* PROT ** ICACHE line 2: More real fault processing */
 	ldxa		[%g4] ASI_DMMU, %g5		! Put tagaccess in %g5
+	srlx		%g5, PAGE_SHIFT, %g5
+	sllx		%g5, PAGE_SHIFT, %g5		! Clear context ID bits
 	bgu,pn		%xcc, winfix_trampoline		! Yes, perform winfixup
 	 mov		FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
 	ba,pt		%xcc, sparc64_realfault_common	! Nope, normal fault
 	 nop
 	nop
-	nop
-	nop
 
 /* PROT ** ICACHE line 3: Unused...	*/
 	nop
arch/sparc/kernel/ktlb.S
@@ -20,6 +20,10 @@ kvmap_itlb:
 	mov		TLB_TAG_ACCESS, %g4
 	ldxa		[%g4] ASI_IMMU, %g4
 
+	/* The kernel executes in context zero, therefore we do not
+	 * need to clear the context ID bits out of %g4 here.
+	 */
+
 	/* sun4v_itlb_miss branches here with the missing virtual
 	 * address already loaded into %g4
 	 */
@@ -128,6 +132,10 @@ kvmap_dtlb:
 	mov		TLB_TAG_ACCESS, %g4
 	ldxa		[%g4] ASI_DMMU, %g4
 
+	/* The kernel executes in context zero, therefore we do not
+	 * need to clear the context ID bits out of %g4 here.
+	 */
+
 	/* sun4v_dtlb_miss branches here with the missing virtual
 	 * address already loaded into %g4
 	 */
@@ -251,6 +259,10 @@ kvmap_dtlb_longpath:
 	nop
 	.previous
 
+	/* The kernel executes in context zero, therefore we do not
+	 * need to clear the context ID bits out of %g5 here.
+	 */
+
 	be,pt	%xcc, sparc64_realfault_common
 	 mov	FAULT_CODE_DTLB, %g4
 	ba,pt	%xcc, winfix_trampoline
arch/sparc/kernel/tsb.S
@@ -29,13 +29,17 @@
  */
 tsb_miss_dtlb:
 	mov		TLB_TAG_ACCESS, %g4
+	ldxa		[%g4] ASI_DMMU, %g4
+	srlx		%g4, PAGE_SHIFT, %g4
 	ba,pt		%xcc, tsb_miss_page_table_walk
-	 ldxa		[%g4] ASI_DMMU, %g4
+	 sllx		%g4, PAGE_SHIFT, %g4
 
 tsb_miss_itlb:
 	mov		TLB_TAG_ACCESS, %g4
+	ldxa		[%g4] ASI_IMMU, %g4
+	srlx		%g4, PAGE_SHIFT, %g4
 	ba,pt		%xcc, tsb_miss_page_table_walk
-	 ldxa		[%g4] ASI_IMMU, %g4
+	 sllx		%g4, PAGE_SHIFT, %g4
 
 /* At this point we have:
  * %g1 -- PAGE_SIZE TSB entry address
@@ -284,6 +288,10 @@ tsb_do_dtlb_fault:
 	nop
 	.previous
 
+	/* Clear context ID bits. */
+	srlx		%g5, PAGE_SHIFT, %g5
+	sllx		%g5, PAGE_SHIFT, %g5
+
 	be,pt	%xcc, sparc64_realfault_common
 	 mov	FAULT_CODE_DTLB, %g4
 	ba,pt	%xcc, winfix_trampoline
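
As a final sanity check (illustration only, under the same PAGE_SHIFT == 13 assumption as above): the shift-down/shift-up pairs used throughout the patch are equivalent to masking off the low 13 bits, i.e. they keep bits 63:13 of TLB_TAG_ACCESS and zero the context ID field. The sample values below are made up.

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 13
#define CTX_MASK   ((1ULL << PAGE_SHIFT) - 1)	/* 0x1fff: context ID bits */

int main(void)
{
	/* A few representative TLB_TAG_ACCESS values: fault VA | context ID. */
	uint64_t samples[] = {
		0x0000000000402000ULL | 0x001ULL,
		0x00007f0000002000ULL | 0x5a3ULL,
		0xfffff80000404000ULL | CTX_MASK,
	};

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint64_t tag     = samples[i];
		uint64_t shifted = (tag >> PAGE_SHIFT) << PAGE_SHIFT;	/* srlx; sllx */
		uint64_t masked  = tag & ~CTX_MASK;			/* explicit mask */

		assert(shifted == masked);
	}
	return 0;
}
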