Commit 961246b4 authored by Olivier DANET, committed by David S. Miller

[PATCH] sparc32: vm_area_struct access for old Sun SPARCs.

Commit e4c6bfd2 ("mm: rearrange vm_area_struct for fewer cache misses")
changed the layout of struct vm_area_struct, which broke several SPARC32
assembly routines that used hard-coded numerical offsets to access the
vm_mm field.

This patch defines the VMA_VM_MM constant to replace the immediate values.
Signed-off-by: Olivier DANET <odanet@caramail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent aabb9875
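
For reference, VMA_VM_MM is produced by the kernel's asm-offsets mechanism: a C file evaluates offsetof() at build time, and the result is turned into a plain constant that .S files can use, so the assembly no longer has to assume vm_mm sits at offset 0. The sketch below is a self-contained illustration of that technique only; struct example_vma and the EXAMPLE_ symbol name are invented for the demo and are not kernel code.

/*
 * Illustration of the asm-offsets trick (compile with "gcc -S"):
 * each DEFINE() leaves a "->SYMBOL <value>" marker in the generated
 * assembly, which the kernel build post-processes into #define lines
 * that assembly sources can include.
 */
#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct example_vma {			/* stand-in, not the real vm_area_struct */
	unsigned long	vm_start;
	unsigned long	vm_end;
	void		*vm_mm;		/* the field the flush routines load */
};

int main(void)
{
	/* Emits a "->EXAMPLE_VMA_VM_MM <offset>" marker into the .s output. */
	DEFINE(EXAMPLE_VMA_VM_MM, offsetof(struct example_vma, vm_mm));
	return 0;
}

With the generated constant available, the SPARC32 flush routines can load vma->vm_mm as "ld [%o0 + VMA_VM_MM], %o0" instead of relying on vm_mm being the first member of the structure, which is exactly what the hunks below do.
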
@@ -49,6 +49,8 @@ int foo(void)
 	DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
 	BLANK();
 	DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context));
+	BLANK();
+	DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
 	/* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */
 	return 0;
 }
@@ -74,7 +74,7 @@ hypersparc_flush_cache_mm_out:
 	/* The things we do for performance... */
 hypersparc_flush_cache_range:
-	ld	[%o0 + 0x0], %o0		/* XXX vma->vm_mm, GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 #ifndef CONFIG_SMP
 	ld	[%o0 + AOFF_mm_context], %g1
 	cmp	%g1, -1
@@ -163,7 +163,7 @@ hypersparc_flush_cache_range_out:
 	 */
 	/* Verified, my ass... */
 hypersparc_flush_cache_page:
-	ld	[%o0 + 0x0], %o0		/* XXX vma->vm_mm, GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	ld	[%o0 + AOFF_mm_context], %g2
 #ifndef CONFIG_SMP
 	cmp	%g2, -1
@@ -284,7 +284,7 @@ hypersparc_flush_tlb_mm_out:
 	 sta	%g5, [%g1] ASI_M_MMUREGS
 hypersparc_flush_tlb_range:
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	mov	SRMMU_CTX_REG, %g1
 	ld	[%o0 + AOFF_mm_context], %o3
 	lda	[%g1] ASI_M_MMUREGS, %g5
@@ -307,7 +307,7 @@ hypersparc_flush_tlb_range_out:
 	 sta	%g5, [%g1] ASI_M_MMUREGS
 hypersparc_flush_tlb_page:
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	mov	SRMMU_CTX_REG, %g1
 	ld	[%o0 + AOFF_mm_context], %o3
 	andn	%o1, (PAGE_SIZE - 1), %o1
@@ -105,7 +105,7 @@ swift_flush_cache_mm_out:
 	.globl	swift_flush_cache_range
 swift_flush_cache_range:
-	ld	[%o0 + 0x0], %o0	/* XXX vma->vm_mm, GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	sub	%o2, %o1, %o2
 	sethi	%hi(4096), %o3
 	cmp	%o2, %o3
@@ -116,7 +116,7 @@ swift_flush_cache_range:
 	.globl	swift_flush_cache_page
 swift_flush_cache_page:
-	ld	[%o0 + 0x0], %o0	/* XXX vma->vm_mm, GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 70:
 	ld	[%o0 + AOFF_mm_context], %g2
 	cmp	%g2, -1
@@ -219,7 +219,7 @@ swift_flush_sig_insns:
 	.globl	swift_flush_tlb_range
 	.globl	swift_flush_tlb_all
 swift_flush_tlb_range:
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 swift_flush_tlb_mm:
 	ld	[%o0 + AOFF_mm_context], %g2
 	cmp	%g2, -1
@@ -233,7 +233,7 @@ swift_flush_tlb_all_out:
 	.globl	swift_flush_tlb_page
 swift_flush_tlb_page:
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	mov	SRMMU_CTX_REG, %g1
 	ld	[%o0 + AOFF_mm_context], %o3
 	andn	%o1, (PAGE_SIZE - 1), %o1
@@ -24,7 +24,7 @@
 	/* Sliiick... */
 tsunami_flush_cache_page:
 tsunami_flush_cache_range:
-	ld	[%o0 + 0x0], %o0	/* XXX vma->vm_mm, GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 tsunami_flush_cache_mm:
 	ld	[%o0 + AOFF_mm_context], %g2
 	cmp	%g2, -1
@@ -46,7 +46,7 @@ tsunami_flush_sig_insns:
 	/* More slick stuff... */
 tsunami_flush_tlb_range:
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 tsunami_flush_tlb_mm:
 	ld	[%o0 + AOFF_mm_context], %g2
 	cmp	%g2, -1
@@ -65,7 +65,7 @@ tsunami_flush_tlb_out:
 	/* This one can be done in a fine grained manner... */
 tsunami_flush_tlb_page:
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	mov	SRMMU_CTX_REG, %g1
 	ld	[%o0 + AOFF_mm_context], %o3
 	andn	%o1, (PAGE_SIZE - 1), %o1
@@ -108,7 +108,7 @@ viking_mxcc_flush_page:
 viking_flush_cache_page:
 viking_flush_cache_range:
 #ifndef CONFIG_SMP
-	ld	[%o0 + 0x0], %o0	/* XXX vma->vm_mm, GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 #endif
 viking_flush_cache_mm:
 #ifndef CONFIG_SMP
@@ -148,7 +148,7 @@ viking_flush_tlb_mm:
 #endif
 viking_flush_tlb_range:
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	mov	SRMMU_CTX_REG, %g1
 	ld	[%o0 + AOFF_mm_context], %o3
 	lda	[%g1] ASI_M_MMUREGS, %g5
@@ -173,7 +173,7 @@ viking_flush_tlb_range:
 #endif
 viking_flush_tlb_page:
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	mov	SRMMU_CTX_REG, %g1
 	ld	[%o0 + AOFF_mm_context], %o3
 	lda	[%g1] ASI_M_MMUREGS, %g5
@@ -239,7 +239,7 @@ sun4dsmp_flush_tlb_range:
 	tst	%g5
 	bne	3f
 	 mov	SRMMU_CTX_REG, %g1
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	ld	[%o0 + AOFF_mm_context], %o3
 	lda	[%g1] ASI_M_MMUREGS, %g5
 	sethi	%hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
@@ -265,7 +265,7 @@ sun4dsmp_flush_tlb_page:
 	tst	%g5
 	bne	2f
 	 mov	SRMMU_CTX_REG, %g1
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	ld	[%o0 + AOFF_mm_context], %o3
 	lda	[%g1] ASI_M_MMUREGS, %g5
 	and	%o1, PAGE_MASK, %o1