Commit 5267f329 authored by David S. Miller

Sparc64: Real man's flush_tlb_kernel_range

parent 7f6245b9
@@ -615,6 +615,7 @@ void smp_call_function_client(int irq, struct pt_regs *regs)
 extern unsigned long xcall_flush_tlb_page;
 extern unsigned long xcall_flush_tlb_mm;
 extern unsigned long xcall_flush_tlb_range;
+extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_flush_tlb_all;
 extern unsigned long xcall_tlbcachesync;
 extern unsigned long xcall_flush_cache_all;
@@ -849,6 +850,18 @@ void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	}
 }
 
+void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	start &= PAGE_MASK;
+	end = PAGE_ALIGN(end);
+	if (start != end) {
+		smp_cross_call(&xcall_flush_tlb_kernel_range,
+			       0, start, end);
+
+		__flush_tlb_kernel_range(start, end);
+	}
+}
+
 void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
 {
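A minimal usage sketch (illustrative only; clear_kernel_ptes() is a hypothetical stand-in for the actual page-table teardown): code that unmaps a kernel virtual range clears the PTEs first and then calls flush_tlb_kernel_range() over the same range, so every CPU drops its stale translations.

	/* Hypothetical caller, not part of this commit. */
	static void unmap_kernel_range_example(unsigned long start, unsigned long end)
	{
		clear_kernel_ptes(start, end);		/* hypothetical: tear down the kernel PTEs */
		flush_tlb_kernel_range(start, end);	/* then shoot down stale TLB entries on all CPUs */
	}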
@@ -201,6 +201,22 @@ __cheetah_flush_tlb_range:
 	retl
 	/*IC19*/ wrpr	%g5, 0x0, %pstate
 
+	.align	32
+	.globl	__flush_tlb_kernel_range
+__flush_tlb_kernel_range:	/* %o0=start, %o1=end */
+	cmp	%o0, %o1
+	be,pn	%xcc, 2f
+	 sethi	%hi(PAGE_SIZE), %o4
+	sub	%o1, %o0, %o3
+	sub	%o3, %o4, %o3
+1:	stxa	%g0, [%o0 + %o3] ASI_DMMU_DEMAP
+	stxa	%g0, [%o0 + %o3] ASI_IMMU_DEMAP
+	membar	#Sync
+	brnz,pt	%o3, 1b
+	 sub	%o3, %o4, %o3
+2:	retl
+	 flush	%g6
+
 __spitfire_flush_tlb_mm_slow:
 	rdpr	%pstate, %g1
 	wrpr	%g1, PSTATE_IE, %pstate
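In C terms, __flush_tlb_kernel_range walks the page-aligned range from the last page down to the first, demapping each page from both the D-MMU and the I-MMU and issuing a membar #Sync after each pair; the trailing flush %g6 presumably flushes the instruction pipeline once the I-TLB entries are gone. A rough sketch of the loop, assuming hypothetical demap_dtlb_page()/demap_itlb_page()/membar_sync() helpers in place of the stxa ... ASI_*MMU_DEMAP stores:

	/* Illustrative C equivalent of the loop above; the helpers below are
	 * invented stand-ins for the demap stores and the memory barrier. */
	extern void demap_dtlb_page(unsigned long vaddr);
	extern void demap_itlb_page(unsigned long vaddr);
	extern void membar_sync(void);

	void flush_tlb_kernel_range_sketch(unsigned long start, unsigned long end)
	{
		long off;

		if (start == end)
			return;

		/* %o3 starts at (end - start) - PAGE_SIZE and counts down to 0. */
		for (off = (long)(end - start) - PAGE_SIZE; off >= 0; off -= PAGE_SIZE) {
			demap_dtlb_page(start + off);	/* stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP */
			demap_itlb_page(start + off);	/* stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP */
			membar_sync();			/* membar #Sync */
		}
	}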
@@ -485,6 +501,25 @@ xcall_flush_tlb_range:
 	nop
 	nop
 
+	.globl	xcall_flush_tlb_kernel_range
+xcall_flush_tlb_kernel_range:
+	sethi	%hi(PAGE_SIZE - 1), %g2
+	or	%g2, %lo(PAGE_SIZE - 1), %g2
+	andn	%g1, %g2, %g1
+	andn	%g7, %g2, %g7
+	sub	%g7, %g1, %g3
+	add	%g2, 1, %g2
+	sub	%g3, %g2, %g3
+1:	stxa	%g0, [%g1 + %g3] ASI_DMMU_DEMAP
+	stxa	%g0, [%g1 + %g3] ASI_IMMU_DEMAP
+	membar	#Sync
+	brnz,pt	%g3, 1b
+	 sub	%g3, %g2, %g3
+	retry
+	nop
+	nop
+	nop
+
 /* NOTE: This is SPECIAL!! We do etrap/rtrap however
  * we choose to deal with the "BH's run with
  * %pil==15" problem (described in asm/pil.h)
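xcall_flush_tlb_kernel_range above is the remote half of the cross call: each target CPU gets the raw start and end addresses (in %g1 and %g7, which the handler masks down to page boundaries itself) and then runs the same descending, page-by-page demap loop as __flush_tlb_kernel_range. It sticks to global registers, presumably because it runs directly in the cross-call trap handler, and returns with retry (return from trap) rather than retl; the trailing nops appear to be padding.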
@@ -13,9 +13,13 @@ extern void __flush_tlb_range(unsigned long context, unsigned long start,
 			      unsigned long pgsz, unsigned long size);
 extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r);
+extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifndef CONFIG_SMP
 
 #define flush_tlb_all()		__flush_tlb_all()
+#define flush_tlb_kernel_range(start,end) \
+	__flush_tlb_kernel_range(start,end)
 #define flush_tlb_mm(__mm) \
 do { if(CTX_VALID((__mm)->context)) \
@@ -45,6 +49,7 @@ extern void smp_flush_tlb_all(void);
 extern void smp_flush_tlb_mm(struct mm_struct *mm);
 extern void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 				unsigned long end);
+extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);
 
 #define flush_cache_all()	smp_flush_cache_all()
@@ -52,6 +57,8 @@ extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);
 #define flush_tlb_mm(mm)	smp_flush_tlb_mm(mm)
 #define flush_tlb_range(vma, start, end) \
 	smp_flush_tlb_range(vma, start, end)
+#define flush_tlb_kernel_range(start, end) \
+	smp_flush_tlb_kernel_range(start, end)
 #define flush_tlb_page(vma, page) \
 	smp_flush_tlb_page((vma)->vm_mm, page)
@@ -83,7 +90,4 @@ static __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long st
 	}
 }
 
-/* XXX For now... */
-#define flush_tlb_kernel_range(start, end) flush_tlb_all()
-
 #endif /* _SPARC64_TLBFLUSH_H */