Commit 889be673 authored by David S. Miller

[SPARC64]: vmap/vunmap cache flushing need not do anything.

We do, however, need an I-cache flush on Spitfire chips when
doing a module load.

Happily this allows us to kill off flush_cache_all() from
sparc64.
parent e2872f56
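
For readers skimming the diff below: the new Spitfire path in module_finalize() invalidates the instruction cache by hand, since Spitfire's I-cache does not snoop stores, while Cheetah's I-cache is fully coherent and needs nothing. Here is a minimal standalone sketch of that flush, assuming the 8KB sparc64 PAGE_SIZE (so (PAGE_SIZE << 1) spans the 16KB of I-cache tag space in 32-byte line-sized steps) and the existing spitfire_put_icache_tag() helper from asm/spitfire.h:

	/* Sketch only -- mirrors the hunk added to module_finalize()
	 * below.  Constants assume a 16KB Spitfire I-cache with
	 * 32-byte lines and PAGE_SIZE == 8KB. */
	static void spitfire_invalidate_icache(void)
	{
		unsigned long va;

		/* Zero every I-cache line tag, one 32-byte line at a time. */
		for (va = 0; va < (PAGE_SIZE << 1); va += 32)
			spitfire_put_icache_tag(va, 0x0);

		/* "flush" synchronizes the instruction fetch pipeline;
		 * %g6 merely supplies a valid mapped kernel address. */
		__asm__ __volatile__("flush %g6");
	}
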
@@ -14,6 +14,9 @@
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
+#include <asm/processor.h>
+#include <asm/spitfire.h>
 static struct vm_struct * modvmlist = NULL;
 static void module_unmap(void * addr)
@@ -279,6 +282,16 @@ int module_finalize(const Elf_Ehdr *hdr,
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
 {
+	/* Cheetah's I-cache is fully coherent. */
+	if (tlb_type == spitfire) {
+		unsigned long va;
+
+		flushw_all();
+		for (va = 0; va < (PAGE_SIZE << 1); va += 32)
+			spitfire_put_icache_tag(va, 0x0);
+		__asm__ __volatile__("flush %g6");
+	}
 	return 0;
 }
...
@@ -118,7 +118,6 @@ void __init smp_callin(void)
 	inherit_locked_prom_mappings(0);
-	__flush_cache_all();
 	__flush_tlb_all();
 	smp_setup_percpu_timer();
@@ -661,7 +660,6 @@ extern unsigned long xcall_flush_tlb_range;
 extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_flush_tlb_all_spitfire;
 extern unsigned long xcall_flush_tlb_all_cheetah;
-extern unsigned long xcall_flush_cache_all_spitfire;
 extern unsigned long xcall_report_regs;
 extern unsigned long xcall_receive_signal;
 extern unsigned long xcall_flush_dcache_page_cheetah;
@@ -776,15 +774,6 @@ void smp_report_regs(void)
 	smp_cross_call(&xcall_report_regs, 0, 0, 0);
 }
-void smp_flush_cache_all(void)
-{
-	/* Cheetah need do nothing. */
-	if (tlb_type == spitfire) {
-		smp_cross_call(&xcall_flush_cache_all_spitfire, 0, 0, 0);
-		__flush_cache_all();
-	}
-}
 void smp_flush_tlb_all(void)
 {
 	if (tlb_type == spitfire)
...
@@ -1025,19 +1025,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
 	}
 }
-void __flush_cache_all(void)
-{
-	/* Cheetah should be fine here too. */
-	if (tlb_type == spitfire) {
-		unsigned long va;
-
-		flushw_all();
-		for (va = 0; va < (PAGE_SIZE << 1); va += 32)
-			spitfire_put_icache_tag(va, 0x0);
-		__asm__ __volatile__("flush %g6");
-	}
-}
 /* If not locked, zap it. */
 void __flush_tlb_all(void)
 {
...
@@ -721,20 +721,6 @@ xcall_flush_tlb_all_cheetah:
 	stxa %g0, [%g2] ASI_IMMU_DEMAP
 	retry
-	.globl xcall_flush_cache_all_spitfire
-xcall_flush_cache_all_spitfire:
-	sethi %hi(16383), %g2
-	or %g2, %lo(16383), %g2
-	clr %g3
-1:	stxa %g0, [%g3] ASI_IC_TAG
-	membar #Sync
-	add %g3, 32, %g3
-	cmp %g3, %g2
-	bleu,pt %xcc, 1b
-	 nop
-	flush %g6
-	retry
 /* These just get rescheduled to PIL vectors. */
 	.globl xcall_call_function
 xcall_call_function:
...
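
Note on the asm removal above: xcall_flush_cache_all_spitfire was the cross-call twin of the C routine deleted from __flush_cache_all(). Its loop stores zero tags via ASI_IC_TAG at 32-byte strides over the same 0..16383 range that (PAGE_SIZE << 1) covers in C, with membar #Sync ordering each store; with flush_cache_all() gone it has no caller left, so it goes too.
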
@@ -33,18 +33,6 @@ extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
 extern void __flush_dcache_range(unsigned long start, unsigned long end);
-extern void __flush_cache_all(void);
-#ifndef CONFIG_SMP
-#define flush_cache_all() __flush_cache_all()
-#else /* CONFIG_SMP */
-extern void smp_flush_cache_all(void);
-#endif /* ! CONFIG_SMP */
 #define flush_icache_page(vma, pg) do { } while(0)
 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
@@ -55,7 +43,7 @@ extern void smp_flush_cache_all(void);
 extern void flush_dcache_page(struct page *page);
-#define flush_cache_vmap(start, end) flush_cache_all()
-#define flush_cache_vunmap(start, end) flush_cache_all()
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
 #endif /* _SPARC64_CACHEFLUSH_H */
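
Net effect in cacheflush.h: flush_cache_vmap() and flush_cache_vunmap() become explicit no-ops, and every flavor of flush_cache_all() disappears. The one remaining requirement, making freshly written executable code visible where the I-cache is not coherent with stores, is now handled at the point it matters, module_finalize() on Spitfire, rather than on every vmap/vunmap.
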
@@ -70,7 +70,6 @@ extern void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
 extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);
-#define flush_cache_all() smp_flush_cache_all()
 #define flush_tlb_all() smp_flush_tlb_all()
 #define flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
 #define flush_tlb_range(vma, start, end) \
...