Commit 851b3f32 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc fixes from David Miller:

 1) Fix race in sparc64 TLB shootdowns: if we pass the sibling cpus a
    reference to a data structure via pointer, we have to synchronize
    with them completing before that data can go away (a short sketch
    of the pattern follows this list).

 2) Fix cleaning of bitmaps in sparc32, from Akinobu Mita.

 3) Fix various sparc header mistakes, some of which resulted in
    userland build breakage.  From Sam Ravnborg.

 4) Kill ghost declarations and defines missed when several bits of code
    got deleted recently.
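
To make (1) concrete, here is a simplified sketch of the pattern the fix
adopts, modelled on the new smp_flush_tlb_pending() in the
arch/sparc/kernel/smp_64.c hunk below (kernel context assumed; this is a
sketch, not the verbatim patch):

	struct tlb_pending_info {
		unsigned long ctx;
		unsigned long nr;
		unsigned long *vaddrs;
	};

	static void tlb_pending_func(void *info)
	{
		struct tlb_pending_info *t = info;

		/* Runs on each sibling cpu; 'info' points into the
		 * caller's stack frame. */
		__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
	}

	void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr,
				   unsigned long *vaddrs)
	{
		struct tlb_pending_info info = {
			.ctx	= CTX_HWBITS(mm->context),
			.nr	= nr,
			.vaddrs	= vaddrs,
		};

		/* wait=1 is the point of the fix: do not return (and let
		 * 'info' and the vaddrs batch be reused) until every
		 * sibling cpu has finished running tlb_pending_func(). */
		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
				       &info, 1);

		__flush_tlb_pending(info.ctx, nr, vaddrs);
	}

For (2), the arithmetic behind the fix: the old bit_map_init() cleared the
map with memset(map, 0, size >> 3), i.e. only size/8 bytes, while bitmap
operations read and write whole unsigned longs; if size is not a multiple
of the word size, the tail of the last word was neither part of the old
allocation nor cleared.  Allocating BITS_TO_LONGS(bits) * sizeof(long)
bytes (the srmmu.c hunk) and clearing with bitmap_zero() (the bitext.c
hunk) covers that trailing edge.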

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc64: Fix race in TLB batch processing.
  sparc: use asm-generic version of types.h
  bbc_i2c: fix section mismatch warning
  sparc: use generic headers
  sparc:cleanup unused code in smp_32.h
  sparc/iommu: fix typo s/265KB/256KB/
  sparc/srmmu: clear trailing edge of bitmap properly
  sparc:remove unused declaration smp_boot_cpus()
parents c437d888 f36391d2
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -2,11 +2,16 @@
 generic-y += clkdev.h
+generic-y += cputime.h
 generic-y += div64.h
+generic-y += emergency-restart.h
 generic-y += exec.h
 generic-y += local64.h
+generic-y += mutex.h
 generic-y += irq_regs.h
 generic-y += local.h
 generic-y += module.h
+generic-y += serial.h
 generic-y += trace_clock.h
+generic-y += types.h
 generic-y += word-at-a-time.h
--- a/arch/sparc/include/asm/cputime.h
+++ /dev/null
-#ifndef __SPARC_CPUTIME_H
-#define __SPARC_CPUTIME_H
-#include <asm-generic/cputime.h>
-#endif /* __SPARC_CPUTIME_H */
--- a/arch/sparc/include/asm/emergency-restart.h
+++ /dev/null
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-#include <asm-generic/emergency-restart.h>
-#endif /* _ASM_EMERGENCY_RESTART_H */
--- a/arch/sparc/include/asm/mutex.h
+++ /dev/null
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-#include <asm-generic/mutex-dec.h>
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
 }
 
+#include <asm/tlbflush.h>
 #include <asm-generic/pgtable.h>
 
 /* We provide our own get_unmapped_area to cope with VA holes and
--- a/arch/sparc/include/asm/serial.h
+++ /dev/null
-#ifndef __SPARC_SERIAL_H
-#define __SPARC_SERIAL_H
-#define BASE_BAUD ( 1843200 / 16 )
-#endif /* __SPARC_SERIAL_H */
--- a/arch/sparc/include/asm/smp_32.h
+++ b/arch/sparc/include/asm/smp_32.h
@@ -36,7 +36,6 @@ typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
 		       unsigned long, unsigned long);
 void cpu_panic(void);
-extern void smp4m_irq_rotate(int cpu);
 /*
  * General functions that each host system must provide.
@@ -46,7 +45,6 @@ void sun4m_init_smp(void);
 void sun4d_init_smp(void);
 void smp_callin(void);
-void smp_boot_cpus(void);
 void smp_store_cpu_info(int);
 void smp_resched_interrupt(void);
@@ -107,9 +105,6 @@ extern int hard_smp_processor_id(void);
 #define raw_smp_processor_id()		(current_thread_info()->cpu)
-#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
-#define prof_counter(__cpu)		cpu_data(__cpu).counter
 void smp_setup_cpu_possible_map(void);
 #endif /* !(__ASSEMBLY__) */
--- a/arch/sparc/include/asm/switch_to_64.h
+++ b/arch/sparc/include/asm/switch_to_64.h
@@ -18,8 +18,7 @@ do {	\
  * and 2 stores in this critical code path.  -DaveM
  */
 #define switch_to(prev, next, last)					\
-do {	flush_tlb_pending();						\
-	save_and_clear_fpu();						\
+do {	save_and_clear_fpu();						\
 	/* If you are tempted to conditionalize the following */	\
 	/* so that ASI is only written if it changes, think again. */	\
 	__asm__ __volatile__("wr %%g0, %0, %%asi"			\
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -11,24 +11,40 @@
 struct tlb_batch {
 	struct mm_struct *mm;
 	unsigned long tlb_nr;
+	unsigned long active;
 	unsigned long vaddrs[TLB_BATCH_NR];
 };
 
 extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
 extern void flush_tsb_user(struct tlb_batch *tb);
+extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
 
 /* TLB flush operations. */
 
-extern void flush_tlb_pending(void);
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long vmaddr)
+{
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+}
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 
-#define flush_tlb_range(vma,start,end)	\
-	do { (void)(start); flush_tlb_pending(); } while (0)
-#define flush_tlb_page(vma,addr)	flush_tlb_pending()
-#define flush_tlb_mm(mm)		flush_tlb_pending()
+extern void flush_tlb_pending(void);
+extern void arch_enter_lazy_mmu_mode(void);
+extern void arch_leave_lazy_mmu_mode(void);
+#define arch_flush_lazy_mmu_mode()	do {} while (0)
 
 /* Local cpu only. */
 extern void __flush_tlb_all(void);
+extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
 extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifndef CONFIG_SMP
@@ -38,15 +54,24 @@ do {	flush_tsb_kernel_range(start,end); \
 	__flush_tlb_kernel_range(start,end); \
 } while (0)
 
+static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+{
+	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
+}
+
 #else /* CONFIG_SMP */
 
 extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
 
 #define flush_tlb_kernel_range(start, end) \
 do {	flush_tsb_kernel_range(start,end); \
 	smp_flush_tlb_kernel_range(start, end); \
 } while (0)
 
+#define global_flush_tlb_page(mm, vaddr) \
+	smp_flush_tlb_page(mm, vaddr)
+
 #endif /* ! CONFIG_SMP */
 
 #endif /* _SPARC64_TLBFLUSH_H */
--- a/arch/sparc/include/uapi/asm/Kbuild
+++ b/arch/sparc/include/uapi/asm/Kbuild
@@ -44,7 +44,6 @@ header-y += swab.h
 header-y += termbits.h
 header-y += termios.h
 header-y += traps.h
-header-y += types.h
 header-y += uctx.h
 header-y += unistd.h
 header-y += utrap.h
--- a/arch/sparc/include/uapi/asm/types.h
+++ /dev/null
-#ifndef _SPARC_TYPES_H
-#define _SPARC_TYPES_H
-/*
- * This file is never included by application software unless
- * explicitly requested (e.g., via linux/types.h) in which case the
- * application is Linux specific so (user-) name space pollution is
- * not a major issue. However, for interoperability, libraries still
- * need to be careful to avoid a name clashes.
- */
-#if defined(__sparc__)
-#include <asm-generic/int-ll64.h>
-#endif /* defined(__sparc__) */
-#endif /* defined(_SPARC_TYPES_H) */
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -849,7 +849,7 @@ void smp_tsb_sync(struct mm_struct *mm)
 }
 
 extern unsigned long xcall_flush_tlb_mm;
-extern unsigned long xcall_flush_tlb_pending;
+extern unsigned long xcall_flush_tlb_page;
 extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_fetch_glob_regs;
 extern unsigned long xcall_fetch_glob_pmu;
@@ -1074,23 +1074,56 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 	put_cpu();
 }
 
+struct tlb_pending_info {
+	unsigned long ctx;
+	unsigned long nr;
+	unsigned long *vaddrs;
+};
+
+static void tlb_pending_func(void *info)
+{
+	struct tlb_pending_info *t = info;
+
+	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
+}
+
 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
 {
 	u32 ctx = CTX_HWBITS(mm->context);
+	struct tlb_pending_info info;
 	int cpu = get_cpu();
 
+	info.ctx = ctx;
+	info.nr = nr;
+	info.vaddrs = vaddrs;
+
 	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
 		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
 	else
-		smp_cross_call_masked(&xcall_flush_tlb_pending,
-				      ctx, nr, (unsigned long) vaddrs,
-				      mm_cpumask(mm));
+		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
+				       &info, 1);
 
 	__flush_tlb_pending(ctx, nr, vaddrs);
 
 	put_cpu();
 }
 
+void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+{
+	unsigned long context = CTX_HWBITS(mm->context);
+	int cpu = get_cpu();
+
+	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
+	else
+		smp_cross_call_masked(&xcall_flush_tlb_page,
+				      context, vaddr, 0,
+				      mm_cpumask(mm));
+
+	__flush_tlb_page(context, vaddr);
+
+	put_cpu();
+}
+
 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	start &= PAGE_MASK;
--- a/arch/sparc/lib/bitext.c
+++ b/arch/sparc/lib/bitext.c
@@ -119,11 +119,7 @@ void bit_map_clear(struct bit_map *t, int offset, int len)
 
 void bit_map_init(struct bit_map *t, unsigned long *map, int size)
 {
-	if ((size & 07) != 0)
-		BUG();
-	memset(map, 0, size>>3);
-
+	bitmap_zero(map, size);
 	memset(t, 0, sizeof *t);
 	spin_lock_init(&t->lock);
 	t->map = map;
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -34,7 +34,7 @@
 #define IOMMU_RNGE	IOMMU_RNGE_256MB
 #define IOMMU_START	0xF0000000
 #define IOMMU_WINSIZE	(256*1024*1024U)
-#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 265KB */
+#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
 #define IOMMU_ORDER	6				/* 4096 * (1<<6) */
 
 /* srmmu.c */
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -280,7 +280,9 @@ static void __init srmmu_nocache_init(void)
 		SRMMU_NOCACHE_ALIGN_MAX, 0UL);
 	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
 
-	srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
+	srmmu_nocache_bitmap =
+		__alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+				SMP_CACHE_BYTES, 0UL);
 	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
 
 	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
 void flush_tlb_pending(void)
 {
 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
+	struct mm_struct *mm = tb->mm;
 
-	if (tb->tlb_nr) {
-		flush_tsb_user(tb);
+	if (!tb->tlb_nr)
+		goto out;
 
-		if (CTX_VALID(tb->mm->context)) {
+	flush_tsb_user(tb);
+
+	if (CTX_VALID(mm->context)) {
+		if (tb->tlb_nr == 1) {
+			global_flush_tlb_page(mm, tb->vaddrs[0]);
+		} else {
 #ifdef CONFIG_SMP
 			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
 					      &tb->vaddrs[0]);
@@ -37,12 +43,30 @@ void flush_tlb_pending(void)
 					    tb->tlb_nr, &tb->vaddrs[0]);
 #endif
 		}
-		tb->tlb_nr = 0;
 	}
 
+	tb->tlb_nr = 0;
+
+out:
 	put_cpu_var(tlb_batch);
 }
 
+void arch_enter_lazy_mmu_mode(void)
+{
+	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+
+	tb->active = 1;
+}
+
+void arch_leave_lazy_mmu_mode(void)
+{
+	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+
+	if (tb->tlb_nr)
+		flush_tlb_pending();
+	tb->active = 0;
+}
+
 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 			      bool exec)
 {
@@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 		nr = 0;
 	}
 
+	if (!tb->active) {
+		global_flush_tlb_page(mm, vaddr);
+		flush_tsb_user_page(mm, vaddr);
+		return;
+	}
+
 	if (nr == 0)
 		tb->mm = mm;
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -7,11 +7,10 @@
 #include <linux/preempt.h>
 #include <linux/slab.h>
 #include <asm/page.h>
-#include <asm/tlbflush.h>
-#include <asm/tlb.h>
-#include <asm/mmu_context.h>
 #include <asm/pgtable.h>
+#include <asm/mmu_context.h>
 #include <asm/tsb.h>
+#include <asm/tlb.h>
 #include <asm/oplib.h>
 
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
@@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	}
 }
 
-static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
-			    unsigned long tsb, unsigned long nentries)
+static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
+				  unsigned long hash_shift,
+				  unsigned long nentries)
 {
-	unsigned long i;
+	unsigned long tag, ent, hash;
 
-	for (i = 0; i < tb->tlb_nr; i++) {
-		unsigned long v = tb->vaddrs[i];
-		unsigned long tag, ent, hash;
+	v &= ~0x1UL;
+	hash = tsb_hash(v, hash_shift, nentries);
+	ent = tsb + (hash * sizeof(struct tsb));
+	tag = (v >> 22UL);
 
-		v &= ~0x1UL;
+	tsb_flush(ent, tag);
+}
 
-		hash = tsb_hash(v, hash_shift, nentries);
-		ent = tsb + (hash * sizeof(struct tsb));
-		tag = (v >> 22UL);
+static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+			    unsigned long tsb, unsigned long nentries)
+{
+	unsigned long i;
 
-		tsb_flush(ent, tag);
-	}
+	for (i = 0; i < tb->tlb_nr; i++)
+		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
 }
 
 void flush_tsb_user(struct tlb_batch *tb)
@@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb)
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
+{
+	unsigned long nentries, base, flags;
+
+	spin_lock_irqsave(&mm->context.lock, flags);
+
+	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+		base = __pa(base);
+	__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
+	}
+#endif
+
+	spin_unlock_irqrestore(&mm->context.lock, flags);
+}
+
 #define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
 #define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -52,6 +52,33 @@ __flush_tlb_mm:		/* 18 insns */
 	nop
 	nop
 
+	.align		32
+	.globl		__flush_tlb_page
+__flush_tlb_page:	/* 22 insns */
+	/* %o0 = context, %o1 = vaddr */
+	rdpr		%pstate, %g7
+	andn		%g7, PSTATE_IE, %g2
+	wrpr		%g2, %pstate
+	mov		SECONDARY_CONTEXT, %o4
+	ldxa		[%o4] ASI_DMMU, %g2
+	stxa		%o0, [%o4] ASI_DMMU
+	andcc		%o1, 1, %g0
+	andn		%o1, 1, %o3
+	be,pn		%icc, 1f
+	 or		%o3, 0x10, %o3
+	stxa		%g0, [%o3] ASI_IMMU_DEMAP
+1:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
+	membar		#Sync
+	stxa		%g2, [%o4] ASI_DMMU
+	sethi		%hi(KERNBASE), %o4
+	flush		%o4
+	retl
+	 wrpr		%g7, 0x0, %pstate
+	nop
+	nop
+	nop
+	nop
+
 	.align		32
 	.globl		__flush_tlb_pending
 __flush_tlb_pending:	/* 26 insns */
@@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */
 	retl
 	 wrpr		%g7, 0x0, %pstate
 
+__cheetah_flush_tlb_page:	/* 22 insns */
+	/* %o0 = context, %o1 = vaddr */
+	rdpr		%pstate, %g7
+	andn		%g7, PSTATE_IE, %g2
+	wrpr		%g2, 0x0, %pstate
+	wrpr		%g0, 1, %tl
+	mov		PRIMARY_CONTEXT, %o4
+	ldxa		[%o4] ASI_DMMU, %g2
+	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
+	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
+	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
+	stxa		%o0, [%o4] ASI_DMMU
+	andcc		%o1, 1, %g0
+	be,pn		%icc, 1f
+	 andn		%o1, 1, %o3
+	stxa		%g0, [%o3] ASI_IMMU_DEMAP
+1:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
+	membar		#Sync
+	stxa		%g2, [%o4] ASI_DMMU
+	sethi		%hi(KERNBASE), %o4
+	flush		%o4
+	wrpr		%g0, 0, %tl
+	retl
+	 wrpr		%g7, 0x0, %pstate
+
 __cheetah_flush_tlb_pending:	/* 27 insns */
 	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
 	rdpr		%pstate, %g7
@@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */
 	retl
 	 nop
 
+__hypervisor_flush_tlb_page: /* 11 insns */
+	/* %o0 = context, %o1 = vaddr */
+	mov		%o0, %g2
+	mov		%o1, %o0	/* ARG0: vaddr + IMMU-bit */
+	mov		%g2, %o1	/* ARG1: mmu context */
+	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
+	srlx		%o0, PAGE_SHIFT, %o0
+	sllx		%o0, PAGE_SHIFT, %o0
+	ta		HV_MMU_UNMAP_ADDR_TRAP
+	brnz,pn		%o0, __hypervisor_tlb_tl0_error
+	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
+	retl
+	 nop
+
 __hypervisor_flush_tlb_pending: /* 16 insns */
 	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
 	sllx		%o1, 3, %g1
@@ -339,6 +405,13 @@ cheetah_patch_cachetlbops:
 	call		tlb_patch_one
 	 mov		19, %o2
 
+	sethi		%hi(__flush_tlb_page), %o0
+	or		%o0, %lo(__flush_tlb_page), %o0
+	sethi		%hi(__cheetah_flush_tlb_page), %o1
+	or		%o1, %lo(__cheetah_flush_tlb_page), %o1
+	call		tlb_patch_one
+	 mov		22, %o2
+
 	sethi		%hi(__flush_tlb_pending), %o0
 	or		%o0, %lo(__flush_tlb_pending), %o0
 	sethi		%hi(__cheetah_flush_tlb_pending), %o1
@@ -397,10 +470,9 @@ xcall_flush_tlb_mm:	/* 21 insns */
 	nop
 	nop
 
-	.globl		xcall_flush_tlb_pending
-xcall_flush_tlb_pending:	/* 21 insns */
-	/* %g5=context, %g1=nr, %g7=vaddrs[] */
-	sllx		%g1, 3, %g1
+	.globl		xcall_flush_tlb_page
+xcall_flush_tlb_page:	/* 17 insns */
+	/* %g5=context, %g1=vaddr */
 	mov		PRIMARY_CONTEXT, %g4
 	ldxa		[%g4] ASI_DMMU, %g2
 	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */
 	or		%g5, %g4, %g5
 	mov		PRIMARY_CONTEXT, %g4
 	stxa		%g5, [%g4] ASI_DMMU
-1:	sub		%g1, (1 << 3), %g1
-	ldx		[%g7 + %g1], %g5
-	andcc		%g5, 0x1, %g0
+	andcc		%g1, 0x1, %g0
 	be,pn		%icc, 2f
-	 andn		%g5, 0x1, %g5
+	 andn		%g1, 0x1, %g5
 	stxa		%g0, [%g5] ASI_IMMU_DEMAP
 2:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
 	membar		#Sync
-	brnz,pt		%g1, 1b
-	 nop
 	stxa		%g2, [%g4] ASI_DMMU
 	retry
 	nop
+	nop
 
 	.globl		xcall_flush_tlb_kernel_range
 xcall_flush_tlb_kernel_range:	/* 25 insns */
@@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
 	membar		#Sync
 	retry
 
-	.globl		__hypervisor_xcall_flush_tlb_pending
-__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
-	/* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
-	sllx		%g1, 3, %g1
+	.globl		__hypervisor_xcall_flush_tlb_page
+__hypervisor_xcall_flush_tlb_page: /* 17 insns */
+	/* %g5=ctx, %g1=vaddr */
 	mov		%o0, %g2
 	mov		%o1, %g3
 	mov		%o2, %g4
-1:	sub		%g1, (1 << 3), %g1
-	ldx		[%g7 + %g1], %o0	/* ARG0: virtual address */
+	mov		%g1, %o0		/* ARG0: virtual address */
 	mov		%g5, %o1		/* ARG1: mmu context */
 	mov		HV_MMU_ALL, %o2		/* ARG2: flags */
 	srlx		%o0, PAGE_SHIFT, %o0
@@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */
 	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
 	brnz,a,pn	%o0, __hypervisor_tlb_xcall_error
 	 mov		%o0, %g5
-	brnz,pt		%g1, 1b
-	 nop
 	mov		%g2, %o0
 	mov		%g3, %o1
 	mov		%g4, %o2
@@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops:
 	call		tlb_patch_one
 	 mov		10, %o2
 
+	sethi		%hi(__flush_tlb_page), %o0
+	or		%o0, %lo(__flush_tlb_page), %o0
+	sethi		%hi(__hypervisor_flush_tlb_page), %o1
+	or		%o1, %lo(__hypervisor_flush_tlb_page), %o1
+	call		tlb_patch_one
+	 mov		11, %o2
+
 	sethi		%hi(__flush_tlb_pending), %o0
 	or		%o0, %lo(__flush_tlb_pending), %o0
 	sethi		%hi(__hypervisor_flush_tlb_pending), %o1
@@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops:
 	call		tlb_patch_one
 	 mov		21, %o2
 
-	sethi		%hi(xcall_flush_tlb_pending), %o0
-	or		%o0, %lo(xcall_flush_tlb_pending), %o0
-	sethi		%hi(__hypervisor_xcall_flush_tlb_pending), %o1
-	or		%o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
+	sethi		%hi(xcall_flush_tlb_page), %o0
+	or		%o0, %lo(xcall_flush_tlb_page), %o0
+	sethi		%hi(__hypervisor_xcall_flush_tlb_page), %o1
+	or		%o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
 	call		tlb_patch_one
-	 mov		21, %o2
+	 mov		17, %o2
 
 	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
 	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -282,7 +282,7 @@ static irqreturn_t bbc_i2c_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void __init reset_one_i2c(struct bbc_i2c_bus *bp)
+static void reset_one_i2c(struct bbc_i2c_bus *bp)
 {
 	writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0);
 	writeb(bp->own, bp->i2c_control_regs + 0x1);
@@ -291,7 +291,7 @@ static void __init reset_one_i2c(struct bbc_i2c_bus *bp)
 	writeb(I2C_PCF_IDLE, bp->i2c_control_regs + 0x0);
 }
 
-static struct bbc_i2c_bus * __init attach_one_i2c(struct platform_device *op, int index)
+static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index)
 {
 	struct bbc_i2c_bus *bp;
 	struct device_node *dp;