Commit 2cfd716d authored by Linus Torvalds

Merge tag 'powerpc-4.8-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull more powerpc updates from Michael Ellerman:
 "These were delayed for various reasons, so I let them sit in next a
  bit longer, rather than including them in my first pull request.

  Fixes:
   - Fix early access to cpu_spec relocation from Benjamin Herrenschmidt
   - Fix incorrect event codes in power9-event-list from Madhavan Srinivasan
   - Move register_process_table() out of ppc_md from Michael Ellerman

   Use jump_label for [cpu|mmu]_has_feature():
   - Add mmu_early_init_devtree() from Michael Ellerman
   - Move disable_radix handling into mmu_early_init_devtree() from Michael Ellerman
   - Do hash device tree scanning earlier from Michael Ellerman
   - Do radix device tree scanning earlier from Michael Ellerman
   - Do feature patching before MMU init from Michael Ellerman
   - Check features don't change after patching from Michael Ellerman
   - Make MMU_FTR_RADIX a MMU family feature from Aneesh Kumar K.V
   - Convert mmu_has_feature() to returning bool from Michael Ellerman
   - Convert cpu_has_feature() to returning bool from Michael Ellerman
   - Define radix_enabled() in one place & use static inline from Michael Ellerman
   - Add early_[cpu|mmu]_has_feature() from Michael Ellerman
   - Convert early cpu/mmu feature check to use the new helpers from Aneesh Kumar K.V
   - jump_label: Make it possible for arches to invoke jump_label_init() earlier from Kevin Hao
   - Call jump_label_init() in apply_feature_fixups() from Aneesh Kumar K.V
   - Remove mfvtb() from Kevin Hao
   - Move cpu_has_feature() to a separate file from Kevin Hao
   - Add kconfig option to use jump labels for cpu/mmu_has_feature() from Michael Ellerman
   - Add option to use jump label for cpu_has_feature() from Kevin Hao
   - Add option to use jump label for mmu_has_feature() from Kevin Hao
   - Catch usage of cpu/mmu_has_feature() before jump label init from Aneesh Kumar K.V
   - Annotate jump label assembly from Michael Ellerman

  TLB flush enhancements from Aneesh Kumar K.V:
   - radix: Implement tlb mmu gather flush efficiently
   - Add helper for finding SLBE LLP encoding
   - Use hugetlb flush functions
   - Drop multiple definition of mm_is_core_local
   - radix: Add tlb flush of THP ptes
   - radix: Rename function and drop unused arg
   - radix/hugetlb: Add helper for finding page size
   - hugetlb: Add flush_hugetlb_tlb_range
   - remove flush_tlb_page_nohash

  Add new ptrace regsets from Anshuman Khandual and Simon Guo:
   - elf: Add powerpc specific core note sections
   - Add the function flush_tmregs_to_thread
   - Enable in transaction NT_PRFPREG ptrace requests
   - Enable in transaction NT_PPC_VMX ptrace requests
   - Enable in transaction NT_PPC_VSX ptrace requests
   - Adapt gpr32_get, gpr32_set functions for transaction
   - Enable support for NT_PPC_CGPR
   - Enable support for NT_PPC_CFPR
   - Enable support for NT_PPC_CVMX
   - Enable support for NT_PPC_CVSX
   - Enable support for TM SPR state
   - Enable NT_PPC_TM_CTAR, NT_PPC_TM_CPPR, NT_PPC_TM_CDSCR
   - Enable support for NT_PPPC_TAR, NT_PPC_PPR, NT_PPC_DSCR
   - Enable support for EBB registers
   - Enable support for Performance Monitor registers"

* tag 'powerpc-4.8-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (48 commits)
  powerpc/mm: Move register_process_table() out of ppc_md
  powerpc/perf: Fix incorrect event codes in power9-event-list
  powerpc/32: Fix early access to cpu_spec relocation
  powerpc/ptrace: Enable support for Performance Monitor registers
  powerpc/ptrace: Enable support for EBB registers
  powerpc/ptrace: Enable support for NT_PPPC_TAR, NT_PPC_PPR, NT_PPC_DSCR
  powerpc/ptrace: Enable NT_PPC_TM_CTAR, NT_PPC_TM_CPPR, NT_PPC_TM_CDSCR
  powerpc/ptrace: Enable support for TM SPR state
  powerpc/ptrace: Enable support for NT_PPC_CVSX
  powerpc/ptrace: Enable support for NT_PPC_CVMX
  powerpc/ptrace: Enable support for NT_PPC_CFPR
  powerpc/ptrace: Enable support for NT_PPC_CGPR
  powerpc/ptrace: Adapt gpr32_get, gpr32_set functions for transaction
  powerpc/ptrace: Enable in transaction NT_PPC_VSX ptrace requests
  powerpc/ptrace: Enable in transaction NT_PPC_VMX ptrace requests
  powerpc/ptrace: Enable in transaction NT_PRFPREG ptrace requests
  powerpc/process: Add the function flush_tmregs_to_thread
  elf: Add powerpc specific core note sections
  powerpc/mm: remove flush_tlb_page_nohash
  powerpc/mm/hugetlb: Add flush_hugetlb_tlb_range
  ...
parents 755b20f4 eea8148c
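
The bulk of this diff converts cpu_has_feature()/mmu_has_feature() into static-key (jump label) checks that are patched once at boot. As a reading aid, here is a minimal userspace C sketch of the mask-folding fast path those helpers keep in front of the patched branch; all names and values here are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define FTR_FOO        0x1ul
#define FTR_BAR        0x2ul
#define FTRS_ALWAYS    FTR_FOO               /* present on every supported CPU */
#define FTRS_POSSIBLE  (FTR_FOO | FTR_BAR)   /* union over all supported CPUs */

static unsigned long cur_features = FTR_FOO; /* runtime-detected feature set */

static inline bool has_feature(unsigned long feature)
{
	if (FTRS_ALWAYS & feature)        /* constant-folds to "true" */
		return true;
	if (!(FTRS_POSSIBLE & feature))   /* constant-folds to "false" */
		return false;
	/* the kernel replaces this load with a boot-time-patched static branch */
	return cur_features & feature;
}

int main(void)
{
	printf("FOO=%d BAR=%d\n", has_feature(FTR_FOO), has_feature(FTR_BAR));
	return 0;
}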
@@ -60,6 +60,25 @@ config CODE_PATCHING_SELFTEST
 	depends on DEBUG_KERNEL
 	default n
 
+config JUMP_LABEL_FEATURE_CHECKS
+	bool "Enable use of jump label for cpu/mmu_has_feature()"
+	depends on JUMP_LABEL
+	default y
+	help
+	  Selecting this option enables use of jump labels for some internal
+	  feature checks. This should generate more optimal code for those
+	  checks.
+
+config JUMP_LABEL_FEATURE_CHECK_DEBUG
+	bool "Do extra check on feature fixup calls"
+	depends on DEBUG_KERNEL && JUMP_LABEL_FEATURE_CHECKS
+	default n
+	help
+	  This tries to catch incorrect usage of cpu_has_feature() and
+	  mmu_has_feature() in the code.
+
+	  If you don't know what this means, say N.
+
 config FTR_FIXUP_SELFTEST
 	bool "Run self-tests of the feature-fixup code"
 	depends on DEBUG_KERNEL
...
@@ -11,4 +11,19 @@ extern unsigned long
 radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 				 unsigned long len, unsigned long pgoff,
 				 unsigned long flags);
+
+static inline int hstate_get_psize(struct hstate *hstate)
+{
+	unsigned long shift;
+
+	shift = huge_page_shift(hstate);
+	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
+		return MMU_PAGE_2M;
+	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
+		return MMU_PAGE_1G;
+	else {
+		WARN(1, "Wrong huge page shift\n");
+		return mmu_virtual_psize;
+	}
+}
 #endif
@@ -24,6 +24,7 @@
 #include <asm/book3s/64/pgtable.h>
 #include <asm/bug.h>
 #include <asm/processor.h>
+#include <asm/cpu_has_feature.h>
 
 /*
  * SLB
@@ -190,6 +191,15 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
 	BUG();
 }
 
+static inline unsigned long get_sllp_encoding(int psize)
+{
+	unsigned long sllp;
+
+	sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
+		((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
+	return sllp;
+}
+
 #endif /* __ASSEMBLY__ */
 
 /*
...
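
The get_sllp_encoding() helper added above packs the SLB L and LP page-size bits into the 3-bit LLP value that tlbie-related code consumes. A standalone check of the bit math, assuming the conventional book3s64 constants SLB_VSID_L = 0x100 and SLB_VSID_LP = 0x030 (copied here only for illustration):

#include <assert.h>

#define SLB_VSID_L	0x100ul		/* large-page bit */
#define SLB_VSID_LP	0x030ul		/* LP selector bits */

static unsigned long sllp_encoding(unsigned long sllp)
{
	/* L ends up in bit 2, LP in bits 1:0 */
	return ((sllp & SLB_VSID_L) >> 6) | ((sllp & SLB_VSID_LP) >> 4);
}

int main(void)
{
	assert(sllp_encoding(0x000) == 0x0);	/* L=0, LP=00 */
	assert(sllp_encoding(0x110) == 0x5);	/* L=1, LP=01 */
	return 0;
}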
@@ -23,13 +23,6 @@ struct mmu_psize_def {
 };
 extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 
-#ifdef CONFIG_PPC_RADIX_MMU
-#define radix_enabled() mmu_has_feature(MMU_FTR_RADIX)
-#else
-#define radix_enabled() (0)
-#endif
-
 #endif /* __ASSEMBLY__ */
 
 /* 64-bit classic hash table MMU */
@@ -107,6 +100,9 @@ extern int mmu_vmemmap_psize;
 extern int mmu_io_psize;
 
 /* MMU initialization */
+void mmu_early_init_devtree(void);
+void hash__early_init_devtree(void);
+void radix__early_init_devtree(void);
 extern void radix_init_native(void);
 extern void hash__early_init_mmu(void);
 extern void radix__early_init_mmu(void);
@@ -132,11 +128,15 @@ extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 					      phys_addr_t first_memblock_size)
 {
-	if (radix_enabled())
+	if (early_radix_enabled())
 		return radix__setup_initial_memory_limit(first_memblock_base,
 						   first_memblock_size);
 	return hash__setup_initial_memory_limit(first_memblock_base,
 					   first_memblock_size);
 }
+
+extern int (*register_process_table)(unsigned long base, unsigned long page_size,
+				     unsigned long tbl_size);
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */
@@ -75,11 +75,6 @@ static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
 {
 }
 
-static inline void hash__flush_tlb_page_nohash(struct vm_area_struct *vma,
-					       unsigned long vmaddr)
-{
-}
-
 static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
 					 unsigned long start, unsigned long end)
 {
...
@@ -10,26 +10,32 @@ static inline int mmu_get_ap(int psize)
 	return mmu_psize_defs[psize].ap;
 }
 
+extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+					   unsigned long start, unsigned long end);
+extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
+					 unsigned long end, int psize);
+extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
+				       unsigned long start, unsigned long end);
 extern void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 				   unsigned long end);
 extern void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
 extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
-					 unsigned long ap, int nid);
 extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
+extern void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
+					      int psize);
 extern void radix__tlb_flush(struct mmu_gather *tlb);
 #ifdef CONFIG_SMP
 extern void radix__flush_tlb_mm(struct mm_struct *mm);
 extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
-				   unsigned long ap, int nid);
 extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
+extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
+					int psize);
 #else
 #define radix__flush_tlb_mm(mm)		radix__local_flush_tlb_mm(mm)
 #define radix__flush_tlb_page(vma,addr)	radix__local_flush_tlb_page(vma,addr)
-#define radix___flush_tlb_page(mm,addr,p,i)	radix___local_flush_tlb_page(mm,addr,p,i)
+#define radix__flush_tlb_page_psize(mm,addr,p)	radix__local_flush_tlb_page_psize(mm,addr,p)
 #define radix__flush_tlb_pwc(tlb, addr)	radix__local_flush_tlb_pwc(tlb, addr)
 #endif
 extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
...
@@ -7,6 +7,25 @@
 #include <asm/book3s/64/tlbflush-hash.h>
 #include <asm/book3s/64/tlbflush-radix.h>
 
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
+				       unsigned long start, unsigned long end)
+{
+	if (radix_enabled())
+		return radix__flush_pmd_tlb_range(vma, start, end);
+	return hash__flush_tlb_range(vma, start, end);
+}
+
+#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+					   unsigned long start,
+					   unsigned long end)
+{
+	if (radix_enabled())
+		return radix__flush_hugetlb_tlb_range(vma, start, end);
+	return hash__flush_tlb_range(vma, start, end);
+}
+
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
@@ -38,14 +57,6 @@ static inline void local_flush_tlb_page(struct vm_area_struct *vma,
 	return hash__local_flush_tlb_page(vma, vmaddr);
 }
 
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
-					 unsigned long vmaddr)
-{
-	if (radix_enabled())
-		return radix__flush_tlb_page(vma, vmaddr);
-	return hash__flush_tlb_page_nohash(vma, vmaddr);
-}
-
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
 	if (radix_enabled())
...
@@ -11,6 +11,7 @@
 #include <linux/mm.h>
 
 #include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
 
 /*
  * No cache flushing is required when address mappings are changed,
...
#ifndef __ASM_POWERPC_CPUFEATURES_H
#define __ASM_POWERPC_CPUFEATURES_H

#ifndef __ASSEMBLY__

#include <linux/bug.h>
#include <asm/cputable.h>

static inline bool early_cpu_has_feature(unsigned long feature)
{
	return !!((CPU_FTRS_ALWAYS & feature) ||
		  (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
}

#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
#include <linux/jump_label.h>

#define NUM_CPU_FTR_KEYS	64

extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS];

static __always_inline bool cpu_has_feature(unsigned long feature)
{
	int i;

	BUILD_BUG_ON(!__builtin_constant_p(feature));

#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
	if (!static_key_initialized) {
		printk("Warning! cpu_has_feature() used prior to jump label init!\n");
		dump_stack();
		return early_cpu_has_feature(feature);
	}
#endif

	if (CPU_FTRS_ALWAYS & feature)
		return true;

	if (!(CPU_FTRS_POSSIBLE & feature))
		return false;

	i = __builtin_ctzl(feature);
	return static_branch_likely(&cpu_feature_keys[i]);
}
#else
static inline bool cpu_has_feature(unsigned long feature)
{
	return early_cpu_has_feature(feature);
}
#endif

#endif /* __ASSEMBLY__ */
#endif /* __ASM_POWERPC_CPUFEATURES_H */
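
Note how cpu_has_feature() maps a one-hot feature constant to a static-key index with __builtin_ctzl(); the BUILD_BUG_ON enforces that the argument is a compile-time constant, so the index folds away and the whole check becomes a single patched branch. A small standalone illustration of that mapping:

#include <assert.h>

int main(void)
{
	/* a feature constant like 0x80 selects key index 7 */
	assert(__builtin_ctzl(0x1ul) == 0);
	assert(__builtin_ctzl(0x80ul) == 7);
	return 0;
}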
@@ -2,6 +2,7 @@
 #define __ASM_POWERPC_CPUTABLE_H
 
+#include <linux/types.h>
 #include <asm/asm-compat.h>
 #include <asm/feature-fixups.h>
 #include <uapi/asm/cputable.h>
@@ -122,6 +123,12 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
 extern const char *powerpc_base_platform;
 
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+extern void cpu_feature_keys_init(void);
+#else
+static inline void cpu_feature_keys_init(void) { }
+#endif
+
 /* TLB flush actions. Used as argument to cpu_spec.flush_tlb() hook */
 enum {
 	TLB_INVAL_SCOPE_GLOBAL = 0,	/* invalidate all TLBs */
@@ -576,14 +583,6 @@ enum {
 };
 #endif /* __powerpc64__ */
 
-static inline int cpu_has_feature(unsigned long feature)
-{
-	return (CPU_FTRS_ALWAYS & feature) ||
-	       (CPU_FTRS_POSSIBLE
-		& cur_cpu_spec->cpu_features
-		& feature);
-}
-
 #define HBP_NUM 1
 
 #endif /* !__ASSEMBLY__ */
...
@@ -28,6 +28,7 @@ static inline void setup_cputime_one_jiffy(void) { }
 #include <asm/div64.h>
 #include <asm/time.h>
 #include <asm/param.h>
+#include <asm/cpu_has_feature.h>
 
 typedef u64 __nocast cputime_t;
 typedef u64 __nocast cputime64_t;
...
@@ -16,6 +16,7 @@
 #include <linux/threads.h>
 
 #include <asm/ppc-opcode.h>
+#include <asm/cpu_has_feature.h>
 
 #define PPC_DBELL_MSG_BRDCAST	(0x04000000)
 #define PPC_DBELL_TYPE(x)	(((x) & 0xf) << (63-36))
...
@@ -24,6 +24,7 @@
 #include <linux/spinlock.h>
 #include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
 
 typedef struct {
 	unsigned int base;
...
@@ -147,7 +147,7 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
 {
 	pte_t pte;
 
 	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
-	flush_tlb_page(vma, addr);
+	flush_hugetlb_page(vma, addr);
 }
 
 static inline int huge_pte_none(pte_t pte)
...
@@ -22,7 +22,7 @@
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:\n\t"
-		 "nop\n\t"
+		 "nop # arch_static_branch\n\t"
		 ".pushsection __jump_table,  \"aw\"\n\t"
		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
		 ".popsection \n\t"
@@ -36,7 +36,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:\n\t"
-		 "b %l[l_yes]\n\t"
+		 "b %l[l_yes] # arch_static_branch_jump\n\t"
		 ".pushsection __jump_table,  \"aw\"\n\t"
		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
		 ".popsection \n\t"
...
@@ -181,8 +181,7 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 	switch (b_psize) {
 	case MMU_PAGE_4K:
-		sllp = ((mmu_psize_defs[a_psize].sllp & SLB_VSID_L) >> 6) |
-			((mmu_psize_defs[a_psize].sllp & SLB_VSID_LP) >> 4);
+		sllp = get_sllp_encoding(a_psize);
 		rb |= sllp << 5;	/*  AP field */
 		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
 		break;
...
@@ -219,8 +219,6 @@ struct machdep_calls {
 #ifdef CONFIG_ARCH_RANDOM
 	int (*get_random_seed)(unsigned long *v);
 #endif
-	int (*register_process_table)(unsigned long base, unsigned long page_size,
-				      unsigned long tbl_size);
 };
 
 extern void e500_idle(void);
...
@@ -13,6 +13,7 @@
 #include <asm/cputable.h>
 #include <linux/mm.h>
+#include <asm/cpu_has_feature.h>
 
 /*
  * This file is included by linux/mman.h, so we can't use calc_vm_prot_bits()
...
@@ -12,7 +12,7 @@
  */
 
 /*
- * First half is MMU families
+ * MMU families
  */
 #define MMU_FTR_HPTE_TABLE		ASM_CONST(0x00000001)
 #define MMU_FTR_TYPE_8xx		ASM_CONST(0x00000002)
@@ -21,9 +21,13 @@
 #define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
 #define MMU_FTR_TYPE_47x		ASM_CONST(0x00000020)
 
+/* Radix page table supported and enabled */
+#define MMU_FTR_TYPE_RADIX		ASM_CONST(0x00000040)
+
 /*
- * This is individual features
+ * Individual features below.
  */
+
 /*
  * We need to clear top 16 bits of va (from the remaining 64 bits) in
  * tlbie* instructions
@@ -93,11 +97,6 @@
  */
 #define MMU_FTR_1T_SEGMENT		ASM_CONST(0x40000000)
 
-/*
- * Radix page table available
- */
-#define MMU_FTR_RADIX			ASM_CONST(0x80000000)
-
 /* MMU feature bit sets for various CPUs */
 #define MMU_FTRS_DEFAULT_HPTE_ARCH_V2	\
 	MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
@@ -113,6 +112,7 @@
 #define MMU_FTRS_PA6T		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
 				MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
 #ifndef __ASSEMBLY__
+#include <linux/bug.h>
 #include <asm/cputable.h>
 
 #ifdef CONFIG_PPC_FSL_BOOK3E
@@ -131,20 +131,71 @@ enum {
 		MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
 		MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
 #ifdef CONFIG_PPC_RADIX_MMU
-		MMU_FTR_RADIX |
+		MMU_FTR_TYPE_RADIX |
 #endif
 		0,
 };
 
-static inline int mmu_has_feature(unsigned long feature)
+static inline bool early_mmu_has_feature(unsigned long feature)
 {
-	return (MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
+	return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
+}
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+#include <linux/jump_label.h>
+
+#define NUM_MMU_FTR_KEYS	32
+
+extern struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS];
+
+extern void mmu_feature_keys_init(void);
+
+static __always_inline bool mmu_has_feature(unsigned long feature)
+{
+	int i;
+
+	BUILD_BUG_ON(!__builtin_constant_p(feature));
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+	if (!static_key_initialized) {
+		printk("Warning! mmu_has_feature() used prior to jump label init!\n");
+		dump_stack();
+		return early_mmu_has_feature(feature);
+	}
+#endif
+
+	if (!(MMU_FTRS_POSSIBLE & feature))
+		return false;
+
+	i = __builtin_ctzl(feature);
+	return static_branch_likely(&mmu_feature_keys[i]);
 }
 
 static inline void mmu_clear_feature(unsigned long feature)
 {
+	int i;
+
+	i = __builtin_ctzl(feature);
 	cur_cpu_spec->mmu_features &= ~feature;
+	static_branch_disable(&mmu_feature_keys[i]);
 }
+#else
+
+static inline void mmu_feature_keys_init(void)
+{
+}
+
+static inline bool mmu_has_feature(unsigned long feature)
+{
+	return early_mmu_has_feature(feature);
+}
+
+static inline void mmu_clear_feature(unsigned long feature)
+{
+	cur_cpu_spec->mmu_features &= ~feature;
+}
+#endif /* CONFIG_JUMP_LABEL_FEATURE_CHECKS */
 
 extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
 
@@ -164,6 +215,28 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 }
 #endif /* !CONFIG_DEBUG_VM */
 
+#ifdef CONFIG_PPC_RADIX_MMU
+static inline bool radix_enabled(void)
+{
+	return mmu_has_feature(MMU_FTR_TYPE_RADIX);
+}
+
+static inline bool early_radix_enabled(void)
+{
+	return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
+}
+#else
+static inline bool radix_enabled(void)
+{
+	return false;
+}
+
+static inline bool early_radix_enabled(void)
+{
+	return false;
+}
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 /* The kernel uses the constants below to index in the page sizes array.
@@ -210,6 +283,7 @@ extern void early_init_mmu(void);
 extern void early_init_mmu_secondary(void);
 extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				       phys_addr_t first_memblock_size);
+static inline void mmu_early_init_devtree(void) { }
 #endif /* __ASSEMBLY__ */
 #endif
@@ -230,9 +304,5 @@ extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 #  include <asm/mmu-8xx.h>
 #endif
 
-#ifndef radix_enabled
-#define radix_enabled() (0)
-#endif
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MMU_H_ */
@@ -1256,15 +1256,6 @@ static inline void msr_check_and_clear(unsigned long bits)
 		__msr_check_and_clear(bits);
 }
 
-static inline unsigned long mfvtb (void)
-{
-#ifdef CONFIG_PPC_BOOK3S_64
-	if (cpu_has_feature(CPU_FTR_ARCH_207S))
-		return mfspr(SPRN_VTB);
-#endif
-	return 0;
-}
-
 #ifdef __powerpc64__
 #if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
 #define mftb()		({unsigned long rval;				\
...
@@ -75,6 +75,14 @@ static inline void disable_kernel_spe(void)
 static inline void __giveup_spe(struct task_struct *t) { }
 #endif
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+extern void flush_tmregs_to_thread(struct task_struct *);
+#else
+static inline void flush_tmregs_to_thread(struct task_struct *t)
+{
+}
+#endif
+
 static inline void clear_task_ebb(struct task_struct *t)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
...
@@ -18,6 +18,7 @@
 #include <linux/percpu.h>
 
 #include <asm/processor.h>
+#include <asm/cpu_has_feature.h>
 
 /* time.c */
 extern unsigned long tb_ticks_per_jiffy;
@@ -103,7 +104,7 @@ static inline u64 get_vtb(void)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	if (cpu_has_feature(CPU_FTR_ARCH_207S))
-		return mfvtb();
+		return mfspr(SPRN_VTB);
 #endif
 	return 0;
 }
...
@@ -46,5 +46,18 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
 #endif
 }
 
+#ifdef CONFIG_SMP
+static inline int mm_is_core_local(struct mm_struct *mm)
+{
+	return cpumask_subset(mm_cpumask(mm),
+			      topology_sibling_cpumask(smp_processor_id()));
+}
+#else
+static inline int mm_is_core_local(struct mm_struct *mm)
+{
+	return 1;
+}
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_TLB_H */
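
mm_is_core_local() above reports whether every CPU that has used the mm is a hardware sibling of the current CPU, in which case a TLB flush can stay core-local instead of being broadcast. A toy sketch of the subset test with plain bitmasks (illustrative only; the kernel operates on cpumasks via cpumask_subset()):

#include <assert.h>
#include <stdbool.h>

static bool mask_subset(unsigned long sub, unsigned long super)
{
	/* subset iff no bit of 'sub' falls outside 'super' */
	return (sub & ~super) == 0;
}

int main(void)
{
	unsigned long core_siblings = 0x3;	/* CPUs 0-1 share a core */

	assert(mask_subset(0x1, core_siblings));	/* local flush suffices */
	assert(!mask_subset(0x5, core_siblings));	/* CPU 2 used it: broadcast */
	return 0;
}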
@@ -54,7 +54,6 @@ extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 #define flush_tlb_page(vma,addr)	local_flush_tlb_page(vma,addr)
 #define __flush_tlb_page(mm,addr,p,i)	__local_flush_tlb_page(mm,addr,p,i)
 #endif
-#define flush_tlb_page_nohash(vma,addr)	flush_tlb_page(vma,addr)
 
 #elif defined(CONFIG_PPC_STD_MMU_32)
...
@@ -23,6 +23,7 @@
 #ifdef CONFIG_ALTIVEC
 
 #include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
 
 void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
 		   unsigned long *v2_in);
...
@@ -91,6 +91,11 @@
 
 #define ELF_NGREG	48	/* includes nip, msr, lr, etc. */
 #define ELF_NFPREG	33	/* includes fpscr */
+#define ELF_NVMX	34	/* includes all vector registers */
+#define ELF_NVSX	32	/* includes all VSX registers */
+#define ELF_NTMSPRREG	3	/* include tfhar, tfiar, texasr */
+#define ELF_NEBB	3	/* includes ebbrr, ebbhr, bescr */
+#define ELF_NPMU	5	/* includes siar, sdar, sier, mmcr2, mmcr0 */
 
 typedef unsigned long elf_greg_t64;
 typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG];
...
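
The new regsets sized by these ELF note constants are consumed through the PTRACE_GETREGSET/SETREGSET interface. A hedged userspace sketch reading the pre-existing NT_PRFPREG regset (layout u64 fpr[32] plus u64 fpscr, per the comments in the ptrace.c changes below); struct ppc_fp_regs is our own illustrative type, the NT_PPC_C* notes added by this series follow the same pattern with their own layouts, and error handling is trimmed for brevity:

#include <elf.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <unistd.h>

struct ppc_fp_regs {			/* mirrors the documented layout */
	uint64_t fpr[32];
	uint64_t fpscr;
};

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);		/* stop so the parent can inspect us */
		_exit(0);
	}

	waitpid(child, NULL, 0);

	struct ppc_fp_regs fp;
	struct iovec iov = { .iov_base = &fp, .iov_len = sizeof(fp) };

	if (ptrace(PTRACE_GETREGSET, child, (void *)NT_PRFPREG, &iov) == 0)
		printf("fpscr = 0x%llx\n", (unsigned long long)fp.fpscr);

	kill(child, SIGKILL);
	waitpid(child, NULL, 0);
	return 0;
}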
@@ -26,6 +26,7 @@
 #include <asm/emulated_ops.h>
 #include <asm/switch_to.h>
 #include <asm/disassemble.h>
+#include <asm/cpu_has_feature.h>
 
 struct aligninfo {
 	unsigned char len;
...
@@ -15,6 +15,7 @@
 #include <linux/threads.h>
 #include <linux/init.h>
 #include <linux/export.h>
+#include <linux/jump_label.h>
 
 #include <asm/oprofile_impl.h>
 #include <asm/cputable.h>
@@ -2224,3 +2225,39 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
 
 	return NULL;
 }
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS] = {
+			[0 ... NUM_CPU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
+};
+EXPORT_SYMBOL_GPL(cpu_feature_keys);
+
+void __init cpu_feature_keys_init(void)
+{
+	int i;
+
+	for (i = 0; i < NUM_CPU_FTR_KEYS; i++) {
+		unsigned long f = 1ul << i;
+
+		if (!(cur_cpu_spec->cpu_features & f))
+			static_branch_disable(&cpu_feature_keys[i]);
+	}
+}
+
+struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS] = {
+			[0 ... NUM_MMU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
+};
+EXPORT_SYMBOL_GPL(mmu_feature_keys);
+
+void __init mmu_feature_keys_init(void)
+{
+	int i;
+
+	for (i = 0; i < NUM_MMU_FTR_KEYS; i++) {
+		unsigned long f = 1ul << i;
+
+		if (!(cur_cpu_spec->mmu_features & f))
+			static_branch_disable(&mmu_feature_keys[i]);
+	}
+}
+#endif
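
cpu_feature_keys_init()/mmu_feature_keys_init() above start from all-true keys and disable the ones whose feature bit is absent, so every patched branch ends up matching the detected CPU. A toy model of that init loop with a plain bool array (illustrative only; the kernel patches code via static_branch_disable()):

#include <assert.h>
#include <stdbool.h>

#define NUM_KEYS 64

static bool keys[NUM_KEYS] = { [0 ... NUM_KEYS - 1] = true };

static void keys_init(unsigned long features)
{
	for (int i = 0; i < NUM_KEYS; i++)
		if (!(features & (1ul << i)))
			keys[i] = false;	/* kernel: static_branch_disable() */
}

int main(void)
{
	keys_init(0x5ul);		/* features 0 and 2 present */
	assert(keys[0] && !keys[1] && keys[2]);
	return 0;
}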
@@ -532,7 +532,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 #ifdef CONFIG_PPC_STD_MMU_64
 BEGIN_MMU_FTR_SECTION
 	b	2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 BEGIN_FTR_SECTION
 	clrrdi	r6,r8,28	/* get its ESID */
 	clrrdi	r9,r1,28	/* get current sp ESID */
...
@@ -940,7 +940,7 @@ BEGIN_MMU_FTR_SECTION
 	b	do_hash_page		/* Try to handle as hpte fault */
 MMU_FTR_SECTION_ELSE
 	b	handle_page_fault
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 
 	.align	7
 	.globl	h_data_storage_common
@@ -971,7 +971,7 @@ BEGIN_MMU_FTR_SECTION
 	b	do_hash_page		/* Try to handle as hpte fault */
 MMU_FTR_SECTION_ELSE
 	b	handle_page_fault
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 
 STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)
 
@@ -1392,7 +1392,7 @@ slb_miss_realmode:
 #ifdef CONFIG_PPC_STD_MMU_64
 BEGIN_MMU_FTR_SECTION
 	bl	slb_allocate_realmode
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX)
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
 #endif
 
 	/* All done -- return from exception. */
@@ -1406,7 +1406,7 @@ BEGIN_MMU_FTR_SECTION
 	beq-	2f
 FTR_SECTION_ELSE
 	b	2f
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 
 .machine	push
 .machine	"power4"
...
@@ -572,7 +572,7 @@ common_exit:
 
 BEGIN_MMU_FTR_SECTION
 	b	no_segments
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 	/* Restore SLB from PACA */
 	ld	r8,PACA_SLBSHADOWPTR(r13)
...
@@ -75,6 +75,7 @@
 #endif
 #define CREATE_TRACE_POINTS
 #include <asm/trace.h>
+#include <asm/cpu_has_feature.h>
 
 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
...
@@ -184,7 +184,7 @@ void setup_paca(struct paca_struct *new_paca)
 	 * if we do a GET_PACA() before the feature fixups have been
 	 * applied
 	 */
-	if (cpu_has_feature(CPU_FTR_HVMODE))
+	if (early_cpu_has_feature(CPU_FTR_HVMODE))
 		mtspr(SPRN_SPRG_HPACA, local_paca);
 #endif
 	mtspr(SPRN_SPRG_PACA, local_paca);
...
@@ -58,6 +58,7 @@
 #include <asm/code-patching.h>
 #include <asm/exec.h>
 #include <asm/livepatch.h>
+#include <asm/cpu_has_feature.h>
 
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
@@ -1073,6 +1074,26 @@ static inline void restore_sprs(struct thread_struct *old_thread,
 #endif
 }
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void flush_tmregs_to_thread(struct task_struct *tsk)
+{
+	/*
+	 * Process self tracing is not yet supported through the
+	 * ptrace interface. Ptrace generic code should have
+	 * prevented this from happening in the first place.
+	 * Warn once here with the message, if somehow it
+	 * is attempted.
+	 */
+	WARN_ONCE(tsk == current,
+		"Not expecting ptrace on self: TM regs may be incorrect\n");
+
+	/*
+	 * If task is not current, it should have been flushed
+	 * already to its thread_struct during __switch_to().
+	 */
+}
+#endif
+
 struct task_struct *__switch_to(struct task_struct *prev,
 	struct task_struct *new)
 {
...
@@ -170,7 +170,7 @@ static struct ibm_pa_feature {
 	 */
 	{CPU_FTR_TM_COMP, 0, 0,
 	 PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
-	{0, MMU_FTR_RADIX, 0, 0,		40, 0, 0},
+	{0, MMU_FTR_TYPE_RADIX, 0, 0,		40, 0, 0},
 };
 
 static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -647,14 +647,6 @@ static void __init early_reserve_mem(void)
 #endif
 }
 
-static bool disable_radix;
-static int __init parse_disable_radix(char *p)
-{
-	disable_radix = true;
-	return 0;
-}
-early_param("disable_radix", parse_disable_radix);
-
 void __init early_init_devtree(void *params)
 {
 	phys_addr_t limit;
@@ -744,11 +736,8 @@ void __init early_init_devtree(void *params)
 	 */
 	spinning_secondaries = boot_cpu_count - 1;
 #endif
 
-	/*
-	 * now fixup radix MMU mode based on kernel command line
-	 */
-	if (disable_radix)
-		cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
+	mmu_early_init_devtree();
 
 #ifdef CONFIG_PPC_POWERNV
 	/* Scan and build the list of machine check recoverable ranges */
...
@@ -64,6 +64,10 @@ struct pt_regs_offset {
 	{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
 #define REG_OFFSET_END {.name = NULL, .offset = 0}
 
+#define TVSO(f)	(offsetof(struct thread_vr_state, f))
+#define TFSO(f)	(offsetof(struct thread_fp_state, f))
+#define TSO(f)	(offsetof(struct thread_struct, f))
+
 static const struct pt_regs_offset regoffset_table[] = {
 	GPR_OFFSET_NAME(0),
 	GPR_OFFSET_NAME(1),
@@ -181,6 +185,26 @@ static int set_user_msr(struct task_struct *task, unsigned long msr)
 	return 0;
 }
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static unsigned long get_user_ckpt_msr(struct task_struct *task)
+{
+	return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
+}
+
+static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
+{
+	task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
+	task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
+	return 0;
+}
+
+static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
+{
+	task->thread.ckpt_regs.trap = trap & 0xfff0;
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_PPC64
 static int get_user_dscr(struct task_struct *task, unsigned long *data)
 {
@@ -358,6 +382,29 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
 	return ret;
 }
 
+/*
+ * When the transaction is active, 'transact_fp' holds the current running
+ * value of all FPR registers and 'fp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction. When transaction
+ * is not active 'fp_state' holds the current running state of all the FPR
+ * registers. So this function which returns the current running values of
+ * all the FPR registers, needs to know whether any transaction is active
+ * or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ *	u64	fpr[32];
+ *	u64	fpscr;
+ * };
+ *
+ * There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM
+ * which determine the final code in this function. All the combinations of
+ * these two config options are possible except the one below as transactional
+ * memory config pulls in CONFIG_VSX automatically.
+ *
+ *	!defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
+ */
 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
 		   unsigned int pos, unsigned int count,
 		   void *kbuf, void __user *ubuf)
@@ -368,14 +415,31 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
 #endif
 	flush_fp_to_thread(target);
 
-#ifdef CONFIG_VSX
+#if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
 	/* copy to local buffer then write that out */
+	if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
+		flush_altivec_to_thread(target);
+		flush_tmregs_to_thread(target);
+		for (i = 0; i < 32 ; i++)
+			buf[i] = target->thread.TS_TRANS_FPR(i);
+		buf[32] = target->thread.transact_fp.fpscr;
+	} else {
 	for (i = 0; i < 32 ; i++)
 		buf[i] = target->thread.TS_FPR(i);
 	buf[32] = target->thread.fp_state.fpscr;
+	}
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+#endif
 
-#else
+#if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
+	/* copy to local buffer then write that out */
+	for (i = 0; i < 32 ; i++)
+		buf[i] = target->thread.TS_FPR(i);
+	buf[32] = target->thread.fp_state.fpscr;
+	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+#endif
+
+#if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
 	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
 		     offsetof(struct thread_fp_state, fpr[32]));
@@ -384,6 +448,29 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
 #endif
 }
 
+/*
+ * When the transaction is active, 'transact_fp' holds the current running
+ * value of all FPR registers and 'fp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction. When transaction
+ * is not active 'fp_state' holds the current running state of all the FPR
+ * registers. So this function which sets the current running values of
+ * all the FPR registers, needs to know whether any transaction is active
+ * or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ *	u64	fpr[32];
+ *	u64	fpscr;
+ * };
+ *
+ * There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM
+ * which determine the final code in this function. All the combinations of
+ * these two config options are possible except the one below as transactional
+ * memory config pulls in CONFIG_VSX automatically.
+ *
+ *	!defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
+ */
 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 		   unsigned int pos, unsigned int count,
 		   const void *kbuf, const void __user *ubuf)
@@ -394,16 +481,38 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 #endif
 	flush_fp_to_thread(target);
 
-#ifdef CONFIG_VSX
+#if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
 	/* copy to local buffer then write that out */
 	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
 	if (i)
 		return i;
+
+	if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
+		flush_altivec_to_thread(target);
+		flush_tmregs_to_thread(target);
+		for (i = 0; i < 32 ; i++)
+			target->thread.TS_TRANS_FPR(i) = buf[i];
+		target->thread.transact_fp.fpscr = buf[32];
+	} else {
 	for (i = 0; i < 32 ; i++)
 		target->thread.TS_FPR(i) = buf[i];
 	target->thread.fp_state.fpscr = buf[32];
+	}
 	return 0;
-#else
+#endif
+
+#if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
+	/* copy to local buffer then write that out */
+	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+	if (i)
+		return i;
+	for (i = 0; i < 32 ; i++)
+		target->thread.TS_FPR(i) = buf[i];
+	target->thread.fp_state.fpscr = buf[32];
+	return 0;
+#endif
+
+#if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
 	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
 		     offsetof(struct thread_fp_state, fpr[32]));
@@ -433,10 +542,28 @@ static int vr_active(struct task_struct *target,
 	return target->thread.used_vr ? regset->n : 0;
 }
 
+/*
+ * When the transaction is active, 'transact_vr' holds the current running
+ * value of all the VMX registers and 'vr_state' holds the last checkpointed
+ * value of all the VMX registers for the current transaction to fall back
+ * on in case it aborts. When transaction is not active 'vr_state' holds
+ * the current running state of all the VMX registers. So this function which
+ * gets the current running values of all the VMX registers, needs to know
+ * whether any transaction is active or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ *	vector128	vr[32];
+ *	vector128	vscr;
+ *	vector128	vrsave;
+ * };
+ */
 static int vr_get(struct task_struct *target, const struct user_regset *regset,
 		  unsigned int pos, unsigned int count,
 		  void *kbuf, void __user *ubuf)
 {
+	struct thread_vr_state *addr;
 	int ret;
 
 	flush_altivec_to_thread(target);
@@ -444,8 +571,19 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
 	BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
 		     offsetof(struct thread_vr_state, vr[32]));
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
+		flush_fp_to_thread(target);
+		flush_tmregs_to_thread(target);
+		addr = &target->thread.transact_vr;
+	} else {
+		addr = &target->thread.vr_state;
+	}
+#else
+	addr = &target->thread.vr_state;
+#endif
 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				  &target->thread.vr_state, 0,
+				  addr, 0,
 				  33 * sizeof(vector128));
 	if (!ret) {
 		/*
@@ -456,7 +594,16 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
 			u32 word;
 		} vrsave;
 		memset(&vrsave, 0, sizeof(vrsave));
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+		if (MSR_TM_ACTIVE(target->thread.regs->msr))
+			vrsave.word = target->thread.transact_vrsave;
+		else
+			vrsave.word = target->thread.vrsave;
+#else
 		vrsave.word = target->thread.vrsave;
+#endif
+
 		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
 					  33 * sizeof(vector128), -1);
 	}
@@ -464,10 +611,28 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
 	return ret;
 }
 
+/*
+ * When the transaction is active, 'transact_vr' holds the current running
+ * value of all the VMX registers and 'vr_state' holds the last checkpointed
+ * value of all the VMX registers for the current transaction to fall back
+ * on in case it aborts. When transaction is not active 'vr_state' holds
+ * the current running state of all the VMX registers. So this function which
+ * sets the current running values of all the VMX registers, needs to know
+ * whether any transaction is active or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ *	vector128	vr[32];
+ *	vector128	vscr;
+ *	vector128	vrsave;
+ * };
+ */
 static int vr_set(struct task_struct *target, const struct user_regset *regset,
 		  unsigned int pos, unsigned int count,
 		  const void *kbuf, const void __user *ubuf)
 {
+	struct thread_vr_state *addr;
 	int ret;
 
 	flush_altivec_to_thread(target);
@@ -475,8 +640,19 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
 	BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
 		     offsetof(struct thread_vr_state, vr[32]));
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
+		flush_fp_to_thread(target);
+		flush_tmregs_to_thread(target);
+		addr = &target->thread.transact_vr;
+	} else {
+		addr = &target->thread.vr_state;
+	}
+#else
+	addr = &target->thread.vr_state;
+#endif
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				 &target->thread.vr_state, 0,
+				 addr, 0,
 				 33 * sizeof(vector128));
 	if (!ret && count > 0) {
 		/*
@@ -487,11 +663,28 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
 			u32 word;
 		} vrsave;
 		memset(&vrsave, 0, sizeof(vrsave));
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+		if (MSR_TM_ACTIVE(target->thread.regs->msr))
+			vrsave.word = target->thread.transact_vrsave;
+		else
+			vrsave.word = target->thread.vrsave;
+#else
 		vrsave.word = target->thread.vrsave;
+#endif
+
 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
 					 33 * sizeof(vector128), -1);
-		if (!ret)
+		if (!ret) {
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+			if (MSR_TM_ACTIVE(target->thread.regs->msr))
+				target->thread.transact_vrsave = vrsave.word;
+			else
+				target->thread.vrsave = vrsave.word;
+#else
 			target->thread.vrsave = vrsave.word;
+#endif
+		}
 	}
 
 	return ret;
@@ -512,6 +705,21 @@ static int vsr_active(struct task_struct *target,
 	return target->thread.used_vsr ? regset->n : 0;
 }
 
+/*
+ * When the transaction is active, 'transact_fp' holds the current running
+ * value of all FPR registers and 'fp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction. When transaction
+ * is not active 'fp_state' holds the current running state of all the FPR
+ * registers. So this function which returns the current running values of
+ * all the FPR registers, needs to know whether any transaction is active
+ * or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ *	u64	vsx[32];
+ * };
+ */
 static int vsr_get(struct task_struct *target, const struct user_regset *regset,
 		   unsigned int pos, unsigned int count,
 		   void *kbuf, void __user *ubuf)
@@ -519,16 +727,47 @@ static int vsr_get(struct task_struct *target, const struct user_regset *regset,
 	u64 buf[32];
 	int ret, i;
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	flush_fp_to_thread(target);
+	flush_altivec_to_thread(target);
+	flush_tmregs_to_thread(target);
+#endif
 	flush_vsx_to_thread(target);
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
+		for (i = 0; i < 32 ; i++)
+			buf[i] = target->thread.
+				transact_fp.fpr[i][TS_VSRLOWOFFSET];
+	} else {
+		for (i = 0; i < 32 ; i++)
+			buf[i] = target->thread.
+				fp_state.fpr[i][TS_VSRLOWOFFSET];
+	}
+#else
 	for (i = 0; i < 32 ; i++)
 		buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
+#endif
 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 				  buf, 0, 32 * sizeof(double));
 
 	return ret;
 }
/*
* When the transaction is active, 'transact_fp' holds the current running
* value of all FPR registers and 'fp_state' holds the last checkpointed
* value of all FPR registers for the current transaction. When transaction
* is not active 'fp_state' holds the current running state of all the FPR
* registers. So this function which sets the current running values of all
* the FPR registers, needs to know whether any transaction is active or not.
*
* Userspace interface buffer layout:
*
* struct data {
* u64 vsx[32];
* };
*/
static int vsr_set(struct task_struct *target, const struct user_regset *regset, static int vsr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count, unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf) const void *kbuf, const void __user *ubuf)
...@@ -536,12 +775,30 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset, ...@@ -536,12 +775,30 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
u64 buf[32]; u64 buf[32];
int ret,i; int ret,i;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
#endif
flush_vsx_to_thread(target); flush_vsx_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double)); buf, 0, 32 * sizeof(double));
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
for (i = 0; i < 32 ; i++)
target->thread.transact_fp.
fpr[i][TS_VSRLOWOFFSET] = buf[i];
} else {
for (i = 0; i < 32 ; i++)
target->thread.fp_state.
fpr[i][TS_VSRLOWOFFSET] = buf[i];
}
#else
for (i = 0; i < 32 ; i++)
target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
#endif
return ret;
@@ -614,137 +871,1271 @@ static int evr_set(struct task_struct *target, const struct user_regset *regset,
}
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/**
* tm_cgpr_active - get active number of registers in CGPR
* @target: The target task.
* @regset: The user regset structure.
*
* This function checks for the active number of available
* registers in transaction checkpointed GPR category.
*/
static int tm_cgpr_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return 0;
return regset->n;
}
/**
* tm_cgpr_get - get CGPR registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy from.
* @ubuf: User buffer to copy into.
*
* This function gets transaction checkpointed GPR registers.
*
* When the transaction is active, 'ckpt_regs' holds all the checkpointed
* GPR register values for the current transaction to fall back on if it
* aborts in between. This function gets those checkpointed GPR registers.
* The userspace interface buffer layout is as follows.
*
* struct data {
* struct pt_regs ckpt_regs;
* };
*/
static int tm_cgpr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.ckpt_regs,
0, offsetof(struct pt_regs, msr));
if (!ret) {
unsigned long msr = get_user_ckpt_msr(target);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
offsetof(struct pt_regs, msr),
offsetof(struct pt_regs, msr) +
sizeof(msr));
}
BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
offsetof(struct pt_regs, msr) + sizeof(long));
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.ckpt_regs.orig_gpr3,
offsetof(struct pt_regs, orig_gpr3),
sizeof(struct pt_regs));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
/**
* tm_cgpr_set - set the CGPR registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy into.
* @ubuf: User buffer to copy from.
*
* This function sets in transaction checkpointed GPR registers.
*
* When the transaction is active, 'ckpt_regs' holds the checkpointed
* GPR register values for the current transaction to fall back on if it
* aborts in between. This function sets those checkpointed GPR registers.
* The userspace interface buffer layout is as follows.
*
* struct data {
* struct pt_regs ckpt_regs;
* };
*/
static int tm_cgpr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned long reg;
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.ckpt_regs,
0, PT_MSR * sizeof(reg));
if (!ret && count > 0) {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
PT_MSR * sizeof(reg),
(PT_MSR + 1) * sizeof(reg));
if (!ret)
ret = set_user_ckpt_msr(target, reg);
}
BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
offsetof(struct pt_regs, msr) + sizeof(long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.ckpt_regs.orig_gpr3,
PT_ORIG_R3 * sizeof(reg),
(PT_MAX_PUT_REG + 1) * sizeof(reg));
if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
ret = user_regset_copyin_ignore(
&pos, &count, &kbuf, &ubuf,
(PT_MAX_PUT_REG + 1) * sizeof(reg),
PT_TRAP * sizeof(reg));
if (!ret && count > 0) {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
PT_TRAP * sizeof(reg),
(PT_TRAP + 1) * sizeof(reg));
if (!ret)
ret = set_user_ckpt_trap(target, reg);
}
if (!ret)
ret = user_regset_copyin_ignore(
&pos, &count, &kbuf, &ubuf,
(PT_TRAP + 1) * sizeof(reg), -1);
return ret;
}
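A hypothetical userspace counterpart, to make the error contract above concrete: the call fails with ENODEV when the CPU lacks TM, with ENODATA when no transaction is active, and otherwise returns a pt_regs-shaped blob of checkpointed GPRs. NT_PPC_TM_CGPR is one of the note types this series adds to the uapi ELF header; this sketch is illustrative, not part of the patch.

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h>         /* userspace view of struct pt_regs */
#include <linux/elf.h>          /* NT_PPC_TM_CGPR, added by this series */

static long read_ckpt_gprs(pid_t pid, struct pt_regs *ckpt)
{
        struct iovec iov = {
                .iov_base = ckpt,
                .iov_len = sizeof(*ckpt),
        };

        /* -1/ENODEV: no TM hardware; -1/ENODATA: no active transaction */
        return ptrace(PTRACE_GETREGSET, pid, (void *)(long)NT_PPC_TM_CGPR, &iov);
}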
/**
* tm_cfpr_active - get active number of registers in CFPR
* @target: The target task.
* @regset: The user regset structure.
*
* This function checks for the active number of available
* registers in transaction checkpointed FPR category.
*/
static int tm_cfpr_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return 0;
return regset->n;
}
/**
* tm_cfpr_get - get CFPR registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy from.
* @ubuf: User buffer to copy into.
*
* This function gets in transaction checkpointed FPR registers.
*
* When the transaction is active 'fp_state' holds the checkpointed
* values for the current transaction to fall back on if it aborts
* in between. This function gets those checkpointed FPR registers.
* The userspace interface buffer layout is as follows.
*
* struct data {
* u64 fpr[32];
* u64 fpscr;
* };
*/
static int tm_cfpr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
u64 buf[33];
int i;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
/* copy to local buffer then write that out */
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.TS_FPR(i);
buf[32] = target->thread.fp_state.fpscr;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
}
/**
* tm_cfpr_set - set CFPR registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy into.
* @ubuf: User buffer to copy from.
*
* This function sets in transaction checkpointed FPR registers.
*
* When the transaction is active 'fp_state' holds the checkpointed
* FPR register values for the current transaction to fall back on
* if it aborts in between. This function sets these checkpointed
* FPR registers. The userspace interface buffer layout is as follows.
*
* struct data {
* u64 fpr[32];
* u64 fpscr;
* };
*/
static int tm_cfpr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
u64 buf[33];
int i;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
return i;
for (i = 0; i < 32 ; i++)
target->thread.TS_FPR(i) = buf[i];
target->thread.fp_state.fpscr = buf[32];
return 0;
}
/**
* tm_cvmx_active - get active number of registers in CVMX
* @target: The target task.
* @regset: The user regset structure.
*
* This function checks for the active number of available
* registers in checkpointed VMX category.
*/
static int tm_cvmx_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return 0;
return regset->n;
}
/**
* tm_cvmx_get - get CVMX registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy from.
* @ubuf: User buffer to copy into.
*
* This function gets in transaction checkpointed VMX registers.
*
* When the transaction is active 'vr_state' and 'vrsave' hold
* the checkpointed values for the current transaction to fall
* back on if it aborts in between. The userspace interface buffer
* layout is as follows.
*
* struct data {
* vector128 vr[32];
* vector128 vscr;
* vector128 vrsave;
* };
*/
static int tm_cvmx_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
/* Flush the state */
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.vr_state, 0,
33 * sizeof(vector128));
if (!ret) {
/*
* Copy out only the low-order word of vrsave.
*/
union {
elf_vrreg_t reg;
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
vrsave.word = target->thread.vrsave;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
}
return ret;
}
/**
* tm_cvmx_set - set CVMX registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy into.
* @ubuf: User buffer to copy from.
*
* This function sets in transaction checkpointed VMX registers.
*
* When the transaction is active 'vr_state' and 'vrsave' hold
* the checkpointed values for the current transaction to fall
* back on if it aborts in between. The userspace interface buffer
* layout is as follows.
*
* struct data {
* vector128 vr[32];
* vector128 vscr;
* vector128 vrsave;
* };
*/
static int tm_cvmx_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.vr_state, 0,
33 * sizeof(vector128));
if (!ret && count > 0) {
/*
* We use only the low-order word of vrsave.
*/
union {
elf_vrreg_t reg;
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
vrsave.word = target->thread.vrsave;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
if (!ret)
target->thread.vrsave = vrsave.word;
}
return ret;
}
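To illustrate the layout documented above: 34 vector-sized slots, where the VSCR occupies a full vector128 slot and only one 32-bit word of the vrsave slot is meaningful, per tm_cvmx_get(). A sketch with an illustrative struct name, not part of the patch:

#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <linux/elf.h>          /* NT_PPC_TM_CVMX, added by this series */

struct tm_cvmx {                /* mirrors the documented layout */
        uint8_t vr[32][16];     /* checkpointed VR0..VR31 */
        uint8_t vscr[16];       /* VSCR, padded to a full vector slot */
        uint8_t vrsave[16];     /* only one 32-bit word is used */
};

static long read_ckpt_vmx(pid_t pid, struct tm_cvmx *vmx)
{
        struct iovec iov = { .iov_base = vmx, .iov_len = sizeof(*vmx) };

        return ptrace(PTRACE_GETREGSET, pid, (void *)(long)NT_PPC_TM_CVMX, &iov);
}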
/**
* tm_cvsx_active - get active number of registers in CVSX
* @target: The target task.
* @regset: The user regset structure.
*
* This function checks for the active number of available
* registers in transaction checkpointed VSX category.
*/
static int tm_cvsx_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return 0;
flush_vsx_to_thread(target);
return target->thread.used_vsr ? regset->n : 0;
}
/**
* tm_cvsx_get - get CVSX registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy from.
* @ubuf: User buffer to copy into.
*
* This function gets in transaction checkpointed VSX registers.
*
* When the transaction is active 'fp_state' holds the checkpointed
* values for the current transaction to fall back on if it aborts
* in between. This function gets those checkpointed VSX registers.
* The userspace interface buffer layout is as follows.
*
* struct data {
* u64 vsx[32];
* };
*/
static int tm_cvsx_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
u64 buf[32];
int ret, i;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
/* Flush the state */
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
flush_vsx_to_thread(target);
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
return ret;
}
/**
* tm_cvsx_set - set CVSX registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy into.
* @ubuf: User buffer to copy from.
*
* This function sets in transaction checkpointed VSX registers.
*
* When the transaction is active 'fp_state' holds the checkpointed
* VSX register values for the current transaction to fall back on
* if it aborts in between. This function sets these checkpointed
* VSX registers. The userspace interface buffer layout is as follows.
*
* struct data {
* u64 vsx[32];
* };
*/
static int tm_cvsx_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
u64 buf[32];
int ret, i;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
/* Flush the state */
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
flush_vsx_to_thread(target);
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
if (!ret)
for (i = 0; i < 32 ; i++)
target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return ret;
}
/**
* tm_spr_active - get active number of registers in TM SPR
* @target: The target task.
* @regset: The user regset structure.
*
* This function checks the active number of available
* registers in the transactional memory SPR category.
*/
static int tm_spr_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
return regset->n;
}
/**
* tm_spr_get - get the TM related SPR registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy from.
* @ubuf: User buffer to copy into.
*
* This function gets transactional memory related SPR registers.
* The userspace interface buffer layout is as follows.
*
* struct {
* u64 tm_tfhar;
* u64 tm_texasr;
* u64 tm_tfiar;
* };
*/
static int tm_spr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
/* Build tests */
BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
/* Flush the states */
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
/* TFHAR register */
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_tfhar, 0, sizeof(u64));
/* TEXASR register */
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_texasr, sizeof(u64),
2 * sizeof(u64));
/* TFIAR register */
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_tfiar,
2 * sizeof(u64), 3 * sizeof(u64));
return ret;
}
/**
* tm_spr_set - set the TM related SPR registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy into.
* @ubuf: User buffer to copy from.
*
* This function sets transactional memory related SPR registers.
* The userspace interface buffer layout is as follows.
*
* struct {
* u64 tm_tfhar;
* u64 tm_texasr;
* u64 tm_tfiar;
* };
*/
static int tm_spr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
/* Build tests */
BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
/* Flush the states */
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_tmregs_to_thread(target);
/* TFHAR register */
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_tfhar, 0, sizeof(u64));
/* TEXASR register */
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_texasr, sizeof(u64),
2 * sizeof(u64));
/* TFIAR register */
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_tfiar,
2 * sizeof(u64), 3 * sizeof(u64));
return ret;
}
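The BUILD_BUG_ONs above pin the three SPRs as consecutive u64s in thread_struct, so userspace can read them as a single block; an illustrative sketch (struct name is hypothetical, not part of the patch):

#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <linux/elf.h>          /* NT_PPC_TM_SPR, added by this series */

struct tm_sprs {                /* mirrors the documented layout */
        uint64_t tfhar;         /* TM failure handler address */
        uint64_t texasr;        /* TM exception and summary register */
        uint64_t tfiar;         /* TM failure instruction address */
};

static long read_tm_sprs(pid_t pid, struct tm_sprs *sprs)
{
        struct iovec iov = { .iov_base = sprs, .iov_len = sizeof(*sprs) };

        return ptrace(PTRACE_GETREGSET, pid, (void *)(long)NT_PPC_TM_SPR, &iov);
}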
static int tm_tar_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (MSR_TM_ACTIVE(target->thread.regs->msr))
return regset->n;
return 0;
}
static int tm_tar_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_tar, 0, sizeof(u64));
return ret;
}
static int tm_tar_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_tar, 0, sizeof(u64));
return ret;
}
static int tm_ppr_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (MSR_TM_ACTIVE(target->thread.regs->msr))
return regset->n;
return 0;
}
static int tm_ppr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_ppr, 0, sizeof(u64));
return ret;
}
static int tm_ppr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_ppr, 0, sizeof(u64));
return ret;
}
static int tm_dscr_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (MSR_TM_ACTIVE(target->thread.regs->msr))
return regset->n;
return 0;
}
static int tm_dscr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_dscr, 0, sizeof(u64));
return ret;
}
static int tm_dscr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_dscr, 0, sizeof(u64));
return ret;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_PPC64
static int ppr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.ppr, 0, sizeof(u64));
return ret;
}
static int ppr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.ppr, 0, sizeof(u64));
return ret;
}
static int dscr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.dscr, 0, sizeof(u64));
return ret;
}
static int dscr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.dscr, 0, sizeof(u64));
return ret;
}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
static int tar_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.tar, 0, sizeof(u64));
return ret;
}
static int tar_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tar, 0, sizeof(u64));
return ret;
}
static int ebb_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
if (target->thread.used_ebb)
return regset->n;
return 0;
}
static int ebb_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
/* Build tests */
BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
if (!target->thread.used_ebb)
return -ENODATA;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
}
static int ebb_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret = 0;
/* Build tests */
BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
if (target->thread.used_ebb)
return -ENODATA;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.ebbrr, 0, sizeof(unsigned long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.ebbhr, sizeof(unsigned long),
2 * sizeof(unsigned long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.bescr,
2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
return ret;
}
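Because ebb_active() reports zero registers until the tracee has actually used EBB, a reader has to treat ENODATA as a normal outcome; a hypothetical helper, not part of the patch:

#include <errno.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <linux/elf.h>          /* NT_PPC_EBB, added by this series */

/* Returns 1 with ebb[] = {EBBRR, EBBHR, BESCR}, 0 if EBB unused, -1 on error. */
static int read_ebb(pid_t pid, uint64_t ebb[3])
{
        struct iovec iov = { .iov_base = ebb, .iov_len = 3 * sizeof(uint64_t) };

        if (ptrace(PTRACE_GETREGSET, pid, (void *)(long)NT_PPC_EBB, &iov) == 0)
                return 1;
        return errno == ENODATA ? 0 : -1;      /* ENODATA: ebb[] left unset */
}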
static int pmu_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
return regset->n;
}
static int pmu_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
/* Build tests */
BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.siar, 0,
5 * sizeof(unsigned long));
}
static int pmu_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret = 0;
/* Build tests */
BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.siar, 0,
sizeof(unsigned long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.sdar, sizeof(unsigned long),
2 * sizeof(unsigned long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.sier, 2 * sizeof(unsigned long),
3 * sizeof(unsigned long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.mmcr2, 3 * sizeof(unsigned long),
4 * sizeof(unsigned long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.mmcr0, 4 * sizeof(unsigned long),
5 * sizeof(unsigned long));
return ret;
}
#endif
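The PMU regset follows the same pattern; per pmu_get() above, the export order is SIAR, SDAR, SIER, MMCR2, MMCR0. An illustrative sketch, not part of the patch:

#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <linux/elf.h>          /* NT_PPC_PMU, added by this series */

/* pmu[] = {SIAR, SDAR, SIER, MMCR2, MMCR0} on success. */
static long read_pmu_regs(pid_t pid, uint64_t pmu[5])
{
        struct iovec iov = { .iov_base = pmu, .iov_len = 5 * sizeof(uint64_t) };

        return ptrace(PTRACE_GETREGSET, pid, (void *)(long)NT_PPC_PMU, &iov);
}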
/*
* These are our native regset flavors.
*/
enum powerpc_regset {
REGSET_GPR,
REGSET_FPR,
#ifdef CONFIG_ALTIVEC
REGSET_VMX,
#endif
#ifdef CONFIG_VSX
REGSET_VSX,
#endif
#ifdef CONFIG_SPE
REGSET_SPE,
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
REGSET_TM_CGPR, /* TM checkpointed GPR registers */
REGSET_TM_CFPR, /* TM checkpointed FPR registers */
REGSET_TM_CVMX, /* TM checkpointed VMX registers */
REGSET_TM_CVSX, /* TM checkpointed VSX registers */
REGSET_TM_SPR, /* TM specific SPR registers */
REGSET_TM_CTAR, /* TM checkpointed TAR register */
REGSET_TM_CPPR, /* TM checkpointed PPR register */
REGSET_TM_CDSCR, /* TM checkpointed DSCR register */
#endif
#ifdef CONFIG_PPC64
REGSET_PPR, /* PPR register */
REGSET_DSCR, /* DSCR register */
#endif
#ifdef CONFIG_PPC_BOOK3S_64
REGSET_TAR, /* TAR register */
REGSET_EBB, /* EBB registers */
REGSET_PMR, /* Performance Monitor Registers */
#endif
};
static const struct user_regset native_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
.size = sizeof(long), .align = sizeof(long),
.get = gpr_get, .set = gpr_set
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
.size = sizeof(double), .align = sizeof(double),
.get = fpr_get, .set = fpr_set
},
#ifdef CONFIG_ALTIVEC
[REGSET_VMX] = {
.core_note_type = NT_PPC_VMX, .n = 34,
.size = sizeof(vector128), .align = sizeof(vector128),
.active = vr_active, .get = vr_get, .set = vr_set
},
#endif
#ifdef CONFIG_VSX
[REGSET_VSX] = {
.core_note_type = NT_PPC_VSX, .n = 32,
.size = sizeof(double), .align = sizeof(double),
.active = vsr_active, .get = vsr_get, .set = vsr_set
},
#endif
#ifdef CONFIG_SPE
[REGSET_SPE] = {
.core_note_type = NT_PPC_SPE, .n = 35,
.size = sizeof(u32), .align = sizeof(u32),
.active = evr_active, .get = evr_get, .set = evr_set
},
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
[REGSET_TM_CGPR] = {
.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
.size = sizeof(long), .align = sizeof(long),
.active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
},
[REGSET_TM_CFPR] = {
.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
.size = sizeof(double), .align = sizeof(double),
.active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
},
[REGSET_TM_CVMX] = {
.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
.size = sizeof(vector128), .align = sizeof(vector128),
.active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
},
[REGSET_TM_CVSX] = {
.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
.size = sizeof(double), .align = sizeof(double),
.active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
},
[REGSET_TM_SPR] = {
.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
},
[REGSET_TM_CTAR] = {
.core_note_type = NT_PPC_TM_CTAR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
},
[REGSET_TM_CPPR] = {
.core_note_type = NT_PPC_TM_CPPR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
},
[REGSET_TM_CDSCR] = {
.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
},
#endif
#ifdef CONFIG_PPC64
[REGSET_PPR] = {
.core_note_type = NT_PPC_PPR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.get = ppr_get, .set = ppr_set
},
[REGSET_DSCR] = {
.core_note_type = NT_PPC_DSCR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.get = dscr_get, .set = dscr_set
},
#endif
#ifdef CONFIG_PPC_BOOK3S_64
[REGSET_TAR] = {
.core_note_type = NT_PPC_TAR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.get = tar_get, .set = tar_set
},
[REGSET_EBB] = {
.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
.size = sizeof(u64), .align = sizeof(u64),
.active = ebb_active, .get = ebb_get, .set = ebb_set
},
[REGSET_PMR] = {
.core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
.size = sizeof(u64), .align = sizeof(u64),
.active = pmu_active, .get = pmu_get, .set = pmu_set
},
#endif
};
static const struct user_regset_view user_ppc_native_view = {
.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
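Each entry above gates itself through CPU-feature checks and .active hooks, so availability varies by CPU and kernel config; a debugger can probe a note type and classify the resulting errno. A rough, hypothetical sketch:

#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static void probe_regset(pid_t pid, int note, const char *name,
                         void *buf, size_t len)
{
        struct iovec iov = { .iov_base = buf, .iov_len = len };

        if (ptrace(PTRACE_GETREGSET, pid, (void *)(long)note, &iov) == 0)
                printf("%s: available, %zu bytes\n", name, iov.iov_len);
        else if (errno == EINVAL || errno == ENODEV)
                printf("%s: not supported on this kernel/CPU\n", name);
        else if (errno == ENODATA)
                printf("%s: supported, but no data right now\n", name);
        else
                perror(name);
}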
#ifdef CONFIG_PPC64
#include <linux/compat.h>
static int gpr32_get_common(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf, bool tm_active)
{
const unsigned long *regs = &target->thread.regs->gpr[0];
const unsigned long *ckpt_regs;
compat_ulong_t *k = kbuf;
compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
int i;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
ckpt_regs = &target->thread.ckpt_regs.gpr[0];
#endif
if (tm_active) {
regs = ckpt_regs;
} else {
if (target->thread.regs == NULL)
return -EIO;
if (!FULL_REGS(target->thread.regs)) {
/*
* We have a partial register set.
* Fill 14-31 with bogus values.
*/
for (i = 14; i < 32; i++)
target->thread.regs->gpr[i] = NV_REG_POISON;
}
}
pos /= sizeof(reg);
count /= sizeof(reg);
if (kbuf)
for (; count > 0 && pos < PT_MSR; --count)
*k++ = regs[pos++];
else
for (; count > 0 && pos < PT_MSR; --count)
if (__put_user((compat_ulong_t) regs[pos++], u++))
return -EFAULT;
if (count > 0 && pos == PT_MSR) {
reg = get_user_msr(target);
if (kbuf)
*k++ = reg;
else if (__put_user(reg, u++))
return -EFAULT;
++pos;
--count;
}
if (kbuf)
for (; count > 0 && pos < PT_REGS_COUNT; --count)
*k++ = regs[pos++];
else
for (; count > 0 && pos < PT_REGS_COUNT; --count)
if (__put_user((compat_ulong_t) regs[pos++], u++))
return -EFAULT;
kbuf = k;
ubuf = u;
pos *= sizeof(reg);
count *= sizeof(reg);
return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
PT_REGS_COUNT * sizeof(reg), -1);
}
static int gpr32_set_common(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf, bool tm_active)
{
unsigned long *regs = &target->thread.regs->gpr[0];
unsigned long *ckpt_regs;
const compat_ulong_t *k = kbuf;
const compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
ckpt_regs = &target->thread.ckpt_regs.gpr[0];
#endif
if (tm_active) {
regs = ckpt_regs;
} else {
regs = &target->thread.regs->gpr[0];
if (target->thread.regs == NULL)
return -EIO;
CHECK_FULL_REGS(target->thread.regs);
}
pos /= sizeof(reg);
count /= sizeof(reg);
@@ -804,6 +2195,40 @@ static int gpr32_set(struct task_struct *target,
(PT_TRAP + 1) * sizeof(reg), -1);
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static int tm_cgpr32_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 1);
}
static int tm_cgpr32_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 1);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
static int gpr32_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 0);
}
static int gpr32_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 0);
}
/*
* These are the regset flavors matching the CONFIG_PPC32 native set.
*/
@@ -832,6 +2257,73 @@ static const struct user_regset compat_regsets[] = {
.active = evr_active, .get = evr_get, .set = evr_set
},
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
[REGSET_TM_CGPR] = {
.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
.size = sizeof(long), .align = sizeof(long),
.active = tm_cgpr_active,
.get = tm_cgpr32_get, .set = tm_cgpr32_set
},
[REGSET_TM_CFPR] = {
.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
.size = sizeof(double), .align = sizeof(double),
.active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
},
[REGSET_TM_CVMX] = {
.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
.size = sizeof(vector128), .align = sizeof(vector128),
.active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
},
[REGSET_TM_CVSX] = {
.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
.size = sizeof(double), .align = sizeof(double),
.active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
},
[REGSET_TM_SPR] = {
.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
},
[REGSET_TM_CTAR] = {
.core_note_type = NT_PPC_TM_CTAR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
},
[REGSET_TM_CPPR] = {
.core_note_type = NT_PPC_TM_CPPR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
},
[REGSET_TM_CDSCR] = {
.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
},
#endif
#ifdef CONFIG_PPC64
[REGSET_PPR] = {
.core_note_type = NT_PPC_PPR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.get = ppr_get, .set = ppr_set
},
[REGSET_DSCR] = {
.core_note_type = NT_PPC_DSCR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.get = dscr_get, .set = dscr_set
},
#endif
#ifdef CONFIG_PPC_BOOK3S_64
[REGSET_TAR] = {
.core_note_type = NT_PPC_TAR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.get = tar_get, .set = tar_set
},
[REGSET_EBB] = {
.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
.size = sizeof(u64), .align = sizeof(u64),
.active = ebb_active, .get = ebb_get, .set = ebb_set
},
#endif
};
static const struct user_regset_view user_ppc_compat_view = {
......
@@ -66,6 +66,7 @@
#include <asm/hugetlb.h>
#include <asm/livepatch.h>
#include <asm/mmu_context.h>
#include <asm/cpu_has_feature.h>
#include "setup.h"
......
@@ -37,6 +37,7 @@
#include <asm/serial.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>
#include <asm/cpu_has_feature.h>
#define DBG(fmt...)
......
@@ -227,8 +227,8 @@ static void __init configure_exceptions(void)
opal_configure_cores();
/* Enable AIL if supported, and we are in hypervisor mode */
if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
unsigned long lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
}
@@ -298,12 +298,12 @@ void __init early_setup(unsigned long dt_ptr)
*/
configure_exceptions();
/* Apply all the dynamic patching */
apply_feature_fixups();
/* Initialize the hash table or TLB handling */
early_init_mmu();
/*
* At this point, we can let interrupts switch to virtual mode
* (the MMU has been setup), so adjust the MSR in the PACA to
......
@@ -55,6 +55,7 @@
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>
#ifdef DEBUG
#include <asm/udbg.h>
......
@@ -13,6 +13,7 @@
*/
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
@@ -152,9 +153,18 @@ static void do_final_fixups(void)
#endif
}
static unsigned long __initdata saved_cpu_features;
static unsigned int __initdata saved_mmu_features;
#ifdef CONFIG_PPC64
static unsigned long __initdata saved_firmware_features;
#endif
void __init apply_feature_fixups(void)
{
struct cpu_spec *spec = PTRRELOC(*PTRRELOC(&cur_cpu_spec));
*PTRRELOC(&saved_cpu_features) = spec->cpu_features;
*PTRRELOC(&saved_mmu_features) = spec->mmu_features;
/*
* Apply the CPU-specific and firmware specific fixups to kernel text
@@ -173,11 +183,36 @@ void apply_feature_fixups(void)
PTRRELOC(&__stop___lwsync_fixup));
#ifdef CONFIG_PPC64
saved_firmware_features = powerpc_firmware_features;
do_feature_fixups(powerpc_firmware_features,
&__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
#endif
do_final_fixups();
/*
* Initialise jump label. This causes all the cpu/mmu_has_feature()
* checks to take on their correct polarity based on the current set of
* CPU/MMU features.
*/
jump_label_init();
cpu_feature_keys_init();
mmu_feature_keys_init();
}
static int __init check_features(void)
{
WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
"CPU features changed after feature patching!\n");
WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
"MMU features changed after feature patching!\n");
#ifdef CONFIG_PPC64
WARN(saved_firmware_features != powerpc_firmware_features,
"Firmware features changed after feature patching!\n");
#endif
return 0;
}
late_initcall(check_features);
#ifdef CONFIG_FTR_FIXUP_SELFTEST
......
@@ -72,8 +72,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
/* clear out bits after (52) [0....52.....63] */
va &= ~((1ul << (64 - 52)) - 1);
va |= ssize << 8;
sllp = get_sllp_encoding(apsize);
va |= sllp << 5;
asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
: : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
@@ -122,8 +121,7 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
/* clear out bits after(52) [0....52.....63] */
va &= ~((1ul << (64 - 52)) - 1);
va |= ssize << 8;
sllp = get_sllp_encoding(apsize);
va |= sllp << 5;
asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
: : "r"(va) : "memory");
@@ -749,5 +747,5 @@ void __init hpte_init_native(void)
mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;
if (cpu_has_feature(CPU_FTR_ARCH_300))
register_process_table = native_register_proc_table;
}
@@ -363,11 +363,6 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
return 0;
}
static int __init get_idx_from_shift(unsigned int shift)
{
int idx = -1;
@@ -539,7 +534,7 @@ static bool might_have_hea(void)
#endif /* #ifdef CONFIG_PPC_64K_PAGES */
static void __init htab_scan_page_sizes(void)
{
int rc;
@@ -554,17 +549,23 @@ static void __init htab_init_page_sizes(void)
* Try to find the available page sizes in the device-tree
*/
rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
if (rc == 0 && early_mmu_has_feature(MMU_FTR_16M_PAGE)) {
/*
* Nothing in the device-tree, but the CPU supports 16M pages,
* so let's fallback on a known size list for 16M capable CPUs.
*/
memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
sizeof(mmu_psize_defaults_gp));
}
#ifdef CONFIG_HUGETLB_PAGE
/* Reserve 16G huge page memory sections for huge pages */
of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
#endif /* CONFIG_HUGETLB_PAGE */
}
static void __init htab_init_page_sizes(void)
{
if (!debug_pagealloc_enabled()) {
/*
* Pick a size for the linear mapping. Currently, we only
@@ -630,11 +631,6 @@ static void __init htab_init_page_sizes(void)
,mmu_psize_defs[mmu_vmemmap_psize].shift
#endif
);
}
static int __init htab_dt_scan_pftsize(unsigned long node,
@@ -759,12 +755,6 @@ static void __init htab_initialize(void)
DBG(" -> htab_initialize()\n");
if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
mmu_kernel_ssize = MMU_SEGSIZE_1T;
mmu_highuser_ssize = MMU_SEGSIZE_1T;
@@ -885,8 +875,19 @@ static void __init htab_initialize(void)
#undef KB
#undef MB
void __init hash__early_init_devtree(void)
{
/* Initialize segment sizes */
of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
/* Initialize page sizes */
htab_scan_page_sizes();
}
void __init hash__early_init_mmu(void)
{
htab_init_page_sizes();
/*
* initialize page table size
*/
......
@@ -5,39 +5,34 @@
#include <asm/cacheflush.h>
#include <asm/machdep.h>
#include <asm/mman.h>
#include <asm/tlb.h>
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
int psize;
struct hstate *hstate = hstate_file(vma->vm_file);
psize = hstate_get_psize(hstate);
radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
int psize;
struct hstate *hstate = hstate_file(vma->vm_file);
psize = hstate_get_psize(hstate);
radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}
void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
int psize;
struct hstate *hstate = hstate_file(vma->vm_file);
psize = hstate_get_psize(hstate);
radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
}
/*
......
@@ -411,3 +411,25 @@ struct page *realmode_pfn_to_page(unsigned long pfn)
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
#ifdef CONFIG_PPC_STD_MMU_64
static bool disable_radix;
static int __init parse_disable_radix(char *p)
{
disable_radix = true;
return 0;
}
early_param("disable_radix", parse_disable_radix);
void __init mmu_early_init_devtree(void)
{
/* Disable radix mode based on kernel command line. */
if (disable_radix)
cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
if (early_radix_enabled())
radix__early_init_devtree();
else
hash__early_init_devtree();
}
#endif /* CONFIG_PPC_STD_MMU_64 */
@@ -14,6 +14,9 @@
#include "mmu_decl.h"
#include <trace/events/thp.h>
int (*register_process_table)(unsigned long base, unsigned long page_size,
unsigned long tbl_size);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* This is called when relaxing access to a hugepage. It's also called in the page
@@ -33,7 +36,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
changed = !pmd_same(*(pmdp), entry);
if (changed) {
__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
return changed;
}
@@ -66,7 +69,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
/*
* This ensures that generic code that relies on IRQ disabling
* to prevent a parallel THP split works as expected.
......
@@ -171,7 +171,7 @@ static void __init radix_init_pgtable(void)
* of process table here. But our linear mapping also enables us to use
* physical address here.
*/
register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
}
@@ -198,7 +198,7 @@ static void __init radix_init_partition_table(void)
void __init radix_init_native(void)
{
register_process_table = native_register_process_table;
}
static int __init get_idx_from_shift(unsigned int shift)
@@ -264,7 +264,7 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
return 1;
}
void __init radix__early_init_devtree(void)
{
int rc;
@@ -343,7 +343,6 @@ void __init radix__early_init_mmu(void)
__pte_frag_nr = H_PTE_FRAG_NR;
__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
radix_init_native();
lpcr = mfspr(SPRN_LPCR);
......
@@ -225,7 +225,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
if (!is_vm_hugetlb_page(vma))
assert_pte_locked(vma->vm_mm, address);
__ptep_set_access_flags(ptep, entry);
flush_tlb_page(vma, address);
}
return changed;
}
......
@@ -140,10 +140,11 @@ void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
}
EXPORT_SYMBOL(radix__local_flush_tlb_pwc);
void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
int psize)
{
unsigned long pid;
unsigned long ap = mmu_get_ap(psize);
preempt_disable();
pid = mm ? mm->context.id : 0;
@@ -159,18 +160,12 @@ void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmadd
if (vma && is_vm_hugetlb_page(vma))
return __local_flush_hugetlb_page(vma, vmaddr);
#endif
radix__local_flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);
 #ifdef CONFIG_SMP
-static int mm_is_core_local(struct mm_struct *mm)
-{
-	return cpumask_subset(mm_cpumask(mm),
-			      topology_sibling_cpumask(smp_processor_id()));
-}
-
 void radix__flush_tlb_mm(struct mm_struct *mm)
 {
 	unsigned long pid;
@@ -221,10 +216,11 @@ void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
 }
 EXPORT_SYMBOL(radix__flush_tlb_pwc);

-void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
-			    unsigned long ap, int nid)
+void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
+				 int psize)
 {
 	unsigned long pid;
+	unsigned long ap = mmu_get_ap(psize);

 	preempt_disable();
 	pid = mm ? mm->context.id : 0;
@@ -250,8 +246,8 @@ void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 	if (vma && is_vm_hugetlb_page(vma))
 		return flush_hugetlb_page(vma, vmaddr);
 #endif
-	radix___flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
-			       mmu_get_ap(mmu_virtual_psize), 0);
+	radix__flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
+				    mmu_virtual_psize);
 }
 EXPORT_SYMBOL(radix__flush_tlb_page);
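The same rename lands in the SMP path: callers now pass a psize and the actual-page (ap) encoding is derived internally, and the unused nid argument disappears. One consumer of the new interface, lightly adapted from the series' radix hugetlb helper; hstate_get_psize() is the lookup helper this series introduces, and the body should be read as a sketch rather than verbatim.

    void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
    {
    	int psize;
    	struct hstate *hstate = hstate_file(vma->vm_file);

    	/* Map the hugepage size onto an MMU psize index, then flush. */
    	psize = hstate_get_psize(hstate);
    	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
    }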
@@ -299,10 +295,67 @@ static int radix_get_mmu_psize(int page_size)
 void radix__tlb_flush(struct mmu_gather *tlb)
 {
+	int psize = 0;
 	struct mm_struct *mm = tlb->mm;
-	radix__flush_tlb_mm(mm);
+	int page_size = tlb->page_size;
+
+	psize = radix_get_mmu_psize(page_size);
+	/*
+	 * If the page size is not something we understand, do a full mm flush.
+	 */
+	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
+		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
+	else
+		radix__flush_tlb_mm(mm);
 }
+#define TLB_FLUSH_ALL -1UL
+
+/*
+ * Number of pages above which we will do a bcast tlbie. Just a
+ * number at this point copied from x86.
+ */
+static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
+
+void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
+				  unsigned long end, int psize)
+{
+	unsigned long pid;
+	unsigned long addr;
+	int local = mm_is_core_local(mm);
+	unsigned long ap = mmu_get_ap(psize);
+	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+	unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
+
+	preempt_disable();
+	pid = mm ? mm->context.id : 0;
+	if (unlikely(pid == MMU_NO_CONTEXT))
+		goto err_out;
+
+	if (end == TLB_FLUSH_ALL ||
+	    (end - start) > tlb_single_page_flush_ceiling * page_size) {
+		if (local)
+			_tlbiel_pid(pid, RIC_FLUSH_TLB);
+		else
+			_tlbie_pid(pid, RIC_FLUSH_TLB);
+		goto err_out;
+	}
+	for (addr = start; addr < end; addr += page_size) {
+		if (local)
+			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
+		else {
+			if (lock_tlbie)
+				raw_spin_lock(&native_tlbie_lock);
+			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
+			if (lock_tlbie)
+				raw_spin_unlock(&native_tlbie_lock);
+		}
+	}
+err_out:
+	preempt_enable();
+}
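Two reading aids for the block above. First, a hedged sketch of radix_get_mmu_psize(), whose -1 "unknown size" return is what drives the full-mm fallback in radix__tlb_flush(); the body follows the usual mmu_psize_defs pattern and may not match the tree line for line.

    static int radix_get_mmu_psize(int page_size)
    {
    	int psize;

    	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
    		psize = mmu_virtual_psize;
    	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
    		psize = MMU_PAGE_2M;
    	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
    		psize = MMU_PAGE_1G;
    	else
    		return -1;	/* caller falls back to a full mm flush */
    	return psize;
    }

Second, the ceiling heuristic in numbers: with 64K pages the threshold is 33 * 64K, roughly 2 MB, so a 1 MB unmap (16 pages) takes the per-page tlbie loop while an 8 MB unmap falls back to a single PID-wide flush.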
 void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
 			      unsigned long page_size)
 {
@@ -340,3 +393,10 @@ void radix__flush_tlb_lpid(unsigned long lpid)
 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
 EXPORT_SYMBOL(radix__flush_tlb_lpid);
+
+void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
+				unsigned long start, unsigned long end)
+{
+	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
+}
+EXPORT_SYMBOL(radix__flush_pmd_tlb_range);
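THP mappings on radix are 2M, so this export gives the generic pmd-range flush a radix-aware backend. A hedged sketch of how the arch header is expected to wire it up; the hash fallback name is assumed from the series' hash__/radix__ naming scheme.

    #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
    static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
    				       unsigned long start, unsigned long end)
    {
    	if (radix_enabled())
    		return radix__flush_pmd_tlb_range(vma, start, end);
    	return hash__flush_tlb_range(vma, start, end);
    }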
@@ -48,17 +48,6 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
 }
 EXPORT_SYMBOL(flush_hash_entry);

-/*
- * Called by ptep_set_access_flags, must flush on CPUs for which the
- * DSI handler can't just "fixup" the TLB on a write fault
- */
-void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
-{
-	if (Hash != 0)
-		return;
-	_tlbie(addr);
-}
-
 /*
  * Called at the end of a mmu_gather operation to make sure the
  * TLB flush is completely done.
...
@@ -215,12 +215,6 @@ EXPORT_SYMBOL(local_flush_tlb_page);
 static DEFINE_RAW_SPINLOCK(tlbivax_lock);

-static int mm_is_core_local(struct mm_struct *mm)
-{
-	return cpumask_subset(mm_cpumask(mm),
-			      topology_sibling_cpumask(smp_processor_id()));
-}
-
 struct tlb_flush_param {
 	unsigned long addr;
 	unsigned int pid;
...
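Both copies of the helper disappear; per the commit "Drop multiple definition of mm_is_core_local" it now lives in one shared powerpc header (asm/tlb.h is the assumed location). The body is exactly what the two removed copies contained:

    static inline int mm_is_core_local(struct mm_struct *mm)
    {
    	/* True when the mm has only run on threads of the current core. */
    	return cpumask_subset(mm_cpumask(mm),
    			      topology_sibling_cpumask(smp_processor_id()));
    }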
@@ -34,15 +34,15 @@ EVENT(PM_L1_ICACHE_MISS, 0x200fd)
 /* Instruction Demand sectors written into IL1 */
 EVENT(PM_L1_DEMAND_WRITE,	0x0408c)
 /* Instruction prefetch written into IL1 */
-EVENT(PM_IC_PREF_WRITE,		0x0408e)
+EVENT(PM_IC_PREF_WRITE,		0x0488c)
 /* The data cache was reloaded from local core's L3 due to a demand load */
 EVENT(PM_DATA_FROM_L3,		0x4c042)
 /* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
 EVENT(PM_DATA_FROM_L3MISS,	0x300fe)
 /* All successful D-side store dispatches for this thread */
-EVENT(PM_L2_ST,			0x16081)
+EVENT(PM_L2_ST,			0x16880)
 /* All successful D-side store dispatches for this thread that were L2 Miss */
-EVENT(PM_L2_ST_MISS,		0x26081)
+EVENT(PM_L2_ST_MISS,		0x26880)
 /* Total HW L3 prefetches(Load+store) */
 EVENT(PM_L3_PREF_ALL,		0x4e052)
 /* Data PTEG reload */
...
@@ -35,6 +35,7 @@
 #include <asm/pgtable.h>
 #include <asm/reg.h>
 #include <asm/cell-regs.h>
+#include <asm/cpu_has_feature.h>

 #include "pervasive.h"
...
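This include (and the matching one in the next hunk) pulls cpu_has_feature() from its new home, split out so it can use jump labels. A hedged sketch of the static-key flavour the series describes; the config symbol and the cpu_feature_keys array name are taken from the commit titles and may not match the tree exactly.

    #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
    static __always_inline bool cpu_has_feature(unsigned long feature)
    {
    	int i;

    	if (CPU_FTRS_ALWAYS & feature)
    		return true;

    	if (!(CPU_FTRS_POSSIBLE & feature))
    		return false;

    	/* One static key per feature bit, patched at early boot. */
    	i = __builtin_ctzl(feature);
    	return static_branch_likely(&cpu_feature_keys[i]);
    }
    #else
    static inline bool cpu_has_feature(unsigned long feature)
    {
    	return !!(cur_cpu_spec->cpu_features & feature);
    }
    #endif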
@@ -20,6 +20,7 @@ along with this file; see the file COPYING.  If not, write to the Free
 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
 #include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
 #include "nonstdio.h"
 #include "ansidecl.h"
 #include "ppc.h"
...
@@ -382,6 +382,19 @@ typedef struct elf64_shdr {
 #define NT_PPC_VMX	0x100		/* PowerPC Altivec/VMX registers */
 #define NT_PPC_SPE	0x101		/* PowerPC SPE/EVR registers */
 #define NT_PPC_VSX	0x102		/* PowerPC VSX registers */
+#define NT_PPC_TAR	0x103		/* Target Address Register */
+#define NT_PPC_PPR	0x104		/* Program Priority Register */
+#define NT_PPC_DSCR	0x105		/* Data Stream Control Register */
+#define NT_PPC_EBB	0x106		/* Event Based Branch Registers */
+#define NT_PPC_PMU	0x107		/* Performance Monitor Registers */
+#define NT_PPC_TM_CGPR	0x108		/* TM checkpointed GPR Registers */
+#define NT_PPC_TM_CFPR	0x109		/* TM checkpointed FPR Registers */
+#define NT_PPC_TM_CVMX	0x10a		/* TM checkpointed VMX Registers */
+#define NT_PPC_TM_CVSX	0x10b		/* TM checkpointed VSX Registers */
+#define NT_PPC_TM_SPR	0x10c		/* TM Special Purpose Registers */
+#define NT_PPC_TM_CTAR	0x10d		/* TM checkpointed Target Address Register */
+#define NT_PPC_TM_CPPR	0x10e		/* TM checkpointed Program Priority Register */
+#define NT_PPC_TM_CDSCR	0x10f		/* TM checkpointed Data Stream Control Register */
 #define NT_386_TLS	0x200		/* i386 TLS slots (struct user_desc) */
 #define NT_386_IOPERM	0x201		/* x86 io permission bitmap (1=deny) */
 #define NT_X86_XSTATE	0x202		/* x86 extended state using xsave */
...
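These note types back the new ptrace regsets in the series. A hedged userspace sketch of reading a stopped tracee's checkpointed GPRs through the standard PTRACE_GETREGSET mechanism; read_ckpt_gprs is a hypothetical helper, and the buffer layout and exact error codes are illustrative, not taken from the patches.

    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <linux/elf.h>	/* needs headers new enough to carry NT_PPC_TM_* */

    /* Returns 0 on success; the kernel rejects the request (e.g. with
     * EINVAL or ENODATA) when the tracee is not in an active transaction. */
    static long read_ckpt_gprs(pid_t pid, void *buf, size_t len)
    {
    	struct iovec iov = { .iov_base = buf, .iov_len = len };

    	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PPC_TM_CGPR, &iov);
    }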
@@ -288,6 +288,9 @@ void __init jump_label_init(void)
 	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
 	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

+	if (static_key_initialized)
+		return;
+
 	jump_label_lock();
 	jump_label_sort_entries(iter_start, iter_stop);
...
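The guard makes jump_label_init() idempotent, which is what allows an architecture to call it early. A hedged sketch of the powerpc call site named in the commit list ("Call jump_label_init() in apply_feature_fixups()"); the surrounding patching code is elided and the body is illustrative.

    void __init apply_feature_fixups(void)
    {
    	/* ... CPU/MMU/firmware feature sections patched here ... */

    	/*
    	 * Initialise jump labels now so cpu_has_feature() and
    	 * mmu_has_feature() can switch to static keys well before the
    	 * generic init; the guard above makes the later call a no-op.
    	 */
    	jump_label_init();
    }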
@@ -3942,6 +3942,14 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	return i ? i : -EFAULT;
 }

+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
+ * implement this.
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
+#endif
+
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 		unsigned long address, unsigned long end, pgprot_t newprot)
 {
@@ -4002,7 +4010,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 * once we release i_mmap_rwsem, another task can do the final put_page
 	 * and that page table be reused and filled with junk.
 	 */
-	flush_tlb_range(vma, start, end);
+	flush_hugetlb_tlb_range(vma, start, end);
 	mmu_notifier_invalidate_range(mm, start, end);
 	i_mmap_unlock_write(vma->vm_file->f_mapping);
 	mmu_notifier_invalidate_range_end(mm, start, end);
...
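With the generic default in place, an architecture opts in by defining the guard macro and supplying its own helper. A hedged sketch of a powerpc-style override built from this series' radix primitives; hstate_get_psize() comes from the series, but the exact body here is illustrative rather than the committed implementation.

    #define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
    static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
    					   unsigned long start,
    					   unsigned long end)
    {
    	if (radix_enabled())
    		radix__flush_tlb_range_psize(vma->vm_mm, start, end,
    					     hstate_get_psize(hstate_vma(vma)));
    	else
    		flush_tlb_range(vma, start, end);
    }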