Commit ce45327c authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'csky-for-linus-5.2-rc1' of git://github.com/c-sky/csky-linux

Pull arch/csky updates from Guo Ren:

 - Fixup vdsp&fpu issues in kernel

 - Add dynamic function tracer

 - Use in_syscall & forget_syscall instead of r11_sig

 - Reconstruct signal processing

 - Support dynamic start physical address

 - Fixup wrong update_mmu_cache implementation

 - Support vmlinux bootup with MMU off

 - Use va_pa_offset instead of phys_offset

 - Fixup syscall_trace return processing flow

 - Add perf callchain support

 - Add perf_arch_fetch_caller_regs support

 - Add page fault perf event support

 - Add support for perf registers sampling

* tag 'csky-for-linus-5.2-rc1' of git://github.com/c-sky/csky-linux:
  csky/syscall_trace: Fixup return processing flow
  csky: Fixup compile warning
  csky: Add support for perf registers sampling
  csky: add page fault perf event support
  csky: Use va_pa_offset instead of phys_offset
  csky: Support vmlinux bootup with MMU off
  csky: Add perf_arch_fetch_caller_regs support
  csky: Fixup wrong update_mmu_cache implementation
  csky: Support dynamic start physical address
  csky: Reconstruct signal processing
  csky: Use in_syscall & forget_syscall instead of r11_sig
  csky: Add non-uapi asm/ptrace.h namespace
  csky: mm/fault.c: Remove duplicate header
  csky: remove redundant generic-y
  csky: Update syscall_trace_enter/exit implementation
  csky: Add perf callchain support
  csky/ftrace: Add dynamic function tracer (include graph tracer)
  csky: Fixup vdsp&fpu issues in kernel
parents e7a1414f a691f333
...@@ -29,15 +29,20 @@ config CSKY ...@@ -29,15 +29,20 @@ config CSKY
select GENERIC_SCHED_CLOCK select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD select GENERIC_SMP_IDLE_THREAD
select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_AUDITSYSCALL
select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_KERNEL_GZIP select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA select HAVE_KERNEL_LZMA
select HAVE_PERF_EVENTS select HAVE_PERF_EVENTS
select HAVE_C_RECORDMCOUNT select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_DMA_API_DEBUG select HAVE_DMA_API_DEBUG
select HAVE_DMA_CONTIGUOUS select HAVE_DMA_CONTIGUOUS
select HAVE_SYSCALL_TRACEPOINTS
select MAY_HAVE_SPARSE_IRQ select MAY_HAVE_SPARSE_IRQ
select MODULES_USE_ELF_RELA if MODULES select MODULES_USE_ELF_RELA if MODULES
select OF select OF
......
...@@ -36,7 +36,7 @@ endif ...@@ -36,7 +36,7 @@ endif
ifneq ($(CSKYABI),) ifneq ($(CSKYABI),)
MCPU_STR = $(CPUTYPE)$(FPUEXT)$(VDSPEXT)$(TEEEXT) MCPU_STR = $(CPUTYPE)$(FPUEXT)$(VDSPEXT)$(TEEEXT)
KBUILD_CFLAGS += -mcpu=$(MCPU_STR) KBUILD_CFLAGS += -mcpu=$(CPUTYPE) -Wa,-mcpu=$(MCPU_STR)
KBUILD_CFLAGS += -DCSKYCPU_DEF_NAME=\"$(MCPU_STR)\" KBUILD_CFLAGS += -DCSKYCPU_DEF_NAME=\"$(MCPU_STR)\"
KBUILD_CFLAGS += -msoft-float -mdiv KBUILD_CFLAGS += -msoft-float -mdiv
KBUILD_CFLAGS += -fno-tree-vectorize KBUILD_CFLAGS += -fno-tree-vectorize
......
...@@ -40,6 +40,26 @@ static inline void write_mmu_entryhi(int value) ...@@ -40,6 +40,26 @@ static inline void write_mmu_entryhi(int value)
cpwcr("cpcr4", value); cpwcr("cpcr4", value);
} }
static inline unsigned long read_mmu_msa0(void)
{
return cprcr("cpcr30");
}
static inline void write_mmu_msa0(unsigned long value)
{
cpwcr("cpcr30", value);
}
static inline unsigned long read_mmu_msa1(void)
{
return cprcr("cpcr31");
}
static inline void write_mmu_msa1(unsigned long value)
{
cpwcr("cpcr31", value);
}
/* /*
* TLB operations. * TLB operations.
*/ */
...@@ -65,11 +85,11 @@ static inline void tlb_invalid_indexed(void) ...@@ -65,11 +85,11 @@ static inline void tlb_invalid_indexed(void)
static inline void setup_pgd(unsigned long pgd, bool kernel) static inline void setup_pgd(unsigned long pgd, bool kernel)
{ {
cpwcr("cpcr29", pgd); cpwcr("cpcr29", pgd | BIT(0));
} }
static inline unsigned long get_pgd(void) static inline unsigned long get_pgd(void)
{ {
return cprcr("cpcr29"); return cprcr("cpcr29") & ~BIT(0);
} }
#endif /* __ASM_CSKY_CKMMUV1_H */ #endif /* __ASM_CSKY_CKMMUV1_H */
...@@ -16,9 +16,6 @@ ...@@ -16,9 +16,6 @@
#define LSAVE_A4 40 #define LSAVE_A4 40
#define LSAVE_A5 44 #define LSAVE_A5 44
#define EPC_INCREASE 2
#define EPC_KEEP 0
.macro USPTOKSP .macro USPTOKSP
mtcr sp, ss1 mtcr sp, ss1
mfcr sp, ss0 mfcr sp, ss0
...@@ -29,10 +26,6 @@ ...@@ -29,10 +26,6 @@
mfcr sp, ss1 mfcr sp, ss1
.endm .endm
.macro INCTRAP rx
addi \rx, EPC_INCREASE
.endm
.macro SAVE_ALL epc_inc .macro SAVE_ALL epc_inc
mtcr r13, ss2 mtcr r13, ss2
mfcr r13, epsr mfcr r13, epsr
...@@ -150,11 +143,35 @@ ...@@ -150,11 +143,35 @@
cpwcr \rx, cpcr8 cpwcr \rx, cpcr8
.endm .endm
.macro SETUP_MMU rx .macro SETUP_MMU
lrw \rx, PHYS_OFFSET | 0xe /* Init psr and enable ee */
cpwcr \rx, cpcr30 lrw r6, DEFAULT_PSR_VALUE
lrw \rx, (PHYS_OFFSET + 0x20000000) | 0xe mtcr r6, psr
cpwcr \rx, cpcr31 psrset ee
/* Select MMU as co-processor */
cpseti cp15
/*
* cpcr30 format:
* 31 - 29 | 28 - 4 | 3 | 2 | 1 | 0
* BA Reserved C D V
*/
cprcr r6, cpcr30
lsri r6, 28
lsli r6, 28
addi r6, 0xe
cpwcr r6, cpcr30
lsri r6, 28
addi r6, 2
lsli r6, 28
addi r6, 0xe
cpwcr r6, cpcr31
.endm .endm
.macro ANDI_R3 rx, imm
lsri \rx, 3
andi \rx, (\imm >> 3)
.endm
#endif /* __ASM_CSKY_ENTRY_H */ #endif /* __ASM_CSKY_ENTRY_H */
...@@ -5,9 +5,8 @@ ...@@ -5,9 +5,8 @@
#define __ASM_CSKY_REGDEF_H #define __ASM_CSKY_REGDEF_H
#define syscallid r1 #define syscallid r1
#define r11_sig r11
#define regs_syscallid(regs) regs->regs[9] #define regs_syscallid(regs) regs->regs[9]
#define regs_fp(regs) regs->regs[2]
/* /*
* PSR format: * PSR format:
...@@ -23,4 +22,6 @@ ...@@ -23,4 +22,6 @@
#define SYSTRACE_SAVENUM 2 #define SYSTRACE_SAVENUM 2
#define TRAP0_SIZE 2
#endif /* __ASM_CSKY_REGDEF_H */ #endif /* __ASM_CSKY_REGDEF_H */
...@@ -34,10 +34,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, ...@@ -34,10 +34,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
{ {
unsigned long addr, pfn; unsigned long addr, pfn;
struct page *page; struct page *page;
void *va;
if (!(vma->vm_flags & VM_EXEC))
return;
pfn = pte_pfn(*pte); pfn = pte_pfn(*pte);
if (unlikely(!pfn_valid(pfn))) if (unlikely(!pfn_valid(pfn)))
...@@ -47,14 +43,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, ...@@ -47,14 +43,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
if (page == ZERO_PAGE(0)) if (page == ZERO_PAGE(0))
return; return;
va = page_address(page);
addr = (unsigned long) va;
if (va == NULL && PageHighMem(page))
addr = (unsigned long) kmap_atomic(page); addr = (unsigned long) kmap_atomic(page);
cache_wbinv_range(addr, addr + PAGE_SIZE); cache_wbinv_range(addr, addr + PAGE_SIZE);
if (va == NULL && PageHighMem(page))
kunmap_atomic((void *) addr); kunmap_atomic((void *) addr);
} }
...@@ -42,6 +42,26 @@ static inline void write_mmu_entryhi(int value) ...@@ -42,6 +42,26 @@ static inline void write_mmu_entryhi(int value)
mtcr("cr<4, 15>", value); mtcr("cr<4, 15>", value);
} }
static inline unsigned long read_mmu_msa0(void)
{
return mfcr("cr<30, 15>");
}
static inline void write_mmu_msa0(unsigned long value)
{
mtcr("cr<30, 15>", value);
}
static inline unsigned long read_mmu_msa1(void)
{
return mfcr("cr<31, 15>");
}
static inline void write_mmu_msa1(unsigned long value)
{
mtcr("cr<31, 15>", value);
}
/* /*
* TLB operations. * TLB operations.
*/ */
...@@ -70,18 +90,16 @@ static inline void tlb_invalid_indexed(void) ...@@ -70,18 +90,16 @@ static inline void tlb_invalid_indexed(void)
mtcr("cr<8, 15>", 0x02000000); mtcr("cr<8, 15>", 0x02000000);
} }
/* setup hardrefil pgd */
static inline unsigned long get_pgd(void)
{
return mfcr("cr<29, 15>");
}
static inline void setup_pgd(unsigned long pgd, bool kernel) static inline void setup_pgd(unsigned long pgd, bool kernel)
{ {
if (kernel) if (kernel)
mtcr("cr<28, 15>", pgd); mtcr("cr<28, 15>", pgd | BIT(0));
else else
mtcr("cr<29, 15>", pgd); mtcr("cr<29, 15>", pgd | BIT(0));
} }
static inline unsigned long get_pgd(void)
{
return mfcr("cr<29, 15>") & ~BIT(0);
}
#endif /* __ASM_CSKY_CKMMUV2_H */ #endif /* __ASM_CSKY_CKMMUV2_H */
...@@ -14,18 +14,11 @@ ...@@ -14,18 +14,11 @@
#define LSAVE_A2 32 #define LSAVE_A2 32
#define LSAVE_A3 36 #define LSAVE_A3 36
#define EPC_INCREASE 4
#define EPC_KEEP 0
#define KSPTOUSP #define KSPTOUSP
#define USPTOKSP #define USPTOKSP
#define usp cr<14, 1> #define usp cr<14, 1>
.macro INCTRAP rx
addi \rx, EPC_INCREASE
.endm
.macro SAVE_ALL epc_inc .macro SAVE_ALL epc_inc
subi sp, 152 subi sp, 152
stw tls, (sp, 0) stw tls, (sp, 0)
...@@ -169,10 +162,80 @@ ...@@ -169,10 +162,80 @@
mtcr \rx, cr<8, 15> mtcr \rx, cr<8, 15>
.endm .endm
.macro SETUP_MMU rx .macro SETUP_MMU
lrw \rx, PHYS_OFFSET | 0xe /* Init psr and enable ee */
mtcr \rx, cr<30, 15> lrw r6, DEFAULT_PSR_VALUE
lrw \rx, (PHYS_OFFSET + 0x20000000) | 0xe mtcr r6, psr
mtcr \rx, cr<31, 15> psrset ee
/* Invalid I/Dcache BTB BHT */
movi r6, 7
lsli r6, 16
addi r6, (1<<4) | 3
mtcr r6, cr17
/* Invalid all TLB */
bgeni r6, 26
mtcr r6, cr<8, 15> /* Set MCIR */
/* Check MMU on/off */
mfcr r6, cr18
btsti r6, 0
bt 1f
/* MMU off: setup mapping tlb entry */
movi r6, 0
mtcr r6, cr<6, 15> /* Set MPR with 4K page size */
grs r6, 1f /* Get current pa by PC */
bmaski r7, (PAGE_SHIFT + 1) /* r7 = 0x1fff */
andn r6, r7
mtcr r6, cr<4, 15> /* Set MEH */
mov r8, r6
movi r7, 0x00000006
or r8, r7
mtcr r8, cr<2, 15> /* Set MEL0 */
movi r7, 0x00001006
or r8, r7
mtcr r8, cr<3, 15> /* Set MEL1 */
bgeni r8, 28
mtcr r8, cr<8, 15> /* Set MCIR to write TLB */
br 2f
1:
/*
* MMU on: use origin MSA value from bootloader
*
* cr<30/31, 15> MSA register format:
* 31 - 29 | 28 - 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
* BA Reserved SH WA B SO SEC C D V
*/
mfcr r6, cr<30, 15> /* Get MSA0 */
2:
lsri r6, 28
lsli r6, 28
addi r6, 0x1ce
mtcr r6, cr<30, 15> /* Set MSA0 */
lsri r6, 28
addi r6, 2
lsli r6, 28
addi r6, 0x1ce
mtcr r6, cr<31, 15> /* Set MSA1 */
/* enable MMU */
mfcr r6, cr18
bseti r6, 0
mtcr r6, cr18
jmpi 3f /* jump to va */
3:
.endm
.macro ANDI_R3 rx, imm
lsri \rx, 3
andi \rx, (\imm >> 3)
.endm .endm
#endif /* __ASM_CSKY_ENTRY_H */ #endif /* __ASM_CSKY_ENTRY_H */
...@@ -5,9 +5,8 @@ ...@@ -5,9 +5,8 @@
#define __ASM_CSKY_REGDEF_H #define __ASM_CSKY_REGDEF_H
#define syscallid r7 #define syscallid r7
#define r11_sig r11
#define regs_syscallid(regs) regs->regs[3] #define regs_syscallid(regs) regs->regs[3]
#define regs_fp(regs) regs->regs[4]
/* /*
* PSR format: * PSR format:
...@@ -23,4 +22,6 @@ ...@@ -23,4 +22,6 @@
#define SYSTRACE_SAVENUM 5 #define SYSTRACE_SAVENUM 5
#define TRAP0_SIZE 4
#endif /* __ASM_CSKY_REGDEF_H */ #endif /* __ASM_CSKY_REGDEF_H */
...@@ -61,10 +61,17 @@ ...@@ -61,10 +61,17 @@
addi sp, 16 addi sp, 16
.endm .endm
.macro nop32_stub
nop32
nop32
nop32
.endm
ENTRY(ftrace_stub) ENTRY(ftrace_stub)
jmp lr jmp lr
END(ftrace_stub) END(ftrace_stub)
#ifndef CONFIG_DYNAMIC_FTRACE
ENTRY(_mcount) ENTRY(_mcount)
mcount_enter mcount_enter
...@@ -76,7 +83,7 @@ ENTRY(_mcount) ...@@ -76,7 +83,7 @@ ENTRY(_mcount)
bf skip_ftrace bf skip_ftrace
mov a0, lr mov a0, lr
subi a0, MCOUNT_INSN_SIZE subi a0, 4
ldw a1, (sp, 24) ldw a1, (sp, 24)
jsr r26 jsr r26
...@@ -101,13 +108,41 @@ skip_ftrace: ...@@ -101,13 +108,41 @@ skip_ftrace:
mcount_exit mcount_exit
#endif #endif
END(_mcount) END(_mcount)
#else /* CONFIG_DYNAMIC_FTRACE */
ENTRY(_mcount)
mov t1, lr
ldw lr, (sp, 0)
addi sp, 4
jmp t1
ENDPROC(_mcount)
ENTRY(ftrace_caller)
mcount_enter
ldw a0, (sp, 16)
subi a0, 4
ldw a1, (sp, 24)
nop
GLOBAL(ftrace_call)
nop32_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
nop
GLOBAL(ftrace_graph_call)
nop32_stub
#endif
mcount_exit
ENDPROC(ftrace_caller)
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller) ENTRY(ftrace_graph_caller)
mov a0, sp mov a0, sp
addi a0, 24 addi a0, 24
ldw a1, (sp, 16) ldw a1, (sp, 16)
subi a1, MCOUNT_INSN_SIZE subi a1, 4
mov a2, r8 mov a2, r8
lrw r26, prepare_ftrace_return lrw r26, prepare_ftrace_return
jsr r26 jsr r26
......
...@@ -35,11 +35,7 @@ ENTRY(memmove) ...@@ -35,11 +35,7 @@ ENTRY(memmove)
.L_len_larger_16bytes: .L_len_larger_16bytes:
subi r1, 16 subi r1, 16
subi r0, 16 subi r0, 16
#if defined(__CSKY_VDSPV2__) #if defined(__CK860__)
vldx.8 vr0, (r1), r19
PRE_BNEZAD (r18)
vstx.8 vr0, (r0), r19
#elif defined(__CK860__)
ldw r3, (r1, 12) ldw r3, (r1, 12)
stw r3, (r0, 12) stw r3, (r0, 12)
ldw r3, (r1, 8) ldw r3, (r1, 8)
......
...@@ -12,7 +12,6 @@ generic-y += dma-mapping.h ...@@ -12,7 +12,6 @@ generic-y += dma-mapping.h
generic-y += emergency-restart.h generic-y += emergency-restart.h
generic-y += exec.h generic-y += exec.h
generic-y += fb.h generic-y += fb.h
generic-y += ftrace.h
generic-y += futex.h generic-y += futex.h
generic-y += gpio.h generic-y += gpio.h
generic-y += hardirq.h generic-y += hardirq.h
......
...@@ -4,10 +4,26 @@ ...@@ -4,10 +4,26 @@
#ifndef __ASM_CSKY_FTRACE_H #ifndef __ASM_CSKY_FTRACE_H
#define __ASM_CSKY_FTRACE_H #define __ASM_CSKY_FTRACE_H
#define MCOUNT_INSN_SIZE 4 #define MCOUNT_INSN_SIZE 14
#define HAVE_FUNCTION_GRAPH_FP_TEST #define HAVE_FUNCTION_GRAPH_FP_TEST
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
#define MCOUNT_ADDR ((unsigned long)_mcount)
#ifndef __ASSEMBLY__
extern void _mcount(unsigned long);
extern void ftrace_graph_call(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
}
struct dyn_arch_ftrace {
};
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_CSKY_FTRACE_H */ #endif /* __ASM_CSKY_FTRACE_H */
...@@ -14,23 +14,10 @@ ...@@ -14,23 +14,10 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <abi/ckmmu.h> #include <abi/ckmmu.h>
static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel)
{
pgd -= PAGE_OFFSET;
pgd += PHYS_OFFSET;
pgd |= 1;
setup_pgd(pgd, kernel);
}
#define TLBMISS_HANDLER_SETUP_PGD(pgd) \ #define TLBMISS_HANDLER_SETUP_PGD(pgd) \
tlbmiss_handler_setup_pgd((unsigned long)pgd, 0) setup_pgd(__pa(pgd), false)
#define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \ #define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \
tlbmiss_handler_setup_pgd((unsigned long)pgd, 1) setup_pgd(__pa(pgd), true)
static inline unsigned long tlb_get_pgd(void)
{
return ((get_pgd() - PHYS_OFFSET) & ~1) + PAGE_OFFSET;
}
#define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) #define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) #define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK)
......
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
#include <linux/const.h> #include <linux/const.h>
/* /*
* PAGE_SHIFT determines the page size * PAGE_SHIFT determines the page size: 4KB
*/ */
#define PAGE_SHIFT 12 #define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
...@@ -17,12 +17,18 @@ ...@@ -17,12 +17,18 @@
#define THREAD_MASK (~(THREAD_SIZE - 1)) #define THREAD_MASK (~(THREAD_SIZE - 1))
#define THREAD_SHIFT (PAGE_SHIFT + 1) #define THREAD_SHIFT (PAGE_SHIFT + 1)
/* /*
* NOTE: virtual isn't really correct, actually it should be the offset into the * For C-SKY "User-space:Kernel-space" is "2GB:2GB" fixed by hardware and there
* memory node, but we have no highmem, so that works for now. * are two segment registers (MSA0 + MSA1) to mapping 512MB + 512MB physical
* TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots * address region. We use them mapping kernel 1GB direct-map address area and
* of the shifts unnecessary. * for more than 1GB of memory we use highmem.
*/ */
#define PAGE_OFFSET 0x80000000
#define SSEG_SIZE 0x20000000
#define LOWMEM_LIMIT (SSEG_SIZE * 2)
#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (SSEG_SIZE - 1))
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
...@@ -50,9 +56,6 @@ struct page; ...@@ -50,9 +56,6 @@ struct page;
struct vm_area_struct; struct vm_area_struct;
/*
* These are used to make use of C type-checking..
*/
typedef struct { unsigned long pte_low; } pte_t; typedef struct { unsigned long pte_low; } pte_t;
#define pte_val(x) ((x).pte_low) #define pte_val(x) ((x).pte_low)
...@@ -69,18 +72,13 @@ typedef struct page *pgtable_t; ...@@ -69,18 +72,13 @@ typedef struct page *pgtable_t;
#define __pgd(x) ((pgd_t) { (x) }) #define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) }) #define __pgprot(x) ((pgprot_t) { (x) })
#endif /* !__ASSEMBLY__ */ extern unsigned long va_pa_offset;
#define PHYS_OFFSET (CONFIG_RAM_BASE & ~(LOWMEM_LIMIT - 1)) #define ARCH_PFN_OFFSET PFN_DOWN(va_pa_offset + PHYS_OFFSET_OFFSET)
#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (LOWMEM_LIMIT - 1))
#define ARCH_PFN_OFFSET PFN_DOWN(CONFIG_RAM_BASE)
#define PAGE_OFFSET 0x80000000 #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + va_pa_offset)
#define LOWMEM_LIMIT 0x40000000 #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - va_pa_offset))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - \
PHYS_OFFSET))
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#define MAP_NR(x) PFN_DOWN((unsigned long)(x) - PAGE_OFFSET - \ #define MAP_NR(x) PFN_DOWN((unsigned long)(x) - PAGE_OFFSET - \
...@@ -90,15 +88,10 @@ typedef struct page *pgtable_t; ...@@ -90,15 +88,10 @@ typedef struct page *pgtable_t;
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
/*
* main RAM and kernel working space are coincident at 0x80000000, but to make
* life more interesting, there's also an uncached virtual shadow at 0xb0000000
* - these mappings are fixed in the MMU
*/
#define pfn_to_kaddr(x) __va(PFN_PHYS(x)) #define pfn_to_kaddr(x) __va(PFN_PHYS(x))
#include <asm-generic/memory_model.h> #include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h> #include <asm-generic/getorder.h>
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_CSKY_PAGE_H */ #endif /* __ASM_CSKY_PAGE_H */
...@@ -4,4 +4,12 @@ ...@@ -4,4 +4,12 @@
#ifndef __ASM_CSKY_PERF_EVENT_H #ifndef __ASM_CSKY_PERF_EVENT_H
#define __ASM_CSKY_PERF_EVENT_H #define __ASM_CSKY_PERF_EVENT_H
#include <abi/regdef.h>
#define perf_arch_fetch_caller_regs(regs, __ip) { \
(regs)->pc = (__ip); \
regs_fp(regs) = (unsigned long) __builtin_frame_address(0); \
asm volatile("mov %0, sp\n":"=r"((regs)->usp)); \
}
#endif /* __ASM_PERF_EVENT_ELF_H */ #endif /* __ASM_PERF_EVENT_ELF_H */
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_PTRACE_H
#define __ASM_CSKY_PTRACE_H
#include <uapi/asm/ptrace.h>
#include <asm/traps.h>
#include <linux/types.h>
#ifndef __ASSEMBLY__
#define PS_S 0x80000000 /* Supervisor Mode */
#define arch_has_single_step() (1)
#define current_pt_regs() \
({ (struct pt_regs *)((char *)current_thread_info() + THREAD_SIZE) - 1; })
#define user_stack_pointer(regs) ((regs)->usp)
#define user_mode(regs) (!((regs)->sr & PS_S))
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
static inline bool in_syscall(struct pt_regs const *regs)
{
return ((regs->sr >> 16) & 0xff) == VEC_TRAP0;
}
static inline void forget_syscall(struct pt_regs *regs)
{
regs->sr &= ~(0xff << 16);
}
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
return regs->a0;
}
#endif /* __ASSEMBLY__ */
#endif /* __ASM_CSKY_PTRACE_H */
...@@ -8,12 +8,21 @@ ...@@ -8,12 +8,21 @@
#include <abi/regdef.h> #include <abi/regdef.h>
#include <uapi/linux/audit.h> #include <uapi/linux/audit.h>
extern void *sys_call_table[];
static inline int static inline int
syscall_get_nr(struct task_struct *task, struct pt_regs *regs) syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{ {
return regs_syscallid(regs); return regs_syscallid(regs);
} }
static inline void
syscall_set_nr(struct task_struct *task, struct pt_regs *regs,
int sysno)
{
regs_syscallid(regs) = sysno;
}
static inline void static inline void
syscall_rollback(struct task_struct *task, struct pt_regs *regs) syscall_rollback(struct task_struct *task, struct pt_regs *regs)
{ {
......
...@@ -51,17 +51,14 @@ static inline struct thread_info *current_thread_info(void) ...@@ -51,17 +51,14 @@ static inline struct thread_info *current_thread_info(void)
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
/* entry.S relies on these definitions!
* bits 0-5 are tested at every exception exit
*/
#define TIF_SIGPENDING 0 /* signal pending */ #define TIF_SIGPENDING 0 /* signal pending */
#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ #define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_SYSCALL_TRACE 5 /* syscall trace active */ #define TIF_SYSCALL_TRACE 3 /* syscall trace active */
#define TIF_DELAYED_TRACE 14 /* single step a syscall */ #define TIF_SYSCALL_TRACEPOINT 4 /* syscall tracepoint instrumentation */
#define TIF_SYSCALL_AUDIT 5 /* syscall auditing */
#define TIF_POLLING_NRFLAG 16 /* poll_idle() is TIF_NEED_RESCHED */ #define TIF_POLLING_NRFLAG 16 /* poll_idle() is TIF_NEED_RESCHED */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_FREEZE 19 /* thread is freezing for suspend */
#define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */ #define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */
#define TIF_SECCOMP 21 /* secure computing */ #define TIF_SECCOMP 21 /* secure computing */
...@@ -69,10 +66,10 @@ static inline struct thread_info *current_thread_info(void) ...@@ -69,10 +66,10 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_DELAYED_TRACE (1 << TIF_DELAYED_TRACE) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_MEMDIE (1 << TIF_MEMDIE) #define _TIF_MEMDIE (1 << TIF_MEMDIE)
#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
#define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_SECCOMP (1 << TIF_SECCOMP)
......
...@@ -2,3 +2,5 @@ ...@@ -2,3 +2,5 @@
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <uapi/asm/unistd.h> #include <uapi/asm/unistd.h>
#define NR_syscalls (__NR_syscalls)
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef _ASM_CSKY_PERF_REGS_H
#define _ASM_CSKY_PERF_REGS_H
/* Index of struct pt_regs */
enum perf_event_csky_regs {
PERF_REG_CSKY_TLS,
PERF_REG_CSKY_LR,
PERF_REG_CSKY_PC,
PERF_REG_CSKY_SR,
PERF_REG_CSKY_SP,
PERF_REG_CSKY_ORIG_A0,
PERF_REG_CSKY_A0,
PERF_REG_CSKY_A1,
PERF_REG_CSKY_A2,
PERF_REG_CSKY_A3,
PERF_REG_CSKY_REGS0,
PERF_REG_CSKY_REGS1,
PERF_REG_CSKY_REGS2,
PERF_REG_CSKY_REGS3,
PERF_REG_CSKY_REGS4,
PERF_REG_CSKY_REGS5,
PERF_REG_CSKY_REGS6,
PERF_REG_CSKY_REGS7,
PERF_REG_CSKY_REGS8,
PERF_REG_CSKY_REGS9,
#if defined(__CSKYABIV2__)
PERF_REG_CSKY_EXREGS0,
PERF_REG_CSKY_EXREGS1,
PERF_REG_CSKY_EXREGS2,
PERF_REG_CSKY_EXREGS3,
PERF_REG_CSKY_EXREGS4,
PERF_REG_CSKY_EXREGS5,
PERF_REG_CSKY_EXREGS6,
PERF_REG_CSKY_EXREGS7,
PERF_REG_CSKY_EXREGS8,
PERF_REG_CSKY_EXREGS9,
PERF_REG_CSKY_EXREGS10,
PERF_REG_CSKY_EXREGS11,
PERF_REG_CSKY_EXREGS12,
PERF_REG_CSKY_EXREGS13,
PERF_REG_CSKY_EXREGS14,
PERF_REG_CSKY_HI,
PERF_REG_CSKY_LO,
PERF_REG_CSKY_DCSR,
#endif
PERF_REG_CSKY_MAX,
};
#endif /* _ASM_CSKY_PERF_REGS_H */
...@@ -48,20 +48,5 @@ struct user_fp { ...@@ -48,20 +48,5 @@ struct user_fp {
unsigned long reserved; unsigned long reserved;
}; };
#ifdef __KERNEL__
#define PS_S 0x80000000 /* Supervisor Mode */
#define arch_has_single_step() (1)
#define current_pt_regs() \
({ (struct pt_regs *)((char *)current_thread_info() + THREAD_SIZE) - 1; })
#define user_stack_pointer(regs) ((regs)->usp)
#define user_mode(regs) (!((regs)->sr & PS_S))
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* _CSKY_PTRACE_H */ #endif /* _CSKY_PTRACE_H */
...@@ -9,6 +9,8 @@ obj-$(CONFIG_SMP) += smp.o ...@@ -9,6 +9,8 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_CSKY_PMU_V1) += perf_event.o obj-$(CONFIG_CSKY_PMU_V1) += perf_event.o
obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
ifdef CONFIG_FUNCTION_TRACER ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
......
...@@ -12,11 +12,10 @@ ...@@ -12,11 +12,10 @@
* If *ptr != oldval && return 1, * If *ptr != oldval && return 1,
* else *ptr = newval return 0. * else *ptr = newval return 0.
*/ */
#ifdef CONFIG_CPU_HAS_LDSTEX
ENTRY(csky_cmpxchg) ENTRY(csky_cmpxchg)
USPTOKSP USPTOKSP
mfcr a3, epc mfcr a3, epc
INCTRAP a3 addi a3, TRAP0_SIZE
subi sp, 8 subi sp, 8
stw a3, (sp, 0) stw a3, (sp, 0)
...@@ -24,6 +23,7 @@ ENTRY(csky_cmpxchg) ...@@ -24,6 +23,7 @@ ENTRY(csky_cmpxchg)
stw a3, (sp, 4) stw a3, (sp, 4)
psrset ee psrset ee
#ifdef CONFIG_CPU_HAS_LDSTEX
1: 1:
ldex a3, (a2) ldex a3, (a2)
cmpne a0, a3 cmpne a0, a3
...@@ -33,27 +33,7 @@ ENTRY(csky_cmpxchg) ...@@ -33,27 +33,7 @@ ENTRY(csky_cmpxchg)
bez a3, 1b bez a3, 1b
2: 2:
sync.is sync.is
mvc a0
ldw a3, (sp, 0)
mtcr a3, epc
ldw a3, (sp, 4)
mtcr a3, epsr
addi sp, 8
KSPTOUSP
rte
END(csky_cmpxchg)
#else #else
ENTRY(csky_cmpxchg)
USPTOKSP
mfcr a3, epc
INCTRAP a3
subi sp, 8
stw a3, (sp, 0)
mfcr a3, epsr
stw a3, (sp, 4)
psrset ee
1: 1:
ldw a3, (a2) ldw a3, (a2)
cmpne a0, a3 cmpne a0, a3
...@@ -61,6 +41,7 @@ ENTRY(csky_cmpxchg) ...@@ -61,6 +41,7 @@ ENTRY(csky_cmpxchg)
2: 2:
stw a1, (a2) stw a1, (a2)
3: 3:
#endif
mvc a0 mvc a0
ldw a3, (sp, 0) ldw a3, (sp, 0)
mtcr a3, epc mtcr a3, epc
...@@ -71,6 +52,7 @@ ENTRY(csky_cmpxchg) ...@@ -71,6 +52,7 @@ ENTRY(csky_cmpxchg)
rte rte
END(csky_cmpxchg) END(csky_cmpxchg)
#ifndef CONFIG_CPU_HAS_LDSTEX
/* /*
* Called from tlbmodified exception * Called from tlbmodified exception
*/ */
......
...@@ -40,7 +40,8 @@ ENTRY(csky_\name) ...@@ -40,7 +40,8 @@ ENTRY(csky_\name)
WR_MCIR a2 WR_MCIR a2
#endif #endif
bclri r6, 0 bclri r6, 0
lrw a2, PHYS_OFFSET lrw a2, va_pa_offset
ld.w a2, (a2, 0)
subu r6, a2 subu r6, a2
bseti r6, 31 bseti r6, 31
...@@ -50,7 +51,8 @@ ENTRY(csky_\name) ...@@ -50,7 +51,8 @@ ENTRY(csky_\name)
addu r6, a2 addu r6, a2
ldw r6, (r6) ldw r6, (r6)
lrw a2, PHYS_OFFSET lrw a2, va_pa_offset
ld.w a2, (a2, 0)
subu r6, a2 subu r6, a2
bseti r6, 31 bseti r6, 31
...@@ -91,7 +93,7 @@ ENTRY(csky_\name) ...@@ -91,7 +93,7 @@ ENTRY(csky_\name)
mfcr a3, ss2 mfcr a3, ss2
mfcr r6, ss3 mfcr r6, ss3
mfcr a2, ss4 mfcr a2, ss4
SAVE_ALL EPC_KEEP SAVE_ALL 0
.endm .endm
.macro tlbop_end is_write .macro tlbop_end is_write
RD_MEH a2 RD_MEH a2
...@@ -99,7 +101,6 @@ ENTRY(csky_\name) ...@@ -99,7 +101,6 @@ ENTRY(csky_\name)
mov a0, sp mov a0, sp
movi a1, \is_write movi a1, \is_write
jbsr do_page_fault jbsr do_page_fault
movi r11_sig, 0 /* r11 = 0, Not a syscall. */
jmpi ret_from_exception jmpi ret_from_exception
.endm .endm
...@@ -118,7 +119,7 @@ jbsr csky_cmpxchg_fixup ...@@ -118,7 +119,7 @@ jbsr csky_cmpxchg_fixup
tlbop_end 1 tlbop_end 1
ENTRY(csky_systemcall) ENTRY(csky_systemcall)
SAVE_ALL EPC_INCREASE SAVE_ALL TRAP0_SIZE
psrset ee, ie psrset ee, ie
...@@ -136,8 +137,9 @@ ENTRY(csky_systemcall) ...@@ -136,8 +137,9 @@ ENTRY(csky_systemcall)
bmaski r10, THREAD_SHIFT bmaski r10, THREAD_SHIFT
andn r9, r10 andn r9, r10
ldw r8, (r9, TINFO_FLAGS) ldw r8, (r9, TINFO_FLAGS)
btsti r8, TIF_SYSCALL_TRACE ANDI_R3 r8, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
bt 1f cmpnei r8, 0
bt csky_syscall_trace
#if defined(__CSKYABIV2__) #if defined(__CSKYABIV2__)
subi sp, 8 subi sp, 8
stw r5, (sp, 0x4) stw r5, (sp, 0x4)
...@@ -150,10 +152,9 @@ ENTRY(csky_systemcall) ...@@ -150,10 +152,9 @@ ENTRY(csky_systemcall)
stw a0, (sp, LSAVE_A0) /* Save return value */ stw a0, (sp, LSAVE_A0) /* Save return value */
jmpi ret_from_exception jmpi ret_from_exception
1: csky_syscall_trace:
movi a0, 0 /* enter system call */ mov a0, sp /* sp = pt_regs pointer */
mov a1, sp /* sp = pt_regs pointer */ jbsr syscall_trace_enter
jbsr syscall_trace
/* Prepare args before do system call */ /* Prepare args before do system call */
ldw a0, (sp, LSAVE_A0) ldw a0, (sp, LSAVE_A0)
ldw a1, (sp, LSAVE_A1) ldw a1, (sp, LSAVE_A1)
...@@ -173,9 +174,8 @@ ENTRY(csky_systemcall) ...@@ -173,9 +174,8 @@ ENTRY(csky_systemcall)
#endif #endif
stw a0, (sp, LSAVE_A0) /* Save return value */ stw a0, (sp, LSAVE_A0) /* Save return value */
movi a0, 1 /* leave system call */ mov a0, sp /* right now, sp --> pt_regs */
mov a1, sp /* right now, sp --> pt_regs */ jbsr syscall_trace_exit
jbsr syscall_trace
br ret_from_exception br ret_from_exception
ENTRY(ret_from_kernel_thread) ENTRY(ret_from_kernel_thread)
...@@ -190,14 +190,11 @@ ENTRY(ret_from_fork) ...@@ -190,14 +190,11 @@ ENTRY(ret_from_fork)
bmaski r10, THREAD_SHIFT bmaski r10, THREAD_SHIFT
andn r9, r10 andn r9, r10
ldw r8, (r9, TINFO_FLAGS) ldw r8, (r9, TINFO_FLAGS)
movi r11_sig, 1 ANDI_R3 r8, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
btsti r8, TIF_SYSCALL_TRACE cmpnei r8, 0
bf 3f bf ret_from_exception
movi a0, 1 mov a0, sp /* sp = pt_regs pointer */
mov a1, sp /* sp = pt_regs pointer */ jbsr syscall_trace_exit
jbsr syscall_trace
3:
jbsr ret_from_exception
ret_from_exception: ret_from_exception:
ld syscallid, (sp, LSAVE_PSR) ld syscallid, (sp, LSAVE_PSR)
...@@ -212,41 +209,30 @@ ret_from_exception: ...@@ -212,41 +209,30 @@ ret_from_exception:
bmaski r10, THREAD_SHIFT bmaski r10, THREAD_SHIFT
andn r9, r10 andn r9, r10
resume_userspace:
ldw r8, (r9, TINFO_FLAGS) ldw r8, (r9, TINFO_FLAGS)
andi r8, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED) andi r8, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
cmpnei r8, 0 cmpnei r8, 0
bt exit_work bt exit_work
1: RESTORE_ALL 1:
RESTORE_ALL
exit_work: exit_work:
lrw syscallid, ret_from_exception
mov lr, syscallid
btsti r8, TIF_NEED_RESCHED btsti r8, TIF_NEED_RESCHED
bt work_resched bt work_resched
/* If thread_info->flag is empty, RESTORE_ALL */
cmpnei r8, 0 mov a0, sp
bf 1b mov a1, r8
mov a1, sp jmpi do_notify_resume
mov a0, r8
mov a2, r11_sig /* syscall? */
btsti r8, TIF_SIGPENDING /* delivering a signal? */
/* prevent further restarts(set r11 = 0) */
clrt r11_sig
jbsr do_notify_resume /* do signals */
br resume_userspace
work_resched: work_resched:
lrw syscallid, ret_from_exception
mov r15, syscallid /* Return address in link */
jmpi schedule jmpi schedule
ENTRY(sys_rt_sigreturn)
movi r11_sig, 0
jmpi do_rt_sigreturn
ENTRY(csky_trap) ENTRY(csky_trap)
SAVE_ALL EPC_KEEP SAVE_ALL 0
psrset ee psrset ee
movi r11_sig, 0 /* r11 = 0, Not a syscall. */
mov a0, sp /* Push Stack pointer arg */ mov a0, sp /* Push Stack pointer arg */
jbsr trap_c /* Call C-level trap handler */ jbsr trap_c /* Call C-level trap handler */
jmpi ret_from_exception jmpi ret_from_exception
...@@ -261,7 +247,7 @@ ENTRY(csky_get_tls) ...@@ -261,7 +247,7 @@ ENTRY(csky_get_tls)
/* increase epc for continue */ /* increase epc for continue */
mfcr a0, epc mfcr a0, epc
INCTRAP a0 addi a0, TRAP0_SIZE
mtcr a0, epc mtcr a0, epc
/* get current task thread_info with kernel 8K stack */ /* get current task thread_info with kernel 8K stack */
...@@ -278,9 +264,8 @@ ENTRY(csky_get_tls) ...@@ -278,9 +264,8 @@ ENTRY(csky_get_tls)
rte rte
ENTRY(csky_irq) ENTRY(csky_irq)
SAVE_ALL EPC_KEEP SAVE_ALL 0
psrset ee psrset ee
movi r11_sig, 0 /* r11 = 0, Not a syscall. */
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
mov r9, sp /* Get current stack pointer */ mov r9, sp /* Get current stack pointer */
......
...@@ -3,6 +3,137 @@ ...@@ -3,6 +3,137 @@
#include <linux/ftrace.h> #include <linux/ftrace.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_DYNAMIC_FTRACE
#define NOP		0x4000
#define NOP32_HI	0xc400
#define NOP32_LO	0x4820
#define PUSH_LR		0x14d0
#define MOVIH_LINK	0xea3a
#define ORI_LINK	0xef5a
#define JSR_LINK	0xe8fa
#define BSR_LINK	0xe000

/*
 * With -pg, gcc-csky emits this stub in every function prologue:
 *
 *	push	lr
 *	jbsr	_mcount
 *	nop32
 *	nop32
 *
 * When patching a call in, a pc-relative bsr is used if the target is
 * within +-64MB of the call site:
 *
 *	push	lr
 *	bsr	_mcount
 *	nop32
 *	nop32
 *
 * otherwise the target address is materialized in r26 (our reserved
 * link register) and called indirectly:
 *
 *	push	lr
 *	movih	r26, ...
 *	ori	r26, ...
 *	jsr	r26
 */
static inline void make_jbsr(unsigned long callee, unsigned long pc,
			     uint16_t *call, bool nolr)
{
	long delta = (long)callee - (long)pc;

	/* First halfword: keep the lr push unless the caller saved lr. */
	call[0] = nolr ? NOP : PUSH_LR;

	if (delta >= -67108864 && delta <= 67108864) {
		/* Near target: 26-bit bsr, offset counted in halfwords. */
		unsigned long imm = (unsigned long)(delta >> 1);

		call[1] = BSR_LINK | ((uint16_t)(imm >> 16) & 0x3ff);
		call[2] = (uint16_t)(imm & 0xffff);
		call[3] = NOP32_HI;
		call[4] = NOP32_LO;
		call[5] = NOP32_HI;
		call[6] = NOP32_LO;
	} else {
		/* Far target: movih/ori builds the address, jsr via r26. */
		call[1] = MOVIH_LINK;
		call[2] = callee >> 16;
		call[3] = ORI_LINK;
		call[4] = callee & 0xffff;
		call[5] = JSR_LINK;
		call[6] = 0;
	}
}
static uint16_t nops[7] = {NOP, NOP32_HI, NOP32_LO, NOP32_HI, NOP32_LO,
			   NOP32_HI, NOP32_LO};

/*
 * Check that the patch site for @hook (starting one halfword earlier,
 * at the push-lr slot) still holds the compiler-generated nop pattern
 * before a call is written over it.  Returns 0 when it does, -EFAULT
 * when the site cannot be read, -EINVAL when the bytes differ.
 */
static int ftrace_check_current_nop(unsigned long hook)
{
	uint16_t cur[7];
	unsigned long site = hook - 2;

	if (probe_kernel_read((void *)cur, (void *)site, sizeof(nops)))
		return -EFAULT;

	if (!memcmp((void *)nops, (void *)cur, sizeof(nops)))
		return 0;

	pr_err("%p: nop but get (%04x %04x %04x %04x %04x %04x %04x)\n",
		(void *)site,
		cur[0], cur[1], cur[2], cur[3], cur[4], cur[5],
		cur[6]);
	return -EINVAL;
}
/*
 * Patch the site for @hook: write either the call sequence to @target
 * (@enable true) or the original nop pattern (@enable false), then
 * flush the icache over the patched range.  @nolr keeps a nop in the
 * push-lr slot instead of PUSH_LR.  Returns -EPERM when the kernel
 * text could not be written.
 */
static int ftrace_modify_code(unsigned long hook, unsigned long target,
				bool enable, bool nolr)
{
	uint16_t insn[7];
	unsigned long site = hook - 2;

	make_jbsr(target, hook, insn, nolr);

	if (probe_kernel_write((void *)site, enable ? insn : nops,
				sizeof(nops)))
		return -EPERM;

	flush_icache_range(site, site + MCOUNT_INSN_SIZE);

	return 0;
}
/*
 * Enable tracing of the function at rec->ip: verify the site still
 * holds the original nop pattern, then patch in a call to @addr.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	int err = ftrace_check_current_nop(rec->ip);

	if (err)
		return err;

	return ftrace_modify_code(rec->ip, addr, true, false);
}
/*
 * Disable tracing of the function at rec->ip by restoring the nop
 * pattern (ftrace_modify_code with enable=false writes nops, so the
 * old target @addr is not used to build the replacement; @mod is
 * unused on csky).
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	return ftrace_modify_code(rec->ip, addr, false, false);
}
/*
 * Redirect the patchable call inside ftrace_caller to @func.
 * nolr=true: the halfword in front of the call stays a nop, no extra
 * lr push is emitted at this site.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return ftrace_modify_code((unsigned long)&ftrace_call,
				(unsigned long)func, true, true);
}
/* No arch-specific setup is needed for dynamic ftrace on csky. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
...@@ -43,8 +174,21 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, ...@@ -43,8 +174,21 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
*(unsigned long *)frame_pointer = return_hooker; *(unsigned long *)frame_pointer = return_hooker;
} }
} }
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Patch the ftrace_graph_call site so it branches to
 * ftrace_graph_caller.  nolr=true keeps a nop in the push-lr slot
 * (see make_jbsr).
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code((unsigned long)&ftrace_graph_call,
			(unsigned long)&ftrace_graph_caller, true, true);
}
/*
 * Restore the nop pattern at the ftrace_graph_call site, turning the
 * graph-tracer entry back off (enable=false writes nops).
 */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code((unsigned long)&ftrace_graph_call,
			(unsigned long)&ftrace_graph_caller, false, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/* _mcount is defined in abi's mcount.S */ /* _mcount is defined in abi's mcount.S */
extern void _mcount(void);
EXPORT_SYMBOL(_mcount); EXPORT_SYMBOL(_mcount);
...@@ -7,16 +7,11 @@ ...@@ -7,16 +7,11 @@
__HEAD __HEAD
ENTRY(_start) ENTRY(_start)
/* set super user mode */ SETUP_MMU
lrw a3, DEFAULT_PSR_VALUE
mtcr a3, psr
psrset ee
SETUP_MMU a3
/* set stack point */ /* set stack point */
lrw a3, init_thread_union + THREAD_SIZE lrw r6, init_thread_union + THREAD_SIZE
mov sp, a3 mov sp, r6
jmpi csky_start jmpi csky_start
END(_start) END(_start)
...@@ -24,53 +19,12 @@ END(_start) ...@@ -24,53 +19,12 @@ END(_start)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
.align 10 .align 10
ENTRY(_start_smp_secondary) ENTRY(_start_smp_secondary)
/* Invalid I/Dcache BTB BHT */ SETUP_MMU
movi a3, 7
lsli a3, 16
addi a3, (1<<4) | 3
mtcr a3, cr17
tlbi.alls
/* setup PAGEMASK */
movi a3, 0
mtcr a3, cr<6, 15>
/* setup MEL0/MEL1 */
grs a0, _start_smp_pc
_start_smp_pc:
bmaski a1, 13
andn a0, a1
movi a1, 0x00000006
movi a2, 0x00001006
or a1, a0
or a2, a0
mtcr a1, cr<2, 15>
mtcr a2, cr<3, 15>
/* setup MEH */
mtcr a0, cr<4, 15>
/* write TLB */
bgeni a3, 28
mtcr a3, cr<8, 15>
SETUP_MMU a3
/* enable MMU */
movi a3, 1
mtcr a3, cr18
jmpi _goto_mmu_on
_goto_mmu_on:
lrw a3, DEFAULT_PSR_VALUE
mtcr a3, psr
psrset ee
/* set stack point */ /* set stack point */
lrw a3, secondary_stack lrw r6, secondary_stack
ld.w a3, (a3, 0) ld.w r6, (r6, 0)
mov sp, a3 mov sp, r6
jmpi csky_start_secondary jmpi csky_start_secondary
END(_start_smp_secondary) END(_start_smp_secondary)
......
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/perf_event.h>
#include <linux/uaccess.h>
/* Kernel callchain */
struct stackframe {
	unsigned long fp;
	unsigned long lr;
};

/*
 * Advance @frame to its caller's frame record on the kernel stack.
 * Returns 0 on success, -EPERM when fp no longer points at a valid,
 * word-aligned location above TASK_SIZE or the end of the kernel
 * stack has been reached.  A return address rewritten by the
 * function-graph tracer is mapped back to the real one.
 */
static int unwind_frame_kernel(struct stackframe *frame)
{
	unsigned long fp = frame->fp;

	if (kstack_end((void *)fp))
		return -EPERM;

	if ((fp & 0x3) || fp < TASK_SIZE)
		return -EPERM;

	*frame = *(struct stackframe *)fp;

	if (__kernel_text_address(frame->lr)) {
		int graph_idx = 0;

		frame->lr = ftrace_graph_ret_addr(NULL, &graph_idx,
						  frame->lr, NULL);
	}

	return 0;
}
/*
 * Record the return address of every kernel frame reachable from @fr
 * into the perf callchain, stopping when unwinding fails.
 */
static void notrace walk_stackframe(struct stackframe *fr,
		struct perf_callchain_entry_ctx *entry)
{
	for (;;) {
		perf_callchain_store(entry, fr->lr);
		if (unwind_frame_kernel(fr) < 0)
			break;
	}
}
/*
* Get the return address for a single stackframe and return a pointer to the
* next frame tail.
*/
static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
unsigned long fp, unsigned long reg_lr)
{
struct stackframe buftail;
unsigned long lr = 0;
unsigned long *user_frame_tail = (unsigned long *)fp;
/* Check accessibility of one struct frame_tail beyond */
if (!access_ok(user_frame_tail, sizeof(buftail)))
return 0;
if (__copy_from_user_inatomic(&buftail, user_frame_tail,
sizeof(buftail)))
return 0;
if (reg_lr != 0)
lr = reg_lr;
else
lr = buftail.lr;
fp = buftail.fp;
perf_callchain_store(entry, lr);
return fp;
}
/*
 * Called for user-mode samples when PERF_SAMPLE_CALLCHAIN is requested
 * (kernel/events/core.c:perf_prepare_sample()).
 *
 * Trigger perf_callchain_[user/kernel] with e.g.:
 *	$ perf record -e cpu-clock --call-graph fp ./program
 *	$ perf report --call-graph
 *
 * On C-SKY the sampled program and the C library must be built with
 * -mbacktrace, otherwise no function frame records exist on the user
 * stack.
 */
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	unsigned long fp;

	/* C-SKY does not support virtualization. */
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return;

	perf_callchain_store(entry, regs->pc);

	/*
	 * A leaf function has normally not yet saved lr in its frame,
	 * so take lr from pt_regs for the first step.  The value may
	 * be stale if lr was reused as a temporary at the sample point.
	 */
	fp = user_backtrace(entry, regs->regs[4], regs->lr);

	while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
		fp = user_backtrace(entry, fp, 0);
}
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
struct stackframe fr;
/* C-SKY does not support virtualization. */
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
pr_warn("C-SKY does not support perf in guest mode!");
return;
}
fr.fp = regs->regs[4];
fr.lr = regs->lr;
walk_stackframe(&fr, entry);
}
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/bug.h>
#include <asm/perf_regs.h>
#include <asm/ptrace.h>
/*
 * Fetch the value of register @idx from @regs for a perf sample, or 0
 * (with a one-shot warning) for an out-of-range index.
 *
 * NOTE(review): treats struct pt_regs as a flat array of u32 whose
 * order matches the perf_regs index space -- confirm the layout
 * against asm/ptrace.h and uapi/asm/perf_regs.h.
 */
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
	if (WARN_ON_ONCE((u32)idx >= PERF_REG_CSKY_MAX))
		return 0;

	return (u64)*((u32 *)regs + idx);
}
/* Mask bits above the last sampleable register are reserved. */
#define REG_RESERVED (~((1ULL << PERF_REG_CSKY_MAX) - 1))

/*
 * A user-supplied register mask is valid iff it is non-empty and
 * selects no reserved bits.
 */
int perf_reg_validate(u64 mask)
{
	if (mask && !(mask & REG_RESERVED))
		return 0;

	return -EINVAL;
}
/* C-SKY is 32-bit only, so every task uses the 32-bit register ABI. */
u64 perf_reg_abi(struct task_struct *task)
{
	return PERF_SAMPLE_REGS_ABI_32;
}
/*
 * Provide user-mode registers for a sample: hand perf the current
 * task's saved pt_regs directly instead of filling @regs_user_copy.
 */
void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs,
			struct pt_regs *regs_user_copy)
{
	regs_user->regs = task_pt_regs(current);
	regs_user->abi = perf_reg_abi(current);
}
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/audit.h>
#include <linux/elf.h> #include <linux/elf.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/kernel.h> #include <linux/kernel.h>
...@@ -11,6 +12,7 @@ ...@@ -11,6 +12,7 @@
#include <linux/sched/task_stack.h> #include <linux/sched/task_stack.h>
#include <linux/signal.h> #include <linux/signal.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/user.h> #include <linux/user.h>
...@@ -22,6 +24,9 @@ ...@@ -22,6 +24,9 @@
#include <abi/regdef.h> #include <abi/regdef.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/* sets the trace bits. */ /* sets the trace bits. */
#define TRACE_MODE_SI (1 << 14) #define TRACE_MODE_SI (1 << 14)
#define TRACE_MODE_RUN 0 #define TRACE_MODE_RUN 0
...@@ -207,35 +212,27 @@ long arch_ptrace(struct task_struct *child, long request, ...@@ -207,35 +212,27 @@ long arch_ptrace(struct task_struct *child, long request,
return ret; return ret;
} }
/* asmlinkage void syscall_trace_enter(struct pt_regs *regs)
* If process's system calls is traces, do some corresponding handles in this
* function before entering system call function and after exiting system call
* function.
*/
asmlinkage void syscall_trace(int why, struct pt_regs *regs)
{ {
long saved_why; if (test_thread_flag(TIF_SYSCALL_TRACE))
/* if (tracehook_report_syscall_entry(regs))
* Save saved_why, why is used to denote syscall entry/exit; syscall_set_nr(current, regs, -1);
* why = 0:entry, why = 1: exit
*/
saved_why = regs->regs[SYSTRACE_SAVENUM];
regs->regs[SYSTRACE_SAVENUM] = why;
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
? 0x80 : 0)); trace_sys_enter(regs, syscall_get_nr(current, regs));
/* audit_syscall_entry(regs_syscallid(regs), regs->a0, regs->a1, regs->a2, regs->a3);
* this isn't the same as continuing with a signal, but it will do }
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl asmlinkage void syscall_trace_exit(struct pt_regs *regs)
*/ {
if (current->exit_code) { audit_syscall_exit(regs);
send_sig(current->exit_code, current, 1);
current->exit_code = 0; if (test_thread_flag(TIF_SYSCALL_TRACE))
} tracehook_report_syscall_exit(regs, 0);
regs->regs[SYSTRACE_SAVENUM] = saved_why; if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_exit(regs, syscall_get_return_value(current, regs));
} }
extern void show_stack(struct task_struct *task, unsigned long *stack); extern void show_stack(struct task_struct *task, unsigned long *stack);
......
...@@ -142,18 +142,24 @@ void __init setup_arch(char **cmdline_p) ...@@ -142,18 +142,24 @@ void __init setup_arch(char **cmdline_p)
#endif #endif
} }
asmlinkage __visible void __init csky_start(unsigned int unused, void *param) unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
asmlinkage __visible void __init csky_start(unsigned int unused,
void *dtb_start)
{ {
/* Clean up bss section */ /* Clean up bss section */
memset(__bss_start, 0, __bss_stop - __bss_start); memset(__bss_start, 0, __bss_stop - __bss_start);
va_pa_offset = read_mmu_msa0() & ~(SSEG_SIZE - 1);
pre_trap_init(); pre_trap_init();
pre_mmu_init(); pre_mmu_init();
if (param == NULL) if (dtb_start == NULL)
early_init_dt_scan(__dtb_start); early_init_dt_scan(__dtb_start);
else else
early_init_dt_scan(param); early_init_dt_scan(dtb_start);
start_kernel(); start_kernel();
......
This diff is collapsed.
...@@ -15,9 +15,9 @@ ...@@ -15,9 +15,9 @@
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/version.h> #include <linux/version.h>
#include <linux/vt_kern.h> #include <linux/vt_kern.h>
#include <linux/kernel.h>
#include <linux/extable.h> #include <linux/extable.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <asm/hardirq.h> #include <asm/hardirq.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
...@@ -82,7 +82,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, ...@@ -82,7 +82,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
unsigned long pgd_base; unsigned long pgd_base;
pgd_base = tlb_get_pgd(); pgd_base = (unsigned long)__va(get_pgd());
pgd = (pgd_t *)pgd_base + offset; pgd = (pgd_t *)pgd_base + offset;
pgd_k = init_mm.pgd + offset; pgd_k = init_mm.pgd + offset;
...@@ -107,6 +107,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, ...@@ -107,6 +107,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
return; return;
} }
#endif #endif
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/* /*
* If we're in an interrupt or have no user * If we're in an interrupt or have no user
* context, we must not take the fault.. * context, we must not take the fault..
...@@ -154,10 +156,15 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, ...@@ -154,10 +156,15 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
goto bad_area; goto bad_area;
BUG(); BUG();
} }
if (fault & VM_FAULT_MAJOR) if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++; tsk->maj_flt++;
else perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
address);
} else {
tsk->min_flt++; tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
address);
}
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
return; return;
......
...@@ -397,6 +397,9 @@ if ($arch eq "x86_64") { ...@@ -397,6 +397,9 @@ if ($arch eq "x86_64") {
} elsif ($arch eq "nds32") { } elsif ($arch eq "nds32") {
$mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_NDS32_HI20_RELA\\s+_mcount\$"; $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_NDS32_HI20_RELA\\s+_mcount\$";
$alignment = 2; $alignment = 2;
} elsif ($arch eq "csky") {
$mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_CKCORE_PCREL_JSR_IMM26BY2\\s+_mcount\$";
$alignment = 2;
} else { } else {
die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD"; die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment