Commit df24e178 authored by Helge Deller's avatar Helge Deller

parisc: Add vDSO support

Add minimal vDSO support, which provides the signal trampoline helpers,
but none of the userspace syscall helpers like time wrappers.

The big benefit of this vDSO implementation is that we now don't need
an executable stack any longer. PA-RISC is one of the last
architectures where an executable stack was needed in order to implement
the signal trampolines by putting assembly instructions on the stack
which then gets executed. Instead the kernel will provide the relevant
code in the vDSO page and only put the pointers to the signal
information on the stack.

By dropping the need for executable stacks we avoid running into issues
with applications which want non-executable stacks for security reasons.
Additionally, alternative stacks on memory areas without exec
permissions are supported too.

This code is based on an initial implementation by Randolph Chung from 2006:
https://lore.kernel.org/linux-parisc/4544A34A.6080700@tausq.org/

I did the porting and lifted the code to current code base. Dave fixed
the unwind code so that gdb and glibc are able to backtrace through the
code. An additional patch to gdb will be pushed upstream by Dave.
Signed-off-by: default avatarHelge Deller <deller@gmx.de>
Signed-off-by: default avatarDave Anglin <dave.anglin@bell.net>
Cc: Randolph Chung <randolph@tausq.org>
Signed-off-by: default avatarHelge Deller <deller@gmx.de>
parent 14615ecc
......@@ -10,6 +10,7 @@ config PARISC
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAS_PTE_SPECIAL
select ARCH_NO_SG_CHAIN
select ARCH_SUPPORTS_HUGETLBFS if PA20
select ARCH_SUPPORTS_MEMORY_FAILURE
......
......@@ -44,6 +44,16 @@ endif
export LD_BFD
# Set default 32 bits cross compilers for vdso
CC_ARCHES_32 = hppa hppa2.0 hppa1.1
CC_SUFFIXES = linux linux-gnu unknown-linux-gnu
CROSS32_COMPILE := $(call cc-cross-prefix, \
$(foreach a,$(CC_ARCHES_32), \
$(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
CROSS32CC := $(CROSS32_COMPILE)gcc
export CROSS32CC
# Set default cross compiler for kernel build
ifdef cross_compiling
ifeq ($(CROSS_COMPILE),)
CC_SUFFIXES = linux linux-gnu unknown-linux-gnu
......@@ -163,6 +173,26 @@ vmlinuz: vmlinux
@$(KGZIP) -cf -9 $< > $@
endif
ifeq ($(KBUILD_EXTMOD),)
# We need to generate vdso-offsets.h before compiling certain files in kernel/.
# In order to do that, we should use the archprepare target, but we can't since
# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
# asm-offsets.h is built in prepare0, for which archprepare is a dependency.
# Therefore we need to generate the header after prepare0 has been made, hence
# this hack.
prepare: vdso_prepare
vdso_prepare: prepare0
$(if $(CONFIG_64BIT),$(Q)$(MAKE) \
$(build)=arch/parisc/kernel/vdso64 include/generated/vdso64-offsets.h)
$(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso32 include/generated/vdso32-offsets.h
endif
PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso $@
$(if $(CONFIG_COMPAT_VDSO), \
$(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso32 $@)
install:
$(CONFIG_SHELL) $(srctree)/arch/parisc/install.sh \
$(KERNELRELEASE) vmlinux System.map "$(INSTALL_PATH)"
......
......@@ -359,4 +359,19 @@ struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *);
#define arch_randomize_brk arch_randomize_brk
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int executable_stack);
#define VDSO_AUX_ENT(a, b) NEW_AUX_ENT(a, b)
#define VDSO_CURRENT_BASE current->mm->context.vdso_base
#define ARCH_DLINFO \
do { \
if (VDSO_CURRENT_BASE) { \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE);\
} \
} while (0)
#endif
......@@ -2,7 +2,9 @@
#ifndef _PARISC_MMU_H_
#define _PARISC_MMU_H_
/* On parisc, we store the space id here */
typedef unsigned long mm_context_t;
typedef struct {
unsigned long space_id;
unsigned long vdso_base;
} mm_context_t;
#endif /* _PARISC_MMU_H_ */
......@@ -20,7 +20,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
BUG_ON(atomic_read(&mm->mm_users) != 1);
mm->context = alloc_sid();
mm->context.space_id = alloc_sid();
return 0;
}
......@@ -28,22 +28,22 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
static inline void
destroy_context(struct mm_struct *mm)
{
free_sid(mm->context);
mm->context = 0;
free_sid(mm->context.space_id);
mm->context.space_id = 0;
}
static inline unsigned long __space_to_prot(mm_context_t context)
{
#if SPACEID_SHIFT == 0
return context << 1;
return context.space_id << 1;
#else
return context >> (SPACEID_SHIFT - 1);
return context.space_id >> (SPACEID_SHIFT - 1);
#endif
}
static inline void load_context(mm_context_t context)
{
mtsp(context, 3);
mtsp(context.space_id, 3);
mtctl(__space_to_prot(context), 8);
}
......@@ -89,8 +89,8 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
BUG_ON(next == &init_mm); /* Should never happen */
if (next->context == 0)
next->context = alloc_sid();
if (next->context.space_id == 0)
next->context.space_id = alloc_sid();
switch_mm(prev,next,current);
}
......
......@@ -70,7 +70,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
unsigned long flags;
purge_tlb_start(flags);
mtsp(mm->context, 1);
mtsp(mm->context.space_id, 1);
pdtlb(addr);
pitlb(addr);
purge_tlb_end(flags);
......@@ -219,9 +219,10 @@ extern void __update_cache(pte_t pte);
#define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT))
#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))
#define _PAGE_SPECIAL (_PAGE_DMB)
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
#define _PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL_EXEC (_PAGE_KERNEL_RO | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX (_PAGE_KERNEL_EXEC | _PAGE_WRITE)
......@@ -348,6 +349,7 @@ static inline void pud_clear(pud_t *pud) {
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
......@@ -355,6 +357,7 @@ static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_WRITE; ret
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; }
/*
* Huge pte definitions.
......
......@@ -236,7 +236,7 @@ on downward growing arches, it looks like this:
#define start_thread(regs, new_pc, new_sp) do { \
elf_addr_t *sp = (elf_addr_t *)new_sp; \
__u32 spaceid = (__u32)current->mm->context; \
__u32 spaceid = (__u32)current->mm->context.space_id; \
elf_addr_t pc = (elf_addr_t)new_pc | 3; \
elf_caddr_t *argv = (elf_caddr_t *)bprm->exec + 1; \
\
......
......@@ -2,16 +2,8 @@
#ifndef _ASM_PARISC_RT_SIGFRAME_H
#define _ASM_PARISC_RT_SIGFRAME_H
#define SIGRETURN_TRAMP 4
#define SIGRESTARTBLOCK_TRAMP 5
#define TRAMP_SIZE (SIGRETURN_TRAMP + SIGRESTARTBLOCK_TRAMP)
struct rt_sigframe {
/* XXX: Must match trampoline size in arch/parisc/kernel/signal.c
Secondary to that it must protect the ERESTART_RESTARTBLOCK
trampoline we left on the stack (we were bad and didn't
change sp so we could run really fast.) */
unsigned int tramp[TRAMP_SIZE];
unsigned int tramp[2]; /* holds original return address */
struct siginfo info;
struct ucontext uc;
};
......
......@@ -17,7 +17,7 @@ int __flush_tlb_range(unsigned long sid,
unsigned long start, unsigned long end);
#define flush_tlb_range(vma, start, end) \
__flush_tlb_range((vma)->vm_mm->context, start, end)
__flush_tlb_range((vma)->vm_mm->context.space_id, start, end)
#define flush_tlb_kernel_range(start, end) \
__flush_tlb_range(0, start, end)
......
......@@ -63,10 +63,6 @@
); \
__sys_res = (long)__res; \
} \
if ( (unsigned long)__sys_res >= (unsigned long)-4095 ){ \
errno = -__sys_res; \
__sys_res = -1; \
} \
__sys_res; \
})
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_VDSO_H__
#define __PARISC_VDSO_H__
#ifndef __ASSEMBLY__
#ifdef CONFIG_64BIT
#include <generated/vdso64-offsets.h>
#endif
#include <generated/vdso32-offsets.h>
#define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name))
#define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name))
extern struct vdso_data *vdso_data;
#endif /* __ASSEMBLY __ */
/* Default link addresses for the vDSOs */
#define VDSO_LBASE 0
#define VDSO_VERSION_STRING LINUX_5.18
#endif /* __PARISC_VDSO_H__ */
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_PARISC_AUXVEC_H
#define _UAPI_PARISC_AUXVEC_H
/* The vDSO location. */
#define AT_SYSINFO_EHDR 33
#endif /* _UAPI_PARISC_AUXVEC_H */
......@@ -39,3 +39,8 @@ obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KEXEC_CORE) += kexec.o relocate_kernel.o
obj-$(CONFIG_KEXEC_FILE) += kexec_file.o
# vdso
obj-y += vdso.o
obj-$(CONFIG_64BIT) += vdso64/
obj-y += vdso32/
......@@ -26,7 +26,11 @@
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/pdc.h>
#include <uapi/asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/rt_sigframe.h>
#include <linux/uaccess.h>
#include "signal32.h"
/* Add FRAME_SIZE to the size x and align it to y. All definitions
* that use align_frame will include space for a frame.
......@@ -218,6 +222,11 @@ int main(void)
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PRE_COUNT, offsetof(struct task_struct, thread_info.preempt_count));
BLANK();
DEFINE(ASM_SIGFRAME_SIZE, PARISC_RT_SIGFRAME_SIZE);
DEFINE(SIGFRAME_CONTEXT_REGS, offsetof(struct rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE);
DEFINE(ASM_SIGFRAME_SIZE32, PARISC_RT_SIGFRAME_SIZE32);
DEFINE(SIGFRAME_CONTEXT_REGS32, offsetof(struct compat_rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE32);
BLANK();
DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count));
......
......@@ -566,7 +566,7 @@ void flush_cache_mm(struct mm_struct *mm)
rp3440, etc. So, avoid it if the mm isn't too big. */
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
mm_total_size(mm) >= parisc_cache_flush_threshold) {
if (mm->context)
if (mm->context.space_id)
flush_tlb_all();
flush_cache_all();
return;
......@@ -581,7 +581,7 @@ void flush_cache_range(struct vm_area_struct *vma,
{
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
end - start >= parisc_cache_flush_threshold) {
if (vma->vm_mm->context)
if (vma->vm_mm->context.space_id)
flush_tlb_range(vma, start, end);
flush_cache_all();
return;
......@@ -594,7 +594,7 @@ void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
if (pfn_valid(pfn)) {
if (likely(vma->vm_mm->context)) {
if (likely(vma->vm_mm->context.space_id)) {
flush_tlb_page(vma, vmaddr);
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
} else {
......
This diff is collapsed.
......@@ -36,21 +36,12 @@ struct compat_regfile {
compat_int_t rf_sar;
};
#define COMPAT_SIGRETURN_TRAMP 4
#define COMPAT_SIGRESTARTBLOCK_TRAMP 5
#define COMPAT_TRAMP_SIZE (COMPAT_SIGRETURN_TRAMP + \
COMPAT_SIGRESTARTBLOCK_TRAMP)
struct compat_rt_sigframe {
/* XXX: Must match trampoline size in arch/parisc/kernel/signal.c
Secondary to that it must protect the ERESTART_RESTARTBLOCK
trampoline we left on the stack (we were bad and didn't
change sp so we could run really fast.) */
compat_uint_t tramp[COMPAT_TRAMP_SIZE];
compat_siginfo_t info;
struct compat_ucontext uc;
/* Hidden location of truncated registers, *must* be last. */
struct compat_regfile regs;
unsigned int tramp[2]; /* holds original return address */
compat_siginfo_t info;
struct compat_ucontext uc;
/* Hidden location of truncated registers, *must* be last. */
struct compat_regfile regs;
};
/*
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022 Helge Deller <deller@gmx.de>
*
* based on arch/s390/kernel/vdso.c which is
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/elf.h>
#include <linux/timekeeper_internal.h>
#include <linux/compat.h>
#include <linux/nsproxy.h>
#include <linux/time_namespace.h>
#include <linux/random.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/cacheflush.h>
extern char vdso32_start, vdso32_end;
extern char vdso64_start, vdso64_end;
static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *vma)
{
current->mm->context.vdso_base = vma->vm_start;
return 0;
}
#ifdef CONFIG_64BIT
/* Backing for the 64-bit vDSO vma; .pages is filled in by vdso_init(). */
static struct vm_special_mapping vdso64_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};
#endif

/* Backing for the 32-bit vDSO vma; .pages is filled in by vdso_init(). */
static struct vm_special_mapping vdso32_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};
/*
 * This is called from binfmt_elf, we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 *
 * Picks the 64- or 32-bit vDSO image depending on the task, maps it
 * near mm->mmap_base (slightly randomized when PF_RANDOMIZE is set)
 * and records the base address in mm->context.vdso_base.
 *
 * Returns 0 on success or a negative errno.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int executable_stack)
{
	unsigned long vdso_text_start, vdso_text_len, map_base;
	struct vm_special_mapping *vdso_mapping;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

#ifdef CONFIG_64BIT
	if (!is_compat_task()) {
		vdso_text_len = &vdso64_end - &vdso64_start;
		vdso_mapping = &vdso64_mapping;
	} else
#endif
	{
		vdso_text_len = &vdso32_end - &vdso32_start;
		vdso_mapping = &vdso32_mapping;
	}

	map_base = mm->mmap_base;
	if (current->flags & PF_RANDOMIZE)
		map_base -= (get_random_int() & 0x1f) * PAGE_SIZE;

	vdso_text_start = get_unmapped_area(NULL, map_base, vdso_text_len, 0, 0);
	/*
	 * get_unmapped_area() reports failure as a negative errno encoded
	 * in the returned address; don't hand that to
	 * _install_special_mapping().
	 */
	if (IS_ERR_VALUE(vdso_text_start)) {
		rc = vdso_text_start;
		goto out;
	}

	/* VM_MAYWRITE for COW so gdb can set breakpoints */
	vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);
	if (IS_ERR(vma)) {
		do_munmap(mm, vdso_text_start, PAGE_SIZE, NULL);
		rc = PTR_ERR(vma);
	} else {
		current->mm->context.vdso_base = vdso_text_start;
		rc = 0;
	}
out:
	mmap_write_unlock(mm);
	return rc;
}
/*
 * Build a NULL-terminated page array for the vDSO image in [start, end).
 * kcalloc() zero-fills the extra slot, which serves as the terminator.
 * Panics on allocation failure (boot-time only).
 */
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int nr_pages = (end - start) >> PAGE_SHIFT;
	struct page **list;
	int n;

	list = kcalloc(nr_pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!list)
		panic("%s: Cannot allocate page list for VDSO", __func__);

	for (n = 0; n < nr_pages; n++)
		list[n] = virt_to_page(start + n * PAGE_SIZE);

	return list;
}
/*
 * Boot-time setup: populate the .pages arrays of the vDSO special
 * mappings from the embedded vdso images. The 32-bit image is needed
 * on 32-bit kernels and on 64-bit kernels with compat support.
 */
static int __init vdso_init(void)
{
#ifdef CONFIG_64BIT
	vdso64_mapping.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);
#endif
	if (!IS_ENABLED(CONFIG_64BIT) || IS_ENABLED(CONFIG_COMPAT))
		vdso32_mapping.pages = vdso_setup_pages(&vdso32_start, &vdso32_end);

	return 0;
}
arch_initcall(vdso_init);
# List of files in the vdso, has to be asm only for now
obj-vdso32 = note.o sigtramp.o restart_syscall.o
# Build rules
targets := $(obj-vdso32) vdso32.so
obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
ccflags-y := -shared -fno-common -fbuiltin -mno-fast-indirect-calls -O2 -mno-long-calls
# -march=1.1 -mschedule=7100LC
ccflags-y += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
$(call ld-option, -Wl$(comma)--hash-style=sysv)
asflags-y := -D__VDSO32__ -s
KBUILD_AFLAGS += -DBUILD_VDSO
KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING
VDSO_LIBGCC := $(shell $(CROSS32CC) -print-libgcc-file-name)
obj-y += vdso32_wrapper.o
extra-y += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C # -U$(ARCH)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so FORCE
# Force dependency (incbin is bad)
# link rule for the .so file, .lds has to be first
$(obj)/vdso32.so: $(src)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC)
$(call if_changed,vdso32ld)
# assembly rules for the .S files
$(obj-vdso32): %.o: %.S FORCE
$(call if_changed_dep,vdso32as)
$(obj-cvdso32): %.o: %.c FORCE
$(call if_changed_dep,vdso32cc)
# actual build commands
quiet_cmd_vdso32ld = VDSO32L $@
cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@
quiet_cmd_vdso32as = VDSO32A $@
cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $<
quiet_cmd_vdso32cc = VDSO32C $@
cmd_vdso32cc = $(CROSS32CC) $(c_flags) -c -fPIC -mno-fast-indirect-calls -o $@ $<
# Generate VDSO offsets using helper script
gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
include/generated/vdso32-offsets.h: $(obj)/vdso32.so FORCE
$(call if_changed,vdsosym)
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
#
# Match symbols in the DSO that look like VDSO_*; produce a header file
# of constant offsets into the shared object.
#
# Doing this inside the Makefile will break the $(filter-out) function,
# causing Kbuild to rebuild the vdso-offsets header file every time.
#
# Inspired by arm64 version.
#
LC_ALL=C
sed -n 's/\([0-9a-f]*\) . __kernel_\(.*\)/\#define vdso32_offset_\2\t0x\1/p'
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
* Here we can supply some information useful to userland.
*/
#include <linux/uts.h>
#include <linux/version.h>
#define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type) \
.section name, flags; \
.balign 4; \
.long 1f - 0f; /* name length */ \
.long 3f - 2f; /* data length */ \
.long type; /* note type */ \
0: .asciz vendor; /* vendor name */ \
1: .balign 4; \
2:
#define ASM_ELF_NOTE_END \
3: .balign 4; /* pad out section */ \
.previous
ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0)
.long LINUX_VERSION_CODE
ASM_ELF_NOTE_END
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Syscall restart trampoline for 32 and 64 bits processes.
*
* Copyright (C) 2018-2022 Helge Deller <deller@gmx.de>
* Copyright (C) 2022 John David Anglin <dave.anglin@bell.net>
*/
#include <asm/unistd.h>
#include <asm/vdso.h>
#include <linux/linkage.h>
.text
ENTRY_CFI(__kernel_restart_syscall)
/*
* Setup a trampoline to restart the syscall
* with __NR_restart_syscall
*/
/* load return pointer */
#if defined(__VDSO64__)
ldd 0(%sp), %r31
#elif defined(__VDSO32__)
ldw 0(%sp), %r31
#endif
be 0x100(%sr2, %r0)
ldi __NR_restart_syscall, %r20
ENDPROC_CFI(__kernel_restart_syscall)
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Signal trampolines for 32 bit processes.
*
* Copyright (C) 2006 Randolph Chung <tausq@debian.org>
* Copyright (C) 2018-2022 Helge Deller <deller@gmx.de>
* Copyright (C) 2022 John David Anglin <dave.anglin@bell.net>
*/
#include <asm/unistd.h>
#include <linux/linkage.h>
#include <generated/asm-offsets.h>
.text
/* Gdb expects the trampoline is on the stack and the pc is offset from
a 64-byte boundary by 0, 4 or 5 instructions. Since the vdso trampoline
is not on the stack, we need a new variant with different offsets and
data to tell gdb where to find the signal context on the stack.
Here we put the offset to the context data at the start of the trampoline
region and offset the first trampoline by 2 instructions. Please do
not change the trampoline as the code in gdb depends on the following
instruction sequence exactly.
*/
.align 64
.word SIGFRAME_CONTEXT_REGS32
/* The nop here is a hack. The dwarf2 unwind routines subtract 1 from
the return address to get an address in the middle of the presumed
call instruction. Since we don't have a call here, we artifically
extend the range covered by the unwind info by adding a nop before
the real start.
*/
nop
.globl __kernel_sigtramp_rt
.type __kernel_sigtramp_rt, @function
__kernel_sigtramp_rt:
.proc
.callinfo FRAME=ASM_SIGFRAME_SIZE32,CALLS,SAVE_RP
.entry
.Lsigrt_start = . - 4
0: ldi 0, %r25 /* (in_syscall=0) */
ldi __NR_rt_sigreturn, %r20
ble 0x100(%sr2, %r0)
nop
1: ldi 1, %r25 /* (in_syscall=1) */
ldi __NR_rt_sigreturn, %r20
ble 0x100(%sr2, %r0)
nop
.Lsigrt_end:
.exit
.procend
.size __kernel_sigtramp_rt,.-__kernel_sigtramp_rt
.section .eh_frame,"a",@progbits
/* This is where the mcontext_t struct can be found on the stack. */
#define PTREGS SIGFRAME_CONTEXT_REGS32 /* 32-bit process offset is -672 */
/* Register REGNO can be found at offset OFS of the mcontext_t structure. */
.macro rsave regno,ofs
.byte 0x05 /* DW_CFA_offset_extended */
.uleb128 \regno; /* regno */
.uleb128 \ofs /* factored offset */
.endm
.Lcie:
.long .Lcie_end - .Lcie_start
.Lcie_start:
.long 0 /* CIE ID */
.byte 1 /* Version number */
.stringz "zRS" /* NUL-terminated augmentation string */
.uleb128 4 /* Code alignment factor */
.sleb128 4 /* Data alignment factor */
.byte 89 /* Return address register column, iaoq[0] */
.uleb128 1 /* Augmentation value length */
.byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
.byte 0x0f /* DW_CFA_def_cfa_expresion */
.uleb128 9f - 1f /* length */
1:
.byte 0x8e /* DW_OP_breg30 */
.sleb128 PTREGS
9:
.balign 4
.Lcie_end:
.long .Lfde0_end - .Lfde0_start
.Lfde0_start:
.long .Lfde0_start - .Lcie /* CIE pointer. */
.long .Lsigrt_start - . /* PC start, length */
.long .Lsigrt_end - .Lsigrt_start
.uleb128 0 /* Augmentation */
/* General registers */
rsave 1, 2
rsave 2, 3
rsave 3, 4
rsave 4, 5
rsave 5, 6
rsave 6, 7
rsave 7, 8
rsave 8, 9
rsave 9, 10
rsave 10, 11
rsave 11, 12
rsave 12, 13
rsave 13, 14
rsave 14, 15
rsave 15, 16
rsave 16, 17
rsave 17, 18
rsave 18, 19
rsave 19, 20
rsave 20, 21
rsave 21, 22
rsave 22, 23
rsave 23, 24
rsave 24, 25
rsave 25, 26
rsave 26, 27
rsave 27, 28
rsave 28, 29
rsave 29, 30
rsave 30, 31
rsave 31, 32
/* Floating-point registers */
rsave 32, 42
rsave 33, 43
rsave 34, 44
rsave 35, 45
rsave 36, 46
rsave 37, 47
rsave 38, 48
rsave 39, 49
rsave 40, 50
rsave 41, 51
rsave 42, 52
rsave 43, 53
rsave 44, 54
rsave 45, 55
rsave 46, 56
rsave 47, 57
rsave 48, 58
rsave 49, 59
rsave 50, 60
rsave 51, 61
rsave 52, 62
rsave 53, 63
rsave 54, 64
rsave 55, 65
rsave 56, 66
rsave 57, 67
rsave 58, 68
rsave 59, 69
rsave 60, 70
rsave 61, 71
rsave 62, 72
rsave 63, 73
rsave 64, 74
rsave 65, 75
rsave 66, 76
rsave 67, 77
rsave 68, 78
rsave 69, 79
rsave 70, 80
rsave 71, 81
rsave 72, 82
rsave 73, 83
rsave 74, 84
rsave 75, 85
rsave 76, 86
rsave 77, 87
rsave 78, 88
rsave 79, 89
rsave 80, 90
rsave 81, 91
rsave 82, 92
rsave 83, 93
rsave 84, 94
rsave 85, 95
rsave 86, 96
rsave 87, 97
/* SAR register */
rsave 88, 102
/* iaoq[0] return address register */
rsave 89, 100
.balign 4
.Lfde0_end:
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This is the infamous ld script for the 32 bits vdso library
*/
#include <asm/vdso.h>
#include <asm/page.h>
/* Default link addresses for the vDSOs */
OUTPUT_FORMAT("elf32-hppa-linux")
OUTPUT_ARCH(hppa)
ENTRY(_start)
SECTIONS
{
. = VDSO_LBASE + SIZEOF_HEADERS;
.hash : { *(.hash) } :text
.gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.note : { *(.note.*) } :text :note
. = ALIGN (16);
.text :
{
*(.text .stub .text.* .gnu.linkonce.t.*)
}
PROVIDE (__etext = .);
PROVIDE (_etext = .);
PROVIDE (etext = .);
/* Other stuff is appended to the text segment: */
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
.rodata2 : { *(.data.rel.ro) }
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
.eh_frame : { KEEP (*(.eh_frame)) } :text
.gcc_except_table : { *(.gcc_except_table) }
.fixup : { *(.fixup) }
.dynamic : { *(.dynamic) } :text :dynamic
.plt : { *(.plt) }
.got : { *(.got) }
_end = .;
__end = .;
PROVIDE (end = .);
/* Stabs debugging sections are here too
*/
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
.debug_srcinfo 0 : { *(.debug_srcinfo) }
.debug_sfnames 0 : { *(.debug_sfnames) }
.debug_aranges 0 : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) }
.debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
.debug_abbrev 0 : { *(.debug_abbrev) }
.debug_line 0 : { *(.debug_line) }
.debug_frame 0 : { *(.debug_frame) }
.debug_str 0 : { *(.debug_str) }
.debug_loc 0 : { *(.debug_loc) }
.debug_macinfo 0 : { *(.debug_macinfo) }
.debug_weaknames 0 : { *(.debug_weaknames) }
.debug_funcnames 0 : { *(.debug_funcnames) }
.debug_typenames 0 : { *(.debug_typenames) }
.debug_varnames 0 : { *(.debug_varnames) }
/DISCARD/ : { *(.note.GNU-stack) }
/DISCARD/ : { *(.data .data.* .gnu.linkonce.d.* .sdata*) }
/DISCARD/ : { *(.bss .sbss .dynbss .dynsbss) }
}
PHDRS
{
text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
note PT_NOTE FLAGS(4); /* PF_R */
dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
eh_frame_hdr PT_GNU_EH_FRAME;
}
/*
* This controls what symbols we export from the DSO.
*/
VERSION
{
VDSO_VERSION_STRING {
global:
__kernel_sigtramp_rt32;
__kernel_restart_syscall32;
local: *;
};
}
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/page.h>
__PAGE_ALIGNED_DATA
.globl vdso32_start, vdso32_end
.balign PAGE_SIZE
vdso32_start:
.incbin "arch/parisc/kernel/vdso32/vdso32.so"
.balign PAGE_SIZE
vdso32_end:
.previous
# List of files in the vdso, has to be asm only for now
obj-vdso64 = note.o sigtramp.o restart_syscall.o
# Build rules
targets := $(obj-vdso64) vdso64.so
obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
$(call ld-option, -Wl$(comma)--hash-style=sysv)
asflags-y := -D__VDSO64__ -s
KBUILD_AFLAGS += -DBUILD_VDSO
KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING
VDSO_LIBGCC := $(shell $(CC) -print-libgcc-file-name)
obj-y += vdso64_wrapper.o
extra-y += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so FORCE
# Force dependency (incbin is bad)
# link rule for the .so file, .lds has to be first
$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64) $(VDSO_LIBGCC)
$(call if_changed,vdso64ld)
# assembly rules for the .S files
$(obj-vdso64): %.o: %.S FORCE
$(call if_changed_dep,vdso64as)
# actual build commands
quiet_cmd_vdso64ld = VDSO64L $@
cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
quiet_cmd_vdso64as = VDSO64A $@
cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
# Generate VDSO offsets using helper script
gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
include/generated/vdso64-offsets.h: $(obj)/vdso64.so FORCE
$(call if_changed,vdsosym)
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
#
# Match symbols in the DSO that look like VDSO_*; produce a header file
# of constant offsets into the shared object.
#
# Doing this inside the Makefile will break the $(filter-out) function,
# causing Kbuild to rebuild the vdso-offsets header file every time.
#
# Inspired by arm64 version.
#
LC_ALL=C
sed -n 's/\([0-9a-f]*\) . __kernel_\(.*\)/\#define vdso64_offset_\2\t0x\1/p'
/* SPDX-License-Identifier: GPL-2.0 */
#include "../vdso32/note.S"
/* SPDX-License-Identifier: GPL-2.0 */
#include "../vdso32/restart_syscall.S"
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Signal trampolines for 64 bit processes.
*
* Copyright (C) 2006 Randolph Chung <tausq@debian.org>
* Copyright (C) 2018-2022 Helge Deller <deller@gmx.de>
* Copyright (C) 2022 John David Anglin <dave.anglin@bell.net>
*/
#include <asm/unistd.h>
#include <linux/linkage.h>
#include <generated/asm-offsets.h>
.text
/* Gdb expects the trampoline is on the stack and the pc is offset from
a 64-byte boundary by 0, 4 or 5 instructions. Since the vdso trampoline
is not on the stack, we need a new variant with different offsets and
data to tell gdb where to find the signal context on the stack.
Here we put the offset to the context data at the start of the trampoline
region and offset the first trampoline by 2 instructions. Please do
not change the trampoline as the code in gdb depends on the following
instruction sequence exactly.
*/
.align 64
.word SIGFRAME_CONTEXT_REGS
/* The nop here is a hack. The dwarf2 unwind routines subtract 1 from
the return address to get an address in the middle of the presumed
call instruction. Since we don't have a call here, we artifically
extend the range covered by the unwind info by adding a nop before
the real start.
*/
nop
.globl __kernel_sigtramp_rt
.type __kernel_sigtramp_rt, @function
__kernel_sigtramp_rt:
.proc
.callinfo FRAME=ASM_SIGFRAME_SIZE,CALLS,SAVE_RP
.entry
.Lsigrt_start = . - 4
0: ldi 0, %r25 /* (in_syscall=0) */
ldi __NR_rt_sigreturn, %r20
ble 0x100(%sr2, %r0)
nop
1: ldi 1, %r25 /* (in_syscall=1) */
ldi __NR_rt_sigreturn, %r20
ble 0x100(%sr2, %r0)
nop
.Lsigrt_end:
.exit
.procend
.size __kernel_sigtramp_rt,.-__kernel_sigtramp_rt
.section .eh_frame,"a",@progbits
/* This is where the mcontext_t struct can be found on the stack. */
#define PTREGS SIGFRAME_CONTEXT_REGS /* 64-bit process offset is -720 */
/* Register REGNO can be found at offset OFS of the mcontext_t structure. */
.macro rsave regno,ofs
.byte 0x05 /* DW_CFA_offset_extended */
.uleb128 \regno; /* regno */
.uleb128 \ofs /* factored offset */
.endm
.Lcie:
.long .Lcie_end - .Lcie_start
.Lcie_start:
.long 0 /* CIE ID */
.byte 1 /* Version number */
.stringz "zRS" /* NUL-terminated augmentation string */
.uleb128 4 /* Code alignment factor */
.sleb128 8 /* Data alignment factor */
.byte 61 /* Return address register column, iaoq[0] */
.uleb128 1 /* Augmentation value length */
.byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
.byte 0x0f /* DW_CFA_def_cfa_expresion */
.uleb128 9f - 1f /* length */
1:
.byte 0x8e /* DW_OP_breg30 */
.sleb128 PTREGS
9:
.balign 8
.Lcie_end:
.long .Lfde0_end - .Lfde0_start
.Lfde0_start:
.long .Lfde0_start - .Lcie /* CIE pointer. */
.long .Lsigrt_start - . /* PC start, length */
.long .Lsigrt_end - .Lsigrt_start
.uleb128 0 /* Augmentation */
/* General registers */
rsave 1, 2
rsave 2, 3
rsave 3, 4
rsave 4, 5
rsave 5, 6
rsave 6, 7
rsave 7, 8
rsave 8, 9
rsave 9, 10
rsave 10, 11
rsave 11, 12
rsave 12, 13
rsave 13, 14
rsave 14, 15
rsave 15, 16
rsave 16, 17
rsave 17, 18
rsave 18, 19
rsave 19, 20
rsave 20, 21
rsave 21, 22
rsave 22, 23
rsave 23, 24
rsave 24, 25
rsave 25, 26
rsave 26, 27
rsave 27, 28
rsave 28, 29
rsave 29, 30
rsave 30, 31
rsave 31, 32
/* Floating-point registers */
rsave 32, 36
rsave 33, 37
rsave 34, 38
rsave 35, 39
rsave 36, 40
rsave 37, 41
rsave 38, 42
rsave 39, 43
rsave 40, 44
rsave 41, 45
rsave 42, 46
rsave 43, 47
rsave 44, 48
rsave 45, 49
rsave 46, 50
rsave 47, 51
rsave 48, 52
rsave 49, 53
rsave 50, 54
rsave 51, 55
rsave 52, 56
rsave 53, 57
rsave 54, 58
rsave 55, 59
rsave 56, 60
rsave 57, 61
rsave 58, 62
rsave 59, 63
/* SAR register */
rsave 60, 67
/* iaoq[0] return address register */
rsave 61, 65
.balign 8
.Lfde0_end:
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This is the infamous ld script for the 64 bits vdso library
*/
#include <asm/vdso.h>
/* Default link addresses for the vDSOs */
OUTPUT_FORMAT("elf64-hppa-linux")
OUTPUT_ARCH(hppa:hppa2.0w)
ENTRY(_start)
SECTIONS
{
. = VDSO_LBASE + SIZEOF_HEADERS;
.hash : { *(.hash) } :text
.gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.note : { *(.note.*) } :text :note
. = ALIGN (16);
.text :
{
*(.text .stub .text.* .gnu.linkonce.t.*)
}
PROVIDE (__etext = .);
PROVIDE (_etext = .);
PROVIDE (etext = .);
/* Other stuff is appended to the text segment: */
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
.eh_frame : { KEEP (*(.eh_frame)) } :text
.gcc_except_table : { *(.gcc_except_table) }
.fixup : { *(.fixup) }
.dynamic : { *(.dynamic) } :text :dynamic
.plt : { *(.plt) }
.got : { *(.got) }
_end = .;
__end = .;
PROVIDE (end = .);
/* Stabs debugging sections are here too
*/
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
.debug_srcinfo 0 : { *(.debug_srcinfo) }
.debug_sfnames 0 : { *(.debug_sfnames) }
.debug_aranges 0 : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) }
.debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
.debug_abbrev 0 : { *(.debug_abbrev) }
.debug_line 0 : { *(.debug_line) }
.debug_frame 0 : { *(.debug_frame) }
.debug_str 0 : { *(.debug_str) }
.debug_loc 0 : { *(.debug_loc) }
.debug_macinfo 0 : { *(.debug_macinfo) }
.debug_weaknames 0 : { *(.debug_weaknames) }
.debug_funcnames 0 : { *(.debug_funcnames) }
.debug_typenames 0 : { *(.debug_typenames) }
.debug_varnames 0 : { *(.debug_varnames) }
/DISCARD/ : { *(.note.GNU-stack) }
/DISCARD/ : { *(.data .data.* .gnu.linkonce.d.* .sdata*) }
/DISCARD/ : { *(.bss .sbss .dynbss .dynsbss) }
}
PHDRS
{
text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
note PT_NOTE FLAGS(4); /* PF_R */
dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
eh_frame_hdr PT_GNU_EH_FRAME;
}
/*
* This controls what symbols we export from the DSO.
*/
VERSION
{
VDSO_VERSION_STRING {
global:
__kernel_sigtramp_rt64;
__kernel_restart_syscall64;
local: *;
};
}
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/page.h>
__PAGE_ALIGNED_DATA
.globl vdso64_start, vdso64_end
.balign PAGE_SIZE
vdso64_start:
.incbin "arch/parisc/kernel/vdso64/vdso64.so"
.balign PAGE_SIZE
vdso64_end:
.previous
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment